#include "fd_prog_load.h"
#include "fd_progcache_user.h"
#include "fd_progcache_rec.h"
#include "../../util/racesan/fd_racesan_target.h"

FD_TL fd_progcache_metrics_t fd_progcache_metrics_default;

fd_progcache_t *
fd_progcache_join( fd_progcache_t * ljoin,
                   void *           shfunk,
                   uchar *          scratch,
                   ulong            scratch_sz ) {
  if( FD_UNLIKELY( !ljoin ) ) {
    FD_LOG_WARNING(( "NULL ljoin" ));
    return NULL;
  }
  if( FD_UNLIKELY( !shfunk ) ) {
    FD_LOG_WARNING(( "NULL shfunk" ));
    return NULL;
  }
  if( FD_LIKELY( scratch_sz ) ) {
    if( FD_UNLIKELY( !scratch ) ) {
      FD_LOG_WARNING(( "NULL scratch" ));
      return NULL;
    }
    if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)scratch, FD_PROGCACHE_SCRATCH_ALIGN ) ) ) {
      FD_LOG_WARNING(( "misaligned scratch" ));
      return NULL;
    }
  }
  memset( ljoin, 0, sizeof(fd_progcache_t) );
  if( FD_UNLIKELY( !fd_funk_join( ljoin->funk, shfunk ) ) ) return NULL;

  ljoin->metrics    = &fd_progcache_metrics_default;
  ljoin->scratch    = scratch;
  ljoin->scratch_sz = scratch_sz;

  return ljoin;
}

void *
fd_progcache_leave( fd_progcache_t * cache,
                    void **          opt_shfunk ) {
  if( FD_UNLIKELY( !cache ) ) {
    FD_LOG_WARNING(( "NULL cache" ));
    return NULL;
  }
  if( FD_UNLIKELY( !fd_funk_leave( cache->funk, opt_shfunk ) ) ) return NULL;
  cache->scratch    = NULL;
  cache->scratch_sz = 0UL;
  return cache;
}
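
/* Example usage (a minimal sketch; shfunk/scratch setup is assumed to
   be done elsewhere, error handling elided):

     fd_progcache_t cache[1];
     if( !fd_progcache_join( cache, shfunk, scratch, scratch_sz ) ) { ... }
     ... use cache ...
     void * shfunk_out;
     fd_progcache_leave( cache, &shfunk_out ); */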

/* fd_progcache_load_fork pivots the progcache object to the selected
   fork (identified by tip XID).

   Populates cache->fork, which is an array-backed list of XIDs sorted
   newest to oldest. Cache lookups only respect records with an XID
   present in that list.

   For any given xid, the epoch_slot0 is assumed to stay constant. */
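
/* Illustration (hypothetical slots): given a fork A(slot 90) -> B(95)
   -> C(100) and tip xid C, the fork list becomes { C, B, A } (plus the
   root sentinel if the last published transaction is from the current
   epoch) and fork_depth is set accordingly. */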

static void
fd_progcache_load_fork_slow( fd_progcache_t *          cache,
                             fd_funk_txn_xid_t const * xid,
                             ulong                     epoch_slot0 ) {
  fd_funk_txn_xid_t next_xid = *xid;
  if( FD_UNLIKELY( next_xid.ul[0]<epoch_slot0 ) ) {
    FD_LOG_CRIT(( "fd_progcache_load_fork: attempted to load xid=%lu:%lu, which predates first slot of bank's epoch (epoch_slot0=%lu)",
                  next_xid.ul[0], next_xid.ul[1], epoch_slot0 ));
  }

  /* Walk transaction graph, recovering from overruns on-the-fly */
  cache->fork_depth = 0UL;

  ulong txn_max = fd_funk_txn_pool_ele_max( cache->funk->txn_pool );
  ulong i;
  for( i=0UL; i<FD_PROGCACHE_DEPTH_MAX; i++ ) {
    fd_funk_txn_map_query_t query[1];
    fd_funk_txn_t const *   candidate;
    fd_funk_txn_xid_t       found_xid;
    ulong                   parent_idx;
    fd_funk_txn_xid_t       parent_xid;
retry:
    /* Speculatively look up transaction from map */
    for(;;) {
      int query_err = fd_funk_txn_map_query_try( cache->funk->txn_map, &next_xid, NULL, query, 0 );
      if( FD_UNLIKELY( query_err==FD_MAP_ERR_AGAIN ) ) {
        /* FIXME random backoff */
        FD_SPIN_PAUSE();
        continue;
      }
      if( query_err==FD_MAP_ERR_KEY ) goto done;
      if( FD_UNLIKELY( query_err!=FD_MAP_SUCCESS ) ) {
        FD_LOG_CRIT(( "fd_funk_txn_map_query_try failed: %i-%s", query_err, fd_map_strerror( query_err ) ));
      }
      break;
    }

    /* Look up parent transaction while recovering from overruns
       FIXME This would be a lot easier if transactions specified
       parent by XID instead of by pointer ... */
    candidate = fd_funk_txn_map_query_ele_const( query );
    FD_COMPILER_MFENCE();
    do {
      found_xid  = FD_VOLATILE_CONST( candidate->xid );
      parent_idx = fd_funk_txn_idx( FD_VOLATILE_CONST( candidate->parent_cidx ) );
      if( parent_idx<txn_max ) {
        FD_COMPILER_MFENCE();
        fd_funk_txn_t const * parent = &cache->funk->txn_pool->ele[ parent_idx ];
        parent_xid = FD_VOLATILE_CONST( parent->xid );
        FD_COMPILER_MFENCE();
      }
      parent_idx = fd_funk_txn_idx( FD_VOLATILE_CONST( candidate->parent_cidx ) );
    } while(0);
    FD_COMPILER_MFENCE();

    /* Verify speculative loads by ensuring txn still exists in map */
    if( FD_UNLIKELY( fd_funk_txn_map_query_test( query )!=FD_MAP_SUCCESS ) ) {
      FD_SPIN_PAUSE();
      goto retry;
    }

    if( FD_UNLIKELY( !fd_funk_txn_xid_eq( &found_xid, &next_xid ) ) ) {
      FD_LOG_CRIT(( "fd_progcache_load_fork_slow detected memory corruption: expected xid %lu:%lu at %p, found %lu:%lu",
                    next_xid.ul[0], next_xid.ul[1],
                    (void *)candidate,
                    found_xid.ul[0], found_xid.ul[1] ));
    }

    cache->fork[ i ] = next_xid;
    if( fd_funk_txn_idx_is_null( parent_idx ) ||
        next_xid.ul[0]<epoch_slot0 ) {
      /* Reached root or fork graph node is from previous epoch */
      i++;
      break;
    }
    next_xid = parent_xid;
  }

done:
  cache->fork_depth = i;

  /* Only include published/rooted records in lookups if the last
     published transaction is from the current epoch. */
  if( fd_funk_last_publish( cache->funk )->ul[0] >= epoch_slot0 &&
      cache->fork_depth < FD_PROGCACHE_DEPTH_MAX ) {
    fd_funk_txn_xid_set_root( &cache->fork[ cache->fork_depth++ ] );
  }
}

static inline void
fd_progcache_load_fork( fd_progcache_t *          cache,
                        fd_funk_txn_xid_t const * xid,
                        ulong                     epoch_slot0 ) {
  /* Skip if already on the correct fork */
  if( FD_LIKELY( (!!cache->fork_depth) & (!!fd_funk_txn_xid_eq( &cache->fork[ 0 ], xid ) ) ) ) return;
  cache->metrics->fork_switch_cnt++;
  fd_progcache_load_fork_slow( cache, xid, epoch_slot0 ); /* switch fork */
}

/* fd_progcache_query searches for a program cache entry on the current
   fork. Stops short of an epoch boundary. */

static int
fd_progcache_fork_has_xid( fd_progcache_t const *    cache,
                           fd_funk_txn_xid_t const * rec_xid ) {
  /* FIXME unroll this a little */
  ulong const fork_depth = cache->fork_depth;
  for( ulong i=0UL; i<fork_depth; i++ ) {
    if( fd_funk_txn_xid_eq( &cache->fork[i], rec_xid ) ) return 1;
  }
  return 0;
}

static int
fd_progcache_search_chain( fd_progcache_t const *    cache,
                           ulong                     chain_idx,
                           fd_funk_rec_key_t const * key,
                           ulong                     epoch_slot0,
                           fd_funk_rec_t **          out_rec ) {
  *out_rec = NULL;

  fd_funk_rec_map_shmem_t * shmap = cache->funk->rec_map->map;
  fd_funk_rec_map_shmem_private_chain_t const * chain_tbl = fd_funk_rec_map_shmem_private_chain_const( shmap, 0UL );
  fd_funk_rec_map_shmem_private_chain_t const * chain     = chain_tbl + chain_idx;
  fd_funk_rec_t * rec_tbl   = cache->funk->rec_pool->ele;
  ulong           rec_max   = fd_funk_rec_pool_ele_max( cache->funk->rec_pool );
  ulong           ver_cnt   = FD_VOLATILE_CONST( chain->ver_cnt );
  ulong           root_slot = fd_funk_last_publish( cache->funk )->ul[0];

  /* Start a speculative transaction for the chain containing revisions
     of the program cache key we are looking for. */
  ulong cnt = fd_funk_rec_map_private_vcnt_cnt( ver_cnt );
  if( FD_UNLIKELY( fd_funk_rec_map_private_vcnt_ver( ver_cnt )&1 ) ) {
    return FD_MAP_ERR_AGAIN; /* chain is locked */
  }
  FD_COMPILER_MFENCE();
  uint ele_idx = chain->head_cidx;

  /* Walk the map chain, remember the best entry */
  fd_funk_rec_t * best      = NULL;
  long            best_slot = -1L;
  for( ulong i=0UL; i<cnt; i++, ele_idx=rec_tbl[ ele_idx ].map_next ) {
    fd_funk_rec_t * rec = &rec_tbl[ ele_idx ];

    /* Skip over unrelated records (hash collision) */
    if( FD_UNLIKELY( !fd_funk_rec_key_eq( rec->pair.key, key ) ) ) continue;

    /* Skip over records from an older epoch (FIXME could bail early
       here if the chain is ordered) */
    ulong found_slot = rec->pair.xid->ul[0];
    if( found_slot==ULONG_MAX ) found_slot = root_slot;

    if( FD_UNLIKELY( found_slot<epoch_slot0 ) ) continue;

    /* Skip over records that are older than what we already have */
    if( FD_UNLIKELY( (long)found_slot<best_slot ) ) continue;

    /* Confirm that record is part of the current fork
       FIXME this has bad performance / pointer-chasing */
    if( FD_UNLIKELY( !fd_progcache_fork_has_xid( cache, rec->pair.xid ) ) ) continue;

    best      = rec;
    best_slot = (long)found_slot;
    if( FD_UNLIKELY( rec->map_next==ele_idx ) ) {
      FD_LOG_CRIT(( "fd_progcache_search_chain detected cycle" ));
    }
    if( rec->map_next >= rec_max ) {
      if( FD_UNLIKELY( !fd_funk_rec_map_private_idx_is_null( rec->map_next ) ) ) {
        FD_LOG_CRIT(( "fd_progcache_search_chain detected memory corruption: rec->map_next %u is out of bounds (rec_max %lu)",
                      rec->map_next, rec_max ));
      }
    }
  }

  /* Retry if we were overrun */
  if( FD_UNLIKELY( FD_VOLATILE_CONST( chain->ver_cnt )!=ver_cnt ) ) {
    return FD_MAP_ERR_AGAIN;
  }

  *out_rec = best;
  return FD_MAP_SUCCESS;
}
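
/* Note: the ver_cnt checks above form a seqlock-style speculative read:
   an odd version means a writer currently holds the chain lock, and a
   version change between the first and last read means the walk may
   have observed a torn chain. Either case returns FD_MAP_ERR_AGAIN and
   the caller retries (see fd_progcache_query below). */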

static fd_funk_rec_t *
fd_progcache_query( fd_progcache_t *          cache,
                    fd_funk_txn_xid_t const * xid,
                    fd_funk_rec_key_t const * key,
                    ulong                     epoch_slot0 ) {
  /* Hash key to chain */
  fd_funk_xid_key_pair_t pair[1];
  fd_funk_txn_xid_copy( pair->xid, xid );
  fd_funk_rec_key_copy( pair->key, key );
  fd_funk_rec_map_t const * rec_map = cache->funk->rec_map;
  ulong hash      = fd_funk_rec_map_key_hash( pair, rec_map->map->seed );
  ulong chain_idx = (hash & (rec_map->map->chain_cnt-1UL) );

  /* Traverse chain for candidate */
  fd_funk_rec_t * rec = NULL;
  for(;;) {
    int err = fd_progcache_search_chain( cache, chain_idx, key, epoch_slot0, &rec );
    if( FD_LIKELY( err==FD_MAP_SUCCESS ) ) break;
    FD_SPIN_PAUSE();
    fd_racesan_hook( "fd_progcache_query_wait" );
    /* FIXME backoff */
  }

  return rec;
}

fd_progcache_rec_t const *
fd_progcache_peek( fd_progcache_t *          cache,
                   fd_funk_txn_xid_t const * xid,
                   void const *              prog_addr,
                   ulong                     epoch_slot0 ) {
  if( FD_UNLIKELY( !cache || !cache->funk->shmem ) ) FD_LOG_CRIT(( "NULL progcache" ));
  fd_progcache_load_fork( cache, xid, epoch_slot0 );
  fd_funk_rec_key_t key[1]; memcpy( key->uc, prog_addr, 32UL );
  fd_funk_rec_t const * rec = fd_progcache_query( cache, xid, key, epoch_slot0 );
  if( FD_UNLIKELY( !rec ) ) return NULL;

  fd_progcache_rec_t const * entry = fd_funk_val_const( rec, fd_funk_wksp( cache->funk ) );
  if( entry->slot < epoch_slot0 ) entry = NULL;

  cache->metrics->hit_cnt += !!entry;

  return entry;
}
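
/* Example (a minimal sketch; prog_pubkey is a hypothetical 32-byte
   program address):

     fd_progcache_rec_t const * rec =
       fd_progcache_peek( cache, &tip_xid, prog_pubkey, epoch_slot0 );
     if( rec ) { ... program is cached on this fork ... } */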

static void
fd_funk_rec_push_tail( fd_funk_rec_t * rec_pool,
                       fd_funk_rec_t * rec,
                       uint *          rec_head_idx,
                       uint *          rec_tail_idx ) {
  uint rec_idx = (uint)( rec - rec_pool );
  for(;;) {

    /* Doubly linked list append. Robust in the event of concurrent
       publishes. Iteration during publish not supported. Sequence:
       - Identify tail element
       - Set new element's prev and next pointers
       - Set tail element's next pointer
       - Set tail pointer */

    uint rec_prev_idx = FD_VOLATILE_CONST( *rec_tail_idx );
    rec->prev_idx = rec_prev_idx;
    rec->next_idx = FD_FUNK_REC_IDX_NULL;
    FD_COMPILER_MFENCE();

    uint * next_idx_p;
    if( fd_funk_rec_idx_is_null( rec_prev_idx ) ) {
      next_idx_p = rec_head_idx;
    } else {
      next_idx_p = &rec_pool[ rec_prev_idx ].next_idx;
    }

    fd_racesan_hook( "fd_progcache_rec_push_tail_start" );
    if( FD_UNLIKELY( !__sync_bool_compare_and_swap( next_idx_p, FD_FUNK_REC_IDX_NULL, rec_idx ) ) ) {
      /* Another thread beat us to the punch */
      FD_SPIN_PAUSE();
      continue;
    }

    if( FD_UNLIKELY( !__sync_bool_compare_and_swap( rec_tail_idx, rec_prev_idx, rec_idx ) ) ) {
      /* This CAS is guaranteed to succeed if the previous CAS passed. */
      FD_LOG_CRIT(( "Irrecoverable data race encountered while appending to txn rec list (invariant violation?): cas(%p,%u,%u)",
                    (void *)rec_tail_idx, rec_prev_idx, rec_idx ));
    }

    break;
  }
}
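
/* Illustration of the append protocol: on an empty list (tail NULL)
   the first CAS links rec via the head pointer; on a non-empty list
   with tail T it links rec via T->next_idx. In both cases the second
   CAS then publishes rec as the new tail. Losing the first CAS means
   another thread appended concurrently; re-read the tail and retry. */

/* fd_progcache_push publishes funk_rec into the funk record map under
   (txn,prog_addr) (at the root if txn is NULL). Returns 1 if the
   record was inserted, or 0 if an equivalent record already existed,
   in which case *dup_rec points to the existing record and funk_rec
   remains unpublished. */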

__attribute__((warn_unused_result))
static int
fd_progcache_push( fd_progcache_t * cache,
                   fd_funk_txn_t *  txn,
                   fd_funk_rec_t *  rec,
                   void const *     prog_addr,
                   fd_funk_rec_t ** dup_rec ) {
  fd_funk_t * funk = cache->funk;
  *dup_rec = NULL;

  /* Phase 1: Determine record's xid-key pair */

  rec->tag      = 0;
  rec->prev_idx = FD_FUNK_REC_IDX_NULL;
  rec->next_idx = FD_FUNK_REC_IDX_NULL;
  memcpy( rec->pair.key, prog_addr, 32UL );
  if( FD_UNLIKELY( txn ) ) {
    fd_funk_txn_xid_copy( rec->pair.xid, &txn->xid );
  } else {
    fd_funk_txn_xid_set_root( rec->pair.xid );
  }

  /* Phase 2: Lock rec_map chain, entering critical section */

  struct {
    fd_funk_rec_map_txn_t              txn[1];
    fd_funk_rec_map_txn_private_info_t info[1];
  } _map_txn;
  fd_funk_rec_map_txn_t * map_txn = fd_funk_rec_map_txn_init( _map_txn.txn, funk->rec_map, 1UL );
  fd_funk_rec_map_txn_add( map_txn, &rec->pair, 1 );
  int txn_err = fd_funk_rec_map_txn_try( map_txn, FD_MAP_FLAG_BLOCKING );
  if( FD_UNLIKELY( txn_err!=FD_MAP_SUCCESS ) ) {
    FD_LOG_CRIT(( "Failed to insert progcache record: cannot lock funk rec map chain: %i-%s", txn_err, fd_map_strerror( txn_err ) ));
  }

  /* Phase 3: Check if record exists */

  fd_funk_rec_map_query_t query[1];
  int query_err = fd_funk_rec_map_txn_query( funk->rec_map, &rec->pair, NULL, query, 0 );
  if( FD_UNLIKELY( query_err==FD_MAP_SUCCESS ) ) {
    fd_funk_rec_map_txn_test( map_txn );
    fd_funk_rec_map_txn_fini( map_txn );
    *dup_rec = query->ele;
    return 0; /* another thread was faster */
  } else if( FD_UNLIKELY( query_err!=FD_MAP_ERR_KEY ) ) {
    FD_LOG_CRIT(( "fd_funk_rec_map_txn_query failed: %i-%s", query_err, fd_map_strerror( query_err ) ));
  }

  /* Phase 4: Insert new record */

  int insert_err = fd_funk_rec_map_txn_insert( funk->rec_map, rec );
  if( FD_UNLIKELY( insert_err!=FD_MAP_SUCCESS ) ) {
    FD_LOG_CRIT(( "fd_funk_rec_map_txn_insert failed: %i-%s", insert_err, fd_map_strerror( insert_err ) ));
  }

  /* At this point, another thread could aggressively evict this entry.
     But this entry is not yet linked into the txn's rec list!  This is
     why we hold a lock on the rec_map chain -- the rec_map_remove
     executed by the eviction will be sequenced after completion of
     phase 5. */

  /* Phase 5: Insert rec into the txn's rec list */

  if( txn ) {
    fd_funk_rec_push_tail( funk->rec_pool->ele,
                           rec,
                           &txn->rec_head_idx,
                           &txn->rec_tail_idx );
  }

  /* Phase 6: Finish rec_map transaction */

  int test_err = fd_funk_rec_map_txn_test( map_txn );
  if( FD_UNLIKELY( test_err!=FD_MAP_SUCCESS ) ) FD_LOG_CRIT(( "fd_funk_rec_map_txn_test failed: %i-%s", test_err, fd_map_strerror( test_err ) ));
  fd_funk_rec_map_txn_fini( map_txn );
  return 1;
}

static int
fd_progcache_txn_try_lock( fd_funk_txn_t * txn ) {
  for(;;) {
    ushort * lock  = &txn->lock->value;
    ushort   value = FD_VOLATILE_CONST( *lock );
    if( FD_UNLIKELY( value>=0xFFFE ) ) return 0; /* txn is write-locked */
    if( FD_LIKELY( FD_ATOMIC_CAS( lock, value, value+1 )==value ) ) {
      return 1; /* transaction now read-locked */
    }
  }
}
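
/* Note: txn->lock is assumed to follow fd_rwlock semantics: values
   below 0xFFFE count active readers, values of 0xFFFE and above mean
   the transaction is write-locked (e.g. by a concurrent publish).
   fd_progcache_txn_try_lock is a non-blocking read-lock attempt; the
   matching release goes through fd_rwlock_unread below. */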

static void
fd_progcache_txn_unlock( fd_funk_txn_t * txn ) {
  if( !txn ) return;
  fd_rwlock_unread( txn->lock );
}

/* fd_progcache_lock_best_txn picks a fork graph node close to
   target_slot and read-locks it for program cache entry insertion.

   The cache entry should be placed as far up the fork graph as
   possible (so it can be shared across more downstream forks), but not
   too early (or it would cause non-determinism).

   The placement is constrained by several factors:

   - Program modification time (cannot predate the program modification)
   - Epoch boundaries (cannot span across epochs)
   - Transaction publishing (cannot create a cache entry at a txn that
     is in the process of being published) */
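
/* Worked example (hypothetical slots): with fork list { 100, 95, 92 }
   and a program last modified at slot 92, target_slot is 92 and the
   entry is created at the node for slot 92, so the slot 95 and slot
   100 descendants (and any other fork branching off 92) share it. If
   that node's txn cannot be locked, the scan backtracks to slot 95,
   then slot 100. */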

static fd_funk_txn_t *
fd_progcache_lock_best_txn( fd_progcache_t * cache,
                            ulong            target_slot ) {

  fd_funk_txn_xid_t last_publish[1];
  fd_funk_txn_xid_ld_atomic( last_publish, fd_funk_last_publish( cache->funk ) );
  if( target_slot <= last_publish->ul[0] &&
      !fd_funk_txn_xid_eq_root( last_publish ) ) {
    return NULL; /* insert at the root (record is published immediately) */
  }

  /* Scan fork graph for oldest node (>= program update slot) */
  ulong target_xid_idx;
  ulong fork_depth = cache->fork_depth;
  for( target_xid_idx=0UL; target_xid_idx<fork_depth; target_xid_idx++ ) {
    if( cache->fork[ target_xid_idx ].ul[0]<=target_slot ) break;
  }

  /* Backtrack towards newer fork graph nodes (>= access slot);
     very old slots could have been rooted at this point */
  fd_funk_txn_t * txn;
  do {
    /* Locate fork */
    fd_funk_txn_xid_t const * xid = &cache->fork[ target_xid_idx ];
    txn = fd_funk_txn_query( xid, cache->funk->txn_map );
    if( FD_LIKELY( txn ) ) {
      /* Attempt to read-lock transaction */
      if( FD_LIKELY( fd_progcache_txn_try_lock( txn ) ) ) return txn;
    }
    /* Cannot insert at this fork graph node, try one newer */
    target_xid_idx--;
  } while( target_xid_idx!=ULONG_MAX );

  /* There is no funk_txn in range [target_slot,load_slot] that we can
     create a cache entry at. */
  FD_LOG_CRIT(( "Could not find program cache fork graph node for target slot %lu", target_slot ));
}

static fd_progcache_rec_t const *
fd_progcache_insert( fd_progcache_t *           cache,
                     fd_accdb_user_t *          accdb,
                     fd_funk_txn_xid_t const *  load_xid,
                     void const *               prog_addr,
                     fd_prog_load_env_t const * env,
                     long                       slot_min ) {

  /* XID overview:

     - load_xid:   tip of fork currently being executed
     - modify_xid: xid in which program was last modified / deployed
     - entry_xid:  effective xid of the new cache entry (modify_xid,
                   clamped to the epoch start and invalidation floor)
     - txn->xid:   xid in which program cache entry is inserted

     slot(load_xid) > slot(entry_xid) >= slot(txn->xid) */

  /* Acquire reference to ELF binary data */

  fd_funk_txn_xid_t modify_xid;
  ulong             progdata_sz;
  uchar const *     progdata = fd_prog_load_elf( accdb, load_xid, prog_addr, &progdata_sz, &modify_xid );
  if( FD_UNLIKELY( !progdata ) ) return NULL;
  ulong target_slot = modify_xid.ul[0];

  /* Prevent cache entry from crossing epoch boundary */

  target_slot = fd_ulong_max( target_slot, env->epoch_slot0 );

  /* Prevent cache entry from shadowing invalidation */

  target_slot = (ulong)fd_long_max( (long)target_slot, slot_min );

  /* Allocate a funk_rec */

  fd_funk_t *     funk     = cache->funk;
  fd_funk_rec_t * funk_rec = fd_funk_rec_pool_acquire( funk->rec_pool, NULL, 0, NULL );
  if( FD_UNLIKELY( !funk_rec ) ) {
    FD_LOG_ERR(( "Program cache is out of memory: fd_funk_rec_pool_acquire failed (rec_max=%lu)",
                 fd_funk_rec_pool_ele_max( funk->rec_pool ) ));
  }
  memset( funk_rec, 0, sizeof(fd_funk_rec_t) );
  fd_funk_val_init( funk_rec );

  /* Pick and lock the txn at which the cache entry is created */

  fd_funk_txn_t * txn = fd_progcache_lock_best_txn( cache, target_slot );

  /* Load program */

  fd_features_t const * features  = env->features;
  ulong const           load_slot = env->slot;
  fd_prog_versions_t versions = fd_prog_versions( features, load_slot );
  fd_sbpf_loader_config_t config = {
    .sbpf_min_version = versions.min_sbpf_version,
    .sbpf_max_version = versions.max_sbpf_version,
  };
  fd_sbpf_elf_info_t elf_info[1];

  fd_progcache_rec_t * rec = NULL;
  if( FD_LIKELY( fd_sbpf_elf_peek( elf_info, progdata, progdata_sz, &config )==FD_SBPF_ELF_SUCCESS ) ) {

    fd_funk_t * funk          = cache->funk;
    ulong       rec_align     = fd_progcache_rec_align();
    ulong       rec_footprint = fd_progcache_rec_footprint( elf_info );

    void * rec_mem = fd_funk_val_truncate( funk_rec, funk->alloc, funk->wksp, rec_align, rec_footprint, NULL );
    if( FD_UNLIKELY( !rec_mem ) ) {
      FD_LOG_ERR(( "Program cache is out of memory: fd_alloc_malloc failed (requested align=%lu sz=%lu)",
                   rec_align, rec_footprint ));
    }

    rec = fd_progcache_rec_new( rec_mem, elf_info, &config, load_slot, features, progdata, progdata_sz, cache->scratch, cache->scratch_sz );
    if( !rec ) {
      fd_funk_val_flush( funk_rec, funk->alloc, funk->wksp );
    }

  }

  /* Convert to tombstone if load failed */

  if( !rec ) { /* load fail */
    void * rec_mem = fd_funk_val_truncate( funk_rec, funk->alloc, funk->wksp, fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ), NULL );
    if( FD_UNLIKELY( !rec_mem ) ) {
      FD_LOG_ERR(( "Program cache is out of memory: fd_alloc_malloc failed (requested align=%lu sz=%lu)",
                   fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ) ));
    }
    rec = fd_progcache_rec_new_nx( rec_mem, load_slot );
  }

  /* Publish cache entry to funk index */

  fd_funk_rec_t * dup_rec = NULL;
  int push_ok = fd_progcache_push( cache, txn, funk_rec, prog_addr, &dup_rec );

  /* Done modifying transaction */

  fd_progcache_txn_unlock( txn );

  /* If another thread was faster publishing the same record, use that
     one instead. FIXME POSSIBLE RACE CONDITION WHERE THE OTHER REC IS
     EVICTED AFTER PEEK? */

  if( !push_ok ) {
    FD_TEST( dup_rec );
    fd_funk_val_flush( funk_rec, funk->alloc, funk->wksp );
    fd_funk_rec_pool_release( funk->rec_pool, funk_rec, 1 );
    cache->metrics->dup_insert_cnt++;
    return fd_funk_val_const( dup_rec, funk->wksp );
  }

  cache->metrics->fill_cnt++;
  cache->metrics->fill_tot_sz += rec->rodata_sz;

  return rec;
}

fd_progcache_rec_t const *
fd_progcache_pull( fd_progcache_t *           cache,
                   fd_accdb_user_t *          accdb,
                   fd_funk_txn_xid_t const *  xid,
                   void const *               prog_addr,
                   fd_prog_load_env_t const * env ) {
  if( FD_UNLIKELY( !cache || !cache->funk->shmem ) ) FD_LOG_CRIT(( "NULL progcache" ));
  fd_progcache_load_fork( cache, xid, env->epoch_slot0 );

  fd_progcache_rec_t const * found_rec = fd_progcache_peek( cache, xid, prog_addr, env->epoch_slot0 );
  long slot_min = -1L;
  if( !found_rec ) goto miss;

  /* Entry was invalidated: treat as a miss, but raise the minimum slot
     so the replacement entry does not shadow the invalidation */
  if( found_rec->invalidate ) {
    slot_min = (long)found_rec->slot+1L;
    if( FD_UNLIKELY( xid->ul[0] < (ulong)slot_min ) ) {
      FD_LOG_CRIT(( "Program cache entry %016lx%016lx%016lx%016lx invalidated at slot %lu but loaded at slot %lu",
                    fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr ) ),
                    fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr+ 8 ) ),
                    fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr+16 ) ),
                    fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr+24 ) ),
                    found_rec->slot,
                    xid->ul[0] ));
    }
    goto miss;
  }

  /* Passed all checks */
  cache->metrics->hit_cnt++;
  cache->metrics->hit_tot_sz += found_rec->rodata_sz;
  return found_rec;

miss:
  cache->metrics->miss_cnt++;
  return fd_progcache_insert( cache, accdb, xid, prog_addr, env, slot_min );
}
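
/* Typical call site (a minimal sketch; names other than the
   fd_progcache API are hypothetical):

     fd_prog_load_env_t env = {
       .features    = features,     // active feature set
       .slot        = slot,         // slot being executed
       .epoch_slot0 = epoch_slot0,  // first slot of the epoch
     };
     fd_progcache_rec_t const * rec =
       fd_progcache_pull( cache, accdb, &tip_xid, prog_pubkey, &env );
     if( !rec ) { ... program is not loadable ... }

   On a miss, pull compiles and inserts the program itself, so a NULL
   return means the ELF data could not even be acquired. */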

fd_progcache_rec_t const *
fd_progcache_invalidate( fd_progcache_t *          cache,
                         fd_funk_txn_xid_t const * xid,
                         void const *              prog_addr,
                         ulong                     slot ) {
  fd_funk_t * funk = cache->funk;

  if( FD_UNLIKELY( !cache || !funk->shmem ) ) FD_LOG_CRIT(( "NULL progcache" ));

  /* Select a fork node to create the invalidation record in.
     Do not create invalidation records at the funk root. */

  if( fd_funk_txn_xid_eq( xid, cache->funk->shmem->last_publish ) ) return NULL;

  fd_funk_txn_t * txn = fd_funk_txn_query( xid, funk->txn_map );
  if( FD_UNLIKELY( !txn ) ) {
    FD_LOG_CRIT(( "fd_progcache_invalidate(xid=%lu,...) failed: txn not found", xid->ul[0] ));
  }
  if( FD_UNLIKELY( !fd_progcache_txn_try_lock( txn ) ) ) {
    FD_LOG_CRIT(( "fd_progcache_invalidate(xid=%lu,...) failed: txn is write-locked", xid->ul[0] ));
  }

  /* Allocate a funk_rec */

  fd_funk_rec_t * funk_rec = fd_funk_rec_pool_acquire( funk->rec_pool, NULL, 0, NULL );
  if( FD_UNLIKELY( !funk_rec ) ) {
    FD_LOG_ERR(( "Program cache is out of memory: fd_funk_rec_pool_acquire failed (rec_max=%lu)",
                 fd_funk_rec_pool_ele_max( funk->rec_pool ) ));
  }
  memset( funk_rec, 0, sizeof(fd_funk_rec_t) );
  fd_funk_val_init( funk_rec );

  /* Create a tombstone */

  void * rec_mem = fd_funk_val_truncate( funk_rec, funk->alloc, funk->wksp, fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ), NULL );
  if( FD_UNLIKELY( !rec_mem ) ) {
    FD_LOG_ERR(( "Program cache is out of memory: fd_alloc_malloc failed (requested align=%lu sz=%lu)",
                 fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ) ));
  }
  fd_progcache_rec_t * rec = fd_progcache_rec_new_nx( rec_mem, slot );
  rec->invalidate = 1;

  /* Publish cache entry to funk index */

  fd_funk_rec_t * dup_rec = NULL;
  int push_ok = fd_progcache_push( cache, txn, funk_rec, prog_addr, &dup_rec );

  /* Done modifying transaction */

  fd_progcache_txn_unlock( txn );

  /* If another thread was faster publishing the same record, use that
     one instead. FIXME POSSIBLE RACE CONDITION WHERE THE OTHER REC IS
     EVICTED AFTER PEEK? */

  if( !push_ok ) {
    FD_TEST( dup_rec );
    fd_funk_val_flush( funk_rec, funk->alloc, funk->wksp );
    fd_funk_rec_pool_release( funk->rec_pool, funk_rec, 1 );
    cache->metrics->dup_insert_cnt++;
    return fd_funk_val_const( dup_rec, funk->wksp );
  }

  cache->metrics->invalidate_cnt++;

  return rec;
}
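
/* Example: marking a program as modified at `slot` so that older cache
   entries on this fork stop being served and the next pull recompiles
   it (a minimal sketch):

     fd_progcache_invalidate( cache, &xid, prog_pubkey, slot );

   The returned tombstone carries invalidate=1; fd_progcache_pull
   treats it as a miss and reinserts with slot_min = slot+1. */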