#include "fd_prog_load.h"
#include "fd_progcache_user.h"
#include "fd_progcache_rec.h"

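/* Fallback metrics sink: fd_progcache_join points new joins here.
   Thread-local (FD_TL), so counters bumped on different threads do
   not contend or false-share. */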
FD_TL fd_progcache_metrics_t fd_progcache_metrics_default;

fd_progcache_t *
fd_progcache_join( fd_progcache_t * ljoin,
                   void *           shfunk,
                   uchar *          scratch,
                   ulong            scratch_sz ) {
  if( FD_UNLIKELY( !ljoin ) ) {
    FD_LOG_WARNING(( "NULL ljoin" ));
    return NULL;
  }
  if( FD_UNLIKELY( !shfunk ) ) {
    FD_LOG_WARNING(( "NULL shfunk" ));
    return NULL;
  }
  if( FD_LIKELY( scratch_sz ) ) {
    if( FD_UNLIKELY( !scratch ) ) {
      FD_LOG_WARNING(( "NULL scratch" ));
      return NULL;
    }
    if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)scratch, FD_PROGCACHE_SCRATCH_ALIGN ) ) ) {
      FD_LOG_WARNING(( "misaligned scratch" ));
      return NULL;
    }
  }
  memset( ljoin, 0, sizeof(fd_progcache_t) );
  if( FD_UNLIKELY( !fd_funk_join( ljoin->funk, shfunk ) ) ) return NULL;

  ljoin->metrics    = &fd_progcache_metrics_default;
  ljoin->scratch    = scratch;
  ljoin->scratch_sz = scratch_sz;

  return ljoin;
}

void *
fd_progcache_leave( fd_progcache_t * cache,
                    void **          opt_shfunk ) {
  if( FD_UNLIKELY( !cache ) ) {
    FD_LOG_WARNING(( "NULL cache" ));
    return NULL;
  }
  if( FD_UNLIKELY( !fd_funk_leave( cache->funk, opt_shfunk ) ) ) return NULL;
  cache->scratch    = NULL;
  cache->scratch_sz = 0UL;
  return cache;
}
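
/* Typical lifecycle (sketch; the shfunk/scratch objects belong to the
   caller and the names here are hypothetical):

     fd_progcache_t pc[1];
     if( FD_UNLIKELY( !fd_progcache_join( pc, shfunk, scratch, scratch_sz ) ) )
       FD_LOG_ERR(( "fd_progcache_join failed" ));
     ...
     void * shfunk_out;
     fd_progcache_leave( pc, &shfunk_out ); */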

/* fd_progcache_load_fork pivots the progcache object to the selected
   fork (identified by tip XID).

   Populates cache->fork, an array-backed list of XIDs sorted newest
   to oldest. Cache lookups only respect records with an XID present
   in that list.

   For any given xid, the epoch_slot0 is assumed to stay constant. */
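
/* Example (hypothetical XIDs): executing at tip 105:0 whose in-flight
   ancestors are 104:0 and 103:0 on top of a published root yields
   fork = { 105:0, 104:0, 103:0, root } and fork_depth = 4. */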

static void
fd_progcache_load_fork_slow( fd_progcache_t *          cache,
                             fd_funk_txn_xid_t const * xid,
                             ulong                     epoch_slot0 ) {
  fd_funk_txn_xid_t next_xid = *xid;
  if( FD_UNLIKELY( next_xid.ul[0]<epoch_slot0 ) ) {
    FD_LOG_CRIT(( "fd_progcache_load_fork: attempted to load xid=%lu:%lu, which predates first slot of bank's epoch (epoch_slot0=%lu)",
                  next_xid.ul[0], next_xid.ul[1], epoch_slot0 ));
  }

  /* Walk transaction graph, recovering from overruns on-the-fly */
  cache->fork_depth = 0UL;

  ulong txn_max = fd_funk_txn_pool_ele_max( cache->funk->txn_pool );
  ulong i;
  for( i=0UL; i<FD_PROGCACHE_DEPTH_MAX; i++ ) {
    fd_funk_txn_map_query_t query[1];
    fd_funk_txn_t const *   candidate;
    fd_funk_txn_xid_t       found_xid;
    ulong                   parent_idx;
    fd_funk_txn_xid_t       parent_xid;
retry:
    /* Speculatively look up transaction from map */
    for(;;) {
      int query_err = fd_funk_txn_map_query_try( cache->funk->txn_map, &next_xid, NULL, query, 0 );
      if( FD_UNLIKELY( query_err==FD_MAP_ERR_AGAIN ) ) {
        /* FIXME random backoff */
        FD_SPIN_PAUSE();
        continue;
      }
      if( query_err==FD_MAP_ERR_KEY ) goto done;
      if( FD_UNLIKELY( query_err!=FD_MAP_SUCCESS ) ) {
        FD_LOG_CRIT(( "fd_funk_txn_map_query_try failed: %i-%s", query_err, fd_map_strerror( query_err ) ));
      }
      break;
    }

    /* Look up the parent transaction while recovering from overruns.
       FIXME This would be a lot easier if transactions specified
       parent by XID instead of by pointer ... */
    candidate = fd_funk_txn_map_query_ele_const( query );
    FD_COMPILER_MFENCE();
    do {
      found_xid  = FD_VOLATILE_CONST( candidate->xid );
      parent_idx = fd_funk_txn_idx( FD_VOLATILE_CONST( candidate->parent_cidx ) );
      if( parent_idx<txn_max ) {
        FD_COMPILER_MFENCE();
        fd_funk_txn_t const * parent = &cache->funk->txn_pool->ele[ parent_idx ];
        parent_xid = FD_VOLATILE_CONST( parent->xid );
        FD_COMPILER_MFENCE();
      }
      parent_idx = fd_funk_txn_idx( FD_VOLATILE_CONST( candidate->parent_cidx ) );
    } while(0);
    FD_COMPILER_MFENCE();

    /* Verify speculative loads by ensuring txn still exists in map */
    if( FD_UNLIKELY( fd_funk_txn_map_query_test( query )!=FD_MAP_SUCCESS ) ) {
      FD_SPIN_PAUSE();
      goto retry;
    }

    if( FD_UNLIKELY( !fd_funk_txn_xid_eq( &found_xid, &next_xid ) ) ) {
      FD_LOG_CRIT(( "fd_progcache_load_fork_slow detected memory corruption: expected xid %lu:%lu at %p, found %lu:%lu",
                    next_xid.ul[0], next_xid.ul[1],
                    (void *)candidate,
                    found_xid.ul[0], found_xid.ul[1] ));
    }

    cache->fork[ i ] = next_xid;
    if( fd_funk_txn_idx_is_null( parent_idx ) ||
        next_xid.ul[0]<epoch_slot0 ) {
      /* Reached root or fork graph node is from previous epoch */
      i++;
      break;
    }
    next_xid = parent_xid;
  }

done:
  cache->fork_depth = i;

  /* Only include published/rooted records in lookups if the last
     published slot belongs to the current epoch. */
  if( fd_funk_last_publish( cache->funk )->ul[0] >= epoch_slot0 &&
      cache->fork_depth < FD_PROGCACHE_DEPTH_MAX ) {
    fd_funk_txn_xid_set_root( &cache->fork[ cache->fork_depth++ ] );
  }
}

static inline void
fd_progcache_load_fork( fd_progcache_t *          cache,
                        fd_funk_txn_xid_t const * xid,
                        ulong                     epoch_slot0 ) {
  /* Skip if already on the correct fork */
  if( FD_LIKELY( (!!cache->fork_depth) & (!!fd_funk_txn_xid_eq( &cache->fork[ 0 ], xid )) ) ) return;
  cache->metrics->fork_switch_cnt++;
  fd_progcache_load_fork_slow( cache, xid, epoch_slot0 ); /* switch fork */
}

/* fd_progcache_query searches for a program cache entry on the current
   fork. Stops short of an epoch boundary. */

static int
fd_progcache_fork_has_xid( fd_progcache_t const *    cache,
                           fd_funk_txn_xid_t const * rec_xid ) {
  /* FIXME unroll this a little */
  ulong const fork_depth = cache->fork_depth;
  for( ulong i=0UL; i<fork_depth; i++ ) {
    if( fd_funk_txn_xid_eq( &cache->fork[i], rec_xid ) ) return 1;
  }
  return 0;
}

static int
fd_progcache_search_chain( fd_progcache_t const *    cache,
                           ulong                     chain_idx,
                           fd_funk_rec_key_t const * key,
                           ulong                     epoch_slot0,
                           fd_funk_rec_t **          out_rec ) {
  *out_rec = NULL;

  fd_funk_rec_map_shmem_t *                     shmap     = cache->funk->rec_map->map;
  fd_funk_rec_map_shmem_private_chain_t const * chain_tbl = fd_funk_rec_map_shmem_private_chain_const( shmap, 0UL );
  fd_funk_rec_map_shmem_private_chain_t const * chain     = chain_tbl + chain_idx;
  fd_funk_rec_t * rec_tbl   = cache->funk->rec_pool->ele;
  ulong           rec_max   = fd_funk_rec_pool_ele_max( cache->funk->rec_pool );
  ulong           ver_cnt   = FD_VOLATILE_CONST( chain->ver_cnt );
  ulong           root_slot = fd_funk_last_publish( cache->funk )->ul[0];
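
  /* Records under the published root carry the root XID sentinel (the
     slot component reads as ULONG_MAX); those are treated as living at
     the last published slot so the epoch comparison below works. */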

  /* Start a speculative transaction for the chain containing revisions
     of the program cache key we are looking for. */
  ulong cnt = fd_funk_rec_map_private_vcnt_cnt( ver_cnt );
  if( FD_UNLIKELY( fd_funk_rec_map_private_vcnt_ver( ver_cnt )&1 ) ) {
    return FD_MAP_ERR_AGAIN; /* chain is locked */
  }
  FD_COMPILER_MFENCE();
  uint ele_idx = chain->head_cidx;

  /* Walk the map chain, remember the best entry */
  fd_funk_rec_t * best      = NULL;
  long            best_slot = -1L;
  for( ulong i=0UL; i<cnt; i++, ele_idx=rec_tbl[ ele_idx ].map_next ) {
    fd_funk_rec_t * rec = &rec_tbl[ ele_idx ];

    /* Skip over unrelated records (hash collision) */
    if( FD_UNLIKELY( !fd_funk_rec_key_eq( rec->pair.key, key ) ) ) continue;

    /* Skip over records from an older epoch (FIXME could bail early
       here if the chain is ordered) */
    ulong found_slot = rec->pair.xid->ul[0];
    if( found_slot==ULONG_MAX ) found_slot = root_slot;

    if( FD_UNLIKELY( found_slot<epoch_slot0 ) ) continue;

    /* Skip over records that are older than what we already have */
    if( FD_UNLIKELY( (long)found_slot<best_slot ) ) continue;

    /* Confirm that record is part of the current fork
       FIXME this has bad performance / pointer-chasing */
    if( FD_UNLIKELY( !fd_progcache_fork_has_xid( cache, rec->pair.xid ) ) ) continue;

    best      = rec;
    best_slot = (long)found_slot;
    if( FD_UNLIKELY( rec->map_next==ele_idx ) ) {
      FD_LOG_CRIT(( "fd_progcache_search_chain detected cycle" ));
    }
    if( rec->map_next > rec_max ) {
      if( FD_UNLIKELY( !fd_funk_rec_map_private_idx_is_null( rec->map_next ) ) ) {
        FD_LOG_CRIT(( "fd_progcache_search_chain detected memory corruption: rec->map_next %u is out of bounds (rec_max %lu)",
                      rec->map_next, rec_max ));
      }
    }
  }

  /* Retry if we were overrun */
  if( FD_UNLIKELY( FD_VOLATILE_CONST( chain->ver_cnt )!=ver_cnt ) ) {
    return FD_MAP_ERR_AGAIN;
  }

  *out_rec = best;
  return FD_MAP_SUCCESS;
}

static fd_funk_rec_t *
fd_progcache_query( fd_progcache_t *          cache,
                    fd_funk_txn_xid_t const * xid,
                    fd_funk_rec_key_t const * key,
                    ulong                     epoch_slot0 ) {
  /* Hash key to chain */
  fd_funk_xid_key_pair_t pair[1];
  fd_funk_txn_xid_copy( pair->xid, xid );
  fd_funk_rec_key_copy( pair->key, key );
  fd_funk_rec_map_t const * rec_map = cache->funk->rec_map;
  ulong hash      = fd_funk_rec_map_key_hash( pair, rec_map->map->seed );
  ulong chain_idx = ( hash & (rec_map->map->chain_cnt-1UL) );
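  /* chain_cnt is a power of two, so masking the hash selects a chain
     uniformly */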

  /* Traverse chain for candidate */
  fd_funk_rec_t * rec = NULL;
  for(;;) {
    int err = fd_progcache_search_chain( cache, chain_idx, key, epoch_slot0, &rec );
    if( FD_LIKELY( err==FD_MAP_SUCCESS ) ) break;
    FD_SPIN_PAUSE();
    /* FIXME backoff */
  }

  return rec;
}

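/* fd_progcache_peek_exact looks up the cache entry stored at exactly
   (xid, prog_addr), without fork resolution or epoch filtering.
   Returns NULL if no such record exists. */
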
fd_progcache_rec_t const *
fd_progcache_peek_exact( fd_progcache_t *          cache,
                         fd_funk_txn_xid_t const * xid,
                         void const *              prog_addr ) {
  fd_funk_xid_key_pair_t key[1];
  fd_funk_txn_xid_copy( key->xid, xid );
  memcpy( key->key->uc, prog_addr, 32UL );

  for(;;) {
    fd_funk_rec_map_query_t query[1];
    int query_err = fd_funk_rec_map_query_try( cache->funk->rec_map, key, NULL, query, 0 );
    if( query_err==FD_MAP_ERR_AGAIN ) {
      FD_SPIN_PAUSE();
      continue;
    }
    if( FD_UNLIKELY( query_err==FD_MAP_ERR_KEY ) ) return NULL;
    if( FD_UNLIKELY( query_err!=FD_MAP_SUCCESS ) ) {
      FD_LOG_CRIT(( "fd_funk_rec_map_query_try failed: %i-%s", query_err, fd_map_strerror( query_err ) ));
    }
    return fd_funk_val_const( fd_funk_rec_map_query_ele_const( query ), fd_funk_wksp( cache->funk ) );
  }
}

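/* fd_progcache_peek returns the newest cache entry for prog_addr
   visible on the fork identified by xid, or NULL if there is none in
   the current epoch. Does not fill the cache on miss. */
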
fd_progcache_rec_t const *
fd_progcache_peek( fd_progcache_t *          cache,
                   fd_funk_txn_xid_t const * xid,
                   void const *              prog_addr,
                   ulong                     epoch_slot0 ) {
  if( FD_UNLIKELY( !cache || !cache->funk->shmem ) ) FD_LOG_CRIT(( "NULL progcache" ));
  fd_progcache_load_fork( cache, xid, epoch_slot0 );
  fd_funk_rec_key_t key[1]; memcpy( key->uc, prog_addr, 32UL );
  fd_funk_rec_t const * rec = fd_progcache_query( cache, xid, key, epoch_slot0 );
  if( FD_UNLIKELY( !rec ) ) return NULL;

  fd_progcache_rec_t const * entry = fd_funk_val_const( rec, fd_funk_wksp( cache->funk ) );
  if( entry->slot < epoch_slot0 ) entry = NULL;

  cache->metrics->hit_cnt += !!entry;

  return entry;
}

static void
fd_funk_rec_push_tail( fd_funk_rec_t * rec_pool,
                       fd_funk_rec_t * rec,
                       uint *          rec_head_idx,
                       uint *          rec_tail_idx ) {
  uint rec_idx = (uint)( rec - rec_pool );
  for(;;) {

    /* Doubly linked list append. Robust in the event of concurrent
       publishes. Iteration during publish not supported. Sequence:
       - Identify tail element
       - Set new element's prev and next pointers
       - Set tail element's next pointer
       - Set tail pointer */

    uint rec_prev_idx = FD_VOLATILE_CONST( *rec_tail_idx );
    rec->prev_idx = rec_prev_idx;
    rec->next_idx = FD_FUNK_REC_IDX_NULL;
    FD_COMPILER_MFENCE();

    uint * next_idx_p;
    if( fd_funk_rec_idx_is_null( rec_prev_idx ) ) {
      next_idx_p = rec_head_idx;
    } else {
      next_idx_p = &rec_pool[ rec_prev_idx ].next_idx;
    }

    if( FD_UNLIKELY( !__sync_bool_compare_and_swap( next_idx_p, FD_FUNK_REC_IDX_NULL, rec_idx ) ) ) {
      /* Another thread beat us to the punch */
      FD_SPIN_PAUSE();
      continue;
    }

    if( FD_UNLIKELY( !__sync_bool_compare_and_swap( rec_tail_idx, rec_prev_idx, rec_idx ) ) ) {
      /* This CAS is guaranteed to succeed if the previous CAS passed. */
      FD_LOG_CRIT(( "Irrecoverable data race encountered while appending to txn rec list (invariant violation?): cas(%p,%u,%u)",
                    (void *)rec_tail_idx, rec_prev_idx, rec_idx ));
    }

    break;
  }
}

__attribute__((warn_unused_result))
static int
fd_progcache_push( fd_progcache_t * cache,
                   fd_funk_txn_t *  txn,
                   fd_funk_rec_t *  rec,
                   void const *     prog_addr ) {
  fd_funk_t * funk = cache->funk;

  /* Phase 1: Determine record's xid-key pair */

  rec->tag      = 0;
  rec->prev_idx = FD_FUNK_REC_IDX_NULL;
  rec->next_idx = FD_FUNK_REC_IDX_NULL;
  memcpy( rec->pair.key, prog_addr, 32UL );
  if( FD_UNLIKELY( txn ) ) {
    fd_funk_txn_xid_copy( rec->pair.xid, &txn->xid );
  } else {
    fd_funk_txn_xid_set_root( rec->pair.xid );
  }

  /* Phase 2: Lock rec_map chain, entering critical section */

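  /* Stack-allocated rec_map transaction sized for a single key: one
     fd_funk_rec_map_txn_private_info_t slot, matching the 1UL capacity
     passed to fd_funk_rec_map_txn_init below. */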
  struct {
    fd_funk_rec_map_txn_t              txn [1];
    fd_funk_rec_map_txn_private_info_t info[1];
  } _map_txn;
  fd_funk_rec_map_txn_t * map_txn = fd_funk_rec_map_txn_init( _map_txn.txn, funk->rec_map, 1UL );
  fd_funk_rec_map_txn_add( map_txn, &rec->pair, 1 );
  int txn_err = fd_funk_rec_map_txn_try( map_txn, FD_MAP_FLAG_BLOCKING );
  if( FD_UNLIKELY( txn_err!=FD_MAP_SUCCESS ) ) {
    FD_LOG_CRIT(( "Failed to insert progcache record: cannot lock funk rec map chain: %i-%s", txn_err, fd_map_strerror( txn_err ) ));
  }

  /* Phase 3: Insert rec into rec_map */

  int insert_err = fd_funk_rec_map_txn_insert( funk->rec_map, rec );
  if( FD_UNLIKELY( insert_err==FD_MAP_ERR_KEY ) ) {
    fd_funk_rec_map_txn_test( map_txn );
    fd_funk_rec_map_txn_fini( map_txn );
    return 0; /* another thread was faster */
  }

  /* At this point, another thread could aggressively evict this entry,
     even though it is not yet linked into the txn's record list. This
     is why we hold a lock on the rec_map chain -- the rec_map_remove
     executed by the eviction will be sequenced after completion of
     phase 5. */

  /* Phase 4: Atomically add record to funk txn's record list */

  if( txn ) {
    fd_funk_rec_push_tail( funk->rec_pool->ele,
                           rec,
                           &txn->rec_head_idx,
                           &txn->rec_tail_idx );
  }

  /* Phase 5: Finish rec_map transaction */

  int test_err = fd_funk_rec_map_txn_test( map_txn );
  if( FD_UNLIKELY( test_err!=FD_MAP_SUCCESS ) ) FD_LOG_CRIT(( "fd_funk_rec_map_txn_test failed: %i-%s", test_err, fd_map_strerror( test_err ) ));
  fd_funk_rec_map_txn_fini( map_txn );
  return 1;
}

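/* fd_progcache_txn_try_lock attempts to take a read lock on the funk
   txn's rwlock without blocking. Returns 1 on success and 0 if the
   txn is currently write-locked (e.g. in the process of being
   published). */
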
static int
fd_progcache_txn_try_lock( fd_funk_txn_t * txn ) {
  for(;;) {
    ushort * lock  = &txn->lock->value;
    ushort   value = FD_VOLATILE_CONST( *lock );
    if( FD_UNLIKELY( value>=0xFFFE ) ) return 0; /* txn is write-locked */
    if( FD_LIKELY( FD_ATOMIC_CAS( lock, value, value+1 )==value ) ) {
      return 1; /* transaction now read-locked */
    }
  }
}

static void
fd_progcache_txn_unlock( fd_funk_txn_t * txn ) {
  if( !txn ) return;
  fd_rwlock_unread( txn->lock );
}

/* fd_progcache_lock_best_txn picks a fork graph node close to
   target_slot and read-locks it for program cache entry insertion
   (the read lock keeps the txn from being published mid-insert).

   The cache entry should be placed as far up the fork graph as
   possible (so it can be shared across more downstream forks), but not
   too early (or it would cause non-determinism).

   The choice of node is constrained by:

   - Program modification time (cannot predate the program modification)
   - Epoch boundaries (cannot span across epochs)
   - Transaction publishing (cannot create a cache entry at a txn that
     is in the process of being published) */
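
/* Worked example (hypothetical slots): fork = { 105, 104, 103, root }
   and target_slot = 103. The scan stops at 103; if 103 cannot be
   locked (e.g. it is being published), backtrack to 104, then 105. */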

static fd_funk_txn_t *
fd_progcache_lock_best_txn( fd_progcache_t * cache,
                            ulong            target_slot ) {

  fd_funk_txn_xid_t last_publish[1];
  fd_funk_txn_xid_ld_atomic( last_publish, fd_funk_last_publish( cache->funk ) );
  if( target_slot <= last_publish->ul[0] &&
      !fd_funk_txn_xid_eq_root( last_publish ) ) {
    return NULL; /* publishing record immediately */
  }

  /* Scan fork graph for oldest node (>= program update slot) */
  ulong target_xid_idx;
  ulong fork_depth = cache->fork_depth;
  for( target_xid_idx=0UL; target_xid_idx<fork_depth; target_xid_idx++ ) {
    if( cache->fork[ target_xid_idx ].ul[0]<=target_slot ) break;
  }

  /* Backtrack up to newer fork graph nodes (>= access slot)
     Very old slots could have been rooted at this point */
  fd_funk_txn_t * txn;
  do {
    /* Locate fork */
    fd_funk_txn_xid_t const * xid = &cache->fork[ target_xid_idx ];
    txn = fd_funk_txn_query( xid, cache->funk->txn_map );
    if( FD_LIKELY( txn ) ) {
      /* Attempt to read-lock transaction */
      if( FD_LIKELY( fd_progcache_txn_try_lock( txn ) ) ) return txn;
    }
    /* Cannot insert at this fork graph node, try one newer */
    target_xid_idx--;
  } while( target_xid_idx!=ULONG_MAX );

  /* There is no funk_txn in range [target_slot,load_slot] that we can
     create a cache entry at. */
  FD_LOG_CRIT(( "Could not find program cache fork graph node for target slot %lu", target_slot ));
}

static fd_progcache_rec_t const *
fd_progcache_insert( fd_progcache_t *           cache,
                     fd_funk_t *                accdb,
                     fd_funk_txn_xid_t const *  load_xid,
                     void const *               prog_addr,
                     fd_prog_load_env_t const * env,
                     long                       slot_min ) {

  /* XID overview:

     - load_xid:   tip of fork currently being executed
     - modify_xid: xid in which program was last modified / deployed
     - txn->xid:   xid in which program cache entry is inserted

     slot(load_xid) > slot(entry_xid) >= slot(txn->xid) */

  /* Acquire reference to ELF binary data */

  fd_funk_txn_xid_t modify_xid;
  ulong             progdata_sz;
  uchar const *     progdata = fd_prog_load_elf( accdb, load_xid, prog_addr, &progdata_sz, &modify_xid );
  if( FD_UNLIKELY( !progdata ) ) return NULL;
  ulong target_slot = modify_xid.ul[0];

  /* Prevent cache entry from crossing epoch boundary */

  target_slot = fd_ulong_max( target_slot, env->epoch_slot0 );

  /* Prevent cache entry from shadowing invalidation */

  target_slot = (ulong)fd_long_max( (long)target_slot, slot_min );

  /* Allocate a funk_rec */

  fd_funk_t *     funk     = cache->funk;
  fd_funk_rec_t * funk_rec = fd_funk_rec_pool_acquire( funk->rec_pool, NULL, 0, NULL );
  if( FD_UNLIKELY( !funk_rec ) ) {
    FD_LOG_ERR(( "Program cache is out of memory: fd_funk_rec_pool_acquire failed (rec_max=%lu)",
                 fd_funk_rec_pool_ele_max( funk->rec_pool ) ));
  }
  memset( funk_rec, 0, sizeof(fd_funk_rec_t) );
  fd_funk_val_init( funk_rec );

  /* Pick and lock the txn at which the cache entry is created */

  fd_funk_txn_t * txn = fd_progcache_lock_best_txn( cache, target_slot );

  /* Load program */

  fd_features_t const * features  = env->features;
  ulong const           load_slot = env->slot;
  fd_prog_versions_t versions = fd_prog_versions( features, load_slot );
  fd_sbpf_loader_config_t config = {
    .sbpf_min_version = versions.min_sbpf_version,
    .sbpf_max_version = versions.max_sbpf_version,
  };
  fd_sbpf_elf_info_t elf_info[1];

  fd_progcache_rec_t * rec = NULL;
  if( FD_LIKELY( fd_sbpf_elf_peek( elf_info, progdata, progdata_sz, &config )==FD_SBPF_ELF_SUCCESS ) ) {

    ulong rec_align     = fd_progcache_rec_align();
    ulong rec_footprint = fd_progcache_rec_footprint( elf_info );

    void * rec_mem = fd_funk_val_truncate( funk_rec, funk->alloc, funk->wksp, rec_align, rec_footprint, NULL );
    if( FD_UNLIKELY( !rec_mem ) ) {
      FD_LOG_ERR(( "Program cache is out of memory: fd_alloc_malloc failed (requested align=%lu sz=%lu)",
                   rec_align, rec_footprint ));
    }

    rec = fd_progcache_rec_new( rec_mem, elf_info, &config, load_slot, features, progdata, progdata_sz, cache->scratch, cache->scratch_sz );
    if( !rec ) {
      fd_funk_val_flush( funk_rec, funk->alloc, funk->wksp );
    }

  }

  /* Convert to tombstone if load failed */

  if( !rec ) { /* load fail */
    void * rec_mem = fd_funk_val_truncate( funk_rec, funk->alloc, funk->wksp, fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ), NULL );
    if( FD_UNLIKELY( !rec_mem ) ) {
      FD_LOG_ERR(( "Program cache is out of memory: fd_alloc_malloc failed (requested align=%lu sz=%lu)",
                   fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ) ));
    }
    rec = fd_progcache_rec_new_nx( rec_mem, load_slot );
  }

  /* Publish cache entry to funk index */

  int push_ok = fd_progcache_push( cache, txn, funk_rec, prog_addr );

  /* Done modifying transaction */

  fd_progcache_txn_unlock( txn );

  /* If another thread was faster publishing the same record, use that
     one instead. FIXME POSSIBLE RACE CONDITION WHERE THE OTHER REC IS
     EVICTED AFTER PEEK? */

  if( !push_ok ) {
    fd_progcache_rec_t const * other = fd_progcache_peek( cache, load_xid, prog_addr, env->epoch_slot0 );
    if( FD_UNLIKELY( !other ) ) {
      FD_LOG_CRIT(( "fd_progcache_push/fd_progcache_peek data race detected" ));
    }
    cache->metrics->dup_insert_cnt++;
    return other;
  }

  cache->metrics->fill_cnt++;
  cache->metrics->fill_tot_sz += rec->rodata_sz;

  return rec;
}

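/* fd_progcache_pull returns the program cache entry for prog_addr on
   the fork identified by xid. On a cache miss (or when the existing
   entry was invalidated), loads and verifies the program and inserts
   a fresh entry (or a tombstone on load failure). */
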
fd_progcache_rec_t const *
fd_progcache_pull( fd_progcache_t *           cache,
                   fd_funk_t *                accdb,
                   fd_funk_txn_xid_t const *  xid,
                   void const *               prog_addr,
                   fd_prog_load_env_t const * env ) {
  if( FD_UNLIKELY( !cache || !cache->funk->shmem ) ) FD_LOG_CRIT(( "NULL progcache" ));
  fd_progcache_load_fork( cache, xid, env->epoch_slot0 );

  fd_progcache_rec_t const * found_rec = fd_progcache_peek( cache, xid, prog_addr, env->epoch_slot0 );
  long slot_min = -1L;
  if( !found_rec ) goto miss;

  /* Cache invalidation, update next slot */
  if( found_rec->invalidate ) {
    slot_min = (long)found_rec->slot+1L;
    if( FD_UNLIKELY( xid->ul[0] < (ulong)slot_min ) ) {
      FD_LOG_CRIT(( "Program cache entry %016lx%016lx%016lx%016lx invalidated at slot %lu but loaded at slot %lu",
                    fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr     ) ),
                    fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr+ 8 ) ),
                    fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr+16 ) ),
                    fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr+24 ) ),
                    found_rec->slot,
                    xid->ul[0] ));
    }
    goto miss;
  }

  /* Passed all checks */
  cache->metrics->hit_cnt++;
  cache->metrics->hit_tot_sz += found_rec->rodata_sz;
  return found_rec;

miss:
  cache->metrics->miss_cnt++;
  return fd_progcache_insert( cache, accdb, xid, prog_addr, env, slot_min );
}

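/* fd_progcache_invalidate records a tombstone for prog_addr at xid.
   The tombstone carries the given slot and the invalidate flag, which
   forces the next fd_progcache_pull on this fork to reload the program
   at a slot greater than `slot`. */
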
fd_progcache_rec_t const *
fd_progcache_invalidate( fd_progcache_t *          cache,
                         fd_funk_txn_xid_t const * xid,
                         void const *              prog_addr,
                         ulong                     slot ) {
  if( FD_UNLIKELY( !cache || !cache->funk->shmem ) ) FD_LOG_CRIT(( "NULL progcache" ));
  fd_funk_t * funk = cache->funk;

  fd_funk_txn_t * txn = fd_funk_txn_query( xid, funk->txn_map );
  if( FD_UNLIKELY( !txn ) ) {
    FD_LOG_CRIT(( "fd_progcache_invalidate(xid=%lu,...) failed: txn not found", xid->ul[0] ));
  }
  if( FD_UNLIKELY( !fd_progcache_txn_try_lock( txn ) ) ) {
    FD_LOG_CRIT(( "fd_progcache_invalidate(xid=%lu,...) failed: txn is write-locked", xid->ul[0] ));
  }

  /* Allocate a funk_rec */

  fd_funk_rec_t * funk_rec = fd_funk_rec_pool_acquire( funk->rec_pool, NULL, 0, NULL );
  if( FD_UNLIKELY( !funk_rec ) ) {
    FD_LOG_ERR(( "Program cache is out of memory: fd_funk_rec_pool_acquire failed (rec_max=%lu)",
                 fd_funk_rec_pool_ele_max( funk->rec_pool ) ));
  }
  memset( funk_rec, 0, sizeof(fd_funk_rec_t) );
  fd_funk_val_init( funk_rec );

  /* Create a tombstone */

  void * rec_mem = fd_funk_val_truncate( funk_rec, funk->alloc, funk->wksp, fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ), NULL );
  if( FD_UNLIKELY( !rec_mem ) ) {
    FD_LOG_ERR(( "Program cache is out of memory: fd_alloc_malloc failed (requested align=%lu sz=%lu)",
                 fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ) ));
  }
  fd_progcache_rec_t * rec = fd_progcache_rec_new_nx( rec_mem, slot );
  rec->invalidate = 1;

  /* Publish cache entry to funk index */

  int push_ok = fd_progcache_push( cache, txn, funk_rec, prog_addr );

  /* Done modifying transaction */

  fd_progcache_txn_unlock( txn );

  /* If another thread was faster publishing the same record, use that
     one instead. FIXME POSSIBLE RACE CONDITION WHERE THE OTHER REC IS
     EVICTED AFTER PEEK? */

  if( !push_ok ) {
    fd_progcache_rec_t const * other = fd_progcache_peek_exact( cache, xid, prog_addr );
    if( FD_UNLIKELY( !other ) ) {
      FD_LOG_CRIT(( "fd_progcache_push/fd_progcache_peek data race detected" ));
    }
    cache->metrics->dup_insert_cnt++;
    return other;
  }

  cache->metrics->invalidate_cnt++;

  return rec;
}