#include "fd_txncache.h"
#include "fd_txncache_private.h"
#include "../../util/log/fd_log.h"

struct blockcache {
  fd_txncache_blockcache_shmem_t * shmem;

  uint * heads; /* The hash table for the blockhash.  Each entry is a pointer to the head of a linked list of
                   transactions that reference this blockhash.  As we add transactions to a bucket, the head
                   pointer is updated to the new item, and the new item points to the previous head. */
  uint * pages; /* A list of the txnpages containing the transactions for this blockcache. */

  descends_set_t * descends; /* Each fork can descend from other forks in the txncache, and this bit vector contains one
                                value for each fork in the txncache.  If this fork descends from some other fork F, then
                                the bit at index F in descends[] is set. */
};

typedef struct blockcache blockcache_t;

struct fd_txncache_private {
  fd_txncache_shmem_t * shmem;

  fd_txncache_blockcache_shmem_t * blockcache_shmem_pool;
  blockcache_t *                   blockcache_pool;
  blockhash_map_t *                blockhash_map;

  ushort * txnpages_free; /* For each free page, the index of that page in the txnpages array. */

  fd_txncache_txnpage_t * txnpages; /* The actual storage for the transactions.  The blockcache points to these
                                       pages when storing transactions.  Transactions are grouped into pages of
                                       size 16384 to make certain allocation and deallocation operations faster
                                       (just the pages are acquired/released, rather than each txn). */
};

FD_FN_CONST ulong
fd_txncache_align( void ) {
  return FD_TXNCACHE_ALIGN;
}

FD_FN_CONST ulong
fd_txncache_footprint( ulong max_live_slots ) {
  ulong max_active_slots = FD_TXNCACHE_MAX_BLOCKHASH_DISTANCE+max_live_slots;

  ulong l;
  l = FD_LAYOUT_INIT;
  l = FD_LAYOUT_APPEND( l, FD_TXNCACHE_ALIGN,     sizeof(fd_txncache_t)                 );
  l = FD_LAYOUT_APPEND( l, alignof(blockcache_t), max_active_slots*sizeof(blockcache_t) );
  return FD_LAYOUT_FINI( l, FD_TXNCACHE_ALIGN );
}

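/* fd_txncache_new formats the caller provided ljoin region as a local
   join, wiring up the local pointers (per blockcache pages, heads,
   and descends sets) into the corresponding arrays carved out of the
   shared memory region described by shmem.  The ljoin layout below
   must match fd_txncache_footprint. */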
void *
fd_txncache_new( void *                ljoin,
                 fd_txncache_shmem_t * shmem ) {
  if( FD_UNLIKELY( !ljoin ) ) {
    FD_LOG_WARNING(( "NULL ljoin" ));
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)ljoin, fd_txncache_align() ) ) ) {
    FD_LOG_WARNING(( "misaligned ljoin" ));
    return NULL;
  }

  ulong max_active_slots     = shmem->active_slots_max;
  ulong blockhash_map_chains = fd_ulong_pow2_up( 2UL*shmem->active_slots_max );

  ushort _max_txnpages               = fd_txncache_max_txnpages( max_active_slots, shmem->txn_per_slot_max );
  ushort _max_txnpages_per_blockhash = fd_txncache_max_txnpages_per_blockhash( max_active_slots, shmem->txn_per_slot_max );

  ulong _descends_footprint = descends_set_footprint( max_active_slots );
  if( FD_UNLIKELY( !_descends_footprint ) ) {
    FD_LOG_WARNING(( "invalid max_active_slots" ));
    return NULL;
  }

  FD_SCRATCH_ALLOC_INIT( l, shmem );
  fd_txncache_shmem_t * tc    = FD_SCRATCH_ALLOC_APPEND( l, FD_TXNCACHE_SHMEM_ALIGN, sizeof(fd_txncache_shmem_t) );
  void * _blockhash_map       = FD_SCRATCH_ALLOC_APPEND( l, blockhash_map_align(), blockhash_map_footprint( blockhash_map_chains ) );
  void * _blockcache_pool     = FD_SCRATCH_ALLOC_APPEND( l, blockcache_pool_align(), blockcache_pool_footprint( max_active_slots ) );
  void * _blockcache_pages    = FD_SCRATCH_ALLOC_APPEND( l, alignof(uint), max_active_slots*_max_txnpages_per_blockhash*sizeof(uint) );
  void * _blockcache_heads    = FD_SCRATCH_ALLOC_APPEND( l, alignof(uint), max_active_slots*shmem->txn_per_slot_max*sizeof(uint) );
  void * _blockcache_descends = FD_SCRATCH_ALLOC_APPEND( l, descends_set_align(), max_active_slots*_descends_footprint );
  void * _txnpages_free       = FD_SCRATCH_ALLOC_APPEND( l, alignof(ushort), _max_txnpages*sizeof(ushort) );
  void * _txnpages            = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_txncache_txnpage_t), _max_txnpages*sizeof(fd_txncache_txnpage_t) );

  FD_SCRATCH_ALLOC_INIT( l2, ljoin );
  fd_txncache_t * ltc           = FD_SCRATCH_ALLOC_APPEND( l2, FD_TXNCACHE_ALIGN, sizeof(fd_txncache_t) );
  void * _local_blockcache_pool = FD_SCRATCH_ALLOC_APPEND( l2, alignof(blockcache_t), max_active_slots*sizeof(blockcache_t) );

  ltc->shmem = tc;

  ltc->blockcache_pool       = (blockcache_t*)_local_blockcache_pool;
  ltc->blockcache_shmem_pool = blockcache_pool_join( _blockcache_pool );

  for( ulong i=0UL; i<shmem->active_slots_max; i++ ) {
    ltc->blockcache_pool[ i ].pages    = (uint *)_blockcache_pages + i*_max_txnpages_per_blockhash;
    ltc->blockcache_pool[ i ].heads    = (uint *)_blockcache_heads + i*shmem->txn_per_slot_max;
    ltc->blockcache_pool[ i ].descends = descends_set_join( (uchar *)_blockcache_descends + i*_descends_footprint );
    ltc->blockcache_pool[ i ].shmem    = ltc->blockcache_shmem_pool + i;
    FD_TEST( ltc->blockcache_pool[ i ].shmem );
  }

  FD_TEST( ltc->blockcache_shmem_pool );

  ltc->blockhash_map = blockhash_map_join( _blockhash_map );
  FD_TEST( ltc->blockhash_map );

  ltc->txnpages_free = (ushort *)_txnpages_free;
  ltc->txnpages      = (fd_txncache_txnpage_t *)_txnpages;

  return (void *)ltc;
}

fd_txncache_t *
fd_txncache_join( void * ljoin ) {
  if( FD_UNLIKELY( !ljoin ) ) {
    FD_LOG_WARNING(( "NULL ljoin" ));
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)ljoin, fd_txncache_align() ) ) ) {
    FD_LOG_WARNING(( "misaligned ljoin" ));
    return NULL;
  }

  fd_txncache_t * tc = (fd_txncache_t *)ljoin;

  return tc;
}

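/* fd_txncache_reset clears the cache back to an empty state: all
   roots are dropped, every txnpage is returned to the free list, and
   the blockcache pool and blockhash map are reset.  Any previously
   returned fork ids are invalidated. */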
void
fd_txncache_reset( fd_txncache_t * tc ) {
  fd_rwlock_write( tc->shmem->lock );

  tc->shmem->root_cnt = 0UL;
  root_slist_remove_all( tc->shmem->root_ll, tc->blockcache_shmem_pool );

  tc->shmem->txnpages_free_cnt = tc->shmem->max_txnpages;
  for( ushort i=0; i<tc->shmem->max_txnpages; i++ ) tc->txnpages_free[ i ] = i;

  blockcache_pool_reset( tc->blockcache_shmem_pool );
  blockhash_map_reset( tc->blockhash_map );

  fd_rwlock_unwrite( tc->shmem->lock );
}

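/* fd_txncache_ensure_txnpage returns a txnpage for the blockcache
   with at least one free slot, acquiring a new page from the global
   free list if the current last page is full.  Multiple threads may
   race here under the read lock: the winner of the CAS on the
   pages[ page_cnt ] sentinel (UINT_MAX -> UINT_MAX-1) pops a page
   index off the free list and publishes it, while losers spin until
   the winner's page index becomes visible.  Returns NULL if the
   blockcache already has the maximum number of pages or no free
   pages remain. */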
static fd_txncache_txnpage_t *
fd_txncache_ensure_txnpage( fd_txncache_t * tc,
                            blockcache_t *  blockcache ) {
  ushort page_cnt = blockcache->shmem->pages_cnt;
  if( FD_UNLIKELY( page_cnt>tc->shmem->txnpages_per_blockhash_max ) ) return NULL;

  if( FD_LIKELY( page_cnt ) ) {
    uint txnpage_idx = blockcache->pages[ page_cnt-1 ];
    ushort txnpage_free = tc->txnpages[ txnpage_idx ].free;
    if( FD_LIKELY( txnpage_free ) ) return &tc->txnpages[ txnpage_idx ];
  }

  if( FD_UNLIKELY( page_cnt==tc->shmem->txnpages_per_blockhash_max ) ) return NULL;
  if( FD_LIKELY( FD_ATOMIC_CAS( &blockcache->pages[ page_cnt ], UINT_MAX, UINT_MAX-1UL )==UINT_MAX ) ) {
    ulong txnpages_free_cnt = tc->shmem->txnpages_free_cnt;
    for(;;) {
      if( FD_UNLIKELY( !txnpages_free_cnt ) ) return NULL;
      ulong old_txnpages_free_cnt = FD_ATOMIC_CAS( &tc->shmem->txnpages_free_cnt, (ushort)txnpages_free_cnt, (ushort)(txnpages_free_cnt-1UL) );
      if( FD_LIKELY( old_txnpages_free_cnt==txnpages_free_cnt ) ) break;
      txnpages_free_cnt = old_txnpages_free_cnt;
      FD_SPIN_PAUSE();
    }

    ushort txnpage_idx = tc->txnpages_free[ txnpages_free_cnt-1UL ];
    fd_txncache_txnpage_t * txnpage = &tc->txnpages[ txnpage_idx ];
    txnpage->free = FD_TXNCACHE_TXNS_PER_PAGE;
    FD_COMPILER_MFENCE();
    blockcache->pages[ page_cnt ] = txnpage_idx;
    FD_COMPILER_MFENCE();
    blockcache->shmem->pages_cnt = (ushort)(page_cnt+1);
    return txnpage;
  } else {
    uint txnpage_idx = blockcache->pages[ page_cnt ];
    while( FD_UNLIKELY( txnpage_idx>=UINT_MAX-1UL ) ) {
      txnpage_idx = blockcache->pages[ page_cnt ];
      FD_SPIN_PAUSE();
    }
    return &tc->txnpages[ txnpage_idx ];
  }
}

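/* fd_txncache_insert_txn attempts to place one transaction into the
   given txnpage.  A slot is reserved by atomically decrementing the
   page's free counter; the 20 byte txnhash window and fork id are
   written into that slot, and the slot is then published onto the
   blockcache's per-bucket linked list with a CAS on the bucket head.
   Returns 1 on success and 0 if the page filled up before a slot
   could be reserved (the caller retries with a fresh page). */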
static int
fd_txncache_insert_txn( fd_txncache_t *         tc,
                        blockcache_t *          blockcache,
                        fd_txncache_txnpage_t * txnpage,
                        fd_txncache_fork_id_t   fork_id,
                        uchar const *           txnhash ) {
  ulong txnpage_idx = (ulong)(txnpage - tc->txnpages);

  for(;;) {
    ushort txnpage_free = txnpage->free;
    if( FD_UNLIKELY( !txnpage_free ) ) return 0;
    if( FD_UNLIKELY( FD_ATOMIC_CAS( &txnpage->free, txnpage_free, txnpage_free-1UL )!=txnpage_free ) ) {
      FD_SPIN_PAUSE();
      continue;
    }

    ulong txn_idx = FD_TXNCACHE_TXNS_PER_PAGE-txnpage_free;
    ulong txnhash_offset = blockcache->shmem->txnhash_offset;
    memcpy( txnpage->txns[ txn_idx ]->txnhash, txnhash+txnhash_offset, 20UL );
    txnpage->txns[ txn_idx ]->fork_id = fork_id;
    FD_COMPILER_MFENCE();

    ulong txn_bucket = FD_LOAD( ulong, txnhash+txnhash_offset )%tc->shmem->txn_per_slot_max;
    for(;;) {
      uint head = blockcache->heads[ txn_bucket ];
      txnpage->txns[ txn_idx ]->blockcache_next = head;
      FD_COMPILER_MFENCE();
      if( FD_LIKELY( FD_ATOMIC_CAS( &blockcache->heads[ txn_bucket ], head, (uint)(FD_TXNCACHE_TXNS_PER_PAGE*txnpage_idx+txn_idx) )==head ) ) break;
      FD_SPIN_PAUSE();
    }

    return 1;
  }
}

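/* fd_txncache_attach_child creates a new fork in the cache as a child
   of parent_fork_id (or as a new root if parent_fork_id is
   USHORT_MAX) and returns its id.  The children of a fork form a
   singly linked list threaded through child_id / sibling_id, and the
   new fork's descends set is the parent's descends set plus the
   parent itself, so ancestry checks reduce to a single bit test. */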
fd_txncache_fork_id_t
fd_txncache_attach_child( fd_txncache_t *       tc,
                          fd_txncache_fork_id_t parent_fork_id ) {
  fd_rwlock_write( tc->shmem->lock );

  FD_TEST( blockcache_pool_free( tc->blockcache_shmem_pool ) );
  ulong idx = blockcache_pool_idx_acquire( tc->blockcache_shmem_pool );

  blockcache_t * fork = &tc->blockcache_pool[ idx ];
  fd_txncache_fork_id_t fork_id = { .val = (ushort)idx };

  fork->shmem->child_id = (fd_txncache_fork_id_t){ .val = USHORT_MAX };

  if( FD_LIKELY( parent_fork_id.val==USHORT_MAX ) ) {
    FD_TEST( blockcache_pool_free( tc->blockcache_shmem_pool )==blockcache_pool_max( tc->blockcache_shmem_pool )-1UL );
    fork->shmem->parent_id  = (fd_txncache_fork_id_t){ .val = USHORT_MAX };
    fork->shmem->sibling_id = (fd_txncache_fork_id_t){ .val = USHORT_MAX };

    descends_set_null( fork->descends );
    root_slist_ele_push_tail( tc->shmem->root_ll, fork->shmem, tc->blockcache_shmem_pool );
  } else {
    blockcache_t * parent = &tc->blockcache_pool[ parent_fork_id.val ];
    FD_TEST( parent );
    /* It might be tempting to freeze the parent here, and ordinarily
       that would be valid, but not when loading from a snapshot,
       where we need to load many transactions into a chain of rooted
       parents at once. */
    fork->shmem->sibling_id = parent->shmem->child_id;
    fork->shmem->parent_id  = parent_fork_id;
    parent->shmem->child_id = fork_id;

    descends_set_copy( fork->descends, parent->descends );
    descends_set_insert( fork->descends, parent_fork_id.val );
  }

  fork->shmem->txnhash_offset = 0UL;
  fork->shmem->frozen = 0;
  memset( fork->heads, 0xFF, tc->shmem->txn_per_slot_max*sizeof(uint) );
  fork->shmem->pages_cnt = 0;
  memset( fork->pages, 0xFF, tc->shmem->txnpages_per_blockhash_max*sizeof(uint) );

  fd_rwlock_unwrite( tc->shmem->lock );
  return fork_id;
}

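/* fd_txncache_attach_blockhash records the blockhash produced by the
   fork and publishes the fork into the blockhash map so that
   descendant forks can insert transactions referencing it.  The fork
   must not already be frozen; after this call it is frozen
   (frozen==1). */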
void
fd_txncache_attach_blockhash( fd_txncache_t *       tc,
                              fd_txncache_fork_id_t fork_id,
                              uchar const *         blockhash ) {
  fd_rwlock_write( tc->shmem->lock );

  blockcache_t * fork = &tc->blockcache_pool[ fork_id.val ];
  FD_TEST( !fork->shmem->frozen );
  fork->shmem->frozen = 1;

  memcpy( fork->shmem->blockhash.uc, blockhash, 32UL );

  blockhash_map_ele_insert( tc->blockhash_map, fork->shmem, tc->blockcache_shmem_pool );

  fd_rwlock_unwrite( tc->shmem->lock );
}

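/* fd_txncache_finalize_fork marks the fork as fully finalized
   (frozen==2), after which no further transactions may be inserted
   against it.  It also records the txnhash_offset to use for
   transactions referencing this blockhash, and inserts the fork into
   the blockhash map if fd_txncache_attach_blockhash was never called
   for it. */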
void
fd_txncache_finalize_fork( fd_txncache_t *       tc,
                           fd_txncache_fork_id_t fork_id,
                           ulong                 txnhash_offset,
                           uchar const *         blockhash ) {
  fd_rwlock_write( tc->shmem->lock );

  blockcache_t * fork = &tc->blockcache_pool[ fork_id.val ];
  FD_TEST( fork->shmem->frozen<=1 );
  fork->shmem->txnhash_offset = txnhash_offset;

  memcpy( fork->shmem->blockhash.uc, blockhash, 32UL );

  if( FD_LIKELY( !fork->shmem->frozen ) ) blockhash_map_ele_insert( tc->blockhash_map, fork->shmem, tc->blockcache_shmem_pool );
  fork->shmem->frozen = 2;

  fd_rwlock_unwrite( tc->shmem->lock );
}

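/* remove_blockcache releases all resources held by a blockcache: its
   txnpages are returned to the global free list, its bit is cleared
   from every fork's descends set, and it is removed from the
   blockhash map and returned to the blockcache pool.  Assumes the
   write lock is held. */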
static inline void
remove_blockcache( fd_txncache_t * tc,
                   blockcache_t *  blockcache ) {
  for( ushort i=0; i<blockcache->shmem->pages_cnt; i++ ) tc->txnpages_free[ tc->shmem->txnpages_free_cnt+i ] = (ushort)blockcache->pages[ i ];
  tc->shmem->txnpages_free_cnt = (ushort)(tc->shmem->txnpages_free_cnt+blockcache->shmem->pages_cnt);

  ulong idx = blockcache_pool_idx( tc->blockcache_shmem_pool, blockcache->shmem );
  for( ulong i=0UL; i<tc->shmem->active_slots_max; i++ ) descends_set_remove( tc->blockcache_pool[ i ].descends, idx );

  blockhash_map_ele_remove_fast( tc->blockhash_map, blockcache->shmem, tc->blockcache_shmem_pool );
  blockcache_pool_ele_release( tc->blockcache_shmem_pool, blockcache->shmem );
}

static inline void
remove_children( fd_txncache_t *      tc,
                 blockcache_t const * fork,
                 blockcache_t const * except ) {
  fd_txncache_fork_id_t sibling_idx = fork->shmem->child_id;
  while( sibling_idx.val!=USHORT_MAX ) {
    blockcache_t * sibling = &tc->blockcache_pool[ sibling_idx.val ];
    FD_TEST( sibling );

    sibling_idx = sibling->shmem->sibling_id;
    if( FD_UNLIKELY( sibling==except ) ) continue;

    remove_children( tc, sibling, except );
    remove_blockcache( tc, sibling );
  }
}

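/* fd_txncache_advance_root advances the rooted chain to fork_id,
   which must be a child of the current last root.  Competing sibling
   forks (and their descendants) are pruned immediately, and once more
   than FD_TXNCACHE_MAX_BLOCKHASH_DISTANCE roots are retained the
   oldest root is retired and its resources released. */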
void
fd_txncache_advance_root( fd_txncache_t *       tc,
                          fd_txncache_fork_id_t fork_id ) {
  fd_rwlock_write( tc->shmem->lock );

  blockcache_t * fork = &tc->blockcache_pool[ fork_id.val ];
  FD_TEST( fork );

  blockcache_t * parent_fork = &tc->blockcache_pool[ fork->shmem->parent_id.val ];
  if( FD_UNLIKELY( root_slist_ele_peek_tail( tc->shmem->root_ll, tc->blockcache_shmem_pool )!=parent_fork->shmem ) ) {
    FD_LOG_CRIT(( "advancing root from %s to %s but that is not valid, last root is %s",
                  FD_BASE58_ENC_32_ALLOCA( parent_fork->shmem->blockhash.uc ),
                  FD_BASE58_ENC_32_ALLOCA( fork->shmem->blockhash.uc ),
                  FD_BASE58_ENC_32_ALLOCA( root_slist_ele_peek_tail( tc->shmem->root_ll, tc->blockcache_shmem_pool )->blockhash.uc ) ));
  }

  FD_LOG_DEBUG(( "advancing root from %s to %s",
                 FD_BASE58_ENC_32_ALLOCA( parent_fork->shmem->blockhash.uc ),
                 FD_BASE58_ENC_32_ALLOCA( fork->shmem->blockhash.uc ) ));

  /* When a fork is rooted, any competing forks can be immediately
     removed as they will not be needed again.  This includes child
     forks of the pruned siblings as well. */
  remove_children( tc, parent_fork, fork );

  /* Now the earliest known rooted fork can likely be removed, since
     its blockhash can no longer be referenced (it is now more than
     151 blockhashes away). */
  tc->shmem->root_cnt++;
  root_slist_ele_push_tail( tc->shmem->root_ll, fork->shmem, tc->blockcache_shmem_pool );
  if( FD_LIKELY( tc->shmem->root_cnt>FD_TXNCACHE_MAX_BLOCKHASH_DISTANCE ) ) {
    fd_txncache_blockcache_shmem_t * old_root_shmem = root_slist_ele_pop_head( tc->shmem->root_ll, tc->blockcache_shmem_pool );
    FD_TEST( old_root_shmem );
    blockcache_t * old_root = &tc->blockcache_pool[ blockcache_pool_idx( tc->blockcache_shmem_pool, old_root_shmem ) ];

    root_slist_ele_peek_head( tc->shmem->root_ll, tc->blockcache_shmem_pool )->parent_id.val = USHORT_MAX;

    remove_blockcache( tc, old_root );
    tc->shmem->root_cnt--;
  }

  fd_rwlock_unwrite( tc->shmem->lock );
}

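/* blockhash_on_fork finds the blockcache for a blockhash as seen from
   a particular fork.  The blockhash map is chained, since the same
   blockhash could in principle appear on multiple forks, so the chain
   is walked and only a candidate that the querying fork descends from
   is returned.  Returns NULL if the blockhash is not on an ancestor
   of the fork. */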
static inline blockcache_t *
blockhash_on_fork( fd_txncache_t *      tc,
                   blockcache_t const * fork,
                   uchar const *        blockhash ) {
  fd_txncache_blockcache_shmem_t const * candidate = blockhash_map_ele_query_const( tc->blockhash_map, fd_type_pun_const( blockhash ), NULL, tc->blockcache_shmem_pool );
  if( FD_UNLIKELY( !candidate ) ) return NULL;

  while( candidate ) {
    ulong candidate_idx = blockcache_pool_idx( tc->blockcache_shmem_pool, candidate );
    if( FD_LIKELY( descends_set_test( fork->descends, candidate_idx ) ) ) return &tc->blockcache_pool[ candidate_idx ];
    candidate = blockhash_map_ele_next_const( candidate, NULL, tc->blockcache_shmem_pool );
  }
  return NULL;
}

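/* fd_txncache_insert records that a transaction with the given
   txnhash, referencing the given blockhash, executed on the given
   fork.  Note that only the read side of the rwlock is taken:
   concurrent inserts are allowed and coordinate with each other via
   the lock free txnpage and bucket operations above, while structural
   operations (attaching and pruning forks) take the write side. */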
void
fd_txncache_insert( fd_txncache_t *       tc,
                    fd_txncache_fork_id_t fork_id,
                    uchar const *         blockhash,
                    uchar const *         txnhash ) {
  fd_rwlock_read( tc->shmem->lock );

  blockcache_t const * fork = &tc->blockcache_pool[ fork_id.val ];
  FD_TEST( fork->shmem->frozen<=1 );
  blockcache_t * blockcache = blockhash_on_fork( tc, fork, blockhash );

  /* TODO: We can't print the full txnhash here typically because we
     might only be able to see 20 bytes, but we need to print it for
     diagnostic purposes.  Remove once bug is identified. */
  if( FD_UNLIKELY( !blockcache ) ) FD_LOG_CRIT(( "transaction %s refers to blockhash %s which does not exist on fork", FD_BASE58_ENC_32_ALLOCA( txnhash ), FD_BASE58_ENC_32_ALLOCA( blockhash ) ));

  for(;;) {
    fd_txncache_txnpage_t * txnpage = fd_txncache_ensure_txnpage( tc, blockcache );
    FD_TEST( txnpage );

    int success = fd_txncache_insert_txn( tc, blockcache, txnpage, fork_id, txnhash );
    if( FD_LIKELY( success ) ) break;

    FD_SPIN_PAUSE();
  }

  fd_rwlock_unread( tc->shmem->lock );
}

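/* fd_txncache_query returns 1 if a transaction with the given txnhash
   and blockhash has been inserted on the given fork or any of its
   ancestors, and 0 otherwise.  The bucket chain for the blockhash is
   walked and each entry is compared against the stored 20 byte
   txnhash window, filtered by whether the inserting fork is the
   querying fork or one it descends from. */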
int
fd_txncache_query( fd_txncache_t *       tc,
                   fd_txncache_fork_id_t fork_id,
                   uchar const *         blockhash,
                   uchar const *         txnhash ) {
  fd_rwlock_read( tc->shmem->lock );

  blockcache_t const * fork = &tc->blockcache_pool[ fork_id.val ];
  blockcache_t const * blockcache = blockhash_on_fork( tc, fork, blockhash );

  /* TODO: We can't print the full txnhash here typically because we
     might only be able to see 20 bytes, but we need to print it for
     diagnostic purposes.  Remove once bug is identified. */
  if( FD_UNLIKELY( !blockcache ) ) FD_LOG_CRIT(( "transaction %s refers to blockhash %s which does not exist on fork", FD_BASE58_ENC_32_ALLOCA( txnhash ), FD_BASE58_ENC_32_ALLOCA( blockhash ) ));

  int found = 0;

  ulong txnhash_offset = blockcache->shmem->txnhash_offset;
  ulong head_hash = FD_LOAD( ulong, txnhash+txnhash_offset ) % tc->shmem->txn_per_slot_max;
  for( uint head=blockcache->heads[ head_hash ]; head!=UINT_MAX; head=tc->txnpages[ head/FD_TXNCACHE_TXNS_PER_PAGE ].txns[ head%FD_TXNCACHE_TXNS_PER_PAGE ]->blockcache_next ) {
    fd_txncache_single_txn_t * txn = tc->txnpages[ head/FD_TXNCACHE_TXNS_PER_PAGE ].txns[ head%FD_TXNCACHE_TXNS_PER_PAGE ];

    int descends = txn->fork_id.val==fork_id.val || descends_set_test( fork->descends, txn->fork_id.val );
    if( FD_LIKELY( descends && !memcmp( txnhash+txnhash_offset, txn->txnhash, 20UL ) ) ) {
      found = 1;
      break;
    }
  }

  fd_rwlock_unread( tc->shmem->lock );
  return found;
}