#include "fd_txncache.h"
#include "fd_txncache_private.h"
#include "../../util/log/fd_log.h"

struct blockcache {
  fd_txncache_blockcache_shmem_t * shmem;

  uint * heads;     /* The hash table for the blockhash. Each entry is a pointer to the head of a linked list of
                       transactions that reference this blockhash. As we add transactions to the bucket, the head
                       pointer is updated to the new item, and the new item points to the previous head. */
  uint * pages;     /* A list of the txnpages containing the transactions for this blockcache. */

  uchar * descends; /* Each fork can descend from other forks in the txncache, and this array contains one value
                       for each fork in the txncache. If this fork descends from the fork at position id,
                       then descends[ id ] will be 1, otherwise 0. */
};

typedef struct blockcache blockcache_t;

struct fd_txncache_private {
  fd_txncache_shmem_t * shmem;

  fd_txncache_blockcache_shmem_t * blockcache_shmem_pool;
  blockcache_t * blockcache_pool;
  blockhash_map_t * blockhash_map;

  ushort * txnpages_free; /* The indices of the free pages in the txnpages array, one entry per free page. */

  fd_txncache_txnpage_t * txnpages; /* The actual storage for the transactions. The blockcache points to these
                                       pages when storing transactions. Transactions are grouped into pages of
                                       size 16384 to make certain allocation and deallocation operations faster
                                       (just the pages are acquired/released, rather than each txn). */
};

FD_FN_CONST ulong
fd_txncache_align( void ) {
  return FD_TXNCACHE_ALIGN;
}

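/* fd_txncache_footprint returns the memory footprint required for a
   local join with the given number of live slots: the fd_txncache_t
   itself plus one local blockcache_t per active slot, where the
   active slot count is max_live_slots plus
   FD_TXNCACHE_MAX_BLOCKHASH_DISTANCE. */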
FD_FN_CONST ulong
fd_txncache_footprint( ulong max_live_slots ) {
  ulong max_active_slots = FD_TXNCACHE_MAX_BLOCKHASH_DISTANCE+max_live_slots;

  ulong l;
  l = FD_LAYOUT_INIT;
  l = FD_LAYOUT_APPEND( l, FD_TXNCACHE_SHMEM_ALIGN, sizeof(fd_txncache_t) );
  l = FD_LAYOUT_APPEND( l, alignof(blockcache_t), max_active_slots*sizeof(blockcache_t) );
  return FD_LAYOUT_FINI( l, FD_TXNCACHE_ALIGN );
}

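/* fd_txncache_new formats the memory region ljoin as a local join
   backed by the shared txncache region shmem. It computes where the
   shared components live inside shmem (blockhash map, blockcache
   pool, per-blockcache page/head/descends arrays, free page list, and
   txn pages) and points the local blockcache_t wrappers at them.
   Returns ljoin on success, or NULL (with a warning logged) if ljoin
   is NULL or misaligned. */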
void *
fd_txncache_new( void * ljoin,
                 fd_txncache_shmem_t * shmem ) {
  if( FD_UNLIKELY( !ljoin ) ) {
    FD_LOG_WARNING(( "NULL ljoin" ));
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)ljoin, fd_txncache_align() ) ) ) {
    FD_LOG_WARNING(( "misaligned ljoin" ));
    return NULL;
  }

  ulong max_active_slots = shmem->active_slots_max;
  ulong blockhash_map_chains = fd_ulong_pow2_up( 2UL*shmem->active_slots_max );

  ushort _max_txnpages = fd_txncache_max_txnpages( max_active_slots, shmem->txn_per_slot_max );
  ushort _max_txnpages_per_blockhash = fd_txncache_max_txnpages_per_blockhash( max_active_slots, shmem->txn_per_slot_max );

  FD_SCRATCH_ALLOC_INIT( l, shmem );
  fd_txncache_shmem_t * tc = FD_SCRATCH_ALLOC_APPEND( l, FD_TXNCACHE_SHMEM_ALIGN, sizeof(fd_txncache_shmem_t) );
  void * _blockhash_map = FD_SCRATCH_ALLOC_APPEND( l, blockhash_map_align(), blockhash_map_footprint( blockhash_map_chains ) );
  void * _blockcache_pool = FD_SCRATCH_ALLOC_APPEND( l, blockcache_pool_align(), blockcache_pool_footprint( max_active_slots ) );
  void * _blockcache_pages = FD_SCRATCH_ALLOC_APPEND( l, alignof(uint), max_active_slots*_max_txnpages_per_blockhash*sizeof(uint) );
  void * _blockcache_heads = FD_SCRATCH_ALLOC_APPEND( l, alignof(uint), max_active_slots*shmem->txn_per_slot_max*sizeof(uint) );
  void * _blockcache_descends = FD_SCRATCH_ALLOC_APPEND( l, alignof(uchar), max_active_slots*max_active_slots*sizeof(uchar) );
  void * _txnpages_free = FD_SCRATCH_ALLOC_APPEND( l, alignof(ushort), _max_txnpages*sizeof(ushort) );
  void * _txnpages = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_txncache_txnpage_t), _max_txnpages*sizeof(fd_txncache_txnpage_t) );

  FD_SCRATCH_ALLOC_INIT( l2, ljoin );
  fd_txncache_t * ltc = FD_SCRATCH_ALLOC_APPEND( l2, FD_TXNCACHE_ALIGN, sizeof(fd_txncache_t) );
  void * _local_blockcache_pool = FD_SCRATCH_ALLOC_APPEND( l2, alignof(blockcache_t), max_active_slots*sizeof(blockcache_t) );

  ltc->shmem = tc;

  ltc->blockcache_pool = (blockcache_t*)_local_blockcache_pool;
  ltc->blockcache_shmem_pool = blockcache_pool_join( _blockcache_pool );

  for( ulong i=0UL; i<shmem->active_slots_max; i++ ) {
    ltc->blockcache_pool[ i ].pages = (uint *)_blockcache_pages + i*_max_txnpages_per_blockhash;
    ltc->blockcache_pool[ i ].heads = (uint *)_blockcache_heads + i*shmem->txn_per_slot_max;
    ltc->blockcache_pool[ i ].descends = (uchar *)_blockcache_descends + i*max_active_slots;
    ltc->blockcache_pool[ i ].shmem = ltc->blockcache_shmem_pool + i;
  }

  FD_TEST( ltc->blockcache_shmem_pool );

  ltc->blockhash_map = blockhash_map_join( _blockhash_map );
  FD_TEST( ltc->blockhash_map );

  ltc->txnpages_free = (ushort *)_txnpages_free;
  ltc->txnpages = (fd_txncache_txnpage_t *)_txnpages;

  return (void *)ltc;
}

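/* fd_txncache_join joins the caller to a local region previously
   formatted by fd_txncache_new. Returns NULL and logs a warning if
   ljoin is NULL or misaligned. */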
fd_txncache_t *
fd_txncache_join( void * ljoin ) {
  if( FD_UNLIKELY( !ljoin ) ) {
    FD_LOG_WARNING(( "NULL ljoin" ));
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)ljoin, fd_txncache_align() ) ) ) {
    FD_LOG_WARNING(( "misaligned ljoin" ));
    return NULL;
  }

  fd_txncache_t * tc = (fd_txncache_t *)ljoin;

  return tc;
}

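/* fd_txncache_reset empties the txncache: the root chain is cleared,
   every txn page is returned to the free list, and the blockcache
   pool and blockhash map are reset. Takes the write lock. */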
void
fd_txncache_reset( fd_txncache_t * tc ) {
  fd_rwlock_write( tc->shmem->lock );

  tc->shmem->root_cnt = 0UL;
  root_slist_remove_all( tc->shmem->root_ll, tc->blockcache_shmem_pool );

  tc->shmem->txnpages_free_cnt = tc->shmem->max_txnpages;
  for( ushort i=0; i<tc->shmem->max_txnpages; i++ ) tc->txnpages_free[ i ] = i;

  blockcache_pool_reset( tc->blockcache_shmem_pool );
  blockhash_map_reset( tc->blockhash_map );

  fd_rwlock_unwrite( tc->shmem->lock );
}

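/* fd_txncache_ensure_txnpage returns a txnpage with at least one free
   slot for the given blockcache, acquiring a fresh page from the
   global free list if the current last page is full. Returns NULL if
   the blockcache already has the maximum number of pages or no free
   pages remain. Safe to call concurrently with other inserters: the
   new page slot is claimed with a compare-and-swap and losers spin
   until the winner publishes the page index. */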
static fd_txncache_txnpage_t *
fd_txncache_ensure_txnpage( fd_txncache_t * tc,
                            blockcache_t * blockcache ) {
  ushort page_cnt = blockcache->shmem->pages_cnt;
  if( FD_UNLIKELY( page_cnt>tc->shmem->txnpages_per_blockhash_max ) ) return NULL;

  if( FD_LIKELY( page_cnt ) ) {
    uint txnpage_idx = blockcache->pages[ page_cnt-1 ];
    ushort txnpage_free = tc->txnpages[ txnpage_idx ].free;
    if( FD_LIKELY( txnpage_free ) ) return &tc->txnpages[ txnpage_idx ];
  }

  if( FD_UNLIKELY( page_cnt==tc->shmem->txnpages_per_blockhash_max ) ) return NULL;
  if( FD_LIKELY( FD_ATOMIC_CAS( &blockcache->pages[ page_cnt ], UINT_MAX, UINT_MAX-1UL )==UINT_MAX ) ) {
    ulong txnpages_free_cnt = tc->shmem->txnpages_free_cnt;
    for(;;) {
      if( FD_UNLIKELY( !txnpages_free_cnt ) ) return NULL;
      ulong old_txnpages_free_cnt = FD_ATOMIC_CAS( &tc->shmem->txnpages_free_cnt, (ushort)txnpages_free_cnt, (ushort)(txnpages_free_cnt-1UL) );
      if( FD_LIKELY( old_txnpages_free_cnt==txnpages_free_cnt ) ) break;
      txnpages_free_cnt = old_txnpages_free_cnt;
      FD_SPIN_PAUSE();
    }

    ushort txnpage_idx = tc->txnpages_free[ txnpages_free_cnt-1UL ];
    fd_txncache_txnpage_t * txnpage = &tc->txnpages[ txnpage_idx ];
    txnpage->free = FD_TXNCACHE_TXNS_PER_PAGE;
    FD_COMPILER_MFENCE();
    blockcache->pages[ page_cnt ] = txnpage_idx;
    FD_COMPILER_MFENCE();
    blockcache->shmem->pages_cnt = (ushort)(page_cnt+1);
    return txnpage;
  } else {
    uint txnpage_idx = blockcache->pages[ page_cnt ];
    while( FD_UNLIKELY( txnpage_idx>=UINT_MAX-1UL ) ) {
      txnpage_idx = blockcache->pages[ page_cnt ];
      FD_SPIN_PAUSE();
    }
    return &tc->txnpages[ txnpage_idx ];
  }
}

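/* fd_txncache_insert_txn attempts to claim a slot in txnpage, records
   the (offset) txnhash and fork_id there, and then links the entry
   into the blockcache hash bucket for that hash. Returns 1 on
   success, or 0 if the page had no free slots left, in which case the
   caller should get another page and retry. */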
static int
fd_txncache_insert_txn( fd_txncache_t * tc,
                        blockcache_t * blockcache,
                        fd_txncache_txnpage_t * txnpage,
                        fd_txncache_fork_id_t fork_id,
                        uchar const * txnhash ) {
  ulong txnpage_idx = (ulong)(txnpage - tc->txnpages);

  for(;;) {
    ushort txnpage_free = txnpage->free;
    if( FD_UNLIKELY( !txnpage_free ) ) return 0;
    if( FD_UNLIKELY( FD_ATOMIC_CAS( &txnpage->free, txnpage_free, txnpage_free-1UL )!=txnpage_free ) ) {
      FD_SPIN_PAUSE();
      continue;
    }

    ulong txn_idx = FD_TXNCACHE_TXNS_PER_PAGE-txnpage_free;
    ulong txnhash_offset = blockcache->shmem->txnhash_offset;
    memcpy( txnpage->txns[ txn_idx ]->txnhash, txnhash+txnhash_offset, 20UL );
    txnpage->txns[ txn_idx ]->fork_id = fork_id;
    FD_COMPILER_MFENCE();

    ulong txn_bucket = FD_LOAD( ulong, txnhash+txnhash_offset )%tc->shmem->txn_per_slot_max;
    for(;;) {
      uint head = blockcache->heads[ txn_bucket ];
      txnpage->txns[ txn_idx ]->blockcache_next = head;
      FD_COMPILER_MFENCE();
      if( FD_LIKELY( FD_ATOMIC_CAS( &blockcache->heads[ txn_bucket ], head, (uint)(FD_TXNCACHE_TXNS_PER_PAGE*txnpage_idx+txn_idx) )==head ) ) break;
      FD_SPIN_PAUSE();
    }

    return 1;
  }
}

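/* fd_txncache_attach_child acquires a new blockcache from the pool
   and attaches it as a child of parent_fork_id, inheriting the
   parent's descends set. Passing a parent of USHORT_MAX creates a
   root fork, which is pushed onto the root list. The returned fork id
   is used by the insert, query, attach_blockhash, finalize, and
   advance_root calls. */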
fd_txncache_fork_id_t
fd_txncache_attach_child( fd_txncache_t * tc,
                          fd_txncache_fork_id_t parent_fork_id ) {
  fd_rwlock_write( tc->shmem->lock );

  FD_TEST( blockcache_pool_free( tc->blockcache_shmem_pool ) );
  ulong idx = blockcache_pool_idx_acquire( tc->blockcache_shmem_pool );

  blockcache_t * fork = &tc->blockcache_pool[ idx ];
  fd_txncache_fork_id_t fork_id = { .val = (ushort)idx };

  fork->shmem->child_id = (fd_txncache_fork_id_t){ .val = USHORT_MAX };

  if( FD_LIKELY( parent_fork_id.val==USHORT_MAX ) ) {
    FD_TEST( blockcache_pool_free( tc->blockcache_shmem_pool )==blockcache_pool_max( tc->blockcache_shmem_pool )-1UL );
    fork->shmem->parent_id = (fd_txncache_fork_id_t){ .val = USHORT_MAX };
    fork->shmem->sibling_id = (fd_txncache_fork_id_t){ .val = USHORT_MAX };

    fd_memset( fork->descends, 0, tc->shmem->active_slots_max*sizeof(uchar) );
    root_slist_ele_push_tail( tc->shmem->root_ll, fork->shmem, tc->blockcache_shmem_pool );
  } else {
    blockcache_t * parent = &tc->blockcache_pool[ parent_fork_id.val ];
    FD_TEST( parent );
    /* We might be tempted to freeze the parent here, and it's valid to
       do this ordinarily, but not when loading from a snapshot, when
       we need to load many transactions into a root parent chain at
       once. */
    fork->shmem->sibling_id = parent->shmem->child_id;
    fork->shmem->parent_id = parent_fork_id;
    parent->shmem->child_id = fork_id;

    fd_memcpy( fork->descends, parent->descends, tc->shmem->active_slots_max*sizeof(uchar) );
    fork->descends[ parent_fork_id.val ] = 1;
  }

  fork->shmem->txnhash_offset = 0UL;
  fork->shmem->frozen = 0;
  memset( fork->heads, 0xFF, tc->shmem->txn_per_slot_max*sizeof(uint) );
  fork->shmem->pages_cnt = 0;
  memset( fork->pages, 0xFF, tc->shmem->txnpages_per_blockhash_max*sizeof(uint) );

  fd_rwlock_unwrite( tc->shmem->lock );
  return fork_id;
}

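/* fd_txncache_attach_blockhash records the blockhash for the fork and
   publishes it in the blockhash map so that descendant forks can
   insert and query transactions referencing it. The fork must not
   already be frozen. */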
void
fd_txncache_attach_blockhash( fd_txncache_t * tc,
                              fd_txncache_fork_id_t fork_id,
                              uchar const * blockhash ) {
  fd_rwlock_write( tc->shmem->lock );

  blockcache_t * fork = &tc->blockcache_pool[ fork_id.val ];
  FD_TEST( !fork->shmem->frozen );
  fork->shmem->frozen = 1;

  memcpy( fork->shmem->blockhash.uc, blockhash, 32UL );

  blockhash_map_ele_insert( tc->blockhash_map, fork->shmem, tc->blockcache_shmem_pool );

  fd_rwlock_unwrite( tc->shmem->lock );
}

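/* fd_txncache_finalize_fork sets the txnhash key offset for the fork,
   records its blockhash (publishing it in the blockhash map if it was
   not already attached), and marks the fork as fully frozen. */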
void
fd_txncache_finalize_fork( fd_txncache_t * tc,
                           fd_txncache_fork_id_t fork_id,
                           ulong txnhash_offset,
                           uchar const * blockhash ) {
  fd_rwlock_write( tc->shmem->lock );

  blockcache_t * fork = &tc->blockcache_pool[ fork_id.val ];
  FD_TEST( fork->shmem->frozen<=1 );
  fork->shmem->txnhash_offset = txnhash_offset;

  memcpy( fork->shmem->blockhash.uc, blockhash, 32UL );

  if( FD_LIKELY( !fork->shmem->frozen ) ) blockhash_map_ele_insert( tc->blockhash_map, fork->shmem, tc->blockcache_shmem_pool );
  fork->shmem->frozen = 2;

  fd_rwlock_unwrite( tc->shmem->lock );
}

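/* remove_blockcache returns all of the blockcache's txn pages to the
   free list, clears its column in every descends array, removes it
   from the blockhash map, and releases it back to the pool. Assumes
   the write lock is held. */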
static inline void
remove_blockcache( fd_txncache_t * tc,
                   blockcache_t * blockcache ) {
  /* Return this blockcache's pages to the free list. Copy element by
     element since the page list stores uint indices while the free
     list stores ushort indices. */
  for( ushort i=0; i<blockcache->shmem->pages_cnt; i++ ) tc->txnpages_free[ tc->shmem->txnpages_free_cnt+i ] = (ushort)blockcache->pages[ i ];
  tc->shmem->txnpages_free_cnt = (ushort)(tc->shmem->txnpages_free_cnt+blockcache->shmem->pages_cnt);

  ulong idx = blockcache_pool_idx( tc->blockcache_shmem_pool, blockcache->shmem );
  for( ulong i=0UL; i<tc->shmem->active_slots_max; i++ ) tc->blockcache_pool[ i ].descends[ idx ] = 0;

  blockhash_map_ele_remove_fast( tc->blockhash_map, blockcache->shmem, tc->blockcache_shmem_pool );
  blockcache_pool_ele_release( tc->blockcache_shmem_pool, blockcache->shmem );
}

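/* remove_children removes every fork in the subtree below fork from
   the txncache, except the subtree rooted at except (the fork being
   kept). Assumes the write lock is held. */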
static inline void
remove_children( fd_txncache_t * tc,
                 blockcache_t const * fork,
                 blockcache_t const * except ) {
  fd_txncache_fork_id_t sibling_idx = fork->shmem->child_id;
  while( sibling_idx.val!=USHORT_MAX ) {
    blockcache_t * sibling = &tc->blockcache_pool[ sibling_idx.val ];
    FD_TEST( sibling );

    sibling_idx = sibling->shmem->sibling_id;
    if( FD_UNLIKELY( sibling==except ) ) continue;

    remove_children( tc, sibling, except );
    remove_blockcache( tc, sibling );
  }
}

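/* fd_txncache_advance_root marks fork_id as the new root. Competing
   sibling forks (and their descendants) are pruned, the fork is
   appended to the rooted chain, and once more than
   FD_TXNCACHE_MAX_BLOCKHASH_DISTANCE roots are retained the oldest
   root is evicted and its resources are released. */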
void
fd_txncache_advance_root( fd_txncache_t * tc,
                          fd_txncache_fork_id_t fork_id ) {
  fd_rwlock_write( tc->shmem->lock );

  blockcache_t * fork = &tc->blockcache_pool[ fork_id.val ];
  FD_TEST( fork );

  blockcache_t * parent_fork = &tc->blockcache_pool[ fork->shmem->parent_id.val ];
  if( FD_UNLIKELY( root_slist_ele_peek_tail( tc->shmem->root_ll, tc->blockcache_shmem_pool )!=parent_fork->shmem ) ) {
    FD_LOG_CRIT(( "advancing root from %s to %s but that is not valid, last root is %s",
                  FD_BASE58_ENC_32_ALLOCA( parent_fork->shmem->blockhash.uc ),
                  FD_BASE58_ENC_32_ALLOCA( fork->shmem->blockhash.uc ),
                  FD_BASE58_ENC_32_ALLOCA( root_slist_ele_peek_tail( tc->shmem->root_ll, tc->blockcache_shmem_pool )->blockhash.uc ) ));
  }

  FD_LOG_DEBUG(( "advancing root from %s to %s",
                 FD_BASE58_ENC_32_ALLOCA( parent_fork->shmem->blockhash.uc ),
                 FD_BASE58_ENC_32_ALLOCA( fork->shmem->blockhash.uc ) ));

  /* When a fork is rooted, any competing forks can be immediately
     removed as they will not be needed again. This includes child
     forks of the pruned siblings as well. */
  remove_children( tc, parent_fork, fork );

  /* Now, the earliest known rooted fork can likely be removed since its
     blockhashes cannot be referenced anymore (they are older than 151
     blockhashes away). */
  tc->shmem->root_cnt++;
  root_slist_ele_push_tail( tc->shmem->root_ll, fork->shmem, tc->blockcache_shmem_pool );
  if( FD_LIKELY( tc->shmem->root_cnt>FD_TXNCACHE_MAX_BLOCKHASH_DISTANCE ) ) {
    fd_txncache_blockcache_shmem_t * old_root_shmem = root_slist_ele_pop_head( tc->shmem->root_ll, tc->blockcache_shmem_pool );
    FD_TEST( old_root_shmem );
    blockcache_t * old_root = &tc->blockcache_pool[ blockcache_pool_idx( tc->blockcache_shmem_pool, old_root_shmem ) ];

    root_slist_ele_peek_head( tc->shmem->root_ll, tc->blockcache_shmem_pool )->parent_id.val = USHORT_MAX;

    remove_blockcache( tc, old_root );
    tc->shmem->root_cnt--;
  }

  fd_rwlock_unwrite( tc->shmem->lock );
}

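/* blockhash_on_fork walks the blockhash map chain for blockhash and
   returns the blockcache whose entry the given fork descends from, or
   NULL if no ancestor of the fork has this blockhash. Logs critically
   if the blockhash is not in the map at all. */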
static inline blockcache_t *
blockhash_on_fork( fd_txncache_t * tc,
                   blockcache_t const * fork,
                   uchar const * blockhash ) {
  fd_txncache_blockcache_shmem_t const * candidate = blockhash_map_ele_query_const( tc->blockhash_map, fd_type_pun_const( blockhash ), NULL, tc->blockcache_shmem_pool );
  if( FD_UNLIKELY( !candidate ) ) FD_LOG_CRIT(( "transaction refers to blockhash %s which does not exist", FD_BASE58_ENC_32_ALLOCA( blockhash ) ));

  while( candidate ) {
    ulong candidate_idx = blockcache_pool_idx( tc->blockcache_shmem_pool, candidate );
    if( FD_LIKELY( fork->descends[ candidate_idx ] ) ) return &tc->blockcache_pool[ candidate_idx ];
    candidate = blockhash_map_ele_next_const( candidate, NULL, tc->blockcache_shmem_pool );
  }
  return NULL;
}

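/* fd_txncache_insert records txnhash as executed on fork_id against
   the given (ancestor) blockhash. Only the read lock is taken:
   concurrent inserts are coordinated by the atomic page and bucket
   updates above. */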
void
fd_txncache_insert( fd_txncache_t * tc,
                    fd_txncache_fork_id_t fork_id,
                    uchar const * blockhash,
                    uchar const * txnhash ) {
  fd_rwlock_read( tc->shmem->lock );

  blockcache_t const * fork = &tc->blockcache_pool[ fork_id.val ];
  FD_TEST( fork->shmem->frozen<=1 );
  blockcache_t * blockcache = blockhash_on_fork( tc, fork, blockhash );
  FD_TEST( blockcache );

  for(;;) {
    fd_txncache_txnpage_t * txnpage = fd_txncache_ensure_txnpage( tc, blockcache );
    FD_TEST( txnpage );

    int success = fd_txncache_insert_txn( tc, blockcache, txnpage, fork_id, txnhash );
    if( FD_LIKELY( success ) ) break;

    FD_SPIN_PAUSE();
  }

  fd_rwlock_unread( tc->shmem->lock );
}

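/* fd_txncache_query returns 1 if txnhash has already been inserted
   for the given blockhash on fork_id or any fork it descends from,
   and 0 otherwise. */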
int
fd_txncache_query( fd_txncache_t * tc,
                   fd_txncache_fork_id_t fork_id,
                   uchar const * blockhash,
                   uchar const * txnhash ) {
  fd_rwlock_read( tc->shmem->lock );

  blockcache_t const * fork = &tc->blockcache_pool[ fork_id.val ];
  blockcache_t const * blockcache = blockhash_on_fork( tc, fork, blockhash );
  if( FD_UNLIKELY( !blockcache ) ) FD_LOG_CRIT(( "transaction refers to blockhash %s which is not in ancestors", FD_BASE58_ENC_32_ALLOCA( blockhash ) ));

  int found = 0;

  ulong txnhash_offset = blockcache->shmem->txnhash_offset;
  ulong head_hash = FD_LOAD( ulong, txnhash+txnhash_offset ) % tc->shmem->txn_per_slot_max;
  for( uint head=blockcache->heads[ head_hash ]; head!=UINT_MAX; head=tc->txnpages[ head/FD_TXNCACHE_TXNS_PER_PAGE ].txns[ head%FD_TXNCACHE_TXNS_PER_PAGE ]->blockcache_next ) {
    fd_txncache_single_txn_t * txn = tc->txnpages[ head/FD_TXNCACHE_TXNS_PER_PAGE ].txns[ head%FD_TXNCACHE_TXNS_PER_PAGE ];

    int descends = txn->fork_id.val==fork_id.val || fork->descends[ txn->fork_id.val ];
    if( FD_LIKELY( descends && !memcmp( txnhash+txnhash_offset, txn->txnhash, 20UL ) ) ) {
      found = 1;
      break;
    }
  }

  fd_rwlock_unread( tc->shmem->lock );
  return found;
}
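
/* A minimal usage sketch of the API above (illustrative only; the
   names ljoin_mem, shmem, root_blockhash, child_blockhash and txnhash
   are placeholders, and creating/sizing the fd_txncache_shmem_t
   region is out of scope for this file):

     fd_txncache_t * tc = fd_txncache_join( fd_txncache_new( ljoin_mem, shmem ) );

     fd_txncache_fork_id_t no_parent = { .val = USHORT_MAX };
     fd_txncache_fork_id_t root = fd_txncache_attach_child( tc, no_parent );
     fd_txncache_attach_blockhash( tc, root, root_blockhash );

     fd_txncache_fork_id_t child = fd_txncache_attach_child( tc, root );
     fd_txncache_insert( tc, child, root_blockhash, txnhash );
     if( fd_txncache_query( tc, child, root_blockhash, txnhash ) ) { ... already executed ... }

     fd_txncache_finalize_fork( tc, child, 0UL, child_blockhash );
     fd_txncache_advance_root( tc, child );  */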