Line data Source code
1 : #include "fd_txncache.h"
2 : #include "fd_txncache_private.h"
3 : #include "../../util/log/fd_log.h"
4 :
/* blockcache_t is the process-local view of one fork entry in the
   txncache.  The authoritative shared state lives in shmem; the other
   members are local pointers into the shared region, resolved once at
   join time (see fd_txncache_new) so the hot path does not re-derive
   offsets. */

struct blockcache {
  fd_txncache_blockcache_shmem_t * shmem;

  uint * heads;   /* The hash table for the blockhash. Each entry is a pointer to the head of a linked list of
                     transactions that reference this blockhash. As we add transactions to the bucket, the head
                     pointer is updated to the new item, and the new item is pointed to the previous head. */
  ushort * pages; /* A list of the txnpages containing the transactions for this blockcache. */

  descends_set_t * descends; /* Each fork can descend from other forks in the txncache, and this bit vector contains one
                                value for each fork in the txncache. If this fork descends from some other fork F, then
                                the bit at index F in descends[] is set. */
};

typedef struct blockcache blockcache_t;
19 :
/* fd_txncache_private is the process-local join handle for a txncache.
   It pairs the shared-memory header with local pointers to the pools,
   map, and txnpage storage carved out of the shared region. */

struct fd_txncache_private {
  fd_txncache_shmem_t * shmem;

  fd_txncache_blockcache_shmem_t * blockcache_shmem_pool; /* shared per-fork state, indexed by fork id */
  blockcache_t *                   blockcache_pool;       /* local per-fork views, parallel to blockcache_shmem_pool */
  blockhash_map_t *                blockhash_map;         /* blockhash -> blockcache chain map */

  ushort * txnpages_free; /* The index in the txnpages array that is free, for each of the free pages. */

  fd_txncache_txnpage_t * txnpages; /* The actual storage for the transactions. The blockcache points to these
                                       pages when storing transactions. Transaction are grouped into pages of
                                       size 16384 to make certain allocation and deallocation operations faster
                                       (just the pages are acquired/released, rather than each txn). */
};
34 :
35 : FD_FN_CONST ulong
36 0 : fd_txncache_align( void ) {
37 0 : return FD_TXNCACHE_ALIGN;
38 0 : }
39 :
40 : FD_FN_CONST ulong
41 0 : fd_txncache_footprint( ulong max_live_slots ) {
42 0 : ulong max_active_slots = FD_TXNCACHE_MAX_BLOCKHASH_DISTANCE+max_live_slots;
43 :
44 0 : ulong l;
45 0 : l = FD_LAYOUT_INIT;
46 0 : l = FD_LAYOUT_APPEND( l, FD_TXNCACHE_SHMEM_ALIGN, sizeof(fd_txncache_t) );
47 0 : l = FD_LAYOUT_APPEND( l, alignof(blockcache_t), max_active_slots*sizeof(blockcache_t) );
48 0 : return FD_LAYOUT_FINI( l, FD_TXNCACHE_ALIGN );
49 0 : }
50 :
51 : void *
52 : fd_txncache_new( void * ljoin,
53 0 : fd_txncache_shmem_t * shmem ) {
54 0 : if( FD_UNLIKELY( !ljoin ) ) {
55 0 : FD_LOG_WARNING(( "NULL ljoin" ));
56 0 : return NULL;
57 0 : }
58 :
59 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)ljoin, fd_txncache_align() ) ) ) {
60 0 : FD_LOG_WARNING(( "misaligned ljoin" ));
61 0 : return NULL;
62 0 : }
63 :
64 0 : ulong max_active_slots = shmem->active_slots_max;
65 0 : ulong blockhash_map_chains = fd_ulong_pow2_up( 2UL*shmem->active_slots_max );
66 :
67 0 : ushort _max_txnpages = fd_txncache_max_txnpages( max_active_slots, shmem->txn_per_slot_max );
68 0 : ushort _max_txnpages_per_blockhash = fd_txncache_max_txnpages_per_blockhash( max_active_slots, shmem->txn_per_slot_max );
69 :
70 0 : ulong _descends_footprint = descends_set_footprint( max_active_slots );
71 0 : if( FD_UNLIKELY( !_descends_footprint ) ) {
72 0 : FD_LOG_WARNING(( "invalid max_active_slots" ));
73 0 : return NULL;
74 0 : }
75 :
76 0 : FD_SCRATCH_ALLOC_INIT( l, shmem );
77 0 : fd_txncache_shmem_t * tc = FD_SCRATCH_ALLOC_APPEND( l, FD_TXNCACHE_SHMEM_ALIGN, sizeof(fd_txncache_shmem_t) );
78 0 : void * _blockhash_map = FD_SCRATCH_ALLOC_APPEND( l, blockhash_map_align(), blockhash_map_footprint( blockhash_map_chains ) );
79 0 : void * _blockcache_pool = FD_SCRATCH_ALLOC_APPEND( l, blockcache_pool_align(), blockcache_pool_footprint( max_active_slots ) );
80 0 : void * _blockcache_pages = FD_SCRATCH_ALLOC_APPEND( l, alignof(ushort), max_active_slots*_max_txnpages_per_blockhash*sizeof(ushort) );
81 0 : void * _blockcache_heads = FD_SCRATCH_ALLOC_APPEND( l, alignof(uint), max_active_slots*shmem->txn_per_slot_max*sizeof(uint) );
82 0 : void * _blockcache_descends = FD_SCRATCH_ALLOC_APPEND( l, descends_set_align(), max_active_slots*_descends_footprint );
83 0 : void * _txnpages_free = FD_SCRATCH_ALLOC_APPEND( l, alignof(ushort), _max_txnpages*sizeof(ushort) );
84 0 : void * _txnpages = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_txncache_txnpage_t), _max_txnpages*sizeof(fd_txncache_txnpage_t) );
85 :
86 0 : FD_SCRATCH_ALLOC_INIT( l2, ljoin );
87 0 : fd_txncache_t * ltc = FD_SCRATCH_ALLOC_APPEND( l2, FD_TXNCACHE_ALIGN, sizeof(fd_txncache_t) );
88 0 : void * _local_blockcache_pool = FD_SCRATCH_ALLOC_APPEND( l2, alignof(blockcache_t), max_active_slots*sizeof(blockcache_t) );
89 :
90 0 : ltc->shmem = tc;
91 :
92 0 : ltc->blockcache_pool = (blockcache_t*)_local_blockcache_pool;
93 0 : ltc->blockcache_shmem_pool = blockcache_pool_join( _blockcache_pool );
94 :
95 0 : for( ulong i=0UL; i<shmem->active_slots_max; i++ ) {
96 0 : ltc->blockcache_pool[ i ].pages = (ushort *)_blockcache_pages + i*_max_txnpages_per_blockhash;
97 0 : ltc->blockcache_pool[ i ].heads = (uint *)_blockcache_heads + i*shmem->txn_per_slot_max;
98 0 : ltc->blockcache_pool[ i ].descends = descends_set_join( (uchar *)_blockcache_descends + i*_descends_footprint );
99 0 : ltc->blockcache_pool[ i ].shmem = ltc->blockcache_shmem_pool + i;
100 0 : FD_TEST( ltc->blockcache_pool[ i ].shmem );
101 0 : }
102 :
103 0 : FD_TEST( ltc->blockcache_shmem_pool );
104 :
105 0 : ltc->blockhash_map = blockhash_map_join( _blockhash_map );
106 0 : FD_TEST( ltc->blockhash_map );
107 :
108 0 : ltc->txnpages_free = (ushort *)_txnpages_free;
109 0 : ltc->txnpages = (fd_txncache_txnpage_t *)_txnpages;
110 :
111 0 : return (void *)ltc;
112 0 : }
113 :
114 : fd_txncache_t *
115 0 : fd_txncache_join( void * ljoin ) {
116 0 : if( FD_UNLIKELY( !ljoin ) ) {
117 0 : FD_LOG_WARNING(( "NULL ljoin" ));
118 0 : return NULL;
119 0 : }
120 :
121 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)ljoin, fd_txncache_align() ) ) ) {
122 0 : FD_LOG_WARNING(( "misaligned ljoin" ));
123 0 : return NULL;
124 0 : }
125 :
126 0 : fd_txncache_t * tc = (fd_txncache_t *)ljoin;
127 :
128 0 : return tc;
129 0 : }
130 :
131 : void
132 0 : fd_txncache_reset( fd_txncache_t * tc ) {
133 0 : fd_rwlock_write( tc->shmem->lock );
134 :
135 0 : tc->shmem->root_cnt = 0UL;
136 0 : root_slist_remove_all( tc->shmem->root_ll, tc->blockcache_shmem_pool );
137 :
138 0 : tc->shmem->txnpages_free_cnt = tc->shmem->max_txnpages;
139 0 : for( ushort i=0; i<tc->shmem->max_txnpages; i++ ) tc->txnpages_free[ i ] = i;
140 :
141 0 : blockcache_pool_reset( tc->blockcache_shmem_pool );
142 0 : blockhash_map_reset( tc->blockhash_map );
143 :
144 0 : fd_rwlock_unwrite( tc->shmem->lock );
145 0 : }
146 :
/* fd_txncache_ensure_txnpage returns a txnpage for blockcache with at
   least one free transaction slot, acquiring a fresh page from the
   global free stack if needed.  Returns NULL if the blockcache already
   holds txnpages_per_blockhash_max pages, or if the winner of the
   acquisition race finds no free pages.  Safe to call concurrently
   under the read lock: a sentinel CAS on pages[ page_cnt ] elects one
   caller to acquire the page while the others spin until the winner
   publishes the real index. */
static fd_txncache_txnpage_t *
fd_txncache_ensure_txnpage( fd_txncache_t * tc,
                            blockcache_t * blockcache ) {
  ushort page_cnt = blockcache->shmem->pages_cnt;
  if( FD_UNLIKELY( page_cnt>tc->shmem->txnpages_per_blockhash_max ) ) return NULL;

  /* Fast path: the most recently installed page still has free slots. */
  if( FD_LIKELY( page_cnt ) ) {
    ushort txnpage_idx = blockcache->pages[ page_cnt-1 ];
    ushort txnpage_free = tc->txnpages[ txnpage_idx ].free;
    if( FD_LIKELY( txnpage_free ) ) return &tc->txnpages[ txnpage_idx ];
  }

  if( FD_UNLIKELY( page_cnt==tc->shmem->txnpages_per_blockhash_max ) ) return NULL;
  /* USHORT_MAX marks an empty pages[] slot; USHORT_MAX-1 marks an
     acquisition in progress.  Winning this CAS elects us to install the
     next page. */
  if( FD_LIKELY( FD_ATOMIC_CAS( &blockcache->pages[ page_cnt ], (ushort)USHORT_MAX, (ushort)(USHORT_MAX-1UL) )==(ushort)USHORT_MAX ) ) {
    /* Pop a page index off the global free stack by CAS-decrementing
       the free count. */
    ulong txnpages_free_cnt = tc->shmem->txnpages_free_cnt;
    for(;;) {
      /* NOTE(review): returning NULL here leaves pages[ page_cnt ] at
         the in-progress sentinel, so concurrent losers below would spin
         indefinitely — confirm callers serialize/purge before retry. */
      if( FD_UNLIKELY( !txnpages_free_cnt ) ) return NULL;
      ulong old_txnpages_free_cnt = FD_ATOMIC_CAS( &tc->shmem->txnpages_free_cnt, (ushort)txnpages_free_cnt, (ushort)(txnpages_free_cnt-1UL) );
      if( FD_LIKELY( old_txnpages_free_cnt==txnpages_free_cnt ) ) break;
      txnpages_free_cnt = old_txnpages_free_cnt;
      FD_SPIN_PAUSE();
    }

    ushort txnpage_idx = tc->txnpages_free[ txnpages_free_cnt-1UL ];
    fd_txncache_txnpage_t * txnpage = &tc->txnpages[ txnpage_idx ];
    txnpage->free = FD_TXNCACHE_TXNS_PER_PAGE;
    FD_COMPILER_MFENCE();
    /* Publish the real page index, then make the page visible by
       bumping pages_cnt.  The fences order init -> publish -> count. */
    blockcache->pages[ page_cnt ] = txnpage_idx;
    FD_COMPILER_MFENCE();
    blockcache->shmem->pages_cnt = (ushort)(page_cnt+1);
    return txnpage;
  } else {
    /* Lost the election: spin until the winner replaces the sentinel
       with a real page index (any value < USHORT_MAX-1). */
    ushort txnpage_idx = blockcache->pages[ page_cnt ];
    while( FD_UNLIKELY( txnpage_idx>=USHORT_MAX-1UL ) ) {
      txnpage_idx = blockcache->pages[ page_cnt ];
      FD_SPIN_PAUSE();
    }
    return &tc->txnpages[ txnpage_idx ];
  }
}
187 :
/* fd_txncache_insert_txn attempts to record txnhash in txnpage for the
   blockcache/fork pair.  Returns 1 on success, 0 if the page had no
   free slot left (caller obtains a fresh page and retries).  Safe to
   call concurrently under the read lock: a CAS on txnpage->free claims
   a slot, a CAS on the bucket head publishes the entry. */
static int
fd_txncache_insert_txn( fd_txncache_t *         tc,
                        blockcache_t *          blockcache,
                        fd_txncache_txnpage_t * txnpage,
                        fd_txncache_fork_id_t   fork_id,
                        uchar const *           txnhash ) {
  ulong txnpage_idx = (ulong)(txnpage - tc->txnpages);

  for(;;) {
    ushort txnpage_free = txnpage->free;
    if( FD_UNLIKELY( !txnpage_free ) ) return 0;
    /* Claim exactly one slot by decrementing the free count. */
    if( FD_UNLIKELY( FD_ATOMIC_CAS( &txnpage->free, txnpage_free, txnpage_free-1UL )!=txnpage_free ) ) {
      FD_SPIN_PAUSE();
      continue;
    }

    /* Slots are handed out front-to-back: the pre-decrement free count
       determines our slot index within the page. */
    ulong txn_idx = FD_TXNCACHE_TXNS_PER_PAGE-txnpage_free;
    ulong txnhash_offset = blockcache->shmem->txnhash_offset;
    /* Only 20 bytes of the hash, at the per-block offset, are stored
       and later compared (see fd_txncache_query). */
    memcpy( txnpage->txns[ txn_idx ]->txnhash, txnhash+txnhash_offset, 20UL );
    txnpage->txns[ txn_idx ]->fork_id = fork_id;
    /* Generation stamps the entry so queries can reject entries whose
       fork slot has since been recycled. */
    txnpage->txns[ txn_idx ]->generation = tc->blockcache_pool[ fork_id.val ].shmem->generation;
    FD_COMPILER_MFENCE();

    /* Publish: push the entry onto the bucket's lock-free singly linked
       list.  Entry ids encode (page, slot) as page*TXNS_PER_PAGE+slot. */
    ulong txn_bucket = FD_LOAD( ulong, txnhash+txnhash_offset )%tc->shmem->txn_per_slot_max;
    for(;;) {
      uint head = blockcache->heads[ txn_bucket ];
      txnpage->txns[ txn_idx ]->blockcache_next = head;
      FD_COMPILER_MFENCE();
      if( FD_LIKELY( FD_ATOMIC_CAS( &blockcache->heads[ txn_bucket ], head, (uint)(FD_TXNCACHE_TXNS_PER_PAGE*txnpage_idx+txn_idx) )==head ) ) break;
      FD_SPIN_PAUSE();
    }

    return 1;
  }
}
223 :
/* fd_txncache_attach_child acquires a new fork entry as a child of
   parent_fork_id, or as a root of the fork tree when parent_fork_id is
   USHORT_MAX.  Returns the id of the new fork.  Takes the write lock.
   The pool must have a free entry (FD_TEST aborts otherwise). */
fd_txncache_fork_id_t
fd_txncache_attach_child( fd_txncache_t *       tc,
                          fd_txncache_fork_id_t parent_fork_id ) {
  fd_rwlock_write( tc->shmem->lock );

  FD_TEST( blockcache_pool_free( tc->blockcache_shmem_pool ) );
  ulong idx = blockcache_pool_idx_acquire( tc->blockcache_shmem_pool );

  blockcache_t * fork = &tc->blockcache_pool[ idx ];
  fd_txncache_fork_id_t fork_id = { .val = (ushort)idx };

  /* A fresh generation invalidates any stale txn entries that still
     reference this pool index from a previous occupant. */
  fork->shmem->generation = tc->shmem->blockcache_generation++;
  fork->shmem->child_id = (fd_txncache_fork_id_t){ .val = USHORT_MAX };

  if( FD_LIKELY( parent_fork_id.val==USHORT_MAX ) ) {
    /* Root attach: must be the very first entry in an empty pool. */
    FD_TEST( blockcache_pool_free( tc->blockcache_shmem_pool )==blockcache_pool_max( tc->blockcache_shmem_pool )-1UL );
    fork->shmem->parent_id = (fd_txncache_fork_id_t){ .val = USHORT_MAX };
    fork->shmem->sibling_id = (fd_txncache_fork_id_t){ .val = USHORT_MAX };

    descends_set_null( fork->descends );
    root_slist_ele_push_tail( tc->shmem->root_ll, fork->shmem, tc->blockcache_shmem_pool );
  } else {
    blockcache_t * parent = &tc->blockcache_pool[ parent_fork_id.val ];
    FD_TEST( parent );
    /* We might be tempted to freeze the parent here, and it's valid to
       do this ordinarily, but not when loading from a snapshot, when
       we need to load many transactions into a root parent chain at
       once. */
    /* Prepend the new fork to the parent's child list. */
    fork->shmem->sibling_id = parent->shmem->child_id;
    fork->shmem->parent_id = parent_fork_id;
    parent->shmem->child_id = fork_id;

    /* The new fork descends from the parent and everything the parent
       descends from. */
    descends_set_copy( fork->descends, parent->descends );
    descends_set_insert( fork->descends, parent_fork_id.val );
  }

  /* Initialize per-fork state: not frozen, empty txn buckets (UINT_MAX
     heads), and no txnpages (USHORT_MAX page slots). */
  fork->shmem->txnhash_offset = 0UL;
  fork->shmem->frozen = 0;
  memset( fork->heads, 0xFF, tc->shmem->txn_per_slot_max*sizeof(uint) );
  fork->shmem->pages_cnt = 0;
  memset( fork->pages, 0xFF, tc->shmem->txnpages_per_blockhash_max*sizeof(fork->pages[ 0 ]) );

  fd_rwlock_unwrite( tc->shmem->lock );
  return fork_id;
}
269 :
270 : void
271 : fd_txncache_attach_blockhash( fd_txncache_t * tc,
272 : fd_txncache_fork_id_t fork_id,
273 0 : uchar const * blockhash ) {
274 0 : fd_rwlock_write( tc->shmem->lock );
275 :
276 0 : blockcache_t * fork = &tc->blockcache_pool[ fork_id.val ];
277 0 : FD_TEST( !fork->shmem->frozen );
278 0 : fork->shmem->frozen = 1;
279 :
280 0 : memcpy( fork->shmem->blockhash.uc, blockhash, 32UL );
281 :
282 0 : blockhash_map_ele_insert( tc->blockhash_map, fork->shmem, tc->blockcache_shmem_pool );
283 :
284 0 : fd_rwlock_unwrite( tc->shmem->lock );
285 0 : }
286 :
287 : void
288 : fd_txncache_finalize_fork( fd_txncache_t * tc,
289 : fd_txncache_fork_id_t fork_id,
290 : ulong txnhash_offset,
291 0 : uchar const * blockhash ) {
292 0 : fd_rwlock_write( tc->shmem->lock );
293 :
294 0 : blockcache_t * fork = &tc->blockcache_pool[ fork_id.val ];
295 0 : FD_TEST( fork->shmem->frozen<=1 );
296 0 : FD_TEST( fork->shmem->frozen>=0 );
297 0 : fork->shmem->txnhash_offset = txnhash_offset;
298 :
299 0 : memcpy( fork->shmem->blockhash.uc, blockhash, 32UL );
300 :
301 0 : if( FD_LIKELY( !fork->shmem->frozen ) ) blockhash_map_ele_insert( tc->blockhash_map, fork->shmem, tc->blockcache_shmem_pool );
302 0 : fork->shmem->frozen = 2;
303 :
304 0 : fd_rwlock_unwrite( tc->shmem->lock );
305 0 : }
306 :
/* remove_blockcache releases every resource blockcache holds: its
   txnpages return to the global free stack, its pool index is cleared
   from every fork's descends set, its blockhash map entry (present iff
   frozen!=0) is removed, and its pool element is released.  All callers
   in this file hold the write lock. */
static inline void
remove_blockcache( fd_txncache_t * tc,
                   blockcache_t *  blockcache ) {
  /* Append all of this blockcache's page indices to the free stack. */
  memcpy( tc->txnpages_free+tc->shmem->txnpages_free_cnt, blockcache->pages, blockcache->shmem->pages_cnt*sizeof(tc->txnpages_free[ 0 ]) );
  tc->shmem->txnpages_free_cnt = (ushort)(tc->shmem->txnpages_free_cnt+blockcache->shmem->pages_cnt);

  /* No surviving fork may claim descent from the removed index. */
  ulong idx = blockcache_pool_idx( tc->blockcache_shmem_pool, blockcache->shmem );
  for( ulong i=0UL; i<tc->shmem->active_slots_max; i++ ) descends_set_remove( tc->blockcache_pool[ i ].descends, idx );

  if( FD_LIKELY( blockcache->shmem->frozen ) ) blockhash_map_ele_remove_fast( tc->blockhash_map, blockcache->shmem, tc->blockcache_shmem_pool );
  /* frozen==-1 marks the slot dead so stale txn entries referencing it
     fail the liveness check in fd_txncache_query. */
  blockcache->shmem->frozen = -1;
  blockcache_pool_ele_release( tc->blockcache_shmem_pool, blockcache->shmem );
}
320 :
/* remove_children recursively removes every descendant of fork from the
   txncache, sparing the subtree rooted at except (the fork surviving as
   the new root).  Callers hold the write lock. */
static inline void
remove_children( fd_txncache_t *      tc,
                 blockcache_t const * fork,
                 blockcache_t const * except ) {
  fd_txncache_fork_id_t sibling_idx = fork->shmem->child_id;
  while( sibling_idx.val!=USHORT_MAX ) {
    blockcache_t * sibling = &tc->blockcache_pool[ sibling_idx.val ];
    FD_TEST( sibling );

    /* Read the next link before removal: remove_blockcache releases
       sibling's pool entry, invalidating its fields. */
    sibling_idx = sibling->shmem->sibling_id;
    if( FD_UNLIKELY( sibling==except ) ) continue;

    /* Depth-first: remove grandchildren before the child itself. */
    remove_children( tc, sibling, except );
    remove_blockcache( tc, sibling );
  }
}
337 :
/* fd_txncache_advance_root makes fork_id the newest root of the
   txncache.  Its parent must be the current newest root (aborts
   otherwise).  Competing sibling subtrees are pruned, and once more
   than FD_TXNCACHE_MAX_BLOCKHASH_DISTANCE roots exist the oldest root
   is retired since its blockhash can no longer be referenced.  Takes
   the write lock. */
void
fd_txncache_advance_root( fd_txncache_t *       tc,
                          fd_txncache_fork_id_t fork_id ) {
  fd_rwlock_write( tc->shmem->lock );

  blockcache_t * fork = &tc->blockcache_pool[ fork_id.val ];
  FD_TEST( fork );

  /* The new root's parent must be the tail of the rooted chain. */
  blockcache_t * parent_fork = &tc->blockcache_pool[ fork->shmem->parent_id.val ];
  if( FD_UNLIKELY( root_slist_ele_peek_tail( tc->shmem->root_ll, tc->blockcache_shmem_pool )!=parent_fork->shmem ) ) {
    FD_LOG_CRIT(( "advancing root from %s to %s but that is not valid, last root is %s",
                  FD_BASE58_ENC_32_ALLOCA( parent_fork->shmem->blockhash.uc ),
                  FD_BASE58_ENC_32_ALLOCA( fork->shmem->blockhash.uc ),
                  FD_BASE58_ENC_32_ALLOCA( root_slist_ele_peek_tail( tc->shmem->root_ll, tc->blockcache_shmem_pool )->blockhash.uc ) ));
  }

  FD_LOG_DEBUG(( "advancing root from %s to %s",
                 FD_BASE58_ENC_32_ALLOCA( parent_fork->shmem->blockhash.uc ),
                 FD_BASE58_ENC_32_ALLOCA( fork->shmem->blockhash.uc ) ));

  /* When a fork is rooted, any competing forks can be immediately
     removed as they will not be needed again. This includes child
     forks of the pruned siblings as well. */
  remove_children( tc, parent_fork, fork );

  /* Now, the earliest known rooted fork can likely be removed since its
     blockhashes cannot be referenced anymore (they are older than 151
     blockhashes away). */
  tc->shmem->root_cnt++;
  root_slist_ele_push_tail( tc->shmem->root_ll, fork->shmem, tc->blockcache_shmem_pool );
  if( FD_LIKELY( tc->shmem->root_cnt>FD_TXNCACHE_MAX_BLOCKHASH_DISTANCE ) ) {
    fd_txncache_blockcache_shmem_t * old_root_shmem = root_slist_ele_pop_head( tc->shmem->root_ll, tc->blockcache_shmem_pool );
    FD_TEST( old_root_shmem );
    blockcache_t * old_root = &tc->blockcache_pool[ blockcache_pool_idx( tc->blockcache_shmem_pool, old_root_shmem ) ];

    /* The new oldest root has no parent anymore. */
    root_slist_ele_peek_head( tc->shmem->root_ll, tc->blockcache_shmem_pool )->parent_id.val = USHORT_MAX;

    remove_blockcache( tc, old_root );
    tc->shmem->root_cnt--;
  }

  fd_rwlock_unwrite( tc->shmem->lock );
}
381 :
382 : static inline blockcache_t *
383 : blockhash_on_fork( fd_txncache_t * tc,
384 : blockcache_t const * fork,
385 0 : uchar const * blockhash ) {
386 0 : fd_txncache_blockcache_shmem_t const * candidate = blockhash_map_ele_query_const( tc->blockhash_map, fd_type_pun_const( blockhash ), NULL, tc->blockcache_shmem_pool );
387 0 : if( FD_UNLIKELY( !candidate ) ) return NULL;
388 :
389 0 : while( candidate ) {
390 0 : ulong candidate_idx = blockcache_pool_idx( tc->blockcache_shmem_pool, candidate );
391 0 : if( FD_LIKELY( descends_set_test( fork->descends, candidate_idx ) ) ) return &tc->blockcache_pool[ candidate_idx ];
392 0 : candidate = blockhash_map_ele_next_const( candidate, NULL, tc->blockcache_shmem_pool );
393 0 : }
394 0 : return NULL;
395 0 : }
396 :
397 : static void
398 0 : fd_txncache_purge_stale( fd_txncache_t * tc ) {
399 0 : (void)tc;
400 0 : FD_LOG_ERR(( "txncache full, purging stale transactions" ));
401 : // TODO: Implement eviction of any txn with generation!=fork->generation
402 0 : }
403 :
/* fd_txncache_insert records txnhash against blockhash on the fork
   identified by fork_id.  The blockhash must exist on the fork's
   ancestry (aborts otherwise).  NOTE(review): insertion takes the READ
   lock — inserts appear designed to run concurrently with each other,
   with fd_txncache_insert_txn/fd_txncache_ensure_txnpage using atomics,
   while the write lock serializes structural changes — confirm. */
void
fd_txncache_insert( fd_txncache_t *       tc,
                    fd_txncache_fork_id_t fork_id,
                    uchar const *         blockhash,
                    uchar const *         txnhash ) {
  fd_rwlock_read( tc->shmem->lock );

  /* The target fork must be live and not finalized (frozen 0 or 1). */
  blockcache_t const * fork = &tc->blockcache_pool[ fork_id.val ];
  FD_TEST( fork->shmem->frozen<=1 );
  FD_TEST( fork->shmem->frozen>=0 );
  blockcache_t * blockcache = blockhash_on_fork( tc, fork, blockhash );

  /* TODO: We can't print the full txnhash here typically because we
     might only be able to see 20 bytes, but we need to print it for
     diagnostic purposes. Remove once bug is identified. */
  if( FD_UNLIKELY( !blockcache ) ) FD_LOG_CRIT(( "transaction %s refers to blockhash %s which does not exist on fork", FD_BASE58_ENC_32_ALLOCA( txnhash ), FD_BASE58_ENC_32_ALLOCA( blockhash ) ));

  /* Keep trying until a page with a free slot accepts the txn. */
  for(;;) {
    fd_txncache_txnpage_t * txnpage = fd_txncache_ensure_txnpage( tc, blockcache );
    if( FD_UNLIKELY( !txnpage ) ) {
      /* Because of sizing invariants when creating the structure, it is
         not typically possible to fill it, unless there are stale
         transactions from minority forks that were purged floating
         around, in which case we can purge them here and try again. */
      fd_txncache_purge_stale( tc );
      continue;
    }

    int success = fd_txncache_insert_txn( tc, blockcache, txnpage, fork_id, txnhash );
    if( FD_LIKELY( success ) ) break;

    FD_SPIN_PAUSE();
  }

  fd_rwlock_unread( tc->shmem->lock );
}
440 :
441 : int
442 : fd_txncache_query( fd_txncache_t * tc,
443 : fd_txncache_fork_id_t fork_id,
444 : uchar const * blockhash,
445 0 : uchar const * txnhash ) {
446 0 : fd_rwlock_read( tc->shmem->lock );
447 :
448 0 : blockcache_t const * fork = &tc->blockcache_pool[ fork_id.val ];
449 0 : blockcache_t const * blockcache = blockhash_on_fork( tc, fork, blockhash );
450 0 : FD_TEST( fork->shmem->frozen>=0 );
451 0 : FD_TEST( blockcache->shmem->frozen==2 );
452 :
453 : /* TODO: We can't print the full txnhash here typically because we
454 : might only be able to see 20 bytes, but we need to print it for
455 : diagnostic purposes. Remove once bug is identified. */
456 0 : if( FD_UNLIKELY( !blockcache ) ) FD_LOG_CRIT(( "transaction %s refers to blockhash %s which does not exist on fork", FD_BASE58_ENC_32_ALLOCA( txnhash ), FD_BASE58_ENC_32_ALLOCA( blockhash ) ));
457 :
458 0 : int found = 0;
459 :
460 0 : ulong txnhash_offset = blockcache->shmem->txnhash_offset;
461 0 : ulong head_hash = FD_LOAD( ulong, txnhash+txnhash_offset ) % tc->shmem->txn_per_slot_max;
462 0 : for( uint head=blockcache->heads[ head_hash ]; head!=UINT_MAX; head=tc->txnpages[ head/FD_TXNCACHE_TXNS_PER_PAGE ].txns[ head%FD_TXNCACHE_TXNS_PER_PAGE ]->blockcache_next ) {
463 0 : fd_txncache_single_txn_t * txn = tc->txnpages[ head/FD_TXNCACHE_TXNS_PER_PAGE ].txns[ head%FD_TXNCACHE_TXNS_PER_PAGE ];
464 :
465 0 : blockcache_t const * txn_fork = &tc->blockcache_pool[ txn->fork_id.val ];
466 0 : int descends = (txn->fork_id.val==fork_id.val || descends_set_test( fork->descends, txn->fork_id.val )) && txn_fork->shmem->frozen>=0 && txn_fork->shmem->generation==txn->generation;
467 0 : if( FD_LIKELY( descends && !memcmp( txnhash+txnhash_offset, txn->txnhash, 20UL ) ) ) {
468 0 : found = 1;
469 0 : break;
470 0 : }
471 0 : }
472 :
473 0 : fd_rwlock_unread( tc->shmem->lock );
474 0 : return found;
475 0 : }
|