#include "fd_blockstore.h"

/* Check if we're seeing a different payload for the same shred key,
   which indicates equivocation. */

static int
is_eqvoc_shred( fd_shred_t * old, fd_shred_t const * new ) {
  if( FD_UNLIKELY( fd_shred_type( old->variant ) != fd_shred_type( new->variant ) ) ) {
    FD_LOG_WARNING(( "[%s] shred %lu %u types not equal", __func__, old->slot, old->idx ));
    return 1;
  }

  if( FD_UNLIKELY( fd_shred_payload_sz( old ) != fd_shred_payload_sz( new ) ) ) {
    FD_LOG_WARNING(( "[%s] shred %lu %u payload_sz not eq", __func__, old->slot, old->idx ));
    return 1;
  }

  /* Resigned shreds carry a retransmitter signature at the tail of the
     payload, which can legitimately differ, so exclude it from the
     comparison. */

  ulong memcmp_sz = fd_ulong_if( fd_shred_payload_sz( old ) > FD_SHRED_SIGNATURE_SZ &&
                                 fd_shred_is_resigned( fd_shred_type( old->variant ) ),
                                 fd_shred_payload_sz( old ) - FD_SHRED_SIGNATURE_SZ,
                                 fd_shred_payload_sz( old ) );
  if( FD_UNLIKELY( 0 != memcmp( fd_shred_data_payload( old ), fd_shred_data_payload( new ), memcmp_sz ) ) ) {
    FD_LOG_WARNING(( "[%s] shred %lu %u payload not eq", __func__, old->slot, old->idx ));
    return 1;
  }

  return 0;
}

ulong
fd_blockstore_align( void ) {
  return alignof( fd_blockstore_t );
}

ulong
fd_blockstore_footprint( void ) {
  return sizeof( fd_blockstore_t );
}

void *
fd_blockstore_new( void * shmem,
                   ulong  wksp_tag,
                   ulong  seed,
                   ulong  shred_max,
                   ulong  slot_max,
                   ulong  lg_txn_max ) {
  fd_blockstore_t * blockstore = (fd_blockstore_t *)shmem;

  if( FD_UNLIKELY( !blockstore ) ) {
    FD_LOG_WARNING( ( "NULL blockstore" ) );
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)blockstore, fd_blockstore_align() ) ) ) {
    FD_LOG_WARNING( ( "misaligned blockstore" ) );
    return NULL;
  }

  if( FD_UNLIKELY( !wksp_tag ) ) {
    FD_LOG_WARNING( ( "bad wksp_tag" ) );
    return NULL;
  }

  fd_wksp_t * wksp = fd_wksp_containing( blockstore );
  if( FD_UNLIKELY( !wksp ) ) {
    FD_LOG_WARNING( ( "shmem must be part of a workspace" ) );
    return NULL;
  }

  void * shred_pool_shmem = fd_wksp_alloc_laddr( wksp,
                                                 fd_buf_shred_pool_align(),
                                                 fd_buf_shred_pool_footprint( shred_max ),
                                                 wksp_tag );
  if( FD_UNLIKELY( !shred_pool_shmem ) ) {
    FD_LOG_WARNING( ( "shred_max too large for workspace" ) );
    return NULL;
  }

  void * shred_shpool = fd_buf_shred_pool_new( shred_pool_shmem, shred_max );
  if( FD_UNLIKELY( !shred_shpool ) ) {
    FD_LOG_WARNING( ( "fd_buf_shred_pool_new failed" ) );
    fd_wksp_free_laddr( shred_pool_shmem );
    return NULL;
  }

  fd_buf_shred_t * shred_pool = fd_buf_shred_pool_join( shred_shpool );
  if( FD_UNLIKELY( !shred_pool ) ) {
    FD_LOG_WARNING( ( "fd_buf_shred_pool_join failed" ) );
    goto buf_shred_pool_delete;
  }

  void * shred_map_shmem = fd_wksp_alloc_laddr( wksp,
                                                fd_buf_shred_map_align(),
                                                fd_buf_shred_map_footprint( shred_max ),
                                                wksp_tag );
  if( FD_UNLIKELY( !shred_map_shmem ) ) {
    FD_LOG_WARNING( ( "shred_max too large for workspace" ) );
    goto buf_shred_pool_delete;
  }

  void * shred_shmap = fd_buf_shred_map_new( shred_map_shmem, shred_max, seed );
  if( FD_UNLIKELY( !shred_shmap ) ) {
    FD_LOG_WARNING( ( "fd_buf_shred_map_new failed" ) );
    fd_wksp_free_laddr( shred_map_shmem );
    goto buf_shred_pool_delete;
  }

  fd_buf_shred_map_t * shred_map = fd_buf_shred_map_join( shred_shmap );
  if( FD_UNLIKELY( !shred_map ) ) {
    FD_LOG_WARNING( ( "fd_buf_shred_map_join failed" ) );
    goto buf_shred_map_delete;
  }

  void * block_map_shmem = fd_wksp_alloc_laddr( wksp,
                                                fd_block_map_align(),
                                                fd_block_map_footprint( slot_max ),
                                                wksp_tag );
  if( FD_UNLIKELY( !block_map_shmem ) ) {
    FD_LOG_WARNING( ( "slot_max too large for workspace" ) );
    goto buf_shred_map_delete;
  }

  void * block_map_shmap = fd_block_map_new( block_map_shmem, slot_max, 0 );
  if( FD_UNLIKELY( !block_map_shmap ) ) {
    FD_LOG_WARNING( ( "fd_block_map_new failed" ) );
    fd_wksp_free_laddr( block_map_shmem );
    goto buf_shred_map_delete;
  }

  fd_block_map_t * block_map = fd_block_map_join( block_map_shmap );
  if( FD_UNLIKELY( !block_map ) ) {
    FD_LOG_WARNING( ( "fd_block_map_join failed" ) );
    goto slot_map_delete;
  }

  void * slot_deque_shmem = fd_wksp_alloc_laddr( wksp,
                                                 fd_blockstore_slot_deque_align(),
                                                 fd_blockstore_slot_deque_footprint( slot_max ),
                                                 wksp_tag );
  if( FD_UNLIKELY( !slot_deque_shmem ) ) {
    FD_LOG_WARNING( ( "slot_max too large for workspace" ) );
    goto slot_map_delete;
  }

  void * slot_prune_shdeque = fd_blockstore_slot_deque_new( slot_deque_shmem, slot_max );
  if( FD_UNLIKELY( !slot_prune_shdeque ) ) {
    FD_LOG_WARNING( ( "fd_blockstore_slot_deque_new failed" ) );
    fd_wksp_free_laddr( slot_deque_shmem );
    goto slot_map_delete;
  }

  ulong * slot_deque = fd_blockstore_slot_deque_join( slot_prune_shdeque );
  if( FD_UNLIKELY( !slot_deque ) ) {
    FD_LOG_WARNING( ( "fd_blockstore_slot_deque_join failed" ) );
    goto slot_deque_delete;
  }

  void * txn_shmem = fd_wksp_alloc_laddr( wksp,
                                          fd_blockstore_txn_map_align(),
                                          fd_blockstore_txn_map_footprint( 1LU << lg_txn_max ),
                                          wksp_tag );
  if( FD_UNLIKELY( !txn_shmem ) ) {
    FD_LOG_WARNING( ( "lg_txn_max too large for workspace" ) );
    goto slot_deque_delete;
  }

  void * txn_shmap = fd_blockstore_txn_map_new( txn_shmem, 1LU << lg_txn_max, 0 );
  if( FD_UNLIKELY( !txn_shmap ) ) {
    FD_LOG_WARNING( ( "fd_blockstore_txn_map_new failed" ) );
    fd_wksp_free_laddr( txn_shmem );
    goto slot_deque_delete;
  }

  fd_blockstore_txn_map_t * txn_map = fd_blockstore_txn_map_join( txn_shmap );
  if( FD_UNLIKELY( !txn_map ) ) {
    FD_LOG_WARNING( ( "fd_blockstore_txn_map_join failed" ) );
    goto txn_map_delete;
  }

  void * alloc_shmem = fd_wksp_alloc_laddr( wksp,
                                            fd_alloc_align(),
                                            fd_alloc_footprint(),
                                            FD_BLOCKSTORE_MAGIC );
  if( FD_UNLIKELY( !alloc_shmem ) ) {
    FD_LOG_WARNING( ( "fd_alloc too large for workspace" ) );
    goto txn_map_delete;
  }

  void * alloc_shalloc = fd_alloc_new( alloc_shmem, FD_BLOCKSTORE_MAGIC );
  if( FD_UNLIKELY( !alloc_shalloc ) ) {
    FD_LOG_WARNING( ( "fd_alloc_new failed" ) );
    fd_wksp_free_laddr( alloc_shmem );
    goto txn_map_delete;
  }

  fd_alloc_t * alloc = fd_alloc_join( alloc_shalloc, 0UL ); /* TODO: pass through cgroup hint */
  if( FD_UNLIKELY( !alloc ) ) {
    FD_LOG_WARNING( ( "fd_alloc_join failed" ) );
    fd_wksp_free_laddr( fd_alloc_delete( alloc_shalloc ) );
    goto txn_map_delete;
  }

  fd_memset( blockstore, 0, fd_blockstore_footprint() );

  FD_COMPILER_MFENCE();
  FD_VOLATILE( blockstore->magic ) = FD_BLOCKSTORE_MAGIC;
  FD_COMPILER_MFENCE();
  blockstore->blockstore_gaddr = fd_wksp_gaddr_fast( wksp, blockstore );
  blockstore->wksp_tag         = wksp_tag;
  blockstore->seed             = seed;

  FD_COMPILER_MFENCE();
  fd_rwseq_new( &blockstore->lock );
  FD_COMPILER_MFENCE();

  blockstore->smr = 0;
  blockstore->min = 0;
  blockstore->max = 0;

  blockstore->shred_max        = shred_max;
  blockstore->shred_pool_gaddr = fd_wksp_gaddr_fast( wksp, shred_pool );
  blockstore->shred_map_gaddr  = fd_wksp_gaddr_fast( wksp, shred_map );

  blockstore->slot_max         = slot_max;
  blockstore->slot_map_gaddr   = fd_wksp_gaddr_fast( wksp, block_map );
  blockstore->slot_deque_gaddr = fd_wksp_gaddr_fast( wksp, slot_deque );

  blockstore->lg_txn_max    = lg_txn_max;
  blockstore->txn_map_gaddr = fd_wksp_gaddr_fast( wksp, txn_map );

  blockstore->alloc_gaddr = fd_wksp_gaddr_fast( wksp, alloc );

  return (void *)blockstore;

  /* Unwind partially built structures on failure. */

txn_map_delete:
  fd_wksp_free_laddr( fd_blockstore_txn_map_delete( txn_map ) );
slot_deque_delete:
  fd_wksp_free_laddr( fd_blockstore_slot_deque_delete( slot_deque ) );
slot_map_delete:
  fd_wksp_free_laddr( fd_block_map_delete( block_map ) );
buf_shred_map_delete:
  fd_wksp_free_laddr( fd_buf_shred_map_delete( shred_map ) );
buf_shred_pool_delete:
  fd_wksp_free_laddr( fd_buf_shred_pool_delete( shred_pool ) );
  return NULL;
}

fd_blockstore_t *
fd_blockstore_join( void * shblockstore ) {
  fd_blockstore_t * blockstore = (fd_blockstore_t *)shblockstore;

  if( FD_UNLIKELY( !blockstore ) ) {
    FD_LOG_WARNING( ( "NULL shblockstore" ) );
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)blockstore, fd_blockstore_align() ) ) ) {
    FD_LOG_WARNING( ( "misaligned shblockstore" ) );
    return NULL;
  }

  fd_wksp_t * wksp = fd_wksp_containing( blockstore );
  if( FD_UNLIKELY( !wksp ) ) {
    FD_LOG_WARNING( ( "shblockstore must be part of a workspace" ) );
    return NULL;
  }

  if( FD_UNLIKELY( blockstore->magic != FD_BLOCKSTORE_MAGIC ) ) {
    FD_LOG_WARNING( ( "bad magic" ) );
    return NULL;
  }

  return blockstore;
}

void *
fd_blockstore_leave( fd_blockstore_t * blockstore ) {

  if( FD_UNLIKELY( !blockstore ) ) {
    FD_LOG_WARNING( ( "NULL blockstore" ) );
    return NULL;
  }

  return (void *)blockstore;
}

void *
fd_blockstore_delete( void * shblockstore ) {
  fd_blockstore_t * blockstore = (fd_blockstore_t *)shblockstore;

  if( FD_UNLIKELY( !blockstore ) ) {
    FD_LOG_WARNING( ( "NULL shblockstore" ) );
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)blockstore, fd_blockstore_align() ) ) ) {
    FD_LOG_WARNING( ( "misaligned shblockstore" ) );
    return NULL;
  }

  fd_wksp_t * wksp = fd_wksp_containing( blockstore );
  if( FD_UNLIKELY( !wksp ) ) {
    FD_LOG_WARNING( ( "shblockstore must be part of a workspace" ) );
    return NULL;
  }

  if( FD_UNLIKELY( blockstore->magic != FD_BLOCKSTORE_MAGIC ) ) {
    FD_LOG_WARNING( ( "bad magic" ) );
    return NULL;
  }

  /* Free all blocks, walking the slot tree breadth-first from the SMR. */

  ulong * q = fd_wksp_laddr_fast( wksp, blockstore->slot_deque_gaddr );
  fd_blockstore_slot_deque_remove_all( q );
  fd_blockstore_slot_deque_push_tail( q, blockstore->smr );
  while( !fd_blockstore_slot_deque_empty( q ) ) {
    ulong curr = fd_blockstore_slot_deque_pop_head( q );

    ulong * child_slots    = NULL;
    ulong   child_slot_cnt = 0;
    int rc = fd_blockstore_child_slots_query( blockstore, curr, &child_slots, &child_slot_cnt );
    if( FD_UNLIKELY( rc != FD_BLOCKSTORE_OK ) ) {
      FD_LOG_ERR( ( "[fd_blockstore_delete] failed to query children in slot %lu", curr ) );
    }

    for( ulong i = 0; i < child_slot_cnt; i++ ) {
      if( FD_LIKELY( child_slots[i] != FD_SLOT_NULL ) ) {
        fd_blockstore_slot_deque_push_tail( q, child_slots[i] );
      }
    }

    fd_blockstore_slot_remove( blockstore, curr );
  }

  /* Free all structures. */

  fd_wksp_free_laddr( fd_alloc_delete( fd_wksp_laddr_fast( wksp, blockstore->alloc_gaddr ) ) );
  fd_wksp_free_laddr( fd_blockstore_txn_map_delete( fd_wksp_laddr_fast( wksp, blockstore->txn_map_gaddr ) ) );
  fd_wksp_free_laddr( fd_block_map_delete( fd_wksp_laddr_fast( wksp, blockstore->slot_map_gaddr ) ) );
  fd_wksp_free_laddr( fd_blockstore_slot_deque_delete( fd_wksp_laddr_fast( wksp, blockstore->slot_deque_gaddr ) ) );
  fd_wksp_free_laddr( fd_buf_shred_map_delete( fd_wksp_laddr_fast( wksp, blockstore->shred_map_gaddr ) ) );
  fd_wksp_free_laddr( fd_buf_shred_pool_delete( fd_wksp_laddr_fast( wksp, blockstore->shred_pool_gaddr ) ) );

  FD_COMPILER_MFENCE();
  FD_VOLATILE( blockstore->magic ) = 0UL;
  FD_COMPILER_MFENCE();

  return blockstore;
}
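
/* Usage sketch (illustrative, not from the original source): the
   typical single-threaded lifecycle of a blockstore in a workspace.
   The workspace handle `wksp` and the tag/seed/sizing values below
   are hypothetical placeholders.

     void * shmem = fd_wksp_alloc_laddr( wksp,
                                         fd_blockstore_align(),
                                         fd_blockstore_footprint(),
                                         1UL );
     void * shblockstore = fd_blockstore_new( shmem, 1UL, 42UL,
                                              1UL<<16,    // shred_max
                                              1UL<<12,    // slot_max
                                              20UL );     // lg_txn_max
     fd_blockstore_t * blockstore = fd_blockstore_join( shblockstore );

     ... insert shreds, query blocks ...

     fd_wksp_free_laddr( fd_blockstore_delete( fd_blockstore_leave( blockstore ) ) );
*/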

/* txn map helpers */

int
fd_blockstore_txn_key_equal( fd_blockstore_txn_key_t const * k0, fd_blockstore_txn_key_t const * k1 ) {
  for( ulong i = 0; i < FD_ED25519_SIG_SZ / sizeof( ulong ); ++i )
    if( k0->v[i] != k1->v[i] ) return 0;
  return 1;
}

ulong
fd_blockstore_txn_key_hash( fd_blockstore_txn_key_t const * k, ulong seed ) {
  ulong h = seed;
  for( ulong i = 0; i < FD_ED25519_SIG_SZ / sizeof( ulong ); ++i )
    h ^= k->v[i];
  return h;
}

static void
fd_blockstore_scan_block( fd_blockstore_t * blockstore, ulong slot, fd_block_t * block ) {

#define MAX_MICROS ( 16 << 10 )
  fd_block_micro_t micros[MAX_MICROS];
  ulong            micros_cnt = 0;
#define MAX_TXNS ( 1 << 17 )
  fd_block_txn_ref_t txns[MAX_TXNS];
  ulong              txns_cnt = 0;

  uchar * data = fd_wksp_laddr_fast( fd_blockstore_wksp( blockstore ), block->data_gaddr );
  ulong   sz   = block->data_sz;
  FD_LOG_DEBUG( ( "scanning slot %lu, ptr %p, sz %lu", slot, (void *)data, sz ) );

  ulong blockoff = 0;
  while( blockoff < sz ) {
    if( blockoff + sizeof( ulong ) > sz ) FD_LOG_ERR( ( "premature end of block" ) );
    ulong mcount = FD_LOAD( ulong, (const uchar *)data + blockoff );
    blockoff += sizeof( ulong );

    /* Loop across microblocks */
    for( ulong mblk = 0; mblk < mcount; ++mblk ) {
      if( blockoff + sizeof( fd_microblock_hdr_t ) > sz )
        FD_LOG_ERR( ( "premature end of block" ) );
      if( micros_cnt < MAX_MICROS ) {
        fd_block_micro_t * m = micros + ( micros_cnt++ );
        m->off               = blockoff;
      }
      fd_microblock_hdr_t * hdr = (fd_microblock_hdr_t *)( (const uchar *)data + blockoff );
      blockoff += sizeof( fd_microblock_hdr_t );

      /* Loop across transactions */
      for( ulong txn_idx = 0; txn_idx < hdr->txn_cnt; txn_idx++ ) {
        uchar         txn_out[FD_TXN_MAX_SZ];
        uchar const * raw    = (uchar const *)data + blockoff;
        ulong         pay_sz = 0;
        ulong         txn_sz = fd_txn_parse_core( (uchar const *)raw,
                                                  fd_ulong_min( sz - blockoff, FD_TXN_MTU ),
                                                  txn_out,
                                                  NULL,
                                                  &pay_sz );
        if( txn_sz == 0 || txn_sz > FD_TXN_MTU ) {
          FD_LOG_ERR( ( "failed to parse transaction %lu in microblock %lu in slot %lu. txn size: %lu",
                        txn_idx,
                        mblk,
                        slot,
                        txn_sz ) );
        }
        fd_txn_t const * txn = (fd_txn_t const *)txn_out;

        if( pay_sz == 0UL )
          FD_LOG_ERR( ( "failed to parse transaction %lu in microblock %lu in slot %lu",
                        txn_idx,
                        mblk,
                        slot ) );

        fd_blockstore_txn_key_t const * sigs =
            (fd_blockstore_txn_key_t const *)( (ulong)raw + (ulong)txn->signature_off );
        fd_blockstore_txn_map_t * txn_map = fd_blockstore_txn_map( blockstore );
        for( ulong j = 0; j < txn->signature_cnt; j++ ) {
          if( FD_UNLIKELY( fd_blockstore_txn_map_key_cnt( txn_map ) ==
                           fd_blockstore_txn_map_key_max( txn_map ) ) ) {
            break;
          }
          fd_blockstore_txn_key_t sig;
          fd_memcpy( &sig, sigs + j, sizeof( sig ) );
          fd_blockstore_txn_map_t * elem = fd_blockstore_txn_map_insert( txn_map, &sig );
          if( elem == NULL ) { break; }
          elem->slot       = slot;
          elem->offset     = blockoff;
          elem->sz         = pay_sz;
          elem->meta_gaddr = 0;
          elem->meta_sz    = 0;

          if( txns_cnt < MAX_TXNS ) {
            fd_block_txn_ref_t * ref = &txns[txns_cnt++];
            ref->txn_off             = blockoff;
            ref->id_off              = (ulong)( sigs + j ) - (ulong)data;
            ref->sz                  = pay_sz;
          }
        }

        blockoff += pay_sz;
      }
    }
  }

  fd_block_micro_t * micros_laddr =
      fd_alloc_malloc( fd_blockstore_alloc( blockstore ),
                       alignof( fd_block_micro_t ),
                       sizeof( fd_block_micro_t ) * micros_cnt );
  fd_memcpy( micros_laddr, micros, sizeof( fd_block_micro_t ) * micros_cnt );
  block->micros_gaddr = fd_wksp_gaddr_fast( fd_blockstore_wksp( blockstore ), micros_laddr );
  block->micros_cnt   = micros_cnt;

  fd_block_txn_ref_t * txns_laddr =
      fd_alloc_malloc( fd_blockstore_alloc( blockstore ),
                       alignof( fd_block_txn_ref_t ),
                       sizeof( fd_block_txn_ref_t ) * txns_cnt );
  fd_memcpy( txns_laddr, txns, sizeof( fd_block_txn_ref_t ) * txns_cnt );
  block->txns_gaddr = fd_wksp_gaddr_fast( fd_blockstore_wksp( blockstore ), txns_laddr );
  block->txns_cnt   = txns_cnt;
}

/* Remove a slot from blockstore */
void
fd_blockstore_slot_remove( fd_blockstore_t * blockstore, ulong slot ) {
  FD_LOG_DEBUG(( "[%s] slot %lu", __func__, slot ));

  fd_block_map_t * block_map       = fd_blockstore_block_map( blockstore );
  fd_block_map_t * block_map_entry = fd_block_map_query( block_map, &slot, NULL );
  if( FD_UNLIKELY( !block_map_entry ) ) return;

  /* It is not safe to remove a replaying block. */

  if( FD_UNLIKELY( fd_uchar_extract_bit( block_map_entry->flags, FD_BLOCK_FLAG_REPLAYING ) ) ) {
    FD_LOG_WARNING(( "[%s] slot %lu has replay in progress. not removing.", __func__, slot ));
    return;
  }

  fd_block_map_remove( block_map, &slot );

  /* Update min */
  while( blockstore->min <= blockstore->max &&
         fd_blockstore_block_map_query( blockstore, blockstore->min ) == NULL ) {
    blockstore->min++;
  }

  /* Unlink slot from its parent only if it is not published. */

  fd_block_map_t * parent_block_map_entry =
      fd_blockstore_block_map_query( blockstore, block_map_entry->parent_slot );
  if( FD_LIKELY( parent_block_map_entry ) ) {
    for( ulong i = 0; i < parent_block_map_entry->child_slot_cnt; i++ ) {
      if( FD_LIKELY( parent_block_map_entry->child_slots[i] == slot ) ) {
        parent_block_map_entry->child_slots[i] =
            parent_block_map_entry->child_slots[--parent_block_map_entry->child_slot_cnt];
      }
    }
  }

  /* block_gaddr 0 indicates it hasn't received all shreds yet.

     TODO refactor to use FD_BLOCK_FLAG_COMPLETED. */

  if( FD_LIKELY( block_map_entry->block_gaddr == 0 ) ) {

    /* Remove buf_shreds if there's no block yet (we haven't received all shreds). */

    fd_buf_shred_map_t * map  = fd_blockstore_buf_shred_map( blockstore );
    fd_buf_shred_t *     pool = fd_blockstore_buf_shred_pool( blockstore );
    for( uint idx = 0; idx < block_map_entry->received_idx; idx++ ) {
      fd_shred_key_t key = { .slot = slot, .idx = idx };
      fd_buf_shred_t * buf_shred = fd_buf_shred_map_ele_remove( map, &key, NULL, pool );
      if( FD_LIKELY( buf_shred ) ) {
        fd_buf_shred_pool_ele_release( pool, buf_shred );
      }
    }

    /* Return early because there are no allocations without a block. */

    return;
  }

  /* Remove all the allocations relating to a block. */

  fd_wksp_t *  wksp  = fd_blockstore_wksp( blockstore );
  fd_alloc_t * alloc = fd_blockstore_alloc( blockstore );

  fd_blockstore_txn_map_t * txn_map = fd_wksp_laddr_fast( wksp, blockstore->txn_map_gaddr );
  fd_block_t *              block   = fd_wksp_laddr_fast( wksp, block_map_entry->block_gaddr );

  /* DO THIS FIRST FOR THREAD SAFETY */
  FD_COMPILER_MFENCE();
  block_map_entry->block_gaddr = 0;

  uchar *              data = fd_wksp_laddr_fast( wksp, block->data_gaddr );
  fd_block_txn_ref_t * txns = fd_wksp_laddr_fast( wksp, block->txns_gaddr );
  for( ulong j = 0; j < block->txns_cnt; ++j ) {
    fd_blockstore_txn_key_t sig;
    fd_memcpy( &sig, data + txns[j].id_off, sizeof( sig ) );
    fd_blockstore_txn_map_remove( txn_map, &sig );
  }
  if( block->micros_gaddr ) fd_alloc_free( alloc, fd_wksp_laddr_fast( wksp, block->micros_gaddr ) );
  if( block->txns_gaddr ) fd_alloc_free( alloc, txns );
  ulong mgaddr = block->txns_meta_gaddr;
  while( mgaddr ) {
    ulong * laddr   = fd_wksp_laddr_fast( wksp, mgaddr );
    ulong   mgaddr2 = laddr[0]; /* link to next allocation */
    fd_alloc_free( alloc, laddr );
    mgaddr = mgaddr2;
  }
  fd_alloc_free( alloc, block );
}

/* Remove all the unassembled shreds for a slot */
int
fd_blockstore_buffered_shreds_remove( fd_blockstore_t * blockstore, ulong slot ) {
  fd_wksp_t *      wksp            = fd_blockstore_wksp( blockstore );
  fd_block_map_t * slot_map        = fd_wksp_laddr_fast( wksp, blockstore->slot_map_gaddr );
  fd_block_map_t * block_map_entry = fd_block_map_query( slot_map, &slot, NULL );
  if( FD_UNLIKELY( !block_map_entry ) ) return FD_BLOCKSTORE_OK;
  fd_buf_shred_t *     shred_pool = fd_blockstore_buf_shred_pool( blockstore );
  fd_buf_shred_map_t * shred_map  = fd_blockstore_buf_shred_map( blockstore );
  ulong shred_cnt = block_map_entry->complete_idx + 1;
  for( uint i = 0; i < shred_cnt; i++ ) {
    fd_shred_key_t key = { .slot = slot, .idx = i };
    fd_buf_shred_t * ele;
    while( FD_UNLIKELY(
        ele = fd_buf_shred_map_ele_remove( shred_map, &key, NULL, shred_pool ) ) )
      fd_buf_shred_pool_ele_release( shred_pool, ele );
  }
  fd_block_map_remove( slot_map, &slot );
  return FD_BLOCKSTORE_OK;
}

int
fd_blockstore_publish( fd_blockstore_t * blockstore, ulong smr ) {
  long  prune_time_ns = -fd_log_wallclock();
  ulong prune_cnt     = 0UL;

  fd_wksp_t * wksp = fd_blockstore_wksp( blockstore );
  ulong *     q    = fd_wksp_laddr_fast( wksp, blockstore->slot_deque_gaddr );

  /* If root is missing, return an error. */

  if( FD_UNLIKELY( !fd_blockstore_block_map_query( blockstore, smr ) ) ) {
    return FD_BLOCKSTORE_ERR_SLOT_MISSING;
  }

  /* If trying to re-publish current root, return an error. */

  if( FD_UNLIKELY( smr == blockstore->smr ) ) {
    FD_LOG_WARNING(( "[fd_blockstore_publish] attempting to re-publish current blockstore root %lu", blockstore->smr ));
    return FD_BLOCKSTORE_ERR_UNKNOWN;
  }

  /* If trying to publish a root older than current, return an error. */

  if( FD_UNLIKELY( smr < blockstore->smr ) ) {
    FD_LOG_WARNING(( "[fd_blockstore_publish] attempting to publish a root older than the current root. new: %lu, curr: %lu", smr, blockstore->smr ));
    return FD_BLOCKSTORE_ERR_UNKNOWN;
  }

  /* Clear the deque, preparing it to be reused. */

  fd_blockstore_slot_deque_remove_all( q );

  /* Push the root onto the queue. */

  fd_blockstore_slot_deque_push_tail( q, blockstore->smr );

  /* Conduct a BFS, stopping the search at the new root. */

  while( !fd_blockstore_slot_deque_empty( q ) ) {
    ulong slot = fd_blockstore_slot_deque_pop_head( q );

    fd_block_map_t * block_map_entry = fd_blockstore_block_map_query( blockstore, slot );

    /* Add slot's children to the queue. */

    for( ulong i = 0; i < block_map_entry->child_slot_cnt; i++ ) {
      if( FD_LIKELY( block_map_entry->child_slots[i] != smr ) ) {
        fd_blockstore_slot_deque_push_tail( q, block_map_entry->child_slots[i] );
      }
    }

    if( !fd_uchar_extract_bit( block_map_entry->flags, FD_BLOCK_FLAG_FINALIZED ) ) {
      /* Remove the slot only if it is not finalized. */

      FD_LOG_NOTICE(( "[%s] pruning slot %lu", __func__, slot ));
      fd_blockstore_slot_remove( blockstore, slot );
      prune_cnt++;
    }
  }

  prune_time_ns += fd_log_wallclock();

  FD_LOG_NOTICE( ( "[fd_blockstore_publish] new root: %lu, old root: %lu, prune cnt: %lu, block cnt: %lu, took: %6.6f ms",
                   smr,
                   blockstore->smr,
                   prune_cnt,
                   fd_block_map_key_cnt( fd_blockstore_block_map( blockstore ) ),
                   (double)prune_time_ns * 1e-6 ) );

  blockstore->smr = smr;

  return FD_BLOCKSTORE_OK;
}
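
/* Usage sketch (hypothetical caller, not from the original source):
   advancing the root after consensus finalizes `new_smr`. The error
   codes mirror the checks above.

     int rc = fd_blockstore_publish( blockstore, new_smr );
     if( rc == FD_BLOCKSTORE_ERR_SLOT_MISSING ) {
       ... new_smr is not in the blockstore; nothing was pruned ...
     } else if( rc != FD_BLOCKSTORE_OK ) {
       ... stale or duplicate root; blockstore->smr is unchanged ...
     }
*/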

/* Deshred into a block once we've received all shreds for a slot. */

static int
deshred( fd_blockstore_t * blockstore, ulong slot ) {
  FD_LOG_DEBUG(( "[%s] slot %lu", __func__, slot ));

  fd_block_map_t * block_map_entry = fd_blockstore_block_map_query( blockstore, slot );
  FD_TEST( block_map_entry->block_gaddr == 0 ); /* FIXME duplicate blocks are not supported */

  block_map_entry->ts = fd_log_wallclock();

  fd_buf_shred_t *     shred_pool = fd_blockstore_buf_shred_pool( blockstore );
  fd_buf_shred_map_t * shred_map  = fd_blockstore_buf_shred_map( blockstore );

  ulong block_sz  = 0;
  ulong shred_cnt = block_map_entry->complete_idx + 1;
  for( uint idx = 0; idx < shred_cnt; idx++ ) {
    fd_shred_key_t key = { .slot = slot, .idx = idx };
    fd_buf_shred_t const * query = fd_buf_shred_map_ele_query_const( shred_map, &key, NULL, shred_pool );
    if( FD_UNLIKELY( !query ) ) {
      FD_LOG_ERR(( "[%s] missing shred slot: %lu idx: %u while deshredding", __func__, slot, idx ));
    }
    block_sz += fd_shred_payload_sz( &query->hdr );
  }

  /* Allocate one contiguous region for the block: the fd_block_t
     header, then the deshredded data, then the per-shred metadata. */
  ulong data_off  = fd_ulong_align_up( sizeof(fd_block_t), 128UL );
  ulong shred_off = fd_ulong_align_up( data_off + block_sz, alignof( fd_block_shred_t ) );
  ulong tot_sz    = shred_off + sizeof( fd_block_shred_t ) * shred_cnt;

  fd_alloc_t * alloc = fd_blockstore_alloc( blockstore );
  fd_wksp_t *  wksp  = fd_blockstore_wksp( blockstore );
  fd_block_t * block = fd_alloc_malloc( alloc, 128UL, tot_sz );
  if( FD_UNLIKELY( block == NULL ) ) {
    return FD_BLOCKSTORE_ERR_SLOT_FULL;
  }

  fd_memset( block, 0, sizeof(fd_block_t) );

  uchar * data_laddr = (uchar *)((ulong)block + data_off);
  block->data_gaddr  = fd_wksp_gaddr_fast( wksp, data_laddr );
  block->data_sz     = block_sz;
  fd_block_shred_t * shreds_laddr = (fd_block_shred_t *)((ulong)block + shred_off);
  block->shreds_gaddr = fd_wksp_gaddr_fast( wksp, shreds_laddr );
  block->shreds_cnt   = shred_cnt;

  /* deshred the shreds into the block mem */
  fd_deshredder_t    deshredder = { 0 };
  fd_shred_t const * shreds[1]  = { 0 };
  fd_deshredder_init( &deshredder, data_laddr, block->data_sz, shreds, 0 );
  long  rc  = -FD_SHRED_EPIPE;
  ulong off = 0;
  for( uint i = 0; i < shred_cnt; i++ ) {
    // TODO can do this in one iteration with block sz loop... massage with deshredder API
    fd_shred_key_t key = { .slot = slot, .idx = i };
    fd_buf_shred_t const * query =
        fd_buf_shred_map_ele_query_const( shred_map, &key, NULL, shred_pool );
    if( FD_UNLIKELY( !query ) ) FD_LOG_ERR( ( "missing shred idx %u during deshred. slot %lu.", i, slot ) );
    fd_shred_t const * shred = &query->hdr;
    deshredder.shreds        = &shred;
    deshredder.shred_cnt     = 1;
    rc                       = fd_deshredder_next( &deshredder );
    FD_TEST( rc >= 0 );

    shreds_laddr[i].hdr = *shred;
    ulong merkle_sz = shreds_laddr[i].merkle_sz = fd_shred_merkle_sz( shred->variant );
    FD_TEST( merkle_sz <= sizeof(shreds_laddr[i].merkle) );
    if( merkle_sz ) {
      fd_memcpy( shreds_laddr[i].merkle, (uchar const*)shred + fd_shred_merkle_off( shred ), merkle_sz );
    }
    shreds_laddr[i].off = off;

    FD_TEST( !memcmp( &shreds_laddr[i].hdr, shred, sizeof( fd_shred_t ) ) );
    FD_TEST( !memcmp( data_laddr + shreds_laddr[i].off,
                      fd_shred_data_payload( shred ),
                      fd_shred_payload_sz( shred ) ) );

    off += fd_shred_payload_sz( shred );
    fd_buf_shred_t * ele = NULL;
    while( FD_UNLIKELY( ele = fd_buf_shred_map_ele_remove( shred_map, &key, NULL, shred_pool ) ) ) {
      fd_buf_shred_pool_ele_release( shred_pool, ele );
    }
  }

  /* deshredder error handling */
  int err;
  switch( rc ) {
  case -FD_SHRED_EINVAL:
    err = FD_BLOCKSTORE_ERR_SHRED_INVALID;
    goto fail_deshred;
  case -FD_SHRED_ENOMEM:
    FD_LOG_ERR(
        ( "should have alloc'd enough memory above. likely indicates memory corruption." ) );
  }

  switch( deshredder.result ) {
  case FD_SHRED_ESLOT:
    fd_blockstore_scan_block( blockstore, slot, block );

    /* Do this last when it's safe */
    FD_COMPILER_MFENCE();
    block_map_entry->block_gaddr     = fd_wksp_gaddr_fast( wksp, block );
    fd_block_micro_t *    micros     = fd_wksp_laddr_fast( wksp, block->micros_gaddr );
    uchar *               data       = fd_wksp_laddr_fast( wksp, block->data_gaddr );
    fd_microblock_hdr_t * last_micro = (fd_microblock_hdr_t *)( data +
                                                                micros[block->micros_cnt - 1].off );
    memcpy( &block_map_entry->block_hash, last_micro->hash, sizeof( fd_hash_t ) );

    block_map_entry->flags = fd_uchar_clear_bit( block_map_entry->flags, FD_BLOCK_FLAG_SHREDDING );
    block_map_entry->flags = fd_uchar_set_bit( block_map_entry->flags, FD_BLOCK_FLAG_COMPLETED );

    if( slot < blockstore->min ) {
      blockstore->min = slot;
    }
    if( slot > blockstore->max ) {
      blockstore->max = blockstore->hcs = slot;
    }

    return FD_BLOCKSTORE_OK;
  case FD_SHRED_EBATCH:
  case FD_SHRED_EPIPE:
    FD_LOG_WARNING( ( "deshredding slot %lu produced invalid block", slot ) );
    err = FD_BLOCKSTORE_ERR_DESHRED_INVALID;
    goto fail_deshred;
  case FD_SHRED_EINVAL:
    err = FD_BLOCKSTORE_ERR_SHRED_INVALID;
    goto fail_deshred;
  case FD_SHRED_ENOMEM:
    err = FD_BLOCKSTORE_ERR_NO_MEM;
    goto fail_deshred;
  default:
    err = FD_BLOCKSTORE_ERR_UNKNOWN;
  }

fail_deshred:
  /* We failed to deshred the block. Throw it away, and try again from scratch. */
  FD_LOG_WARNING(( "[%s] failed to deshred slot %lu. err: %d", __func__, slot, err ));
  fd_alloc_free( alloc, block );
  fd_blockstore_slot_remove( blockstore, slot );
  for( uint i = 0; i < shred_cnt; i++ ) {
    fd_shred_key_t key = { .slot = slot, .idx = i };
    fd_buf_shred_t * ele = fd_buf_shred_map_ele_remove( shred_map, &key, NULL, shred_pool );
    if( FD_LIKELY( ele ) ) fd_buf_shred_pool_ele_release( shred_pool, ele );
  }
  return err;
}

int
fd_buf_shred_insert( fd_blockstore_t * blockstore, fd_shred_t const * shred ) {
  FD_LOG_DEBUG(( "[%s] slot %lu idx %u", __func__, shred->slot, shred->idx ));

  /* Check this shred > SMR. We ignore shreds at or below the SMR
     because it is an invariant that we have a connected, linear chain
     for the SMR and its ancestors. */

  if( FD_UNLIKELY( shred->slot <= blockstore->smr ) ) {
    return FD_BLOCKSTORE_OK;
  }

  /* Check if we already have this shred */

  fd_buf_shred_t *     shred_pool = fd_blockstore_buf_shred_pool( blockstore );
  fd_buf_shred_map_t * shred_map  = fd_blockstore_buf_shred_map( blockstore );
  fd_shred_key_t       shred_key  = { .slot = shred->slot, .idx = shred->idx };
  fd_buf_shred_t * shred_ = fd_buf_shred_map_ele_query( shred_map, &shred_key, NULL, shred_pool );
  if( FD_UNLIKELY( shred_ ) ) {

    /* FIXME we currently cannot handle equivocating shreds. */

    if( FD_UNLIKELY( is_eqvoc_shred( &shred_->hdr, shred ) ) ) {
      FD_LOG_WARNING(( "equivocating shred detected %lu %u. ignoring.", shred->slot, shred->idx ));
      return FD_BLOCKSTORE_OK;
    }

    /* Short-circuit if we already have the shred. */

    return FD_BLOCKSTORE_OK;
  }

  /* Insert the shred */

  if( FD_UNLIKELY( !fd_buf_shred_pool_free( shred_pool ) ) ) {
    FD_LOG_ERR(( "shred pool is full. halting." ));
  }
  fd_buf_shred_t * ele = fd_buf_shred_pool_ele_acquire( shred_pool ); /* always non-NULL */
  ele->key = shred_key;
  ele->hdr = *shred;
  fd_memcpy( &ele->raw, shred, fd_shred_sz( shred ) );
  fd_buf_shred_map_ele_insert( shred_map, ele, shred_pool ); /* always non-NULL */

  /* Update shred's associated slot meta */

  ulong            slot            = shred->slot;
  fd_block_map_t * block_map       = fd_blockstore_block_map( blockstore );
  fd_block_map_t * block_map_entry = fd_block_map_query( block_map, &slot, NULL );
  if( FD_UNLIKELY( !block_map_entry ) ) {

    if( FD_UNLIKELY( fd_block_map_key_cnt( block_map ) == fd_block_map_key_max( block_map ) ) ) {

      if( FD_UNLIKELY( blockstore->min == blockstore->smr ) ) {
        FD_LOG_ERR(( "[%s] blockstore->min %lu is smr %lu. unable to evict full blockstore.", __func__, blockstore->min, blockstore->smr ));
      }

      /* If block_map is full, evict everything through the SMR. */

      for( ulong evict = blockstore->min; evict < blockstore->smr; evict++ ) {
        FD_LOG_NOTICE(( "[%s] evicting slot %lu", __func__, evict ));
        fd_blockstore_slot_remove( blockstore, evict );
      }
    }

    /* Try to insert slot into block_map */

    block_map_entry = fd_block_map_insert( block_map, &slot );
    if( FD_UNLIKELY( !block_map_entry ) ) return FD_BLOCKSTORE_ERR_SLOT_FULL;

    /* Initialize the block_map_entry. Note some fields are initialized
       to dummy values because we do not have all the necessary metadata
       yet. */

    block_map_entry->slot = slot;

    block_map_entry->parent_slot = shred->slot - shred->data.parent_off;
    memset( block_map_entry->child_slots,
            UCHAR_MAX,
            FD_BLOCKSTORE_CHILD_SLOT_MAX * sizeof( ulong ) );
    block_map_entry->child_slot_cnt = 0;

    block_map_entry->height         = 0;
    block_map_entry->block_hash     = ( fd_hash_t ){ 0 };
    block_map_entry->bank_hash      = ( fd_hash_t ){ 0 };
    block_map_entry->flags          = fd_uchar_set_bit( 0, FD_BLOCK_FLAG_SHREDDING );
    block_map_entry->ts             = 0;
    block_map_entry->reference_tick = (uchar)( (int)shred->data.flags &
                                               (int)FD_SHRED_DATA_REF_TICK_MASK );
    block_map_entry->consumed_idx   = UINT_MAX;
    block_map_entry->received_idx   = 0;
    block_map_entry->complete_idx   = UINT_MAX;

    block_map_entry->block_gaddr = 0;
  }

  FD_LOG_DEBUG( ( "slot_meta->consumed_idx: %u, shred->slot: %lu, slot_meta->received_idx: %u, "
                  "shred->idx: %u, shred->complete_idx: %u",
                  block_map_entry->consumed_idx,
                  shred->slot,
                  block_map_entry->received_idx,
                  shred->idx,
                  block_map_entry->complete_idx ) );

  /* Update shred windowing metadata: consumed, received, shred_cnt */

  while( fd_buf_shred_query( blockstore, shred->slot, (uint)( block_map_entry->consumed_idx + 1U ) ) ) {
    block_map_entry->consumed_idx++;
  }
  block_map_entry->received_idx = fd_uint_max( block_map_entry->received_idx, shred->idx + 1 );
  if( shred->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE ) block_map_entry->complete_idx = shred->idx;

  /* update ancestry metadata: parent_slot, is_connected, next_slot */

  fd_block_map_t * parent_block_map_entry =
      fd_blockstore_block_map_query( blockstore, block_map_entry->parent_slot );

  /* Add this slot to its parent's child slots if not already there. */

  if( FD_LIKELY( parent_block_map_entry ) ) {
    int found = 0;
    for( ulong i = 0; i < parent_block_map_entry->child_slot_cnt; i++ ) {
      if( FD_LIKELY( parent_block_map_entry->child_slots[i] == slot ) ) {
        found = 1;
      }
    }
    if( FD_UNLIKELY( !found ) ) {
      if( parent_block_map_entry->child_slot_cnt == FD_BLOCKSTORE_CHILD_SLOT_MAX ) {
        FD_LOG_ERR( ( "failed to add slot %lu to parent %lu's children. exceeding child slot max",
                      slot,
                      parent_block_map_entry->slot ) );
      }
      parent_block_map_entry->child_slots[parent_block_map_entry->child_slot_cnt++] = slot;
    }
  }

  if( FD_LIKELY( block_map_entry->consumed_idx == UINT_MAX ||
                 block_map_entry->consumed_idx != block_map_entry->complete_idx ) ) {
    return FD_BLOCKSTORE_OK;
  }

  /* Received all shreds, so try to assemble a block. */
  FD_LOG_DEBUG( ( "received all shreds for slot %lu - now building a block", shred->slot ) );

  int rc = deshred( blockstore, shred->slot );
  switch( rc ) {
  case FD_BLOCKSTORE_OK:
    return FD_BLOCKSTORE_OK_SLOT_COMPLETE;
  case FD_BLOCKSTORE_ERR_SLOT_FULL:
    FD_LOG_DEBUG( ( "already deshredded slot %lu. ignoring.", shred->slot ) );
    return FD_BLOCKSTORE_OK;
  case FD_BLOCKSTORE_ERR_DESHRED_INVALID:
    FD_LOG_DEBUG( ( "failed to deshred slot %lu. ignoring.", shred->slot ) );
    return FD_BLOCKSTORE_OK;
  default:
    /* FIXME */
    FD_LOG_ERR( ( "deshred err %d", rc ) );
  }
}
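
/* Usage sketch (hypothetical ingest loop, not from the original
   source): feed shreds as they arrive off the wire and react when a
   slot completes.

     int rc = fd_buf_shred_insert( blockstore, shred );
     if( rc == FD_BLOCKSTORE_OK_SLOT_COMPLETE ) {
       fd_block_t * block = fd_blockstore_block_query( blockstore, shred->slot );
       ... block is now assembled and scanned, ready for replay ...
     }
*/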

fd_shred_t *
fd_buf_shred_query( fd_blockstore_t * blockstore, ulong slot, uint shred_idx ) {
  fd_buf_shred_t *     shred_pool = fd_blockstore_buf_shred_pool( blockstore );
  fd_buf_shred_map_t * shred_map  = fd_blockstore_buf_shred_map( blockstore );
  fd_shred_key_t key = { .slot = slot, .idx = shred_idx };
  fd_buf_shred_t * query =
      fd_buf_shred_map_ele_query( shred_map, &key, NULL, shred_pool );
  if( FD_UNLIKELY( !query ) ) return NULL;
  return &query->hdr;
}

long
fd_buf_shred_query_copy_data( fd_blockstore_t * blockstore, ulong slot, uint shred_idx, void * buf, ulong buf_max ) {
  if( buf_max < FD_SHRED_MAX_SZ ) return -1;

  fd_buf_shred_t *     shred_pool = fd_blockstore_buf_shred_pool( blockstore );
  fd_buf_shred_map_t * shred_map  = fd_blockstore_buf_shred_map( blockstore );
  fd_shred_key_t key = { .slot = slot, .idx = shred_idx };
  fd_buf_shred_t * shred =
      fd_buf_shred_map_ele_query( shred_map, &key, NULL, shred_pool );
  if( shred ) {
    ulong sz = fd_shred_sz( &shred->hdr );
    if( sz > buf_max ) return -1;
    fd_memcpy( buf, shred->raw, sz );
    return (long)sz;
  }

  /* The shred is no longer buffered; reconstruct it from the
     assembled block. */

  fd_block_map_t * query =
      fd_block_map_query( fd_blockstore_block_map( blockstore ), &slot, NULL );
  if( FD_UNLIKELY( !query || query->block_gaddr == 0 ) ) return -1;
  if( shred_idx > query->complete_idx ) return -1;
  fd_wksp_t *        wksp   = fd_blockstore_wksp( blockstore );
  fd_block_t *       blk    = fd_wksp_laddr_fast( wksp, query->block_gaddr );
  fd_block_shred_t * shreds = fd_wksp_laddr_fast( wksp, blk->shreds_gaddr );
  ulong sz = fd_shred_payload_sz( &shreds[shred_idx].hdr );
  if( FD_SHRED_DATA_HEADER_SZ + sz > buf_max ) return -1L;
  fd_memcpy( buf, &shreds[shred_idx].hdr, FD_SHRED_DATA_HEADER_SZ );
  fd_memcpy( (uchar*)buf + FD_SHRED_DATA_HEADER_SZ, (uchar*)fd_wksp_laddr_fast( wksp, blk->data_gaddr ) + shreds[shred_idx].off, sz );
  ulong tot_sz    = FD_SHRED_DATA_HEADER_SZ + sz;
  ulong merkle_sz = shreds[shred_idx].merkle_sz;
  if( merkle_sz ) {
    if( tot_sz + merkle_sz > buf_max ) return -1;
    fd_memcpy( (uchar*)buf + tot_sz, shreds[shred_idx].merkle, merkle_sz );
    tot_sz += merkle_sz;
  }
  if( tot_sz >= FD_SHRED_MIN_SZ ) return (long)tot_sz;
  /* Zero pad */
  fd_memset( (uchar*)buf + tot_sz, 0, FD_SHRED_MIN_SZ - tot_sz );
  return (long)FD_SHRED_MIN_SZ;
}
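
/* Usage sketch (assumed caller): the destination buffer must be at
   least FD_SHRED_MAX_SZ, which also guarantees the zero-padded
   FD_SHRED_MIN_SZ result fits.

     uchar buf[FD_SHRED_MAX_SZ];
     long  sz = fd_buf_shred_query_copy_data( blockstore, slot, idx, buf, sizeof(buf) );
     if( sz >= 0 ) { ... buf holds sz bytes of the reconstructed shred ... }
*/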

fd_block_t *
fd_blockstore_block_query( fd_blockstore_t * blockstore, ulong slot ) {
  fd_block_map_t * query =
      fd_block_map_query( fd_blockstore_block_map( blockstore ), &slot, NULL );
  if( FD_UNLIKELY( !query || query->block_gaddr == 0 ) ) return NULL;
  return fd_wksp_laddr_fast( fd_blockstore_wksp( blockstore ), query->block_gaddr );
}

fd_hash_t const *
fd_blockstore_block_hash_query( fd_blockstore_t * blockstore, ulong slot ) {
  fd_block_map_t * query =
      fd_block_map_query( fd_blockstore_block_map( blockstore ), &slot, NULL );
  if( FD_UNLIKELY( !query || query->block_gaddr == 0 ) ) return NULL;
  return &query->block_hash;
}

fd_hash_t const *
fd_blockstore_bank_hash_query( fd_blockstore_t * blockstore, ulong slot ) {
  fd_block_map_t * block_map_entry = fd_blockstore_block_map_query( blockstore, slot );
  if( FD_UNLIKELY( !block_map_entry ) ) return NULL;
  return &block_map_entry->bank_hash;
}

fd_block_map_t *
fd_blockstore_block_map_query( fd_blockstore_t * blockstore, ulong slot ) {
  return fd_block_map_query( fd_blockstore_block_map( blockstore ), &slot, NULL );
}

ulong
fd_blockstore_parent_slot_query( fd_blockstore_t * blockstore, ulong slot ) {
  fd_block_map_t * query = fd_blockstore_block_map_query( blockstore, slot );
  if( FD_UNLIKELY( !query ) ) return FD_SLOT_NULL;
  return query->parent_slot;
}

int
fd_blockstore_child_slots_query( fd_blockstore_t * blockstore, ulong slot, ulong ** slots_out, ulong * slot_cnt_out ) {
  fd_block_map_t * query = fd_blockstore_block_map_query( blockstore, slot );
  if( FD_UNLIKELY( !query ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
  *slots_out    = query->child_slots;
  *slot_cnt_out = query->child_slot_cnt;
  return FD_BLOCKSTORE_OK;
}

int
fd_blockstore_block_data_query_volatile( fd_blockstore_t * blockstore, ulong slot, fd_block_map_t * block_map_entry_out, fd_block_rewards_t * rewards_out, fd_hash_t * parent_block_hash_out, fd_valloc_t alloc, uchar ** block_data_out, ulong * block_data_out_sz ) {
  /* WARNING: this code is extremely delicate. Do NOT modify without
     understanding all the invariants. In particular, we must never
     dereference through a corrupt pointer. It's OK for the
     destination data to be overwritten/invalid as long as the memory
     location is valid. As long as we don't crash, we can validate the
     data after it is read. */
  fd_wksp_t *            wksp      = fd_blockstore_wksp( blockstore );
  fd_block_map_t const * block_map = fd_wksp_laddr_fast( wksp, blockstore->slot_map_gaddr );
  for(;;) {
    uint seqnum;
    if( FD_UNLIKELY( fd_rwseq_start_concur_read( &blockstore->lock, &seqnum ) ) ) continue;

    fd_block_map_t const * query = fd_block_map_query_safe( block_map, &slot, NULL );
    if( FD_UNLIKELY( !query ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
    memcpy( block_map_entry_out, query, sizeof( fd_block_map_t ) );
    ulong blk_gaddr = query->block_gaddr;
    if( FD_UNLIKELY( !blk_gaddr ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;

    if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->lock, seqnum ) ) ) continue;

    fd_block_t * blk = fd_wksp_laddr_fast( wksp, blk_gaddr );
    if( rewards_out ) memcpy( rewards_out, &blk->rewards, sizeof(fd_block_rewards_t) );
    ulong blk_data_gaddr = blk->data_gaddr;
    if( FD_UNLIKELY( !blk_data_gaddr ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
    ulong sz = *block_data_out_sz = blk->data_sz;
    if( sz >= FD_SHRED_MAX_PER_SLOT * FD_SHRED_MAX_SZ ) continue;

    if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->lock, seqnum ) ) ) continue;

    uchar * data_out = fd_valloc_malloc( alloc, 128UL, sz );
    if( FD_UNLIKELY( data_out == NULL ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
    fd_memcpy( data_out, fd_wksp_laddr_fast( wksp, blk_data_gaddr ), sz );

    if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->lock, seqnum ) ) ) {
      fd_valloc_free( alloc, data_out );
      continue;
    }

    *block_data_out = data_out;

    if( parent_block_hash_out ) {
      if( ( query = fd_block_map_query_safe( block_map, &block_map_entry_out->parent_slot, NULL ) ) == NULL ) {
        memset( parent_block_hash_out, 0, sizeof(fd_hash_t) );
      } else {
        fd_memcpy( parent_block_hash_out, query->block_hash.uc, sizeof(fd_hash_t) );

        if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->lock, seqnum ) ) ) {
          fd_valloc_free( alloc, data_out );
          continue;
        }
      }
    }

    return FD_BLOCKSTORE_OK;
  }
}

int
fd_blockstore_block_map_query_volatile( fd_blockstore_t * blockstore, ulong slot, fd_block_map_t * block_map_entry_out ) {
  /* WARNING: this code is extremely delicate. Do NOT modify without
     understanding all the invariants. In particular, we must never
     dereference through a corrupt pointer. It's OK for the
     destination data to be overwritten/invalid as long as the memory
     location is valid. As long as we don't crash, we can validate the
     data after it is read. */
  fd_wksp_t *            wksp     = fd_blockstore_wksp( blockstore );
  fd_block_map_t const * slot_map = fd_wksp_laddr_fast( wksp, blockstore->slot_map_gaddr );
  for(;;) {
    uint seqnum;
    if( FD_UNLIKELY( fd_rwseq_start_concur_read( &blockstore->lock, &seqnum ) ) ) continue;
    fd_block_map_t const * query = fd_block_map_query_safe( slot_map, &slot, NULL );
    if( FD_UNLIKELY( !query ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
    memcpy( block_map_entry_out, query, sizeof( fd_block_map_t ) );
    ulong blk_gaddr = query->block_gaddr;
    if( FD_UNLIKELY( !blk_gaddr ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;

    if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->lock, seqnum ) ) ) continue;

    return FD_BLOCKSTORE_OK;
  }
}

fd_blockstore_txn_map_t *
fd_blockstore_txn_query( fd_blockstore_t * blockstore, uchar const sig[FD_ED25519_SIG_SZ] ) {
  fd_blockstore_txn_key_t key;
  fd_memcpy( &key, sig, sizeof( key ) );
  return fd_blockstore_txn_map_query(
      fd_wksp_laddr_fast( fd_blockstore_wksp( blockstore ), blockstore->txn_map_gaddr ),
      &key,
      NULL );
}

int
fd_blockstore_txn_query_volatile( fd_blockstore_t * blockstore, uchar const sig[FD_ED25519_SIG_SZ], fd_blockstore_txn_map_t * txn_out, long * blk_ts, uchar * blk_flags, uchar txn_data_out[FD_TXN_MTU] ) {
  /* WARNING: this code is extremely delicate. Do NOT modify without
     understanding all the invariants. In particular, we must never
     dereference through a corrupt pointer. It's OK for the
     destination data to be overwritten/invalid as long as the memory
     location is valid. As long as we don't crash, we can validate the
     data after it is read. */
  fd_wksp_t *               wksp     = fd_blockstore_wksp( blockstore );
  fd_block_map_t const *    slot_map = fd_wksp_laddr_fast( wksp, blockstore->slot_map_gaddr );
  fd_blockstore_txn_map_t * txn_map  = fd_wksp_laddr_fast( wksp, blockstore->txn_map_gaddr );
  for(;;) {
    uint seqnum;
    if( FD_UNLIKELY( fd_rwseq_start_concur_read( &blockstore->lock, &seqnum ) ) ) continue;

    fd_blockstore_txn_key_t key;
    fd_memcpy( &key, sig, sizeof( key ) );
    fd_blockstore_txn_map_t const * txn_map_entry = fd_blockstore_txn_map_query_safe( txn_map, &key, NULL );
    if( FD_UNLIKELY( txn_map_entry == NULL ) ) return FD_BLOCKSTORE_ERR_TXN_MISSING;
    fd_memcpy( txn_out, txn_map_entry, sizeof(fd_blockstore_txn_map_t) );

    if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->lock, seqnum ) ) ) continue;

    fd_block_map_t const * query = fd_block_map_query_safe( slot_map, &txn_out->slot, NULL );
    if( FD_UNLIKELY( !query ) ) return FD_BLOCKSTORE_ERR_TXN_MISSING;
    ulong blk_gaddr = query->block_gaddr;
    if( FD_UNLIKELY( !blk_gaddr ) ) return FD_BLOCKSTORE_ERR_TXN_MISSING;

    if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->lock, seqnum ) ) ) continue;

    fd_block_t * blk = fd_wksp_laddr_fast( wksp, blk_gaddr );
    if( blk_ts ) *blk_ts       = query->ts;
    if( blk_flags ) *blk_flags = query->flags;
    ulong ptr = blk->data_gaddr;
    ulong sz  = blk->data_sz;
    if( txn_out->offset + txn_out->sz > sz || txn_out->sz > FD_TXN_MTU ) continue;

    if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->lock, seqnum ) ) ) continue;

    if( txn_data_out == NULL ) return FD_BLOCKSTORE_OK;
    uchar const * data = fd_wksp_laddr_fast( wksp, ptr );
    fd_memcpy( txn_data_out, data + txn_out->offset, txn_out->sz );

    if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->lock, seqnum ) ) ) continue;

    return FD_BLOCKSTORE_OK;
  }
}
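
/* Usage sketch (assumed caller, e.g. an RPC thread reading without
   holding the write lock):

     fd_blockstore_txn_map_t txn_meta;
     long                    blk_ts;
     uchar                   blk_flags;
     uchar                   txn_data[FD_TXN_MTU];
     if( fd_blockstore_txn_query_volatile( blockstore, sig, &txn_meta,
                                           &blk_ts, &blk_flags, txn_data )
         == FD_BLOCKSTORE_OK ) {
       ... txn_data holds txn_meta.sz bytes of the raw transaction ...
     }
*/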

void
fd_blockstore_block_height_update( fd_blockstore_t * blockstore, ulong slot, ulong height ) {
  fd_block_map_t * query = fd_blockstore_block_map_query( blockstore, slot );
  if( FD_LIKELY( query ) ) query->height = height;
}

void
fd_blockstore_log_block_status( fd_blockstore_t * blockstore, ulong around_slot ) {
  for( ulong i = around_slot - 5; i < around_slot + 20; ++i ) {
    fd_block_map_t * slot_entry =
        fd_block_map_query( fd_blockstore_block_map( blockstore ), &i, NULL );
    if( !slot_entry ) continue;
    FD_LOG_NOTICE( ( "%sslot=%lu received=%u consumed=%u finished=%u",
                     ( i == around_slot ? "*" : " " ),
                     i,
                     slot_entry->received_idx,
                     slot_entry->consumed_idx,
                     slot_entry->complete_idx ) );
  }
}

static char *
fd_smart_size( ulong sz, char * tmp, size_t tmpsz ) {
  if( sz <= (1UL<<7) )
    snprintf( tmp, tmpsz, "%lu B", sz );
  else if( sz <= (1UL<<17) )
    snprintf( tmp, tmpsz, "%.3f KB", ((double)sz/((double)(1UL<<10))) );
  else if( sz <= (1UL<<27) )
    snprintf( tmp, tmpsz, "%.3f MB", ((double)sz/((double)(1UL<<20))) );
  else
    snprintf( tmp, tmpsz, "%.3f GB", ((double)sz/((double)(1UL<<30))) );
  return tmp;
}

void
fd_blockstore_log_mem_usage( fd_blockstore_t * blockstore ) {
  char tmp1[100];
  char tmp2[100];
  char tmp3[100];

  FD_LOG_NOTICE(( "blockstore base footprint: %s",
                  fd_smart_size( fd_blockstore_footprint(), tmp1, sizeof(tmp1) ) ));
  fd_buf_shred_t * shred_pool = fd_blockstore_buf_shred_pool( blockstore );
  ulong shred_used = fd_buf_shred_pool_used( shred_pool );
  ulong shred_max  = fd_buf_shred_pool_max( shred_pool );
  FD_LOG_NOTICE(( "shred pool footprint: %s (%lu entries used out of %lu, %lu%%)",
                  fd_smart_size( fd_buf_shred_pool_footprint( shred_max ), tmp1, sizeof(tmp1) ),
                  shred_used,
                  shred_max,
                  (100U*shred_used) / shred_max ));
  fd_buf_shred_map_t * shred_map = fd_blockstore_buf_shred_map( blockstore );
  ulong shred_map_cnt = fd_buf_shred_map_chain_cnt( shred_map );
  FD_LOG_NOTICE(( "shred map footprint: %s (%lu chains, load is %.3f)",
                  fd_smart_size( fd_buf_shred_map_footprint( shred_map_cnt ), tmp1, sizeof(tmp1) ),
                  shred_map_cnt,
                  ((double)shred_used)/((double)shred_map_cnt) ));
  fd_block_map_t * slot_map = fd_blockstore_block_map( blockstore );
  ulong slot_map_cnt = fd_block_map_key_cnt( slot_map );
  ulong slot_map_max = fd_block_map_key_max( slot_map );
  FD_LOG_NOTICE(( "slot map footprint: %s (%lu entries used out of %lu, %lu%%)",
                  fd_smart_size( fd_block_map_footprint( slot_map_max ), tmp1, sizeof(tmp1) ),
                  slot_map_cnt,
                  slot_map_max,
                  (100U*slot_map_cnt)/slot_map_max ));
  fd_blockstore_txn_map_t * txn_map = fd_blockstore_txn_map( blockstore );
  ulong txn_map_cnt = fd_blockstore_txn_map_key_cnt( txn_map );
  ulong txn_map_max = fd_blockstore_txn_map_key_max( txn_map );
  FD_LOG_NOTICE(( "txn map footprint: %s (%lu entries used out of %lu, %lu%%)",
                  fd_smart_size( fd_blockstore_txn_map_footprint( txn_map_max ), tmp1, sizeof(tmp1) ),
                  txn_map_cnt,
                  txn_map_max,
                  (100U*txn_map_cnt)/txn_map_max ));
  ulong block_cnt = 0;
  ulong data_tot  = 0;
  ulong data_max  = 0;
  ulong txn_tot   = 0;
  ulong txn_max   = 0;

  ulong * q = fd_wksp_laddr_fast( fd_blockstore_wksp( blockstore ), blockstore->slot_deque_gaddr );
  fd_blockstore_slot_deque_remove_all( q );
  fd_blockstore_slot_deque_push_tail( q, blockstore->smr );
  while( !fd_blockstore_slot_deque_empty( q ) ) {
    ulong curr = fd_blockstore_slot_deque_pop_head( q );

    fd_block_map_t * block_map_entry = fd_blockstore_block_map_query( blockstore, curr );
    if( FD_UNLIKELY( !block_map_entry || !block_map_entry->block_gaddr ) ) continue;
    fd_block_t * block = fd_wksp_laddr_fast( fd_blockstore_wksp( blockstore ), block_map_entry->block_gaddr );
    if( block->data_gaddr ) {
      block_cnt++;
      data_tot += block->data_sz;
      data_max = fd_ulong_max( data_max, block->data_sz );
      txn_tot += block->txns_cnt;
      txn_max = fd_ulong_max( txn_max, block->txns_cnt );
    }

    ulong * child_slots    = NULL;
    ulong   child_slot_cnt = 0;
    int rc = fd_blockstore_child_slots_query( blockstore, curr, &child_slots, &child_slot_cnt );
    if( FD_UNLIKELY( rc != FD_BLOCKSTORE_OK ) ) {
      continue;
    }

    for( ulong i = 0; i < child_slot_cnt; i++ ) {
      fd_blockstore_slot_deque_push_tail( q, child_slots[i] );
    }
  }

  if( block_cnt )
    FD_LOG_NOTICE(( "block cnt: %lu, total size: %s, avg size: %s, max size: %s, avg txns per block: %lu, max txns: %lu",
                    block_cnt,
                    fd_smart_size( data_tot, tmp1, sizeof(tmp1) ),
                    fd_smart_size( data_tot/block_cnt, tmp2, sizeof(tmp2) ),
                    fd_smart_size( data_max, tmp3, sizeof(tmp3) ),
                    txn_tot/block_cnt,
                    txn_max ));
}

fd_blockstore_t *
fd_blockstore_init( fd_blockstore_t * blockstore, fd_slot_bank_t const * slot_bank ) {
  ulong slot = slot_bank->slot;

  blockstore->min = slot;
  blockstore->max = slot;
  blockstore->hcs = slot;
  blockstore->smr = slot;

  fd_block_map_t * block_map_entry = fd_block_map_insert( fd_blockstore_block_map( blockstore ),
                                                          &slot );

  block_map_entry->parent_slot = slot_bank->prev_slot;
  memset( block_map_entry->child_slots, UCHAR_MAX, FD_BLOCKSTORE_CHILD_SLOT_MAX * sizeof( ulong ) );
  block_map_entry->child_slot_cnt = 0;

  block_map_entry->height    = slot_bank->block_height;
  block_map_entry->bank_hash = slot_bank->banks_hash;

  /* Build the flags from 0 (the freshly inserted entry is not
     guaranteed to be zeroed). */

  block_map_entry->flags = fd_uchar_set_bit(
      fd_uchar_set_bit(
          fd_uchar_set_bit(
              fd_uchar_set_bit(
                  fd_uchar_set_bit( 0,
                                    FD_BLOCK_FLAG_COMPLETED ),
                  FD_BLOCK_FLAG_PROCESSED ),
              FD_BLOCK_FLAG_EQVOCSAFE ),
          FD_BLOCK_FLAG_CONFIRMED ),
      FD_BLOCK_FLAG_FINALIZED );
  block_map_entry->reference_tick = 0;
  block_map_entry->ts             = 0;

  block_map_entry->consumed_idx = 0;
  block_map_entry->received_idx = 0;
  block_map_entry->complete_idx = 0;

  /* This creates an empty allocation for a block, to "facade" that we
     have this particular block (even though we don't). This is useful
     to avoid special-casing various blockstore APIs.

     This should only ever be done for the snapshot slot, after booting
     up from the snapshot. */

  fd_block_t * block = fd_alloc_malloc( fd_blockstore_alloc( blockstore ),
                                        alignof( fd_block_t ),
                                        sizeof( fd_block_t ) );

  /* Point to the fake block. */

  block_map_entry->block_gaddr = fd_wksp_gaddr_fast( fd_blockstore_wksp( blockstore ), block );

  /* Set all fields to 0. Caller's responsibility to check gaddr and sz != 0. */

  memset( block, 0, sizeof( fd_block_t ) );

  return blockstore;
}
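
/* Usage sketch (assumed boot path): after loading a snapshot, seed
   the blockstore with the snapshot slot so downstream queries see a
   rooted, completed block. `snapshot_slot_bank` is a hypothetical
   fd_slot_bank_t recovered from the snapshot.

     fd_blockstore_init( blockstore, &snapshot_slot_bank );
*/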
|