Line data Source code
1 : #include "fd_blockstore.h"
2 : #include <fcntl.h>
3 : #include <string.h>
4 : #include <stdio.h> /* snprintf */
5 : #include <unistd.h>
6 : #include <errno.h>
7 :
/* fd_blockstore_new formats a wksp-backed shared memory region shmem as
   a blockstore.  The region is laid out as a header
   (fd_blockstore_shmem_t) followed by the shred buffer array, shred
   pool, shred map, block info array, block map, block idx, slot deque,
   txn map, and allocator.  Offsets are published into the header as
   wksp gaddrs so any joiner can locate them.  Returns shmem on success,
   NULL on failure (logs warning).  shmem must be fd_blockstore_align()
   aligned and part of a workspace; wksp_tag must be nonzero. */
void *
fd_blockstore_new( void * shmem,
                   ulong wksp_tag,
                   ulong seed,
                   ulong shred_max,
                   ulong block_max,
                   ulong idx_max,
                   ulong txn_max ) {
  /* TODO temporary fix to make sure block_max is a power of 2, as
     required for slot map para. We should change to err in config
     verification eventually */
  block_max = fd_ulong_pow2_up( block_max );
  /* Lock striping for the block map: one lock per element up to the
     compile-time cap. */
  ulong lock_cnt = fd_ulong_min( block_max, BLOCK_INFO_LOCK_CNT );

  fd_blockstore_shmem_t * blockstore_shmem = (fd_blockstore_shmem_t *)shmem;

  if( FD_UNLIKELY( !blockstore_shmem ) ) {
    FD_LOG_WARNING(( "NULL blockstore_shmem" ));
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned((ulong)blockstore_shmem, fd_blockstore_align() ) )) {
    FD_LOG_WARNING(( "misaligned blockstore_shmem" ));
    return NULL;
  }

  if( FD_UNLIKELY( !wksp_tag ) ) {
    FD_LOG_WARNING(( "bad wksp_tag" ));
    return NULL;
  }

  /* gaddrs below are relative to the containing workspace, so the
     region must live inside one. */
  fd_wksp_t * wksp = fd_wksp_containing( blockstore_shmem );
  if( FD_UNLIKELY( !wksp ) ) {
    FD_LOG_WARNING(( "shmem must be part of a workspace" ));
    return NULL;
  }

  /* Zero the entire footprint before carving it up. */
  fd_memset( blockstore_shmem, 0, fd_blockstore_footprint( shred_max, block_max, idx_max, txn_max ) );

  /* block idx is a lg-sized map; round idx_max up to a power of 2. */
  int lg_idx_max = fd_ulong_find_msb( fd_ulong_pow2_up( idx_max ) );

  /* Carve the region.  This layout MUST match fd_blockstore_footprint
     and the identical walk in fd_blockstore_join. */
  FD_SCRATCH_ALLOC_INIT( l, shmem );
  blockstore_shmem  = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_blockstore_shmem_t), sizeof(fd_blockstore_shmem_t) );
  void * shreds     = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_buf_shred_t), sizeof(fd_buf_shred_t) * shred_max );
  void * shred_pool = FD_SCRATCH_ALLOC_APPEND( l, fd_buf_shred_pool_align(), fd_buf_shred_pool_footprint() );
  void * shred_map  = FD_SCRATCH_ALLOC_APPEND( l, fd_buf_shred_map_align(), fd_buf_shred_map_footprint( shred_max ) );
  void * blocks     = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_block_info_t), sizeof(fd_block_info_t) * block_max );
  void * block_map  = FD_SCRATCH_ALLOC_APPEND( l, fd_block_map_align(), fd_block_map_footprint( block_max, lock_cnt, BLOCK_INFO_PROBE_CNT ) );
  void * block_idx  = FD_SCRATCH_ALLOC_APPEND( l, fd_block_idx_align(), fd_block_idx_footprint( lg_idx_max ) );
  void * slot_deque = FD_SCRATCH_ALLOC_APPEND( l, fd_slot_deque_align(), fd_slot_deque_footprint( block_max ) );
  void * txn_map    = FD_SCRATCH_ALLOC_APPEND( l, fd_txn_map_align(), fd_txn_map_footprint( txn_max ) );
  void * alloc      = FD_SCRATCH_ALLOC_APPEND( l, fd_alloc_align(), fd_alloc_footprint() );
  ulong top = FD_SCRATCH_ALLOC_FINI( l, fd_blockstore_align() );
  /* Sanity check: carve total must equal the advertised footprint. */
  FD_TEST( fd_ulong_align_up( top - (ulong)shmem, fd_alloc_align() ) == fd_ulong_align_up( fd_blockstore_footprint( shred_max, block_max, idx_max, txn_max ), fd_alloc_align() ) );

  /* shreds is located by the pool/map joins in fd_blockstore_join; it
     needs no formatting here. */
  (void)shreds;
  fd_buf_shred_pool_new( shred_pool );
  fd_buf_shred_map_new ( shred_map, shred_max, seed );
  memset( blocks, 0, sizeof(fd_block_info_t) * block_max );
  FD_TEST( fd_block_map_new ( block_map, block_max, lock_cnt, BLOCK_INFO_PROBE_CNT, seed ) );

  /* Record gaddrs of the per-structure joins in the header. */
  blockstore_shmem->block_idx_gaddr  = fd_wksp_gaddr( wksp, fd_block_idx_join( fd_block_idx_new( block_idx, lg_idx_max ) ) );
  blockstore_shmem->slot_deque_gaddr = fd_wksp_gaddr( wksp, fd_slot_deque_join (fd_slot_deque_new( slot_deque, block_max ) ) );
  blockstore_shmem->txn_map_gaddr    = fd_wksp_gaddr( wksp, fd_txn_map_join (fd_txn_map_new( txn_map, txn_max, seed ) ) );
  blockstore_shmem->alloc_gaddr      = fd_wksp_gaddr( wksp, fd_alloc_join (fd_alloc_new( alloc, wksp_tag ), wksp_tag ) );

  FD_TEST( blockstore_shmem->block_idx_gaddr  );
  FD_TEST( blockstore_shmem->slot_deque_gaddr );
  FD_TEST( blockstore_shmem->txn_map_gaddr    );
  FD_TEST( blockstore_shmem->alloc_gaddr      );

  blockstore_shmem->blockstore_gaddr = fd_wksp_gaddr_fast( wksp, blockstore_shmem );
  blockstore_shmem->wksp_tag         = wksp_tag;
  blockstore_shmem->seed             = seed;

  /* Archiver file ring starts empty at its minimum size; resized later
     by fd_blockstore_init. */
  blockstore_shmem->archiver = (fd_blockstore_archiver_t){
    .fd_size_max = FD_BLOCKSTORE_ARCHIVE_MIN_SIZE,
    .head = FD_BLOCKSTORE_ARCHIVE_START,
    .tail = FD_BLOCKSTORE_ARCHIVE_START,
    .num_blocks = 0,
  };

  /* Consensus watermarks are unknown until fd_blockstore_init. */
  blockstore_shmem->lps = FD_SLOT_NULL;
  blockstore_shmem->hcs = FD_SLOT_NULL;
  blockstore_shmem->wmk = FD_SLOT_NULL;

  blockstore_shmem->shred_max = shred_max;
  blockstore_shmem->block_max = block_max;
  blockstore_shmem->idx_max   = idx_max;
  blockstore_shmem->txn_max   = txn_max;

  /* Publish the magic last, fenced, so concurrent joiners never observe
     a partially initialized region. */
  FD_COMPILER_MFENCE();
  FD_VOLATILE( blockstore_shmem->magic ) = FD_BLOCKSTORE_MAGIC;
  FD_COMPILER_MFENCE();

  return (void *)blockstore_shmem;
}
105 :
/* fd_blockstore_join joins the caller to a blockstore previously
   formatted by fd_blockstore_new.  ljoin points to caller-local memory
   that holds the fd_blockstore_t (the local join handles); shblockstore
   points to the shared formatted region.  Returns ljoin cast to
   fd_blockstore_t * on success, NULL on failure (logs warning). */
fd_blockstore_t *
fd_blockstore_join( void * ljoin, void * shblockstore ) {
  fd_blockstore_t       * join       = (fd_blockstore_t *)ljoin;
  fd_blockstore_shmem_t * blockstore = (fd_blockstore_shmem_t *)shblockstore;

  if( FD_UNLIKELY( !join ) ) {
    FD_LOG_WARNING(( "NULL ljoin" ));
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)join, alignof(fd_blockstore_t) ) ) ) {
    FD_LOG_WARNING(( "misaligned ljoin" ));
    return NULL;
  }

  if( FD_UNLIKELY( !blockstore ) ) {
    FD_LOG_WARNING(( "NULL shblockstore" ));
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)blockstore, fd_blockstore_align() ) )) {
    FD_LOG_WARNING(( "misaligned shblockstore" ));
    return NULL;
  }

  if( FD_UNLIKELY( blockstore->magic != FD_BLOCKSTORE_MAGIC ) ) {
    FD_LOG_WARNING(( "bad magic" ));
    return NULL;
  }

  /* Re-walk the layout to recover pointers to each constituent
     structure.  This walk MUST mirror the one in fd_blockstore_new
     byte-for-byte (same order, alignments and footprints). */
  FD_SCRATCH_ALLOC_INIT( l, shblockstore );
  blockstore        = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_blockstore_shmem_t), sizeof(fd_blockstore_shmem_t) );
  void * shreds     = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_buf_shred_t), sizeof(fd_buf_shred_t) * blockstore->shred_max );
  void * shred_pool = FD_SCRATCH_ALLOC_APPEND( l, fd_buf_shred_pool_align(), fd_buf_shred_pool_footprint() );
  void * shred_map  = FD_SCRATCH_ALLOC_APPEND( l, fd_buf_shred_map_align(), fd_buf_shred_map_footprint( blockstore->shred_max ) );
  void * blocks     = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_block_info_t), sizeof(fd_block_info_t) * blockstore->block_max );
  void * block_map  = FD_SCRATCH_ALLOC_APPEND( l, fd_block_map_align(), fd_block_map_footprint( blockstore->block_max,
                                                                                                fd_ulong_min(blockstore->block_max, BLOCK_INFO_LOCK_CNT),
                                                                                                BLOCK_INFO_PROBE_CNT ) );
  FD_SCRATCH_ALLOC_FINI( l, fd_blockstore_align() );

  /* Populate the local join handles. */
  join->shmem = blockstore;
  fd_buf_shred_pool_join( join->shred_pool, shred_pool, shreds, blockstore->shred_max );
  fd_buf_shred_map_join ( join->shred_map, shred_map, shreds, blockstore->shred_max );
  fd_block_map_join     ( join->block_map, block_map, blocks );

  /* Fail fast (abort) if the shared structures are inconsistent. */
  FD_TEST( fd_buf_shred_pool_verify( join->shred_pool ) == FD_POOL_SUCCESS );
  FD_TEST( fd_buf_shred_map_verify ( join->shred_map  ) == FD_MAP_SUCCESS  );
  FD_TEST( fd_block_map_verify     ( join->block_map  ) == FD_MAP_SUCCESS  );

  return join;
}
158 :
159 : void *
160 0 : fd_blockstore_leave( fd_blockstore_t * blockstore ) {
161 :
162 0 : if( FD_UNLIKELY( !blockstore ) ) {
163 0 : FD_LOG_WARNING(( "NULL blockstore" ));
164 0 : return NULL;
165 0 : }
166 :
167 0 : fd_wksp_t * wksp = fd_wksp_containing( blockstore );
168 0 : if( FD_UNLIKELY( !wksp ) ) {
169 0 : FD_LOG_WARNING(( "shmem must be part of a workspace" ));
170 0 : return NULL;
171 0 : }
172 :
173 0 : FD_TEST( fd_buf_shred_pool_leave( blockstore->shred_pool ) );
174 0 : FD_TEST( fd_buf_shred_map_leave( blockstore->shred_map ) );
175 0 : FD_TEST( fd_block_map_leave( blockstore->block_map ) );
176 0 : FD_TEST( fd_block_idx_leave( fd_blockstore_block_idx( blockstore ) ) );
177 0 : FD_TEST( fd_slot_deque_leave( fd_blockstore_slot_deque( blockstore ) ) );
178 0 : FD_TEST( fd_txn_map_leave( fd_blockstore_txn_map( blockstore ) ) );
179 0 : FD_TEST( fd_alloc_leave( fd_blockstore_alloc( blockstore ) ) );
180 :
181 0 : return (void *)blockstore;
182 0 : }
183 :
184 : void *
185 0 : fd_blockstore_delete( void * shblockstore ) {
186 0 : fd_blockstore_t * blockstore = (fd_blockstore_t *)shblockstore;
187 :
188 0 : if( FD_UNLIKELY( !blockstore ) ) {
189 0 : FD_LOG_WARNING(( "NULL shblockstore" ));
190 0 : return NULL;
191 0 : }
192 :
193 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned((ulong)blockstore, fd_blockstore_align() ) )) {
194 0 : FD_LOG_WARNING(( "misaligned shblockstore" ));
195 0 : return NULL;
196 0 : }
197 :
198 0 : if( FD_UNLIKELY( blockstore->shmem->magic != FD_BLOCKSTORE_MAGIC ) ) {
199 0 : FD_LOG_WARNING(( "bad magic" ));
200 0 : return NULL;
201 0 : }
202 :
203 0 : fd_wksp_t * wksp = fd_wksp_containing( blockstore );
204 0 : if( FD_UNLIKELY( !wksp ) ) {
205 0 : FD_LOG_WARNING(( "shmem must be part of a workspace" ));
206 0 : return NULL;
207 0 : }
208 :
209 : /* Delete all structures. */
210 :
211 0 : FD_TEST( fd_buf_shred_pool_delete( &blockstore->shred_pool ) );
212 0 : FD_TEST( fd_buf_shred_map_delete( &blockstore->shred_map ) );
213 0 : FD_TEST( fd_block_map_delete( &blockstore->block_map ) );
214 0 : FD_TEST( fd_block_idx_delete( fd_blockstore_block_idx( blockstore ) ) );
215 0 : FD_TEST( fd_slot_deque_delete( fd_blockstore_slot_deque( blockstore ) ) );
216 0 : FD_TEST( fd_txn_map_delete( fd_blockstore_txn_map( blockstore ) ) );
217 0 : FD_TEST( fd_alloc_delete( fd_blockstore_alloc( blockstore ) ) );
218 :
219 0 : FD_COMPILER_MFENCE();
220 0 : FD_VOLATILE( blockstore->shmem->magic ) = 0UL;
221 0 : FD_COMPILER_MFENCE();
222 :
223 0 : return blockstore;
224 0 : }
225 :
/* check_read_err_safe: if cond is true, log a warning tagged with the
   caller's function name and msg, then return
   FD_BLOCKSTORE_ERR_SLOT_MISSING from the enclosing function.  Wrapped
   in do { ... } while(0) WITHOUT a trailing semicolon so the expansion
   is a single statement and the caller supplies the ';' — the previous
   version embedded the semicolon, which expands to two statements and
   breaks use inside an unbraced if/else. */
#define check_read_err_safe( cond, msg )                  \
  do {                                                    \
    if( FD_UNLIKELY( cond ) ) {                           \
      FD_LOG_WARNING(( "[%s] %s", __func__, msg ));       \
      return FD_BLOCKSTORE_ERR_SLOT_MISSING;              \
    }                                                     \
  } while(0)
233 :
/* fd_blockstore_init initializes a joined blockstore from a snapshot's
   slot bank: sizes the archive file, seeds the consensus watermarks
   (lps/hcs/wmk) at the snapshot slot, and inserts a block map entry for
   that slot marked fully completed/processed/finalized so it can anchor
   the fork tree.  fd is the archive file descriptor; fd_size_max its
   capacity.  Returns blockstore on success.  NOTE(review): FD_LOG_ERR
   presumably aborts the process, making the subsequent `return NULL`s
   defensive/unreachable — confirm against the logging macros. */
fd_blockstore_t *
fd_blockstore_init( fd_blockstore_t * blockstore, int fd, ulong fd_size_max, fd_slot_bank_t const * slot_bank ) {

  if ( fd_size_max < FD_BLOCKSTORE_ARCHIVE_MIN_SIZE ) {
    FD_LOG_ERR(( "archive file size too small" ));
    return NULL;
  }
  blockstore->shmem->archiver.fd_size_max = fd_size_max;

  //build_idx( blockstore, fd );
  /* Position archive fd at end-of-file for appending.  NOTE(review):
     return value of lseek is ignored — confirm failure here is benign. */
  lseek( fd, 0, SEEK_END );

  /* initialize fields using slot bank */

  ulong smr = slot_bank->slot;

  /* All three watermarks start at the snapshot slot. */
  blockstore->shmem->lps = smr;
  blockstore->shmem->hcs = smr;
  blockstore->shmem->wmk = smr;

  fd_block_map_query_t query[1];

  int err = fd_block_map_prepare( blockstore->block_map, &smr, NULL, query, FD_MAP_FLAG_BLOCKING );
  fd_block_info_t * ele = fd_block_map_query_ele( query );
  if ( FD_UNLIKELY( err ) ) FD_LOG_ERR(( "failed to prepare block map for slot %lu", smr ));

  ele->slot        = smr;
  ele->parent_slot = slot_bank->prev_slot;
  /* Fill child slots with an all-ones (null) sentinel. */
  memset( ele->child_slots, UCHAR_MAX, FD_BLOCKSTORE_CHILD_SLOT_MAX * sizeof( ulong ) );
  ele->child_slot_cnt = 0;
  ele->block_height   = slot_bank->block_height;
  /* NOTE(review): block_hash is set twice — first copied from
     block_hash_queue.last_hash here, then overwritten with poh below.
     Confirm the poh assignment is the intended final value and whether
     the memcpy should target a different field. */
  memcpy( &ele->block_hash, slot_bank->block_hash_queue.last_hash, sizeof(fd_hash_t) );
  ele->bank_hash  = slot_bank->banks_hash;
  ele->block_hash = slot_bank->poh;
  /* Snapshot slot is by definition complete and rooted: set every
     lifecycle flag up through FINALIZED. */
  ele->flags = fd_uchar_set_bit(
                   fd_uchar_set_bit(
                       fd_uchar_set_bit(
                           fd_uchar_set_bit(
                               fd_uchar_set_bit( ele->flags,
                                                 FD_BLOCK_FLAG_COMPLETED ),
                               FD_BLOCK_FLAG_PROCESSED ),
                           FD_BLOCK_FLAG_EQVOCSAFE ),
                       FD_BLOCK_FLAG_CONFIRMED ),
                   FD_BLOCK_FLAG_FINALIZED );
  // ele->ref_tick = 0;
  ele->ts                    = 0;
  ele->consumed_idx          = 0;
  ele->received_idx          = 0;
  ele->buffered_idx          = 0;
  ele->data_complete_idx     = 0;
  ele->slot_complete_idx     = 0;
  ele->ticks_consumed        = 0;
  ele->tick_hash_count_accum = 0;
  fd_block_set_null( ele->data_complete_idxs );

  /* Set all fields to 0. Caller's responsibility to check gaddr and sz != 0. */

  fd_block_map_publish( query );

  return blockstore;
}
295 :
296 : void
297 0 : fd_blockstore_fini( fd_blockstore_t * blockstore ) {
298 : /* Free all allocations by removing all slots (whether they are
299 : complete or not). */
300 0 : fd_block_info_t * ele0 = (fd_block_info_t *)fd_block_map_shele( blockstore->block_map );
301 0 : ulong block_max = fd_block_map_ele_max( blockstore->block_map );
302 0 : for( ulong ele_idx=0; ele_idx<block_max; ele_idx++ ) {
303 0 : fd_block_info_t * ele = ele0 + ele_idx;
304 0 : if( ele->slot == 0 ) continue; /* unused */
305 0 : fd_blockstore_slot_remove( blockstore, ele->slot );
306 0 : }
307 0 : }
308 :
309 : /* txn map helpers */
310 :
311 : FD_FN_PURE int
312 0 : fd_txn_key_equal( fd_txn_key_t const * k0, fd_txn_key_t const * k1 ) {
313 0 : for( ulong i = 0; i < FD_ED25519_SIG_SZ / sizeof( ulong ); ++i )
314 0 : if( k0->v[i] != k1->v[i] ) return 0;
315 0 : return 1;
316 0 : }
317 :
318 : FD_FN_PURE ulong
319 0 : fd_txn_key_hash( fd_txn_key_t const * k, ulong seed ) {
320 0 : ulong h = seed;
321 0 : for( ulong i = 0; i < FD_ED25519_SIG_SZ / sizeof( ulong ); ++i )
322 0 : h ^= k->v[i];
323 0 : return h;
324 0 : }
325 :
326 : /* Remove a slot from blockstore. Needs to currently be under a blockstore_write
327 : lock due to txn_map access. */
void
fd_blockstore_slot_remove( fd_blockstore_t * blockstore, ulong slot ) {
  FD_LOG_NOTICE(( "[%s] slot: %lu", __func__, slot ));

  /* It is not safe to remove a replaying block.  Speculatively read the
     block info (retrying on AGAIN) to check the REPLAYING flag and
     snapshot parent_slot / received_idx before removal. */
  fd_block_map_query_t query[1] = { 0 };
  ulong parent_slot  = FD_SLOT_NULL;
  ulong received_idx = 0;
  int err = FD_MAP_ERR_AGAIN;
  while( err == FD_MAP_ERR_AGAIN ) {
    err = fd_block_map_query_try( blockstore->block_map, &slot, NULL, query, 0 );
    if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
    if( FD_UNLIKELY( err == FD_MAP_ERR_KEY ) ) return; /* slot not found */
    fd_block_info_t * block_info = fd_block_map_query_ele( query );
    if( FD_UNLIKELY( fd_uchar_extract_bit( block_info->flags, FD_BLOCK_FLAG_REPLAYING ) ) ) {
      FD_LOG_WARNING(( "[%s] slot %lu has replay in progress. not removing.", __func__, slot ));
      return;
    }
    parent_slot  = block_info->parent_slot;
    received_idx = block_info->received_idx;
    /* Re-test the speculative read; loop if it raced with a writer. */
    err = fd_block_map_query_test( query );
  }

  /* Remove the entry itself.  Return value deliberately unchecked: the
     key was just observed present and removal is blocking. */
  err = fd_block_map_remove( blockstore->block_map, &slot, query, FD_MAP_FLAG_BLOCKING );
  /* not possible to fail */
  FD_TEST( !fd_blockstore_block_info_test( blockstore, slot ) );

  /* Unlink slot from its parent only if it is not published.
     NOTE(review): publish below runs even when prepare fails / the
     parent is absent — confirm fd_block_map_publish tolerates that. */
  err = fd_block_map_prepare( blockstore->block_map, &parent_slot, NULL, query, FD_MAP_FLAG_BLOCKING );
  fd_block_info_t * parent_block_info = fd_block_map_query_ele( query );
  if( FD_LIKELY( parent_block_info ) ) {
    for( ulong i = 0; i < parent_block_info->child_slot_cnt; i++ ) {
      if( FD_LIKELY( parent_block_info->child_slots[i] == slot ) ) {
        /* swap-remove: replace with last child and shrink the count */
        parent_block_info->child_slots[i] =
            parent_block_info->child_slots[--parent_block_info->child_slot_cnt];
      }
    }
  }
  fd_block_map_publish( query );

  /* Remove buf_shreds.  received_idx is one past the highest shred idx
     seen, so this covers every shred buffered for the slot. */
  for( uint idx = 0; idx < received_idx; idx++ ) {
    fd_blockstore_shred_remove( blockstore, slot, idx );
  }

  return;
}
375 :
/* fd_blockstore_publish advances the publish watermark (wmk) to the new
   smr `wmk`, pruning every block map entry on forks that do not lead to
   it.  Performs a BFS rooted at the old wmk using the slot deque as the
   queue; the archiving of finalized blocks to fd is currently disabled
   (commented out).  fd is unused while archiving is disabled. */
void
fd_blockstore_publish( fd_blockstore_t * blockstore,
                       int fd FD_PARAM_UNUSED,
                       ulong wmk ) {
  FD_LOG_NOTICE(( "[%s] wmk %lu => smr %lu", __func__, blockstore->shmem->wmk, wmk ));

  /* Caller is incorrectly calling publish. */

  if( FD_UNLIKELY( blockstore->shmem->wmk == wmk ) ) {
    FD_LOG_WARNING(( "[%s] attempting to re-publish when wmk %lu already at smr %lu", __func__, blockstore->shmem->wmk, wmk ));
    return;
  }

  /* q uses the slot_deque as the BFS queue */

  ulong * q = fd_blockstore_slot_deque( blockstore );

  /* Clear the deque, preparing it to be reused. */

  fd_slot_deque_remove_all( q );

  /* Push the watermark onto the queue. */

  fd_slot_deque_push_tail( q, blockstore->shmem->wmk );

  /* Conduct a BFS to find slots to prune or archive. */

  while( !fd_slot_deque_empty( q ) ) {
    ulong slot = fd_slot_deque_pop_head( q );
    fd_block_map_query_t query[1];
    /* Blocking read -- we need the block_info ptr to be valid for the
       whole time that we are writing stuff to the archiver file. */
    int err = fd_block_map_prepare( blockstore->block_map, &slot, NULL, query, FD_MAP_FLAG_BLOCKING );
    if( FD_UNLIKELY( err ) ) {
      FD_LOG_WARNING(( "[%s] failed to prepare block map for blockstore publishing %lu", __func__, slot ));
      continue;
    }
    fd_block_info_t * block_info = fd_block_map_query_ele( query );

    /* Add slot's children to the queue. */

    for( ulong i = 0; i < block_info->child_slot_cnt; i++ ) {

      /* Stop upon reaching the SMR: the subtree rooted at the new wmk
         survives the prune. */

      if( FD_LIKELY( block_info->child_slots[i] != wmk ) ) {
        fd_slot_deque_push_tail( q, block_info->child_slots[i] );
      }
    }

    /* Archive the block into a file if it is finalized. */

    /* if( fd_uchar_extract_bit( block_info->flags, FD_BLOCK_FLAG_FINALIZED ) ) {
      fd_block_t * block = fd_wksp_laddr_fast( fd_blockstore_wksp( blockstore ), block_info->block_gaddr );
      uchar * data = fd_wksp_laddr_fast( fd_blockstore_wksp( blockstore ), block->data_gaddr );

      fd_block_idx_t * block_idx = fd_blockstore_block_idx( blockstore );

      if( FD_UNLIKELY( fd_block_idx_query( block_idx, slot, NULL ) ) ) {
        FD_LOG_ERR(( "[%s] invariant violation. attempted to re-archive finalized block: %lu", __func__, slot ));
      } else {
        fd_blockstore_ser_t ser = {
          .block_map = block_info,
          .block     = block,
          .data      = data
        };
        fd_blockstore_block_checkpt( blockstore, &ser, fd, slot );
      }
    } */
    /* Release the prepare before slot_remove re-acquires map locks. */
    fd_block_map_cancel( query ); // TODO: maybe we should not make prepare so large and instead call prepare again in helpers
    fd_blockstore_slot_remove( blockstore, slot );
  }

  /* Scan to clean up any orphaned blocks or shreds < new SMR. */

  for (ulong slot = blockstore->shmem->wmk; slot < wmk; slot++) {
    fd_blockstore_slot_remove( blockstore, slot );
  }

  blockstore->shmem->wmk = wmk;

  return;
}
459 :
460 : void
461 0 : fd_blockstore_shred_remove( fd_blockstore_t * blockstore, ulong slot, uint idx ) {
462 : // if ( fd_buf_shred_pool_verify( blockstore->shred_pool ) != FD_POOL_SUCCESS || fd_buf_shred_map_verify ( blockstore->shred_map ) != FD_MAP_SUCCESS ) {
463 : // FD_LOG_NOTICE(( "slot %lu idx %u", slot, idx ));
464 : // __asm__("int $3");
465 : // }
466 0 : fd_shred_key_t key = { slot, idx };
467 :
468 0 : fd_buf_shred_map_query_t query[1] = { 0 };
469 0 : int err = fd_buf_shred_map_remove( blockstore->shred_map, &key, NULL, query, FD_MAP_FLAG_BLOCKING );
470 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_CORRUPT ) ) FD_LOG_ERR(( "[%s] map corrupt: shred %lu %u", __func__, slot, idx ));
471 :
472 0 : if( FD_LIKELY( err == FD_MAP_SUCCESS ) ) {
473 0 : fd_buf_shred_t * shred = fd_buf_shred_map_query_ele( query );
474 0 : int err = fd_buf_shred_pool_release( blockstore->shred_pool, shred, 1 );
475 0 : if( FD_UNLIKELY( err == FD_POOL_ERR_INVAL ) ) FD_LOG_ERR(( "[%s] pool error: shred %lu %u not in pool", __func__, slot, idx ));
476 0 : if( FD_UNLIKELY( err == FD_POOL_ERR_CORRUPT ) ) FD_LOG_ERR(( "[%s] pool corrupt: shred %lu %u", __func__, slot, idx ));
477 0 : FD_TEST( !err );
478 0 : }
479 : // FD_TEST( fd_buf_shred_pool_verify( blockstore->shred_pool ) == FD_POOL_SUCCESS );
480 : // FD_TEST( fd_buf_shred_map_verify ( blockstore->shred_map ) == FD_MAP_SUCCESS );
481 0 : }
482 :
483 :
/* fd_blockstore_shred_insert inserts data shred `shred` into the
   blockstore: buffers the payload in the shred pool/map, creates the
   slot's block map entry on first shred, advances the buffered /
   data-complete watermarks, and links the slot into its parent's child
   list.  Shreds below the publish watermark are dropped.  Duplicate
   keys keep the first payload and mark the stored shred equivocating
   when the payloads differ. */
void
fd_blockstore_shred_insert( fd_blockstore_t * blockstore, fd_shred_t const * shred ) {
  // FD_LOG_NOTICE(( "[%s] slot %lu idx %u", __func__, shred->slot, shred->idx ));

  ulong slot = shred->slot;

  /* Slots below the watermark have already been pruned/archived. */
  if( FD_UNLIKELY( slot < blockstore->shmem->wmk ) ) {
    FD_LOG_DEBUG(( "[%s] slot %lu < wmk %lu. not inserting shred", __func__, slot, blockstore->shmem->wmk ));
    return;
  }

  fd_shred_key_t key = { slot, .idx = shred->idx };

  /* Test if the blockstore already contains this shred key. */

  if( FD_UNLIKELY( fd_blockstore_shred_test( blockstore, slot, shred->idx ) ) ) {

    /* If we receive a shred with the same key (slot and shred idx) but
       different payload as one we already have, we'll only keep the
       first. Once we receive the full block, we'll use merkle chaining
       from the last FEC set to determine whether we have the correct
       shred at every index.

       Later, if the block fails to replay (dead block) or the block
       hash doesn't match the one we observe from votes, we'll dump the
       entire block and use repair to recover the one a majority (52%)
       of the cluster has voted on. */

    for(;;) {
      fd_buf_shred_map_query_t query[1] = { 0 };
      int err = fd_buf_shred_map_query_try( blockstore->shred_map, &key, NULL, query );
      if( FD_UNLIKELY( err == FD_MAP_ERR_CORRUPT ) ) FD_LOG_ERR(( "[%s] %s. shred: (%lu, %u)", __func__, fd_buf_shred_map_strerror( err ), slot, shred->idx ));
      if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
      fd_buf_shred_t * buf_shred = fd_buf_shred_map_query_ele( query );
      /* eqvoc = same payload size but differing bytes.  NOTE(review):
         memcmp's first arg is the fd_buf_shred_t struct base, not the
         stored payload — confirm this shouldn't compare buf_shred->buf
         (or hdr) against shred instead. */
      buf_shred->eqvoc = ( fd_shred_payload_sz( &buf_shred->hdr ) == fd_shred_payload_sz( shred ) &&
                           memcmp( buf_shred, shred, fd_shred_payload_sz( shred ) ) );
      err = fd_buf_shred_map_query_test( query );
      if( FD_LIKELY( err == FD_MAP_SUCCESS) ) break;
    }
    return;
  }

  /* Insert the new shred. */

  int err;
  fd_buf_shred_t * ele = fd_buf_shred_pool_acquire( blockstore->shred_pool, NULL, 1, &err );
  if( FD_UNLIKELY( err == FD_POOL_ERR_EMPTY ) ) FD_LOG_ERR(( "[%s] %s. increase blockstore shred_max.", __func__, fd_buf_shred_pool_strerror( err ) ));
  if( FD_UNLIKELY( err == FD_POOL_ERR_CORRUPT ) ) FD_LOG_ERR(( "[%s] %s.", __func__, fd_buf_shred_pool_strerror( err ) ));

  ele->key = key;
  ele->hdr = *shred;
  fd_memcpy( &ele->buf, shred, fd_shred_sz( shred ) );
  err = fd_buf_shred_map_insert( blockstore->shred_map, ele, FD_MAP_FLAG_BLOCKING );
  if( FD_UNLIKELY( err == FD_MAP_ERR_INVAL ) ) FD_LOG_ERR(( "[%s] map error. ele not in pool.", __func__ ));

  /* Update shred's associated slot meta */

  if( FD_UNLIKELY( !fd_blockstore_block_info_test( blockstore, slot ) ) ) {
    fd_block_map_query_t query[1] = { 0 };
    /* Prepare will succeed regardless of if the key is in the map or not. It either returns
       the element at that idx, or it will return a spot to insert new stuff. So we need to check
       if that space is actually unused, to signify that we are adding a new entry. */

    /* Try to insert slot into block_map TODO make non blocking? */

    err = fd_block_map_prepare( blockstore->block_map, &slot, NULL, query, FD_MAP_FLAG_BLOCKING );
    fd_block_info_t * block_info = fd_block_map_query_ele( query );

    if( FD_UNLIKELY( err == FD_MAP_ERR_FULL ) ){
      FD_LOG_ERR(( "[%s] OOM: failed to insert new block map entry. blockstore needs to save metadata for all slots >= SMR, so increase memory or check for issues with publishing new SMRs.", __func__ ));
    }

    /* Initialize the block_info. Note some fields are initialized
       to dummy values because we do not have all the necessary metadata
       yet. */

    block_info->slot = slot;

    /* parent_off is the data shred's offset back to the parent slot. */
    block_info->parent_slot = slot - shred->data.parent_off;
    memset( block_info->child_slots, UCHAR_MAX, FD_BLOCKSTORE_CHILD_SLOT_MAX * sizeof(ulong) );
    block_info->child_slot_cnt = 0;

    block_info->block_height = 0;
    block_info->block_hash   = ( fd_hash_t ){ 0 };
    block_info->bank_hash    = ( fd_hash_t ){ 0 };
    block_info->flags        = fd_uchar_set_bit( 0, FD_BLOCK_FLAG_RECEIVING );
    block_info->ts           = 0;
    // block_info->ref_tick = (uchar)( (int)shred->data.flags &
    //                                 (int)FD_SHRED_DATA_REF_TICK_MASK );
    /* UINT_MAX marks "no shred buffered/consumed yet". */
    block_info->buffered_idx = UINT_MAX;
    block_info->received_idx = 0;
    block_info->consumed_idx = UINT_MAX;

    block_info->data_complete_idx = UINT_MAX;
    block_info->slot_complete_idx = UINT_MAX;

    block_info->ticks_consumed       = 0;
    block_info->tick_hash_count_accum = 0;

    fd_block_set_null( block_info->data_complete_idxs );

    block_info->block_gaddr = 0;

    fd_block_map_publish( query );

    FD_TEST( fd_blockstore_block_info_test( blockstore, slot ) );
  }
  fd_block_map_query_t query[1] = { 0 };
  err = fd_block_map_prepare( blockstore->block_map, &slot, NULL, query, FD_MAP_FLAG_BLOCKING );
  fd_block_info_t * block_info = fd_block_map_query_ele( query ); /* should be impossible for this to fail */

  /* Advance the buffered_idx watermark.  buffered_idx+1 wraps UINT_MAX
     back to 0, so a fresh entry starts probing at shred idx 0. */

  uint prev_buffered_idx = block_info->buffered_idx;
  while( FD_LIKELY( fd_blockstore_shred_test( blockstore, slot, block_info->buffered_idx + 1 ) ) ) {
    block_info->buffered_idx++;
  }

  /* Mark the ending shred idxs of entry batches. */

  fd_block_set_insert_if( block_info->data_complete_idxs, shred->data.flags & FD_SHRED_DATA_FLAG_DATA_COMPLETE, shred->idx );

  /* Advance the data_complete_idx watermark using the shreds in between
     the previous consumed_idx and current consumed_idx. */

  for (uint idx = prev_buffered_idx + 1; block_info->buffered_idx != FD_SHRED_IDX_NULL && idx <= block_info->buffered_idx; idx++) {
    if( FD_UNLIKELY( fd_block_set_test( block_info->data_complete_idxs, idx ) ) ) {
      block_info->data_complete_idx = idx;
    }
  }

  /* Update received_idx and slot_complete_idx. */

  block_info->received_idx = fd_uint_max( block_info->received_idx, shred->idx + 1 );
  if( FD_UNLIKELY( shred->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE ) ) {
    // FD_LOG_NOTICE(( "slot %lu %u complete", slot, shred->idx ));
    block_info->slot_complete_idx = shred->idx;
  }

  /* Snapshot parent_slot before releasing the prepare. */
  ulong parent_slot = block_info->parent_slot;

  FD_LOG_DEBUG(( "shred: (%lu, %u). consumed: %u, received: %u, complete: %u",
                 slot,
                 shred->idx,
                 block_info->buffered_idx,
                 block_info->received_idx,
                 block_info->slot_complete_idx ));
  fd_block_map_publish( query );

  /* Update ancestry metadata: parent_slot, is_connected, next_slot.

     If the parent_slot happens to be very old, there's a chance that
     it's hash probe could collide with an existing slot in the block
     map, and cause what looks like an OOM. Instead of using map_prepare
     and hitting this collision, we can either check that the
     parent_slot lives in the map with a block_info_test, or use the
     shmem wmk value as a more general guard against querying for
     parents that are too old. */

  if( FD_LIKELY( parent_slot < blockstore->shmem->wmk ) ) return;

  err = fd_block_map_prepare( blockstore->block_map, &parent_slot, NULL, query, FD_MAP_FLAG_BLOCKING );
  fd_block_info_t * parent_block_info = fd_block_map_query_ele( query );

  /* Add this slot to its parent's child slots if not already there. */

  if( FD_LIKELY( parent_block_info && parent_block_info->slot == parent_slot ) ) {
    int found = 0;
    for( ulong i = 0; i < parent_block_info->child_slot_cnt; i++ ) {
      if( FD_LIKELY( parent_block_info->child_slots[i] == slot ) ) {
        found = 1;
        break;
      }
    }
    if( FD_UNLIKELY( !found ) ) { /* add to parent's child slots if not already there */
      if( FD_UNLIKELY( parent_block_info->child_slot_cnt == FD_BLOCKSTORE_CHILD_SLOT_MAX ) ) {
        FD_LOG_ERR(( "failed to add slot %lu to parent %lu's children. exceeding child slot max",
                     slot,
                     parent_block_info->slot ));
      }
      parent_block_info->child_slots[parent_block_info->child_slot_cnt++] = slot;
    }
  }
  if( FD_LIKELY( err == FD_MAP_SUCCESS ) ) {
    fd_block_map_publish( query );
  } else {
    /* err is FD_MAP_ERR_FULL. Not in a valid prepare. Can happen if we
       are about to OOM, or if the parents are so far away that it just
       happens to chain longer than the probe_max. Somewhat covered by
       the early return, but there are some edge cases where we reach
       here, and it shouldn't be a LOG_ERR */
    FD_LOG_WARNING(( "block info not found for parent slot %lu. Have we seen it before?", parent_slot ));
  }

  //FD_TEST( fd_block_map_verify( blockstore->block_map ) == FD_MAP_SUCCESS );
}
680 :
681 : int
682 0 : fd_blockstore_shred_test( fd_blockstore_t * blockstore, ulong slot, uint idx ) {
683 0 : fd_shred_key_t key = { slot, idx };
684 0 : fd_buf_shred_map_query_t query[1] = { 0 };
685 :
686 0 : for(;;) {
687 0 : int err = fd_buf_shred_map_query_try( blockstore->shred_map, &key, NULL, query );
688 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_CORRUPT ) ) FD_LOG_ERR(( "[%s] slot: %lu idx: %u. %s", __func__, slot, idx, fd_buf_shred_map_strerror( err ) ));
689 0 : if( FD_LIKELY( !fd_buf_shred_map_query_test( query ) ) ) return err != FD_MAP_ERR_KEY;
690 0 : }
691 0 : }
692 :
693 : int
694 0 : fd_blockstore_block_info_test( fd_blockstore_t * blockstore, ulong slot ) {
695 0 : int err = FD_MAP_ERR_AGAIN;
696 0 : while( err == FD_MAP_ERR_AGAIN ){
697 0 : fd_block_map_query_t query[1] = { 0 };
698 0 : err = fd_block_map_query_try( blockstore->block_map, &slot, NULL, query, 0 );
699 0 : if( err == FD_MAP_ERR_AGAIN ) continue;
700 0 : if( err == FD_MAP_ERR_KEY ) return 0;
701 0 : err = fd_block_map_query_test( query );
702 0 : }
703 0 : return 1;
704 0 : }
705 :
706 : fd_block_info_t *
707 0 : fd_blockstore_block_map_query( fd_blockstore_t * blockstore, ulong slot ){
708 0 : fd_block_map_query_t quer[1] = { 0 };
709 0 : int err = fd_block_map_query_try( blockstore->block_map, &slot, NULL, quer, FD_MAP_FLAG_BLOCKING );
710 0 : fd_block_info_t * meta = fd_block_map_query_ele( quer );
711 0 : if( err ) return NULL;
712 0 : return meta;
713 0 : }
714 :
715 : int
716 0 : fd_blockstore_block_info_remove( fd_blockstore_t * blockstore, ulong slot ){
717 0 : int err = FD_MAP_ERR_AGAIN;
718 0 : while( err == FD_MAP_ERR_AGAIN ){
719 0 : err = fd_block_map_remove( blockstore->block_map, &slot, NULL, 0 );
720 0 : if( err == FD_MAP_ERR_KEY ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
721 0 : }
722 0 : return FD_BLOCKSTORE_SUCCESS;
723 0 : }
724 :
/* fd_buf_shred_query_copy_data copies shred (slot, idx) into buf.
   buf_sz must be at least FD_SHRED_MAX_SZ.  Returns the shred sz on
   success, -1 if buf is too small or the shred is not present.
   Lock-free speculative read: the memcpy may observe a torn value, so
   it is re-validated by query_test and retried on AGAIN. */
long
fd_buf_shred_query_copy_data( fd_blockstore_t * blockstore, ulong slot, uint idx, void * buf, ulong buf_sz ) {
  if( buf_sz < FD_SHRED_MAX_SZ ) return -1;
  fd_shred_key_t key = { slot, idx };
  ulong sz  = 0;
  int   err = FD_MAP_ERR_AGAIN;
  while( err == FD_MAP_ERR_AGAIN ) {
    fd_buf_shred_map_query_t query[1] = { 0 };
    err = fd_buf_shred_map_query_try( blockstore->shred_map, &key, NULL, query );
    if( FD_UNLIKELY( err == FD_MAP_ERR_KEY ) ) return -1;
    if( FD_UNLIKELY( err == FD_MAP_ERR_CORRUPT ) ) FD_LOG_ERR(( "[%s] map corrupt. shred %lu %u", __func__, slot, idx ));
    if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
    fd_buf_shred_t const * shred = fd_buf_shred_map_query_ele_const( query );
    sz = fd_shred_sz( &shred->hdr );
    memcpy( buf, shred->buf, sz );
    /* Discard the copy and retry if the read raced a writer. */
    err = fd_buf_shred_map_query_test( query );
  }
  FD_TEST( !err );
  return (long)sz;
}
745 :
746 : int
747 0 : fd_blockstore_block_hash_query( fd_blockstore_t * blockstore, ulong slot, fd_hash_t * hash_out ) {
748 0 : for(;;) { /* Speculate */
749 0 : fd_block_map_query_t query[1] = { 0 };
750 0 : int err = fd_block_map_query_try( blockstore->block_map, &slot, NULL, query, 0 );
751 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_KEY ) ) return FD_BLOCKSTORE_ERR_KEY;
752 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
753 0 : fd_block_info_t * block_info = fd_block_map_query_ele( query );
754 0 : memcpy( hash_out, &block_info->block_hash, sizeof(fd_hash_t) );
755 0 : if( FD_LIKELY( fd_block_map_query_test( query ) == FD_MAP_SUCCESS ) ) return FD_BLOCKSTORE_SUCCESS;
756 0 : }
757 0 : }
758 :
759 : int
760 0 : fd_blockstore_bank_hash_query( fd_blockstore_t * blockstore, ulong slot, fd_hash_t * hash_out ) {
761 0 : for(;;) { /* Speculate */
762 0 : fd_block_map_query_t query[1] = { 0 };
763 0 : int err = fd_block_map_query_try( blockstore->block_map, &slot, NULL, query, 0 );
764 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_KEY ) ) return FD_BLOCKSTORE_ERR_KEY;
765 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
766 0 : fd_block_info_t * block_info = fd_block_map_query_ele( query );
767 0 : memcpy( hash_out, &block_info->bank_hash, sizeof(fd_hash_t) );
768 0 : if( FD_LIKELY( fd_block_map_query_test( query ) == FD_MAP_SUCCESS ) ) return FD_BLOCKSTORE_SUCCESS;
769 0 : }
770 0 : }
771 :
772 : ulong
773 0 : fd_blockstore_parent_slot_query( fd_blockstore_t * blockstore, ulong slot ) {
774 0 : int err = FD_MAP_ERR_AGAIN;
775 0 : ulong parent_slot = FD_SLOT_NULL;
776 0 : while( err == FD_MAP_ERR_AGAIN ){
777 0 : fd_block_map_query_t query[1] = { 0 };
778 0 : err = fd_block_map_query_try( blockstore->block_map, &slot, NULL, query, 0 );
779 0 : fd_block_info_t * block_info = fd_block_map_query_ele( query );
780 :
781 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_KEY ) ) return FD_SLOT_NULL;
782 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
783 :
784 0 : parent_slot = block_info->parent_slot;
785 0 : err = fd_block_map_query_test( query );
786 0 : }
787 0 : return parent_slot;
788 0 : }
789 :
790 : int
791 : fd_blockstore_slice_query( fd_blockstore_t * blockstore,
792 : ulong slot,
793 : uint start_idx,
794 : uint end_idx /* inclusive */,
795 : ulong max,
796 : uchar * buf,
797 0 : ulong * buf_sz ) {
798 : /* verify that the batch idxs provided is at batch boundaries*/
799 :
800 0 : int err = FD_MAP_ERR_AGAIN;
801 0 : int invalid_idx = 0;
802 0 : while( err == FD_MAP_ERR_AGAIN ){
803 0 : fd_block_map_query_t quer[1] = { 0 };
804 0 : err = fd_block_map_query_try( blockstore->block_map, &slot, NULL, quer, 0 );
805 0 : fd_block_info_t * query = fd_block_map_query_ele( quer );
806 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_KEY ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
807 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
808 0 : fd_block_set_t * data_complete_idxs = query->data_complete_idxs;
809 0 : if ( ( start_idx > 0 && !fd_block_set_test( data_complete_idxs, start_idx - 1 ))
810 0 : || start_idx > query->slot_complete_idx
811 0 : || !fd_block_set_test( data_complete_idxs, end_idx ) ) {
812 0 : invalid_idx = 1;
813 0 : }
814 0 : err = fd_block_map_query_test( quer );
815 0 : }
816 0 : if( FD_UNLIKELY( invalid_idx ) ) {
817 0 : FD_LOG_WARNING(( "[%s] invalid idxs: (%lu, %u, %u)", __func__, slot, start_idx, end_idx ));
818 0 : return FD_BLOCKSTORE_ERR_SHRED_INVALID;
819 0 : }
820 :
821 0 : ulong off = 0;
822 0 : for(uint idx = start_idx; idx <= end_idx; idx++) {
823 0 : ulong payload_sz = 0;
824 :
825 0 : for(;;) { /* speculative copy one shred */
826 0 : fd_shred_key_t key = { slot, idx };
827 0 : fd_buf_shred_map_query_t query[1] = { 0 };
828 0 : int err = fd_buf_shred_map_query_try( blockstore->shred_map, &key, NULL, query );
829 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_CORRUPT ) ){
830 0 : FD_LOG_WARNING(( "[%s] key: (%lu, %u) %s", __func__, slot, idx, fd_buf_shred_map_strerror( err ) ));
831 0 : return FD_BLOCKSTORE_ERR_CORRUPT;
832 0 : }
833 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_KEY ) ){
834 0 : FD_LOG_WARNING(( "[%s] key: (%lu, %u) %s", __func__, slot, idx, fd_buf_shred_map_strerror( err ) ));
835 0 : return FD_BLOCKSTORE_ERR_KEY;
836 0 : }
837 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
838 :
839 0 : fd_buf_shred_t const * shred = fd_buf_shred_map_query_ele_const( query );
840 0 : uchar const * payload = fd_shred_data_payload( &shred->hdr );
841 0 : payload_sz = fd_shred_payload_sz( &shred->hdr );
842 0 : if( FD_UNLIKELY( off + payload_sz > max ) ) {
843 0 : FD_LOG_WARNING(( "[%s] increase `max`", __func__ )); /* caller needs to increase max */
844 0 : return FD_BLOCKSTORE_ERR_INVAL;
845 0 : }
846 :
847 0 : if( FD_UNLIKELY( payload_sz > FD_SHRED_DATA_PAYLOAD_MAX ) ) return FD_BLOCKSTORE_ERR_SHRED_INVALID;
848 0 : if( FD_UNLIKELY( off + payload_sz > max ) ) return FD_BLOCKSTORE_ERR_NO_MEM;
849 0 : fd_memcpy( buf + off, payload, payload_sz );
850 0 : err = fd_buf_shred_map_query_test( query );
851 0 : if( FD_LIKELY( err == FD_MAP_SUCCESS ) ) break;
852 0 : }; /* successful speculative copy */
853 :
854 0 : off += payload_sz;
855 0 : }
856 0 : *buf_sz = off;
857 0 : return FD_BLOCKSTORE_SUCCESS;
858 0 : }
859 :
/* fd_blockstore_shreds_complete returns 1 if every data shred of slot
   has been buffered (i.e. buffered_idx has reached slot_complete_idx),
   0 if the slot is unknown or still incomplete.  Speculative read,
   retried until consistent. */
int
fd_blockstore_shreds_complete( fd_blockstore_t * blockstore, ulong slot ){
  //fd_block_t * block_exists = fd_blockstore_block_query( blockstore, slot );
  fd_block_map_query_t query[1];
  int complete = 0;
  int err = FD_MAP_ERR_AGAIN;
  while( err == FD_MAP_ERR_AGAIN ){
    err = fd_block_map_query_try( blockstore->block_map, &slot, NULL, query, 0 );
    fd_block_info_t * block_info = fd_block_map_query_ele( query );
    if( FD_UNLIKELY( err == FD_MAP_ERR_KEY ) ) return 0;
    if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
    /* Complete iff at least one shred has been buffered and the
       buffered watermark has caught up to the slot's last shred. */
    complete = ( block_info->buffered_idx != FD_SHRED_IDX_NULL ) &&
               ( block_info->slot_complete_idx == block_info->buffered_idx );
    err = fd_block_map_query_test( query );
  }
  return complete;


  /* When replacing block_query( slot ) != NULL with this function:
     There are other things verified in a successful deshred & scan block that are not verified here.
     scan_block does a round of well-formedness checks like parsing txns, and no premature end of batch
     like needing cnt, microblock, microblock format.

     This maybe should be fine in places where we check both
     shreds_complete and flag PROCESSED/REPLAYING is set, because validation has been for sure done
     if the block has been replayed

     Should be careful in places that call this now that happen before the block is replayed, if we want
     to assume the shreds are well-formed we can't. */

}
891 :
892 :
/* fd_blockstore_block_data_query_volatile queries the block data of
   slot for off-path (e.g. RPC) readers.  Currently stubbed out: the
   archival backend is disabled (BLOCK_ARCHIVING not defined), so all
   parameters are ignored and FD_BLOCKSTORE_ERR_SLOT_MISSING is always
   returned.  The preprocessor-disabled body below is the retained
   archival/in-memory implementation. */
int
fd_blockstore_block_data_query_volatile( fd_blockstore_t *    blockstore,
                                         int                  fd,
                                         ulong                slot,
                                         fd_valloc_t          alloc,
                                         fd_hash_t *          parent_block_hash_out,
                                         fd_block_info_t *    block_info_out,
                                         fd_block_rewards_t * block_rewards_out,
                                         uchar **             block_data_out,
                                         ulong *              block_data_sz_out ) {

  /* WARNING: this code is extremely delicate. Do NOT modify without
     understanding all the invariants. In particular, we must never
     dereference through a corrupt pointer. It's OK for the destination
     data to be overwritten/invalid as long as the memory location is
     valid. As long as we don't crash, we can validate the data after it
     is read. */
  /* Silence unused-parameter warnings while the implementation is
     compiled out. */
  (void)blockstore;
  (void)fd;
  (void)slot;
  (void)alloc;
  (void)parent_block_hash_out;
  (void)block_info_out;
  (void)block_rewards_out;
  (void)block_data_out;
  (void)block_data_sz_out;
  return FD_BLOCKSTORE_ERR_SLOT_MISSING;
#if BLOCK_ARCHIVING
  fd_wksp_t * wksp = fd_blockstore_wksp( blockstore );
  fd_block_idx_t * block_idx = fd_blockstore_block_idx( blockstore );
  fd_block_idx_t * idx_entry = NULL;

  ulong off = ULONG_MAX;
  for(;;) {
    idx_entry = fd_block_idx_query( block_idx, slot, NULL );
    if( FD_LIKELY( idx_entry ) ) off = idx_entry->off;
    break;
  }

  if ( FD_UNLIKELY( off < ULONG_MAX ) ) { /* optimize for non-archival queries */
    FD_LOG_DEBUG( ( "Querying archive for block %lu", slot ) );
    fd_block_t block_out;
    int err = fd_blockstore_block_info_restore( &blockstore->shmem->archiver, fd, idx_entry, block_info_out, &block_out );
    if( FD_UNLIKELY( err ) ) {
      return FD_BLOCKSTORE_ERR_SLOT_MISSING;
    }
    uchar * block_data = fd_valloc_malloc( alloc, 128UL, block_out.data_sz );
    err = fd_blockstore_block_data_restore( &blockstore->shmem->archiver,
                                            fd,
                                            idx_entry,
                                            block_data,
                                            block_out.data_sz,
                                            block_out.data_sz);
    if( FD_UNLIKELY( err ) ) {
      return FD_BLOCKSTORE_ERR_SLOT_MISSING;
    }
    fd_block_idx_t * parent_idx_entry = fd_block_idx_query( block_idx, block_info_out->parent_slot, NULL );
    if( FD_UNLIKELY( !parent_idx_entry ) ) {
      return FD_BLOCKSTORE_ERR_SLOT_MISSING;
    }
    *parent_block_hash_out = parent_idx_entry->block_hash;
    *block_info_out        = *block_info_out; /* no op */
    *block_rewards_out     = block_out.rewards;
    *block_data_out        = block_data;
    *block_data_sz_out     = block_out.data_sz;
    return FD_BLOCKSTORE_SUCCESS;
  }

  uchar * prev_data_out = NULL;
  ulong prev_sz = 0;
  int err;
  for(;;) {
    //if( FD_UNLIKELY( fd_rwseq_start_concur_read( &blockstore->shmem->lock, &seqnum ) ) ) continue;
    fd_block_map_query_t quer[1] = { 0 };
    err = fd_block_map_query_try( blockstore->block_map, &slot, NULL, quer, 0 );
    fd_block_info_t const * query = fd_block_map_query_ele( quer );
    if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN )) continue;
    if( FD_UNLIKELY( !query ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;

    memcpy( block_info_out, query, sizeof( fd_block_info_t ) );
    ulong blk_gaddr = query->block_gaddr;
    if( FD_UNLIKELY( !blk_gaddr ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;

    err = fd_block_map_query_test( quer );
    if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;

    //if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->shmem->lock, seqnum ) ) ) continue;

    fd_block_t * blk = fd_wksp_laddr_fast( wksp, blk_gaddr );
    if( block_rewards_out ) memcpy( block_rewards_out, &blk->rewards, sizeof(fd_block_rewards_t) );
    ulong blk_data_gaddr = blk->data_gaddr;
    if( FD_UNLIKELY( !blk_data_gaddr ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
    ulong sz = *block_data_sz_out = blk->data_sz;
    if( sz >= FD_SHRED_MAX_PER_SLOT * FD_SHRED_MAX_SZ ) continue;

    /* batch_query no longer blocking, so no read lock */

    uchar * data_out;
    if( prev_sz >= sz ) {
      data_out = prev_data_out;
    } else {
      if( prev_data_out != NULL ) {
        fd_valloc_free( alloc, prev_data_out );
      }
      prev_data_out = data_out = fd_valloc_malloc( alloc, 128UL, sz );
      prev_sz = sz;
    }
    if( FD_UNLIKELY( data_out == NULL ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
    /* sets data_out to block data */
    ulong batch_idx = 0;
    ulong batch_sz  = 0;
    ulong total_blk_sz = 0;
    while( batch_idx <= query->slot_complete_idx ){
      int err = fd_blockstore_slice_query( blockstore, slot, (uint)batch_idx, sz - total_blk_sz, data_out + total_blk_sz, &batch_sz );
      if( FD_UNLIKELY( err ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
      total_blk_sz += batch_sz;

      if( FD_UNLIKELY( batch_idx == 0 ) ){
        batch_idx = fd_block_set_const_iter_init( query->data_complete_idxs ) + 1;
      } else {
        batch_idx = fd_block_set_const_iter_next( query->data_complete_idxs, batch_idx - 1 ) + 1;
      }
    }

    /*if( FD_UNLIKELY( fd_rwseq_check_concur_read( &blockstore->shmem->lock, seqnum ) ) ) {
      fd_valloc_free( alloc, data_out );
      continue;
    }*/

    *block_data_out = data_out;
    *block_data_sz_out = total_blk_sz;

    if( parent_block_hash_out ) {
      err = fd_block_map_query_try( blockstore->block_map, &block_info_out->parent_slot, NULL, quer, 0 );
      query = fd_block_map_query_ele( quer );
      if( query == NULL ) {
        memset( parent_block_hash_out, 0, sizeof(fd_hash_t) );
      } else {
        fd_memcpy( parent_block_hash_out, query->block_hash.uc, sizeof(fd_hash_t) );

        err = fd_block_map_query_test( quer );

        if( err ) {
          fd_valloc_free( alloc, data_out );
          continue;
        }
      }
    }

    return FD_BLOCKSTORE_SUCCESS;
  }
#endif
}
1046 :
1047 :
1048 : int
1049 : fd_blockstore_block_map_query_volatile( fd_blockstore_t * blockstore,
1050 : int fd,
1051 : ulong slot,
1052 0 : fd_block_info_t * block_info_out ) {
1053 :
1054 : /* WARNING: this code is extremely delicate. Do NOT modify without
1055 : understanding all the invariants. In particular, we must never
1056 : dereference through a corrupt pointer. It's OK for the destination
1057 : data to be overwritten/invalid as long as the memory location is
1058 : valid. As long as we don't crash, we can validate the data after it
1059 : is read. */
1060 :
1061 0 : fd_block_idx_t * block_idx = fd_blockstore_block_idx( blockstore );
1062 :
1063 0 : ulong off = ULONG_MAX;
1064 0 : for( ;; ) {
1065 0 : fd_block_idx_t * idx_entry = fd_block_idx_query( block_idx, slot, NULL );
1066 0 : if( FD_LIKELY( idx_entry ) ) off = idx_entry->off;
1067 0 : break;
1068 0 : }
1069 :
1070 0 : if( FD_UNLIKELY( off < ULONG_MAX ) ) { /* optimize for non-archival queries */
1071 0 : if( FD_UNLIKELY( lseek( fd, (long)off, SEEK_SET ) == -1 ) ) {
1072 0 : FD_LOG_WARNING(( "failed to seek" ));
1073 0 : return FD_BLOCKSTORE_ERR_SLOT_MISSING;
1074 0 : }
1075 0 : ulong rsz;
1076 0 : int err = fd_io_read( fd, block_info_out, sizeof( fd_block_info_t ), sizeof( fd_block_info_t ), &rsz );
1077 0 : if( FD_UNLIKELY( err ) ) {
1078 0 : FD_LOG_WARNING(( "failed to read block map entry" ));
1079 0 : return FD_BLOCKSTORE_ERR_SLOT_MISSING;
1080 0 : }
1081 0 : return FD_BLOCKSTORE_SUCCESS;
1082 0 : }
1083 :
1084 0 : int err = FD_MAP_ERR_AGAIN;
1085 0 : while( err == FD_MAP_ERR_AGAIN ) {
1086 0 : fd_block_map_query_t quer[1] = { 0 };
1087 0 : err = fd_block_map_query_try( blockstore->block_map, &slot, NULL, quer, 0 );
1088 0 : fd_block_info_t const * query = fd_block_map_query_ele_const( quer );
1089 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_KEY ) ) return FD_BLOCKSTORE_ERR_SLOT_MISSING;
1090 :
1091 0 : fd_memcpy( block_info_out, query, sizeof( fd_block_info_t ) );
1092 :
1093 0 : err = fd_block_map_query_test( quer );
1094 0 : }
1095 0 : return FD_BLOCKSTORE_SUCCESS;
1096 0 : }
1097 :
1098 : fd_txn_map_t *
1099 0 : fd_blockstore_txn_query( fd_blockstore_t * blockstore, uchar const sig[FD_ED25519_SIG_SZ] ) {
1100 0 : fd_txn_key_t key;
1101 0 : fd_memcpy( &key, sig, sizeof( key ) );
1102 0 : return fd_txn_map_query( fd_blockstore_txn_map( blockstore ), &key, NULL );
1103 0 : }
1104 :
/* fd_blockstore_txn_query_volatile queries a transaction by signature
   for off-path (e.g. RPC) readers.  Currently stubbed out: the
   archival backend is disabled (BLOCK_ARCHIVING not defined), so all
   parameters are ignored and FD_BLOCKSTORE_ERR_SLOT_MISSING is always
   returned.  The preprocessor-disabled body below is the retained
   archival/in-memory implementation. */
int
fd_blockstore_txn_query_volatile( fd_blockstore_t * blockstore,
                                  int               fd,
                                  uchar const       sig[FD_ED25519_SIG_SZ],
                                  fd_txn_map_t *    txn_out,
                                  long *            blk_ts,
                                  uchar *           blk_flags,
                                  uchar             txn_data_out[FD_TXN_MTU] ) {
  /* WARNING: this code is extremely delicate. Do NOT modify without
     understanding all the invariants. In particular, we must never
     dereference through a corrupt pointer. It's OK for the
     destination data to be overwritten/invalid as long as the memory
     location is valid. As long as we don't crash, we can validate the
     data after it is read. */
  /* Silence unused-parameter warnings while the implementation is
     compiled out. */
  (void)blockstore;
  (void)fd;
  (void)sig;
  (void)txn_out;
  (void)blk_ts;
  (void)blk_flags;
  (void)txn_data_out;
  return FD_BLOCKSTORE_ERR_SLOT_MISSING;
#if BLOCK_ARCHIVING
  fd_wksp_t * wksp = fd_blockstore_wksp( blockstore );
  fd_txn_map_t * txn_map = fd_blockstore_txn_map( blockstore );

  for(;;) {
    fd_txn_key_t key;
    memcpy( &key, sig, sizeof(key) );
    fd_txn_map_t const * txn_map_entry = fd_txn_map_query_safe( txn_map, &key, NULL );
    if( FD_UNLIKELY( txn_map_entry == NULL ) ) return FD_BLOCKSTORE_ERR_TXN_MISSING;
    memcpy( txn_out, txn_map_entry, sizeof(fd_txn_map_t) );
    break;
  }

  fd_block_idx_t * block_idx = fd_blockstore_block_idx( blockstore );

  ulong off = ULONG_MAX;
  for(;;) {
    fd_block_idx_t * idx_entry = fd_block_idx_query( block_idx, txn_out->slot, NULL );
    if( FD_LIKELY( idx_entry ) ) off = idx_entry->off;
    break;
  }

  if ( FD_UNLIKELY( off < ULONG_MAX ) ) { /* optimize for non-archival */
    if( FD_UNLIKELY( lseek( fd, (long)off, SEEK_SET ) == -1 ) ) {
      FD_LOG_WARNING(( "failed to seek" ));
      return FD_BLOCKSTORE_ERR_SLOT_MISSING;
    }
    fd_block_info_t block_info;
    ulong rsz; int err;
    err = fd_io_read( fd, &block_info, sizeof(fd_block_info_t), sizeof(fd_block_info_t), &rsz );
    check_read_write_err( err );
    err = fd_io_read( fd, txn_data_out, txn_out->sz, txn_out->sz, &rsz );
    check_read_write_err( err );
    err = (int)lseek( fd, (long)off + (long)txn_out->offset, SEEK_SET );
    check_read_write_err( err );
    err = fd_io_read( fd, txn_data_out, txn_out->sz, txn_out->sz, &rsz );
    check_read_write_err( err);
    return FD_BLOCKSTORE_SUCCESS;
  }

  for(;;) {
    fd_block_map_query_t quer[1] = { 0 };
    fd_block_map_query_try( blockstore->block_map, &txn_out->slot, NULL, quer, 0 );
    fd_block_info_t const * query = fd_block_map_query_ele_const( quer );

    if( FD_UNLIKELY( !query ) ) return FD_BLOCKSTORE_ERR_TXN_MISSING;
    ulong blk_gaddr = query->block_gaddr;
    if( FD_UNLIKELY( !blk_gaddr ) ) return FD_BLOCKSTORE_ERR_TXN_MISSING;

    if( fd_block_map_query_test( quer ) ) continue;

    fd_block_t * blk = fd_wksp_laddr_fast( wksp, blk_gaddr );
    if( blk_ts ) *blk_ts = query->ts;
    if( blk_flags ) *blk_flags = query->flags;
    ulong ptr = blk->data_gaddr;
    ulong sz = blk->data_sz;
    if( txn_out->offset + txn_out->sz > sz || txn_out->sz > FD_TXN_MTU ) continue;

    if( FD_UNLIKELY( fd_block_map_query_test( quer ) ) ) continue;

    if( txn_data_out == NULL ) return FD_BLOCKSTORE_SUCCESS;
    uchar const * data = fd_wksp_laddr_fast( wksp, ptr );
    fd_memcpy( txn_data_out, data + txn_out->offset, txn_out->sz );

    if( FD_UNLIKELY( fd_block_map_query_test( quer ) ) ) continue;

    return FD_BLOCKSTORE_SUCCESS;
  }
#endif
}
1197 :
1198 : void
1199 0 : fd_blockstore_block_height_update( fd_blockstore_t * blockstore, ulong slot, ulong height ) {
1200 0 : fd_block_map_query_t query[1] = { 0 };
1201 : // TODO make nonblocking
1202 0 : int err = fd_block_map_prepare( blockstore->block_map, &slot, NULL, query, FD_MAP_FLAG_BLOCKING );
1203 0 : fd_block_info_t * block_info = fd_block_map_query_ele( query );
1204 0 : if( FD_UNLIKELY( err || block_info->slot != slot ) ) return;
1205 0 : block_info->block_height = height;
1206 0 : fd_block_map_publish( query );
1207 0 : }
1208 :
1209 : void
1210 0 : fd_blockstore_log_block_status( fd_blockstore_t * blockstore, ulong around_slot ) {
1211 0 : fd_block_map_query_t query[1] = { 0 };
1212 0 : uint received_idx = 0;
1213 0 : uint buffered_idx = 0;
1214 0 : uint slot_complete_idx = 0;
1215 :
1216 0 : for( ulong i = around_slot - 5; i < around_slot + 20; ++i ) {
1217 0 : int err = FD_MAP_ERR_AGAIN;
1218 0 : while( err == FD_MAP_ERR_AGAIN ){
1219 0 : err = fd_block_map_query_try( blockstore->block_map, &i, NULL, query, 0 );
1220 0 : fd_block_info_t * slot_entry = fd_block_map_query_ele( query );
1221 0 : if( err == FD_MAP_ERR_KEY ) break;
1222 0 : if( err == FD_MAP_ERR_AGAIN ) continue;
1223 0 : received_idx = slot_entry->received_idx;
1224 0 : buffered_idx = slot_entry->buffered_idx;
1225 0 : slot_complete_idx = slot_entry->slot_complete_idx;
1226 0 : err = fd_block_map_query_test( query );
1227 0 : if( err == FD_MAP_ERR_KEY ) break;
1228 0 : }
1229 :
1230 0 : if( err == FD_MAP_ERR_KEY ) continue;
1231 :
1232 0 : FD_LOG_NOTICE(( "%sslot=%lu received=%u consumed=%u finished=%u",
1233 0 : ( i == around_slot ? "*" : " " ),
1234 0 : i,
1235 0 : received_idx,
1236 0 : buffered_idx,
1237 0 : slot_complete_idx ));
1238 0 : }
1239 0 : }
1240 :
1241 : static char *
1242 0 : fd_smart_size( ulong sz, char * tmp, size_t tmpsz ) {
1243 0 : if( sz <= (1UL<<7) )
1244 0 : snprintf( tmp, tmpsz, "%lu B", sz );
1245 0 : else if( sz <= (1UL<<17) )
1246 0 : snprintf( tmp, tmpsz, "%.3f KB", ((double)sz/((double)(1UL<<10))) );
1247 0 : else if( sz <= (1UL<<27) )
1248 0 : snprintf( tmp, tmpsz, "%.3f MB", ((double)sz/((double)(1UL<<20))) );
1249 0 : else
1250 0 : snprintf( tmp, tmpsz, "%.3f GB", ((double)sz/((double)(1UL<<30))) );
1251 0 : return tmp;
1252 0 : }
1253 :
1254 : void
1255 0 : fd_blockstore_log_mem_usage( fd_blockstore_t * blockstore ) {
1256 0 : char tmp1[100];
1257 :
1258 0 : FD_LOG_NOTICE(( "blockstore base footprint: %s",
1259 0 : fd_smart_size( sizeof(fd_blockstore_t), tmp1, sizeof(tmp1) ) ));
1260 0 : ulong shred_max = fd_buf_shred_pool_ele_max( blockstore->shred_pool );
1261 0 : FD_LOG_NOTICE(( "shred pool footprint: %s %lu entries)",
1262 0 : fd_smart_size( fd_buf_shred_pool_footprint(), tmp1, sizeof(tmp1) ),
1263 0 : shred_max ));
1264 0 : ulong shred_map_cnt = fd_buf_shred_map_chain_cnt( blockstore->shred_map );
1265 0 : FD_LOG_NOTICE(( "shred map footprint: %s (%lu chains, load is %.3f)",
1266 0 : fd_smart_size( fd_buf_shred_map_footprint( shred_map_cnt ), tmp1, sizeof(tmp1) ),
1267 0 : shred_map_cnt,
1268 0 : (double)shred_map_cnt) );
1269 :
1270 : /*fd_block_info_t * slot_map = fd_blockstore_block_map( blockstore );
1271 : ulong slot_map_cnt = fd_block_map_key_cnt( slot_map );
1272 : ulong slot_map_max = fd_block_map_key_max( slot_map );
1273 : FD_LOG_NOTICE(( "slot map footprint: %s (%lu entries used out of %lu, %lu%%)",
1274 : fd_smart_size( fd_block_map_footprint( slot_map_max ), tmp1, sizeof(tmp1) ),
1275 : slot_map_cnt,
1276 : slot_map_max,
1277 : (100U*slot_map_cnt)/slot_map_max )); */
1278 :
1279 0 : fd_txn_map_t * txn_map = fd_blockstore_txn_map( blockstore );
1280 0 : ulong txn_map_cnt = fd_txn_map_key_cnt( txn_map );
1281 0 : ulong txn_map_max = fd_txn_map_key_max( txn_map );
1282 0 : FD_LOG_NOTICE(( "txn map footprint: %s (%lu entries used out of %lu, %lu%%)",
1283 0 : fd_smart_size( fd_txn_map_footprint( txn_map_max ), tmp1, sizeof(tmp1) ),
1284 0 : txn_map_cnt,
1285 0 : txn_map_max,
1286 0 : (100U*txn_map_cnt)/txn_map_max ));
1287 0 : ulong block_cnt = 0;
1288 :
1289 0 : ulong * q = fd_blockstore_slot_deque( blockstore );
1290 0 : fd_slot_deque_remove_all( q );
1291 0 : fd_slot_deque_push_tail( q, blockstore->shmem->wmk );
1292 0 : while( !fd_slot_deque_empty( q ) ) {
1293 0 : ulong curr = fd_slot_deque_pop_head( q );
1294 :
1295 0 : fd_block_map_query_t query[1] = { 0 };
1296 0 : int err = fd_block_map_query_try( blockstore->block_map, &curr, NULL, query, FD_MAP_FLAG_BLOCKING );
1297 0 : fd_block_info_t * block_info = fd_block_map_query_ele( query );
1298 0 : if( FD_UNLIKELY( err == FD_MAP_ERR_KEY || !block_info ) ) continue;
1299 :
1300 0 : for( ulong i = 0; i < block_info->child_slot_cnt; i++ ) {
1301 0 : fd_slot_deque_push_tail( q, block_info->child_slots[i] );
1302 0 : }
1303 0 : }
1304 :
1305 0 : if( block_cnt )
1306 0 : FD_LOG_NOTICE(( "block cnt: %lu",
1307 0 : block_cnt ));
1308 0 : }
|