#ifndef HEADER_fd_src_choreo_forest_fd_forest_h
#define HEADER_fd_src_choreo_forest_fd_forest_h

/* Forest is an API for repairing blocks as they are discovered from
   the cluster via Turbine or Gossip. Shreds (from Turbine) and votes
   (from Gossip) inform forest that a block exists for the slot they
   are associated with. Block repair ensures that this block is
   received in its entirety by requesting repairs for the block's
   missing shreds.

   Like other fork-aware structures, forest maintains a tree that
   records the ancestry of slots. It also maintains a frontier, which
   models the leaves of the tree, ie. the oldest (in ancestry) blocks
   that still need to be repaired (across multiple forks).

   Forest constructs the ancestry tree backwards, and then repairs the
   tree forwards (using BFS). */

/* FD_FOREST_USE_HANDHOLDING: Define this to non-zero at compile time
   to turn on additional runtime checks and logging. */

#include "../../disco/fd_disco_base.h"

#ifndef FD_FOREST_USE_HANDHOLDING
#define FD_FOREST_USE_HANDHOLDING 1
#endif

#define FD_FOREST_VER_UNINIT (0UL)
#define FD_FOREST_VER_INVAL  (ULONG_MAX)

#define FD_FOREST_MAGIC (0xf17eda2ce7b1c0UL) /* firedancer forest version 0 */

#define SET_NAME fd_forest_blk_idxs
#define SET_MAX  FD_SHRED_BLK_MAX
#include "../../util/tmpl/fd_set.c"

/* fd_forest_blk_t implements a left-child, right-sibling n-ary tree.
   Each ele maintains the pool index of its left-most child (`child`),
   its immediate-right sibling (`sibling`), and its parent (`parent`).

   This tree structure is gaddr-safe and supports accesses and
   operations from processes with separate local forest joins. */

struct __attribute__((aligned(128UL))) fd_forest_blk {
  ulong slot;        /* map key */
  ulong parent_slot; /* map key of the parent. invariant: if parent is populated, parent_slot is populated. the converse is not necessarily true. */
  ulong next;        /* internal use by fd_pool, fd_map_chain */
  ulong parent;      /* pool idx of the parent in the tree */
  ulong child;       /* pool idx of the left-child */
  ulong sibling;     /* pool idx of the right-sibling */

  uint consumed_idx; /* highest contiguous consumed shred idx */
  uint buffered_idx; /* highest contiguous buffered shred idx */
  uint complete_idx; /* shred idx with SLOT_COMPLETE_FLAG, ie. the last shred idx in the slot */

  fd_forest_blk_idxs_t fecs[fd_forest_blk_idxs_word_cnt]; /* fec set idxs */
  fd_forest_blk_idxs_t idxs[fd_forest_blk_idxs_word_cnt]; /* data shred idxs */
  fd_forest_blk_idxs_t cmpl[fd_forest_blk_idxs_word_cnt]; /* last shred idx of every FEC set that has been completed by shred tile */

  /* ie. when fecs == cmpl, the slot is truly complete and everything
     is contained in the fec store. See fd_forest_fec_clear for more
     details. */

  /* Metrics */

  fd_forest_blk_idxs_t code[fd_forest_blk_idxs_word_cnt]; /* code shred idxs */
  long first_shred_ts; /* timestamp of the first shred received in the slot (shred idx != complete_idx) */
  long first_req_ts;   /* timestamp of the first request received in the slot (shred idx != complete_idx) */
  uint turbine_cnt;    /* number of shreds received from turbine */
  uint repair_cnt;     /* number of data shreds received from repair */
};
typedef struct fd_forest_blk fd_forest_blk_t;

#define POOL_NAME fd_forest_pool
#define POOL_T    fd_forest_blk_t
#include "../../util/tmpl/fd_pool.c"
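
/* The pool template above generates the fd_forest_pool_* API used to
   address eles by pool idx. As an illustration (a sketch, not part of
   the forest API), the direct children of an ele can be walked via the
   left-child, right-sibling links, given a local join to the ele pool
   and a valid ele `parent` (e.g. obtained via fd_forest_query):

   ```
   fd_forest_blk_t const * pool     = fd_forest_pool_const( forest );
   ulong                   null_idx = fd_forest_pool_idx_null( pool );
   for( ulong idx = parent->child; idx != null_idx; idx = fd_forest_pool_ele_const( pool, idx )->sibling ) {
     fd_forest_blk_t const * child = fd_forest_pool_ele_const( pool, idx );
     // child->slot is a child slot of parent->slot
   }
   ```

   Here forest is assumed to be a current local join. */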

#define MAP_NAME  fd_forest_ancestry
#define MAP_ELE_T fd_forest_blk_t
#define MAP_KEY   slot
#include "../../util/tmpl/fd_map_chain.c"

#define MAP_NAME  fd_forest_frontier
#define MAP_ELE_T fd_forest_blk_t
#define MAP_KEY   slot
#include "../../util/tmpl/fd_map_chain.c"

#define MAP_NAME  fd_forest_orphaned
#define MAP_ELE_T fd_forest_blk_t
#define MAP_KEY   slot
#include "../../util/tmpl/fd_map_chain.c"

#define MAP_NAME  fd_forest_subtrees
#define MAP_ELE_T fd_forest_blk_t
#define MAP_KEY   slot
#include "../../util/tmpl/fd_map_chain.c"

struct fd_forest_cns {
  ulong slot;
  ulong forest_pool_idx;
  ulong next;
};
typedef struct fd_forest_cns fd_forest_cns_t;

#define MAP_NAME  fd_forest_consumed
#define MAP_ELE_T fd_forest_cns_t
#define MAP_KEY   slot
#include "../../util/tmpl/fd_map_chain.c"

#define POOL_NAME fd_forest_conspool
#define POOL_T    fd_forest_cns_t
#include "../../util/tmpl/fd_pool.c"

/* Internal use only for BFSing */
#define DEQUE_NAME fd_forest_deque
#define DEQUE_T    ulong
#include "../../util/tmpl/fd_deque_dynamic.c"


/* fd_forest_t is the top-level structure that holds the root of the
   tree, as well as the memory pools and map structures.

   These structures are bump-allocated and laid out contiguously in
   memory from the fd_forest_t * pointer, which points to the beginning
   of the memory region.

   --------------------- <- fd_forest_t *
   | metadata          |
   |-------------------|
   | version fseq      |
   |-------------------|
   | pool              |
   |-------------------|
   | ancestry          |
   |-------------------|
   | frontier          |
   |-------------------|
   | subtrees          |
   |-------------------|
   | orphaned          |
   |-------------------|
   | consumed          |
   |-------------------|
   | conspool          |
   |-------------------|
   | deque             |
   ---------------------

   A valid, initialized forest is always non-empty. After
   `fd_forest_init` the forest will always have a root ele unless
   modified improperly outside of forest's API. */

struct __attribute__((aligned(128UL))) fd_forest {
  ulong root;           /* pool idx of the root */
  ulong iter;           /* pool idx of the iterator */
  ulong wksp_gaddr;     /* wksp gaddr of fd_forest in the backing wksp, non-zero gaddr */
  ulong ver_gaddr;      /* wksp gaddr of version fseq, incremented on write ops */
  ulong pool_gaddr;     /* wksp gaddr of fd_pool */
  ulong ancestry_gaddr; /* wksp gaddr of fd_forest_ancestry */
  ulong frontier_gaddr; /* leaves that need repair */
  ulong subtrees_gaddr; /* heads of orphaned trees */
  ulong orphaned_gaddr; /* map of parent_slot to singly-linked list of eles orphaned by that parent slot */

  ulong consumed_gaddr; /* map of slot to pool idx of the completed repair frontier */
  ulong conspool_gaddr; /* wksp gaddr of fd_forest_conspool */
  ulong deque_gaddr;    /* wksp gaddr of fd_forest_deque. internal use only for BFSing */
  ulong magic;          /* ==FD_FOREST_MAGIC */
};
typedef struct fd_forest fd_forest_t;

FD_PROTOTYPES_BEGIN

/* Constructors */

/* fd_forest_{align,footprint} return the required alignment and
   footprint of a memory region suitable for use as a forest with up
   to ele_max eles. */

FD_FN_CONST static inline ulong
fd_forest_align( void ) {
  return alignof(fd_forest_t);
}

FD_FN_CONST static inline ulong
fd_forest_footprint( ulong ele_max ) {
  return FD_LAYOUT_FINI(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_INIT,
      alignof(fd_forest_t),       sizeof(fd_forest_t) ),
      fd_fseq_align(),            fd_fseq_footprint() ),
      fd_forest_pool_align(),     fd_forest_pool_footprint    ( ele_max ) ),
      fd_forest_ancestry_align(), fd_forest_ancestry_footprint( ele_max ) ),
      fd_forest_frontier_align(), fd_forest_frontier_footprint( ele_max ) ),
      fd_forest_subtrees_align(), fd_forest_subtrees_footprint( ele_max ) ),
      fd_forest_orphaned_align(), fd_forest_orphaned_footprint( ele_max ) ),
      fd_forest_consumed_align(), fd_forest_consumed_footprint( ele_max ) ), /* TODO: we can size this a LOT smaller than ele_max */
      fd_forest_conspool_align(), fd_forest_conspool_footprint( ele_max ) ), /* TODO: we can size this a LOT smaller than ele_max */
      fd_forest_deque_align(),    fd_forest_deque_footprint   ( ele_max ) ),
    fd_forest_align() );
}

/* fd_forest_new formats an unused memory region for use as a forest.
   shmem is a non-NULL pointer to this region in the local address
   space with the required footprint and alignment. */

void *
fd_forest_new( void * shmem, ulong ele_max, ulong seed );

/* fd_forest_join joins the caller to the forest. forest points to the
   first byte of the memory region backing the forest in the caller's
   address space. Returns a pointer in the local address space to
   forest on success. */

fd_forest_t *
fd_forest_join( void * forest );

/* fd_forest_leave leaves a current local join. Returns a pointer to
   the underlying shared memory region on success and NULL on failure
   (logs details). Reasons for failure include forest is NULL. */

void *
fd_forest_leave( fd_forest_t const * forest );

/* fd_forest_delete unformats a memory region used as a forest.
   Assumes nobody is joined to the region. Returns a pointer to the
   underlying shared memory region or NULL if used obviously in error
   (e.g. forest is obviously not a forest ... logs details). The
   ownership of the memory region is transferred to the caller. */

void *
fd_forest_delete( void * forest );

/* fd_forest_init initializes a forest. Assumes forest is a valid
   local join and no one else is joined. root is the initial root
   forest will use. This is the snapshot slot if booting from a
   snapshot, or 0 if booting from genesis.

   In general, this should be called by the same process that
   formatted forest's memory, ie. the caller of fd_forest_new. */

fd_forest_t *
fd_forest_init( fd_forest_t * forest, ulong root );

/* fd_forest_fini finishes a forest. Assumes forest is a valid local
   join and no one else is joined. */

fd_forest_t *
fd_forest_fini( fd_forest_t * forest );
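
/* For example, a typical construction sequence might look like the
   following sketch (wksp, ele_max, seed, tag and root_slot are
   caller-chosen assumptions, not part of this API):

   ```
   void *        mem    = fd_wksp_alloc_laddr( wksp, fd_forest_align(), fd_forest_footprint( ele_max ), tag );
   fd_forest_t * forest = fd_forest_join( fd_forest_new( mem, ele_max, seed ) );
   fd_forest_init( forest, root_slot );

   ... use forest ...

   fd_forest_fini( forest );
   fd_wksp_free_laddr( fd_forest_delete( fd_forest_leave( forest ) ) );
   ```

   fd_forest_leave / fd_forest_delete return the underlying region,
   which can then be freed by its allocator. */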

/* Accessors */

/* fd_forest_wksp returns the local join to the wksp backing the
   forest. The lifetime of the returned pointer is at least as long as
   the lifetime of the local join. Assumes forest is a current local
   join. */

FD_FN_PURE static inline fd_wksp_t *
fd_forest_wksp( fd_forest_t const * forest ) {
  return (fd_wksp_t *)( ( (ulong)forest ) - forest->wksp_gaddr );
}

/* fd_forest_{ver, ver_const} return the local join to the version
   number fseq. The lifetime of the returned pointer is at least as
   long as the lifetime of the local join. Assumes forest is a current
   local join. If the value is ULONG_MAX, forest is uninitialized or
   invalid. Query pre- & post-read:

   odd:  if either pre or post is odd, discard the read.
   even: if pre == post, the read is consistent. */

FD_FN_PURE static inline ulong *
fd_forest_ver( fd_forest_t * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->ver_gaddr );
}

FD_FN_PURE static inline ulong const *
fd_forest_ver_const( fd_forest_t const * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->ver_gaddr );
}
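
/* For example, a concurrent reader might use the version fseq for a
   speculative (lockfree) read of the forest. A minimal sketch,
   assuming forest is a current local join (exact fencing requirements
   depend on the caller):

   ```
   ulong const * ver = fd_forest_ver_const( forest );
   for(;;) {
     ulong pre = FD_VOLATILE_CONST( *ver );
     if( FD_UNLIKELY( pre & 1UL ) ) continue; // odd: write in progress, retry
     FD_COMPILER_MFENCE();
     // ... speculatively read forest here ...
     FD_COMPILER_MFENCE();
     ulong post = FD_VOLATILE_CONST( *ver );
     if( FD_LIKELY( pre==post ) ) break;      // even and unchanged: read is consistent
   }
   ```

   If the read was torn (pre!=post or an odd version was observed), the
   speculatively read data should be discarded and the read retried. */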

/* fd_forest_{pool, pool_const} return a pointer in the caller's
   address space to forest's element pool. */

FD_FN_PURE static inline fd_forest_blk_t *
fd_forest_pool( fd_forest_t * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->pool_gaddr );
}

FD_FN_PURE static inline fd_forest_blk_t const *
fd_forest_pool_const( fd_forest_t const * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->pool_gaddr );
}

/* fd_forest_{ancestry, ancestry_const} return a pointer in the
   caller's address space to forest's ancestry map. */

FD_FN_PURE static inline fd_forest_ancestry_t *
fd_forest_ancestry( fd_forest_t * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->ancestry_gaddr );
}

FD_FN_PURE static inline fd_forest_ancestry_t const *
fd_forest_ancestry_const( fd_forest_t const * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->ancestry_gaddr );
}

/* fd_forest_{frontier, frontier_const} return a pointer in the
   caller's address space to forest's frontier map. */

FD_FN_PURE static inline fd_forest_frontier_t *
fd_forest_frontier( fd_forest_t * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->frontier_gaddr );
}

FD_FN_PURE static inline fd_forest_frontier_t const *
fd_forest_frontier_const( fd_forest_t const * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->frontier_gaddr );
}
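
/* For example, the frontier map can be walked with the fd_map_chain
   iterator generated above to visit every leaf currently in need of
   repair. A sketch, assuming forest is a current local join:

   ```
   fd_forest_frontier_t const * frontier = fd_forest_frontier_const( forest );
   fd_forest_blk_t const *      pool     = fd_forest_pool_const( forest );
   for( fd_forest_frontier_iter_t iter = fd_forest_frontier_iter_init( frontier, pool );
        !fd_forest_frontier_iter_done( iter, frontier, pool );
        iter = fd_forest_frontier_iter_next( iter, frontier, pool ) ) {
     fd_forest_blk_t const * ele = fd_forest_frontier_iter_ele_const( iter, frontier, pool );
     // ele->slot is a frontier leaf; ele->buffered_idx and
     // ele->complete_idx indicate its repair progress
   }
   ```

   The same iterator pattern applies to the ancestry, subtrees and
   orphaned maps generated from the same template. */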

/* fd_forest_{subtrees, subtrees_const} return a pointer in the
   caller's address space to forest's subtrees map. */

FD_FN_PURE static inline fd_forest_subtrees_t *
fd_forest_subtrees( fd_forest_t * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->subtrees_gaddr );
}

FD_FN_PURE static inline fd_forest_subtrees_t const *
fd_forest_subtrees_const( fd_forest_t const * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->subtrees_gaddr );
}

/* fd_forest_{orphaned, orphaned_const} return a pointer in the
   caller's address space to forest's orphaned map. */

FD_FN_PURE static inline fd_forest_orphaned_t *
fd_forest_orphaned( fd_forest_t * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->orphaned_gaddr );
}

FD_FN_PURE static inline fd_forest_orphaned_t const *
fd_forest_orphaned_const( fd_forest_t const * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->orphaned_gaddr );
}

/* fd_forest_{consumed, consumed_const} return a pointer in the
   caller's address space to forest's consumed map. */

FD_FN_PURE static inline fd_forest_consumed_t *
fd_forest_consumed( fd_forest_t * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->consumed_gaddr );
}

FD_FN_PURE static inline fd_forest_consumed_t const *
fd_forest_consumed_const( fd_forest_t const * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->consumed_gaddr );
}

/* fd_forest_{conspool, conspool_const} return a pointer in the
   caller's address space to forest's consumed element pool
   (conspool). */

FD_FN_PURE static inline fd_forest_cns_t *
fd_forest_conspool( fd_forest_t * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->conspool_gaddr );
}

FD_FN_PURE static inline fd_forest_cns_t const *
fd_forest_conspool_const( fd_forest_t const * forest ) {
  return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->conspool_gaddr );
}

/* fd_forest_root_slot returns forest's root slot. Assumes forest is a
   current local join. */

FD_FN_PURE static inline ulong
fd_forest_root_slot( fd_forest_t const * forest ) {
  if( FD_UNLIKELY( forest->root == fd_forest_pool_idx_null( fd_forest_pool_const( forest ) ) ) ) return ULONG_MAX; /* uninitialized */
  return fd_forest_pool_ele_const( fd_forest_pool_const( forest ), forest->root )->slot;
}

/* fd_forest_query returns a pointer to the forest ele keyed by slot,
   or NULL if no such ele is in the forest. */

fd_forest_blk_t *
fd_forest_query( fd_forest_t * forest, ulong slot );

/* Operations */

/* fd_forest_blk_insert inserts a new block into the forest. Assumes
   slot >= the current forest root slot and that the blk pool has a
   free element (if handholding is enabled, explicitly checks and
   errors). This insert is idempotent and can be called multiple times
   with the same slot. Returns the inserted forest ele. */

fd_forest_blk_t *
fd_forest_blk_insert( fd_forest_t * forest, ulong slot, ulong parent_slot );

#define SHRED_SRC_TURBINE   0
#define SHRED_SRC_REPAIR    1
#define SHRED_SRC_RECOVERED 2

/* fd_forest_data_shred_insert and fd_forest_code_shred_insert insert
   a new shred into the forest. Assumes slot is already in forest, so
   they should typically be called directly after fd_forest_blk_insert.
   Returns the forest ele corresponding to the shred's slot. */

fd_forest_blk_t *
fd_forest_data_shred_insert( fd_forest_t * forest, ulong slot, ulong parent_slot, uint shred_idx, uint fec_set_idx, int slot_complete, int src );

fd_forest_blk_t *
fd_forest_code_shred_insert( fd_forest_t * forest, ulong slot, uint shred_idx );
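
/* For example, on receiving a Turbine data shred, a caller might first
   ensure the block exists and then record the shred. A sketch (slot,
   parent_slot, shred_idx, fec_set_idx and slot_complete are assumed to
   come from the received shred; this is not a prescribed call
   sequence):

   ```
   fd_forest_blk_insert( forest, slot, parent_slot );
   fd_forest_data_shred_insert( forest, slot, parent_slot, shred_idx,
                                fec_set_idx, slot_complete, SHRED_SRC_TURBINE );
   ```

   Shreds received via repair or recovered by the fec_resolver would
   pass SHRED_SRC_REPAIR or SHRED_SRC_RECOVERED respectively. */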

/* fd_forest_fec_insert inserts a new fully completed FEC set into the
   forest. Assumes slot is already in forest, so it should typically be
   called directly after fd_forest_blk_insert. Returns the forest ele
   corresponding to the FEC set's slot. */

fd_forest_blk_t *
fd_forest_fec_insert( fd_forest_t * forest, ulong slot, ulong parent_slot, uint last_shred_idx, uint fec_set_idx, int slot_complete );

/* fd_forest_fec_clear clears the FEC set at the given slot and
   fec_set_idx.

   Can fec_clear break consumed frontier invariants? No. Why?

   The consumed frontier moves past slot n iff:
     1) head->child is set (we have received evidence of a child of
        slot n),
     2) head->complete_idx != UINT_MAX && head->complete_idx ==
        head->buffered_idx (all shreds of the slot have been received),
        and
     3) 0==memcmp( head->cmpl, head->fecs,
        sizeof(fd_forest_blk_idxs_t)*fd_forest_blk_idxs_word_cnt )
        (our FEC completion record matches the observed FEC sets).

   First we show that we must have a FEC completion for each FEC set in
   slot n in order for the consumed frontier to move past n.

   If we have received all the shreds of n, we must have all FEC set
   idxs of n recorded in the fecs bitset. The cmpl bitset is updated
   only by fd_forest_fec_insert, which is only called on a
   fec_completes message from the shred tile.

   This means we must have received a FEC completion for every FEC set
   of n from the shred tile in order for the cmpl bitset to match the
   fecs bitset, and hence for the consumed frontier to move past n.

   Since we only call fd_forest_fec_insert on the fec_completes message
   from the shred tile, the fec_resolver must have COMPLETED each FEC
   set of slot n at least once. After the first fec_completes for each
   FEC set in n, the fec_resolver will have the FEC sets of n in its
   done_map. This prevents further duplicate fec_completes for n,
   usually for a window of 65k FEC sets, until they get evicted from
   the done_map.

   Now consider that we have moved the consumed frontier past slot n.

   It is technically possible to receive a FEC set of slot n again many
   slots later, after the done_map has erased all records of the FEC
   sets of slot n. This is not a problem:
   1) If slot n is not in scope of the forest root, then fec_resolver
      will build a ctx for that FEC set, but repair will ignore any
      messages related to that FEC set, as the slot <= forest root.
      Eventually the fec_resolver will either complete or evict the
      ctx, in which case either the fec_completes or the fec_clear msg
      will be ignored by repair.
   2) If slot n is in scope of the forest root, then the shred
      delivered to repair will trigger a data_shred_insert call that
      does nothing, as repair already has a record of that shred.
      Eventually the fec_completes or fec_clear msg will be delivered
      to repair. fec_insert will do nothing. fec_clear will remove the
      idxs for the shreds from the bitset and update the buffered_idx.
      This doesn't matter though, because we have already moved past
      slot n on the consumed frontier. No need to request those shreds
      again. */

void
fd_forest_fec_clear( fd_forest_t * forest, ulong slot, uint fec_set_idx, uint max_shred_idx );
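
/* For illustration, the three conditions above can be expressed as a
   predicate over a frontier ele. A sketch, assuming `ele` is a valid
   pool ele and `pool` is a local join to the ele pool (the forest
   advances the consumed frontier internally; callers do not need to
   evaluate this themselves):

   ```
   int has_child      = ele->child != fd_forest_pool_idx_null( pool );
   int fully_buffered = ele->complete_idx != UINT_MAX && ele->complete_idx == ele->buffered_idx;
   int fecs_complete  = 0==memcmp( ele->cmpl, ele->fecs, sizeof(fd_forest_blk_idxs_t)*fd_forest_blk_idxs_word_cnt );
   int can_advance    = has_child && fully_buffered && fecs_complete;
   ```
*/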

/* fd_forest_publish publishes slot as the new forest root, setting
   the subtree beginning from slot as the new forest tree (ie. slot
   and all its descendants). Prunes all eles not in slot's subtree.
   Assumes slot is present in forest. Returns the new root. */

fd_forest_blk_t const *
fd_forest_publish( fd_forest_t * forest, ulong slot );

struct fd_forest_iter {
  ulong                     ele_idx;
  uint                      shred_idx;
  ulong                     frontier_ver; /* the frontier version number of forest at time of initialization */
  fd_forest_consumed_iter_t head;         /* the frontier node "root" of our current iteration, provided NO insertions or deletions in the frontier */
};
typedef struct fd_forest_iter fd_forest_iter_t;

/* fd_forest_iter_* supports iteration over a frontier node of the
   forest and its children. iter_init selects the starting frontier
   node (the first node returned by frontier iteration). iter_next
   advances the iterator to the next shred not yet buffered in the
   forest, and always descends into the left-most child. This iterator
   is safe with concurrent query/insert/remove. If the forest has not
   been modified, the iterator will continue across all frontier nodes;
   otherwise, it will return done.

   An iterator is done when the ele_idx is null, ie. the leaf of the
   originally selected frontier node has been reached.

   An iterator signifies to the repair tile to request the
   highest_window_index when the ele_idx is not null and the shred_idx
   is UINT_MAX.

   Otherwise, the iterator signifies to the repair tile to request a
   regular shred window_index.

   The caller should generally have a timeout that resets the iterator.
   In addition, since the iterator always chooses the left-most child,
   reaching new forks under one frontier node relies on repair
   responses -> shreds being inserted. Thus the frontier nodes can
   advance to the slot where the fork branched. */

fd_forest_iter_t
fd_forest_iter_init( fd_forest_t * forest );

fd_forest_iter_t
fd_forest_iter_next( fd_forest_iter_t iter, fd_forest_t const * forest );

int
fd_forest_iter_done( fd_forest_iter_t iter, fd_forest_t const * forest );
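
/* For example, a repair loop might walk the iterator to generate
   repair requests. A sketch (how requests are actually issued is up to
   the caller):

   ```
   for( fd_forest_iter_t iter = fd_forest_iter_init( forest );
        !fd_forest_iter_done( iter, forest );
        iter = fd_forest_iter_next( iter, forest ) ) {
     fd_forest_blk_t const * ele = fd_forest_pool_ele_const( fd_forest_pool_const( forest ), iter.ele_idx );
     if( iter.shred_idx==UINT_MAX ) {
       // request highest_window_index for ele->slot
     } else {
       // request window_index for ( ele->slot, iter.shred_idx )
     }
   }
   ```
*/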

/* Misc */

/* fd_forest_verify checks the forest is not obviously corrupt.
   Returns 0 if verify succeeds, -1 otherwise. */

int
fd_forest_verify( fd_forest_t const * forest );

void
fd_forest_frontier_print( fd_forest_t const * forest );

/* fd_forest_print pretty-prints a formatted forest tree, beginning
   from the root. For a more localized view, fd_forest_frontier_print
   (above) prints just the frontier. */

void
fd_forest_print( fd_forest_t const * forest );

FD_PROTOTYPES_END

#endif /* HEADER_fd_src_choreo_forest_fd_forest_h */