Line data Source code
1 : #ifndef HEADER_fd_src_discof_forest_fd_forest_h
2 : #define HEADER_fd_src_discof_forest_fd_forest_h
3 :
4 : /* Forest is an API for repairing blocks as they are discovered from the
5 : cluster via Turbine or Gossip. Shreds (from Turbine) and votes (from
6 : Gossip) inform forest that a block with the given slot they are
7 : associated with exists. Blk repair ensures that this block is
8 : received in its entirety by requesting repairs for missing shreds for
9 : the block.
10 :
11 : Like other fork-aware structures, forest maintains a tree that
12 : records the ancestry of slots. It also maintains a frontier, which
13 : models the leaves of the tree ie. the oldest (in ancestry) blocks
14 : that still need to be repaired (across multiple forks).
15 :
16 : Forest constructs the ancestry tree backwards, and then repairs the
17 : tree forwards (using BFS). */
18 :
19 : /* FD_FOREST_USE_HANDHOLDING: Define this to non-zero at compile time
20 : to turn on additional runtime checks and logging. */
21 :
22 : #include "../../disco/fd_disco_base.h"
23 :
24 : #ifndef FD_FOREST_USE_HANDHOLDING
25 : #define FD_FOREST_USE_HANDHOLDING 1
26 : #endif
27 :
28 0 : #define FD_FOREST_VER_UNINIT (0UL)
29 0 : #define FD_FOREST_VER_INVAL (ULONG_MAX)
30 :
31 0 : #define FD_FOREST_MAGIC (0xf17eda2ce7b1c0UL) /* firedancer forest version 0 */
32 :
33 : #define SET_NAME fd_forest_blk_idxs
34 0 : #define SET_MAX FD_SHRED_BLK_MAX
35 : #include "../../util/tmpl/fd_set.c"
36 :
37 : /* fd_forest_blk_t implements a left-child, right-sibling n-ary
38 : tree. Each ele maintains the `pool` index of its left-most child
39 : (`child_idx`), its immediate-right sibling (`sibling_idx`), and its
40 : parent (`parent_idx`).
41 :
42 : This tree structure is gaddr-safe and supports accesses and
43 : operations from processes with separate local forest joins. */
44 :
struct __attribute__((aligned(128UL))) fd_forest_blk {
  ulong slot;        /* map key */
  ulong parent_slot; /* map key of the parent. invariant: if parent is populated, parent_slot is populated. the converse is not necessarily true. */
  ulong next;        /* internal use by fd_pool, fd_map_chain */
  ulong parent;      /* pool idx of the parent in the tree */
  ulong child;       /* pool idx of the left-most child */
  ulong sibling;     /* pool idx of the immediate right-sibling */

  ulong head;        /* reserved by dlist. not all blks will be part of a dlist. */
  ulong tail;        /* reserved by dlist */

  uint consumed_idx; /* highest contiguous fec-completed shred idx */
  uint buffered_idx; /* highest contiguous buffered shred idx */
  uint complete_idx; /* shred_idx with SLOT_COMPLETE_FLAG ie. last shred idx in the slot */

  fd_forest_blk_idxs_t fecs[fd_forest_blk_idxs_word_cnt]; /* fec set idxs - 1, ie. the idx of the last shred in every FEC set */
  fd_forest_blk_idxs_t idxs[fd_forest_blk_idxs_word_cnt]; /* data shred idxs received so far */
  fd_forest_blk_idxs_t cmpl[fd_forest_blk_idxs_word_cnt]; /* last shred idx of every FEC set that has been completed by shred_tile */

  /* i.e. when fecs == cmpl, the slot is truly complete and everything
     is contained in fec store.  Look at fec_clear for more details. */

  int est_buffered_tick_recv; /* tick of shred at buffered_idx.  Note since we don't track all
                                 the ticks received, this will be a lower-bound estimate on the
                                 highest tick we have seen.  But this is only used for limiting
                                 eager repair, so an exact value is not necessary. */

  /* Metrics */

  fd_forest_blk_idxs_t code[fd_forest_blk_idxs_word_cnt]; /* code shred idxs */
  long first_shred_ts;  /* tick of first shred rcved in slot != complete_idx */
  long first_req_ts;    /* tick of first request sent in slot != complete_idx */
  uint turbine_cnt;     /* number of shreds received from turbine */
  uint repair_cnt;      /* number of data shreds received from repair */
  uint recovered_cnt;   /* number of shreds recovered from reedsol recovery */
};
typedef struct fd_forest_blk fd_forest_blk_t;
81 :
82 : #define POOL_NAME fd_forest_pool
83 0 : #define POOL_T fd_forest_blk_t
84 : #include "../../util/tmpl/fd_pool.c"
85 :
86 : #define MAP_NAME fd_forest_ancestry
87 : #define MAP_ELE_T fd_forest_blk_t
88 0 : #define MAP_KEY slot
89 : #include "../../util/tmpl/fd_map_chain.c"
90 :
91 : #define MAP_NAME fd_forest_frontier
92 : #define MAP_ELE_T fd_forest_blk_t
93 0 : #define MAP_KEY slot
94 : #include "../../util/tmpl/fd_map_chain.c"
95 :
96 : #define MAP_NAME fd_forest_orphaned
97 : #define MAP_ELE_T fd_forest_blk_t
98 0 : #define MAP_KEY slot
99 : #include "../../util/tmpl/fd_map_chain.c"
100 :
101 : #define MAP_NAME fd_forest_subtrees
102 : #define MAP_ELE_T fd_forest_blk_t
103 0 : #define MAP_KEY slot
104 : #include "../../util/tmpl/fd_map_chain.c"
105 :
106 : #define DLIST_NAME fd_forest_subtlist /* thread a dlist through the subtree elements for fast iteration */
107 : #define DLIST_ELE_T fd_forest_blk_t
108 0 : #define DLIST_NEXT head
109 0 : #define DLIST_PREV tail
110 : #include "../../util/tmpl/fd_dlist.c"
111 :
112 : /* A reference to a forest element
113 :
114 : The following maps/pools are used to track future requests.
115 :
116 : Requests:
117 : - slots that branch from the main tree (ancestry) that are being repaired /
118 : have yet to be repaired. Maintained in a dlist, where the head
119 : is the current slot being repaired.
120 :
121 : Orphreqs (orphaned requests):
122 : - slots that branch from the unconnected trees (subtrees/orphans) that are being repaired /
123 : have yet to be repaired. Maintained in a dlist, where the head
124 : is the current orphan request being repaired.
125 :
126 : Note that orphan requests are specifically an optimization from when
127 : we are catching up from very far behind. In the usual case when we
boot and we are catching up from close behind, need orphans is
very fast and has a negligible cost on total repair time. But
130 : during special cases where we are catching up from very far behind,
131 : need orphans can take a significant time because orphan requests
132 : cannot be pipelined. In this case, we can use time waiting for
133 : orphan requests to respond to also repair the full slots of these
134 : orphan trees.
135 :
136 : Consumed:
137 : - slots where the entire ancestry up to the root has been completed.
138 : This is what we are repairing next. There should be <= num forks
139 : elements in the consumed map.
140 : */
struct fd_forest_ref {
  ulong idx;  /* forest pool idx of the ele this ref refers to (map key for requests/consumed maps) */
  ulong next; /* reserved by dlist */
  ulong prev; /* reserved by dlist */
  ulong hash; /* reserved by pool (freelist link) and map_chain (chain link) */
};
typedef struct fd_forest_ref fd_forest_ref_t;
148 :
149 : #define MAP_NAME fd_forest_requests /* TODO this map could be redundant (i.e. we only need the deque). Also this is awkwardly coupled between forest and policy */
150 : #define MAP_ELE_T fd_forest_ref_t
151 0 : #define MAP_KEY idx
152 0 : #define MAP_NEXT hash
153 : #include "../../util/tmpl/fd_map_chain.c"
154 :
155 : #define DLIST_NAME fd_forest_reqslist
156 : #define DLIST_ELE_T fd_forest_ref_t
157 0 : #define DLIST_NEXT next
158 0 : #define DLIST_PREV prev
159 : #include "../../util/tmpl/fd_dlist.c"
160 :
161 : #define POOL_NAME fd_forest_reqspool
162 0 : #define POOL_T fd_forest_ref_t
163 0 : #define POOL_NEXT hash
164 : #include "../../util/tmpl/fd_pool.c"
165 :
166 : /* Below for fast tracking of contiguous completes slots */
167 : #define MAP_NAME fd_forest_consumed
168 : #define MAP_ELE_T fd_forest_ref_t
169 0 : #define MAP_KEY idx
170 0 : #define MAP_NEXT hash
171 : #include "../../util/tmpl/fd_map_chain.c"
172 :
173 : #define DLIST_NAME fd_forest_conslist
174 : #define DLIST_ELE_T fd_forest_ref_t
175 0 : #define DLIST_NEXT next
176 0 : #define DLIST_PREV prev
177 : #include "../../util/tmpl/fd_dlist.c"
178 :
179 : #define POOL_NAME fd_forest_conspool
180 0 : #define POOL_T fd_forest_ref_t
181 0 : #define POOL_NEXT hash
182 : #include "../../util/tmpl/fd_pool.c"
183 :
184 : /* Reuse reqslist for orphan requests list, and share pool */
185 :
186 : /* Internal use only for BFSing */
187 : #define DEQUE_NAME fd_forest_deque
188 0 : #define DEQUE_T ulong
189 : #include "../../util/tmpl/fd_deque_dynamic.c"
190 :
191 :
192 : /* fd_forest_t is the top-level structure that holds the root of
193 : the tree, as well as the memory pools and map structures.
194 :
195 : These structures are bump-allocated and laid out contiguously in
196 : memory from the fd_forest_t * pointer which points to the
197 : beginning of the memory region.
198 :
   ---------------------  <- fd_forest_t *
   | metadata          |
   |-------------------|
   | version fseq      |
   |-------------------|
   | pool              |
   |-------------------|
   | ancestry          |
   |-------------------|
   | frontier          |
   |-------------------|
   | subtrees          |
   |-------------------|
   | orphaned          |
   |-------------------|
   | subtlist          |
   |-------------------|
   | requests          |
   |-------------------|
   | reqslist          |
   |-------------------|
   | reqspool          |
   |-------------------|
   | consumed          |
   |-------------------|
   | conslist          |
   |-------------------|
   | conspool          |
   |-------------------|
   | orphreqs          |
   |-------------------|
   | orphlist (reqlist)|
   |-------------------|
   | deque             |
   ---------------------
228 :
229 : A valid, initialized forest is always non-empty. After
230 : `fd_forest_init` the forest will always have a root ele unless
231 : modified improperly out of forest's API.*/
232 :
/* fd_forest_iter_t is a cursor over one of forest's request lists.
   See fd_forest_iter_next below for the iteration semantics (in
   particular, shred_idx==UINT_MAX signals a highest_window_index
   request). */
struct fd_forest_iter {
  ulong ele_idx;    /* forest pool idx of the blk currently being requested */
  uint  shred_idx;  /* next shred idx to request within that blk */
  ulong list_gaddr; /* wksp gaddr of the list this iterator corresponds to */
};
typedef struct fd_forest_iter fd_forest_iter_t;
struct __attribute__((aligned(128UL))) fd_forest {
  ulong root;           /* pool idx of the root */
  ulong wksp_gaddr;     /* wksp gaddr of fd_forest in the backing wksp, non-zero gaddr */
  ulong ver_gaddr;      /* wksp gaddr of version fseq, incremented on write ops */
  ulong pool_gaddr;     /* wksp gaddr of fd_pool */
  ulong ancestry_gaddr; /* wksp gaddr of fd_forest_ancestry */
  ulong frontier_gaddr; /* leaves that need repair */
  ulong subtrees_gaddr; /* heads of orphaned trees */
  ulong orphaned_gaddr; /* map of parent_slot to singly-linked list of ele orphaned by that parent slot */

  ulong subtlist_gaddr; /* wksp gaddr of fd_forest_subtlist - linked list of subtree elements */

  /* Request trackers */

  ulong requests_gaddr; /* map of slot to pool idx of the completed repair frontier */
  ulong reqslist_gaddr; /* wksp gaddr of fd_forest_reqslist */
  ulong reqspool_gaddr; /* wksp gaddr of fd_forest_reqspool (shared by requests and orphan requests) */

  ulong consumed_gaddr; /* wksp gaddr of fd_forest_consumed */
  ulong conslist_gaddr; /* wksp gaddr of fd_forest_conslist */
  ulong conspool_gaddr; /* wksp gaddr of fd_forest_conspool */

  ulong orphreqs_gaddr; /* wksp gaddr of fd_forest_orphreqs (a fd_forest_requests map) */
  ulong orphlist_gaddr; /* wksp gaddr of fd_forest_orphlist (a fd_forest_reqslist) */

  fd_forest_iter_t iter;     /* requests iterator corresponding to head of requests deque */
  fd_forest_iter_t orphiter; /* orphan requests iterator corresponding to head of orphan requests list */

  ulong deque_gaddr;    /* wksp gaddr of fd_forest_deque. internal use only for BFSing */
  ulong magic;          /* ==FD_FOREST_MAGIC */
};
typedef struct fd_forest fd_forest_t;
271 :
272 : FD_PROTOTYPES_BEGIN
273 :
274 : /* Constructors */
275 :
276 : /* fd_forest_{align,footprint} return the required alignment and
277 : footprint of a memory region suitable for use as forest with up to
ele_max eles. */
279 :
FD_FN_CONST static inline ulong
fd_forest_align( void ) {
  /* matches the aligned(128UL) attribute on fd_forest_t */
  return alignof(fd_forest_t);
}
284 :
FD_FN_CONST static inline ulong
fd_forest_footprint( ulong ele_max ) {
  /* Regions are bump-allocated in this order (see the layout diagram
     above).  Note the orphreqs / orphlist regions reuse the requests /
     reqslist template types, and orphan requests share reqspool, so no
     extra pool region is appended for them. */
  return FD_LAYOUT_FINI(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_INIT,
      alignof(fd_forest_t),       sizeof(fd_forest_t)                     ), /* metadata */
      fd_fseq_align(),            fd_fseq_footprint()                     ), /* version fseq */
      fd_forest_pool_align(),     fd_forest_pool_footprint ( ele_max )    ), /* pool */
      fd_forest_ancestry_align(), fd_forest_ancestry_footprint( ele_max ) ), /* ancestry */
      fd_forest_frontier_align(), fd_forest_frontier_footprint( ele_max ) ), /* frontier */
      fd_forest_subtrees_align(), fd_forest_subtrees_footprint( ele_max ) ), /* subtrees */
      fd_forest_orphaned_align(), fd_forest_orphaned_footprint( ele_max ) ), /* orphaned */
      fd_forest_subtlist_align(), fd_forest_subtlist_footprint( )         ), /* subtlist */

      fd_forest_requests_align(), fd_forest_requests_footprint( ele_max ) ), /* requests */
      fd_forest_reqslist_align(), fd_forest_reqslist_footprint( )         ), /* reqslist */
      fd_forest_reqspool_align(), fd_forest_reqspool_footprint( ele_max ) ), /* reqspool (shared with orphan requests) */
      fd_forest_consumed_align(), fd_forest_consumed_footprint( ele_max ) ), /* consumed */
      fd_forest_conslist_align(), fd_forest_conslist_footprint( )         ), /* conslist */
      fd_forest_conspool_align(), fd_forest_conspool_footprint( ele_max ) ), /* conspool */
      fd_forest_requests_align(), fd_forest_requests_footprint( ele_max ) ), /* orphreqs (requests template) */
      fd_forest_reqslist_align(), fd_forest_reqslist_footprint( )         ), /* orphlist (reqslist template) */
      fd_forest_deque_align(),    fd_forest_deque_footprint ( ele_max )   ), /* deque (internal BFS scratch) */
    fd_forest_align() );
}
326 :
327 : /* fd_forest_new formats an unused memory region for use as a
328 : forest. mem is a non-NULL pointer to this region in the local
329 : address space with the required footprint and alignment. */
330 :
331 : void *
332 : fd_forest_new( void * shmem, ulong ele_max, ulong seed );
333 :
334 : /* fd_forest_join joins the caller to the forest. forest
335 : points to the first byte of the memory region backing the forest
336 : in the caller's address space. Returns a pointer in the local
337 : address space to forest on success. */
338 :
339 : fd_forest_t *
340 : fd_forest_join( void * forest );
341 :
342 : /* fd_forest_leave leaves a current local join. Returns a pointer
343 : to the underlying shared memory region on success and NULL on failure
344 : (logs details). Reasons for failure include forest is NULL. */
345 :
346 : void *
347 : fd_forest_leave( fd_forest_t const * forest );
348 :
349 : /* fd_forest_delete unformats a memory region used as a
forest. Assumes nobody is joined to the region.
351 : Returns a pointer to the underlying shared memory region or NULL if
352 : used obviously in error (e.g. forest is obviously not a
353 : forest ... logs details). The ownership of the memory region is
354 : transferred to the caller. */
355 :
356 : void *
357 : fd_forest_delete( void * forest );
358 :
359 : /* fd_forest_init initializes a forest. Assumes forest
360 : is a valid local join and no one else is joined. root is the initial
361 : root forest will use. This is the snapshot slot if booting from
362 : a snapshot, 0 if the genesis slot.
363 :
364 : In general, this should be called by the same process that formatted
365 : forest's memory, ie. the caller of fd_forest_new. */
366 :
367 : fd_forest_t *
368 : fd_forest_init( fd_forest_t * forest, ulong root );
369 :
370 : /* fd_forest_fini finishes an forest. Assumes forest is
371 : a valid local join and no one else is joined. */
372 :
373 : fd_forest_t *
374 : fd_forest_fini( fd_forest_t * forest );
375 :
376 : /* Accessors */
377 :
378 : /* fd_forest_wksp returns the local join to the wksp backing the
379 : forest. The lifetime of the returned pointer is at least as
380 : long as the lifetime of the local join. Assumes forest is a
381 : current local join. */
382 :
383 : FD_FN_PURE static inline fd_wksp_t *
384 0 : fd_forest_wksp( fd_forest_t const * forest ) {
385 0 : return (fd_wksp_t *)( ( (ulong)forest ) - forest->wksp_gaddr );
386 0 : }
387 :
388 : /* fd_forest_{ver, ver_const} returns the local join to the version
389 : number fseq. The lifetime of the returned pointer is at least as
390 : long as the lifetime of the local join. Assumes forest is a
current local join. If value is ULONG_MAX, forest is uninitialized or
392 : invalid. Query pre- & post-read:
393 :
394 : odd: if either pre or post is odd, discard read.
395 : even: if pre == post, read is consistent. */
396 :
397 : FD_FN_PURE static inline ulong *
398 0 : fd_forest_ver( fd_forest_t * forest ) {
399 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->ver_gaddr );
400 0 : }
401 :
402 : FD_FN_PURE static inline ulong const *
403 0 : fd_forest_ver_const( fd_forest_t const * forest ) {
404 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->ver_gaddr );
405 0 : }
406 :
407 : /* fd_forest_{pool, pool_const} returns a pointer in the caller's address
408 : space to forest's element pool. */
409 :
410 : FD_FN_PURE static inline fd_forest_blk_t *
411 0 : fd_forest_pool( fd_forest_t * forest ) {
412 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->pool_gaddr );
413 0 : }
414 :
415 : FD_FN_PURE static inline fd_forest_blk_t const *
416 0 : fd_forest_pool_const( fd_forest_t const * forest ) {
417 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->pool_gaddr );
418 0 : }
419 :
420 : /* fd_forest_{ancestry, ancestry_const} returns a pointer in the caller's
421 : address space to forest's ancestry map. */
422 :
423 : FD_FN_PURE static inline fd_forest_ancestry_t *
424 0 : fd_forest_ancestry( fd_forest_t * forest ) {
425 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->ancestry_gaddr );
426 0 : }
427 :
428 : FD_FN_PURE static inline fd_forest_ancestry_t const *
429 0 : fd_forest_ancestry_const( fd_forest_t const * forest ) {
430 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->ancestry_gaddr );
431 0 : }
432 :
433 : /* fd_forest_{frontier, frontier_const} returns a pointer in the caller's
434 : address space to forest's frontier map. */
435 :
436 : FD_FN_PURE static inline fd_forest_frontier_t *
437 0 : fd_forest_frontier( fd_forest_t * forest ) {
438 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->frontier_gaddr );
439 0 : }
440 :
441 : FD_FN_PURE static inline fd_forest_frontier_t const *
442 0 : fd_forest_frontier_const( fd_forest_t const * forest ) {
443 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->frontier_gaddr );
444 0 : }
445 :
446 : /* fd_forest_{subtrees, subtrees_const} returns a pointer in the caller's
447 : address space to forest's subtrees map. */
448 :
449 : FD_FN_PURE static inline fd_forest_subtrees_t *
450 0 : fd_forest_subtrees( fd_forest_t * forest ) {
451 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->subtrees_gaddr );
452 0 : }
453 :
454 : FD_FN_PURE static inline fd_forest_subtrees_t const *
455 0 : fd_forest_subtrees_const( fd_forest_t const * forest ) {
456 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->subtrees_gaddr );
457 0 : }
458 :
459 : /* fd_forest_{subtlist, subtlist_const} returns a pointer in the caller's
460 : address space to forest's subtlist. */
461 :
462 : FD_FN_PURE static inline fd_forest_subtlist_t *
463 0 : fd_forest_subtlist( fd_forest_t * forest ) {
464 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->subtlist_gaddr );
465 0 : }
466 :
467 : FD_FN_PURE static inline fd_forest_subtlist_t const *
468 0 : fd_forest_subtlist_const( fd_forest_t const * forest ) {
469 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->subtlist_gaddr );
470 0 : }
471 :
472 : /* fd_forest_{orphaned, orphaned_const} returns a pointer in the caller's
473 : address space to forest's orphaned map. */
474 :
475 : FD_FN_PURE static inline fd_forest_orphaned_t *
476 0 : fd_forest_orphaned( fd_forest_t * forest ) {
477 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->orphaned_gaddr );
478 0 : }
479 :
480 : FD_FN_PURE static inline fd_forest_orphaned_t const *
481 0 : fd_forest_orphaned_const( fd_forest_t const * forest ) {
482 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->orphaned_gaddr );
483 0 : }
484 :
485 : /* fd_forest_{consumed, consumed_const} returns a pointer in the caller's
486 : address space to forest's consumed map. */
487 :
488 : FD_FN_PURE static inline fd_forest_consumed_t *
489 0 : fd_forest_consumed( fd_forest_t * forest ) {
490 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->consumed_gaddr );
491 0 : }
492 :
493 : FD_FN_PURE static inline fd_forest_consumed_t const *
494 0 : fd_forest_consumed_const( fd_forest_t const * forest ) {
495 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->consumed_gaddr );
496 0 : }
497 :
498 : /* fd_forest_{conslist, conslist_const} returns a pointer in the caller's
499 : address space to forest's consumed list. */
500 :
501 : FD_FN_PURE static inline fd_forest_conslist_t *
502 0 : fd_forest_conslist( fd_forest_t * forest ) {
503 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->conslist_gaddr );
504 0 : }
505 :
506 : FD_FN_PURE static inline fd_forest_conslist_t const *
507 0 : fd_forest_conslist_const( fd_forest_t const * forest ) {
508 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->conslist_gaddr );
509 0 : }
510 :
511 : /* fd_forest_{conspool, conspool_const} returns a pointer in the caller's
512 : address space to forest's consumed pool. */
513 :
514 : FD_FN_PURE static inline fd_forest_ref_t *
515 0 : fd_forest_conspool( fd_forest_t * forest ) {
516 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->conspool_gaddr );
517 0 : }
518 :
519 : FD_FN_PURE static inline fd_forest_ref_t const *
520 0 : fd_forest_conspool_const( fd_forest_t const * forest ) {
521 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->conspool_gaddr );
522 0 : }
523 :
524 : /* fd_forest_{requests, requests_const} returns a pointer in the caller's
525 : address space to forest's requests map. */
526 :
527 : FD_FN_PURE static inline fd_forest_requests_t *
528 0 : fd_forest_requests( fd_forest_t * forest ) {
529 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->requests_gaddr );
530 0 : }
531 :
532 : FD_FN_PURE static inline fd_forest_requests_t const *
533 0 : fd_forest_requests_const( fd_forest_t const * forest ) {
534 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->requests_gaddr );
535 0 : }
536 :
537 : /* fd_forest_{reqslist, reqslist_const} returns a pointer in the caller's
538 : address space to forest's reqslist. */
539 :
540 : FD_FN_PURE static inline fd_forest_reqslist_t *
541 0 : fd_forest_reqslist( fd_forest_t * forest ) {
542 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->reqslist_gaddr );
543 0 : }
544 :
545 : FD_FN_PURE static inline fd_forest_reqslist_t const *
546 0 : fd_forest_reqslist_const( fd_forest_t const * forest ) {
547 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->reqslist_gaddr );
548 0 : }
549 :
/* fd_forest_{orphreqs, orphreqs_const} returns a pointer in the caller's
   address space to forest's orphreqs map. */
552 :
553 : FD_FN_PURE static inline fd_forest_requests_t *
554 0 : fd_forest_orphreqs( fd_forest_t * forest ) {
555 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->orphreqs_gaddr );
556 0 : }
557 :
558 : FD_FN_PURE static inline fd_forest_requests_t const *
559 0 : fd_forest_orphreqs_const( fd_forest_t const * forest ) {
560 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->orphreqs_gaddr );
561 0 : }
562 :
/* fd_forest_{orphlist, orphlist_const} returns a pointer in the caller's
   address space to forest's orphlist. */
565 :
566 : FD_FN_PURE static inline fd_forest_reqslist_t *
567 0 : fd_forest_orphlist( fd_forest_t * forest ) {
568 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->orphlist_gaddr );
569 0 : }
570 :
571 : FD_FN_PURE static inline fd_forest_reqslist_t const *
572 0 : fd_forest_orphlist_const( fd_forest_t const * forest ) {
573 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->orphlist_gaddr );
574 0 : }
575 :
576 : /* fd_forest_{reqspool, reqspool_const} returns a pointer in the caller's
577 : address space to forest's reqspool pool. */
578 :
579 : FD_FN_PURE static inline fd_forest_ref_t *
580 0 : fd_forest_reqspool( fd_forest_t * forest ) {
581 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->reqspool_gaddr );
582 0 : }
583 :
584 : FD_FN_PURE static inline fd_forest_ref_t const *
585 0 : fd_forest_reqspool_const( fd_forest_t const * forest ) {
586 0 : return fd_wksp_laddr_fast( fd_forest_wksp( forest ), forest->reqspool_gaddr );
587 0 : }
588 :
589 : /* fd_forest_root_slot returns forest's root slot. Assumes
590 : forest is a current local join. */
591 :
592 : FD_FN_PURE static inline ulong
593 0 : fd_forest_root_slot( fd_forest_t const * forest ) {
594 0 : if( FD_UNLIKELY( forest->root == fd_forest_pool_idx_null( fd_forest_pool_const( forest ) ) )) return ULONG_MAX; /* uninitialized */
595 0 : return fd_forest_pool_ele_const( fd_forest_pool_const( forest ), forest->root )->slot;
596 0 : }
597 :
598 : fd_forest_blk_t *
599 : fd_forest_query( fd_forest_t * forest, ulong slot );
600 :
601 : /* Operations */
602 :
603 : /* fd_forest_blk_insert inserts a new block into the forest. Assumes
604 : slot >= forest->smr, and the blk pool has a free element (if
605 : handholding is enabled, explicitly checks and errors). This blk
606 : insert is idempotent, and can be called multiple times with the same
607 : slot. Returns the inserted forest ele. */
608 :
609 : fd_forest_blk_t *
610 : fd_forest_blk_insert( fd_forest_t * forest, ulong slot, ulong parent_slot );
611 :
612 : /* fd_forest_blk_parent_update updates the parent of a block in the forest.
613 : Needed for profiler mode. */
614 :
615 : fd_forest_blk_t *
616 : fd_forest_blk_parent_update( fd_forest_t * forest, ulong slot, ulong parent_slot );
617 :
618 0 : #define SHRED_SRC_TURBINE 0
619 0 : #define SHRED_SRC_REPAIR 1
620 0 : #define SHRED_SRC_RECOVERED 2
621 :
622 : /* fd_forest_shred_insert inserts a new shred into the forest.
623 : Assumes slot is already in forest, and should typically be called
624 : directly after fd_forest_block_insert. Returns the forest ele
625 : corresponding to the shred slot. */
626 :
627 : fd_forest_blk_t *
628 : fd_forest_data_shred_insert( fd_forest_t * forest, ulong slot, ulong parent_slot, uint shred_idx, uint fec_set_idx, int slot_complete, int ref_tick, int src );
629 :
630 : fd_forest_blk_t *
631 : fd_forest_code_shred_insert( fd_forest_t * forest, ulong slot, uint shred_idx );
632 :
633 : /* fd_forest_fec_insert inserts a new fully completed FEC set into the
634 : forest. Assumes slot is already in forest, and should typically be
635 : called directly after fd_forest_block_insert. Returns the forest ele
636 : corresponding to the shred slot. */
637 :
638 : fd_forest_blk_t *
639 : fd_forest_fec_insert( fd_forest_t * forest, ulong slot, ulong parent_slot, uint last_shred_idx, uint fec_set_idx, int slot_complete, int ref_tick );
640 :
641 : /* fd_forest_fec_clear clears the FEC set at the given slot and
642 : fec_set_idx.
643 : Can fec_clear break requests frontier invariants? No. Why?
644 :
645 : TODO: Update this comment with new requests map changes
646 :
647 : 2) If slot n is in scope of the forest root, then the shred
648 : delivered to repair will trigger a data_shred_insert call
649 : that does nothing, as repair already has record of that
650 : shred. Eventually the fec_completes or fec_clear msg will be
651 : delivered to repair. fec_insert will do nothing. fec_clear
652 : will remove the idxs for the shreds from the bitset, and
653 : update the buffered_idx. This doesn't matter though! because
654 : we already have moved past slot n on the requests frontier.
655 : No need to request those shreds again.
656 :
657 : Except 2) breaks a bit with in specific leader slot cases. See
658 : fd_forest_fec_clear for more details. */
659 :
660 : void
661 : fd_forest_fec_clear( fd_forest_t * forest, ulong slot, uint fec_set_idx, uint max_shred_idx );
662 :
663 : /* fd_forest_publish publishes slot as the new forest root, setting
664 : the subtree beginning from slot as the new forest tree (ie. slot
665 : and all its descendants). Prunes all eles not in slot's forest.
666 : Assumes slot is present in forest. Returns the new root. */
667 :
668 : fd_forest_blk_t const *
669 : fd_forest_publish( fd_forest_t * forest, ulong slot );
670 :
671 : /* fd_forest_highest_repaired_slot returns the highest child of a fully,
672 : contiguously repaired slot. */
673 : ulong
674 : fd_forest_highest_repaired_slot( fd_forest_t const * forest );
675 :
676 : /* fd_forest_iter_* takes either the standard iterator or the orphan
677 : iterator and returns the next shred to request. The iterator must
678 : one of the two iterators that is owned by the forest.
679 :
680 : The iterator will be in an iter_done state if there are no current
681 : shreds to request.
682 :
683 : The forward forest iterator will visit each shred at most once over
684 : the lifetime of the forest, without revisiting past shreds, so it is
685 : up to the caller to track which shreds will need re-requesting. The
686 : exception to the rule is slots where the slot_complete shred is still
687 : not known - the highest window idx will be requested for that slot,
688 : and the slot will be added to the tail of the requests deque so that
689 : later we may revisit it. As a result, the children of that slot may
690 : also be revisited multiple times.
691 :
692 : Note this case is pretty rare.
693 :
694 : An iterator signifies to the repair tile to request the
695 : highest_window_index when the ele_idx is not null and shred_idx is
696 : UINT_MAX.
697 :
698 : Otherwise, the iterator signifies to the repair tile to request a
699 : regular shred window_idx.
700 :
701 : Invariants for requests map and requests deque:
702 :
703 : There can only be one occurence of the slot in the requests deque at
704 : any time. Any slot in the requests deque must exist in the requests
705 : map, and vice versa. Any slot in the requests map must also exist in
706 : the forest. During publish the requests map must also be pruned.
707 :
708 : If we are mid-request of a slot that gets pruned, forest will take
709 : responsibility to update the iterator to a valid slot.
710 :
711 : TODO: should this really be an iterator?? or just a _next function? */
712 :
713 : fd_forest_iter_t *
714 : fd_forest_iter_next( fd_forest_iter_t * iter, fd_forest_t * forest );
715 :
716 : int
717 : fd_forest_iter_done( fd_forest_iter_t * iter, fd_forest_t * forest );
718 :
719 : /* Misc */
720 :
721 : /* fd_forest_verify checks the forest is not obviously corrupt.
722 : Returns 0 if verify succeeds, -1 otherwise. */
723 :
724 : int
725 : fd_forest_verify( fd_forest_t const * forest );
726 :
727 : /* fd_forest_print pretty-prints a formatted forest tree. Printing begins
728 : from `ele` (it will appear as the root in the print output).
729 :
730 : The most straightforward and commonly used printing pattern is:
731 : `fd_forest_print( forest, fd_forest_root( forest ) )`
732 :
733 : This would print forest beginning from the root.
734 :
735 : Alternatively, caller can print a more localized view, for example
736 : starting from the grandparent of the most recently executed slot:
737 :
738 : ```
739 : fd_forest_blk_t const * ele = fd_forest_query( slot );
740 : fd_forest_print( forest, fd_forest_parent( fd_forest_parent( ele ) ) )
741 : ```
742 :
743 : Callers should add null-checks as appropriate in actual usage. */
744 :
745 : void
746 : fd_forest_print( fd_forest_t const * forest );
747 :
748 : FD_PROTOTYPES_END
749 :
750 : #endif /* HEADER_fd_src_discof_forest_fd_forest_h */
|