Line data Source code
1 : #include "../../ballet/shred/fd_shred.h"
2 : #include "../../ballet/shred/fd_fec_set.h"
3 : #include "../../ballet/sha512/fd_sha512.h"
4 : #include "../../ballet/ed25519/fd_ed25519.h"
5 : #include "../../ballet/reedsol/fd_reedsol.h"
6 : #include "../metrics/fd_metrics.h"
7 : #include "fd_fec_resolver.h"
8 :
9 264 : #define SHRED_CNT_NOT_SET (UINT_MAX/2U)
10 :
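/* wrapped_sig_t overlays a ulong on the leading 8 bytes of an Ed25519
   signature so that a shred's signature can be used directly as a map
   key with a cheap single-word hash (see MAP_KEY_HASH below). */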
11 : typedef union {
12 : fd_ed25519_sig_t u;
13 : ulong l;
14 : } wrapped_sig_t;
15 :
16 : struct set_ctx;
17 : typedef struct set_ctx set_ctx_t;
18 :
19 : struct __attribute__((aligned(32UL))) set_ctx {
20 : wrapped_sig_t sig;
21 : fd_fec_set_t * set;
22 : fd_bmtree_commit_t * tree;
23 : set_ctx_t * prev;
24 : set_ctx_t * next;
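/* total_rx_shred_cnt counts the distinct data and parity shreds of
   this FEC set received so far */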
25 : ulong total_rx_shred_cnt;
26 : ulong fec_set_idx;
27 : /* The shred index of the first parity shred in this FEC set */
28 : ulong parity_idx0;
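/* Expected shred variant bytes for the data and parity shreds of this
   FEC set. All shreds in a set must agree on the Merkle tree depth and
   on the chained/resigned bits, so later shreds whose variant differs
   are rejected. */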
29 : uchar data_variant;
30 : uchar parity_variant;
31 : /* If this FEC set has resigned shreds, this is our signature of the
32 : root of the Merkle tree */
33 : wrapped_sig_t retransmitter_sig;
34 : };
35 : typedef struct set_ctx set_ctx_t;
36 :
37 : #define DEQUE_NAME freelist
38 222 : #define DEQUE_T fd_fec_set_t *
39 : #include "../../util/tmpl/fd_deque_dynamic.c"
40 :
41 : #define DEQUE_NAME bmtrlist
42 132 : #define DEQUE_T void *
43 : #include "../../util/tmpl/fd_deque_dynamic.c"
44 :
45 : static const wrapped_sig_t null_signature = {{0}};
46 :
47 7977 : #define MAP_KEY sig
48 7188 : #define MAP_KEY_T wrapped_sig_t
49 789 : #define MAP_KEY_NULL null_signature
50 17427 : #define MAP_KEY_EQUAL(k0,k1) (!memcmp( (k0).u, (k1).u, FD_ED25519_SIG_SZ ))
51 10710 : #define MAP_KEY_INVAL(k) MAP_KEY_EQUAL( k, MAP_KEY_NULL )
52 : #define MAP_KEY_EQUAL_IS_SLOW 1
53 6957 : #define MAP_KEY_HASH(key) ((MAP_HASH_T)fd_ulong_hash( key.l ))
54 : #define MAP_MEMOIZE 0
55 : #define MAP_NAME ctx_map
56 7056 : #define MAP_T set_ctx_t
57 : /* The prev and next fields of set_ctx_t thread a linked list through
58 : the map. The map can move elements around during a deletion though,
59 : so we need to update the links when it does. Thankfully it gives a
60 : perfect hook for doing so. */
61 6 : #define MAP_MOVE(d,s) do { \
62 6 : set_ctx_t * _d = &(d); \
63 6 : set_ctx_t * _s = &(s); \
64 6 : _s->prev->next = _d; \
65 6 : _s->next->prev = _d; \
66 6 : *_d = *_s; \
67 6 : } while( 0 )
68 : #include "../../util/tmpl/fd_map_dynamic.c"
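/* The instantiation above generates the ctx_map_{align,footprint,new,
   join,leave,delete,insert,query,remove,key_cnt,key_inval} family used
   below for both curr_map and done_map. */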
69 :
70 :
71 : struct __attribute__((aligned(FD_FEC_RESOLVER_ALIGN))) fd_fec_resolver {
72 : /* depth stores the number of FEC sets this resolver can track
73 : simultaneously. done_depth stores the depth of the done tcache,
74 : i.e. the number of done FEC set keys that this resolver remembers.
75 : partial_depth stores the minimum size of the free FEC set list.
76 : complete_depth stores the size of the complete FEC set list. */
77 : ulong depth;
78 : ulong partial_depth;
79 : ulong complete_depth;
80 : ulong done_depth;
81 :
82 : /* expected_shred_version: discard all shreds with a shred version
83 : other than the specified value */
84 : ushort expected_shred_version;
85 :
86 : /* curr_map: A map (using fd_map_dynamic) from tags of signatures to
87 : the context object with its relevant data. This map contains at
88 : most `depth` elements at any time, but to improve query performance,
89 : we size it to at least 2*depth slots. */
90 : set_ctx_t * curr_map;
91 :
92 : /* curr_ll_sentinel: The elements of curr_map are also threaded into
93 : a circular doubly linked list using the next and prev
94 : fields. To simplify the logic, we use a sentinel node that's
95 : stored here instead of in the map. Thus, the head (newest) and the
96 : tail (oldest) of the linked list are the next and prev pointers of
97 : this context (respectively). The other fields aren't used. */
98 : set_ctx_t curr_ll_sentinel[1];
99 :
100 : /* done: stores signatures of FEC sets that have recently been
101 : completed. This is like a tcache, but with a non-ulong key and
102 : using a linked list instead of a ring buffer. Any new packets
103 : matching tags in this set can be ignored. Since the data structure
104 : we need (map with linked list) is very similar to the one for curr_map, we
105 : just use the same fd_map_dynamic instantiation. Only fields sig,
106 : prev, and next are used. */
107 : set_ctx_t * done_map;
108 :
109 : /* done_ll_sentinel: Analogous to curr_ll_sentinel, but for the done
110 : map instead of the current map. */
111 : set_ctx_t done_ll_sentinel[1];
112 :
113 : /* free_list and complete_list are deques (using fd_deque_dynamic)
114 : that store FEC sets that are not in contexts in curr_map. Similarly,
115 : bmtree_free_list stores footprints for the bmtree objects that are
116 : not in contexts in curr_map. These lists point to objects of
117 : indeterminate state and need to be cleared/reset when popped off.
118 : Invariant: at every entry and exit to fd_fec_resolver_add_shred:
119 : - free_list has between partial_depth and partial_depth+depth
120 : elements.
121 : - complete_list has complete_depth elements
122 : - bmtree_free_list has between 0 and depth elements
123 : (all these counts are inclusive). */
124 : fd_fec_set_t * * free_list;
125 : fd_fec_set_t * * complete_list;
126 : void * * bmtree_free_list;
127 :
128 : /* signer is used to sign shreds that require a retransmitter
129 : signature. sign_ctx is provided as the first argument to the
130 : function. */
131 : fd_fec_resolver_sign_fn * signer;
132 : void * sign_ctx;
133 :
134 : /* max_shred_idx is the exclusive upper bound for shred indices. We
135 : need to reject any shred with an index >= max_shred_idx, but we
136 : also want to reject anything that is part of an FEC set where the
137 : highest index of a shred in the FEC set will be >= max_shred_idx.
138 : */
139 : ulong max_shred_idx;
140 :
141 : /* sha512 and reedsol are used for calculations while adding a shred.
142 : Their state outside a call to add_shred is indeterminate. */
143 : fd_sha512_t sha512[1];
144 : fd_reedsol_t reedsol[1];
145 :
146 : /* The footprint for the objects follows the struct and is in the same
147 : order as the pointers, namely:
148 : curr_map map
149 : done_map map
150 : free_list deque
151 : complete_list deque
152 : bmtree_free_list deque
153 : Actual footprint for bmtrees */
154 : };
155 :
156 : typedef struct fd_fec_resolver fd_fec_resolver_t;
157 :
158 : FD_FN_PURE ulong
159 : fd_fec_resolver_footprint( ulong depth,
160 : ulong partial_depth,
161 : ulong complete_depth,
162 6 : ulong done_depth ) {
163 6 : if( FD_UNLIKELY( (depth==0UL) | (partial_depth==0UL) | (complete_depth==0UL) | (done_depth==0UL) ) ) return 0UL;
164 6 : if( FD_UNLIKELY( (depth>=(1UL<<62)-1UL) | (done_depth>=(1UL<<62)-1UL ) ) ) return 0UL; /* prevent overflow */
165 :
166 6 : int lg_curr_map_cnt = fd_ulong_find_msb( depth + 1UL ) + 2; /* See fd_tcache.h for the logic */
167 6 : int lg_done_map_cnt = fd_ulong_find_msb( done_depth + 1UL ) + 2; /* ... behind the + 2. */
168 :
169 6 : ulong footprint_per_bmtree = fd_bmtree_commit_footprint( FD_SHRED_MERKLE_LAYER_CNT );
170 :
171 6 : ulong layout = FD_LAYOUT_INIT;
172 6 : layout = FD_LAYOUT_APPEND( layout, FD_FEC_RESOLVER_ALIGN, sizeof(fd_fec_resolver_t) );
173 6 : layout = FD_LAYOUT_APPEND( layout, ctx_map_align(), ctx_map_footprint( lg_curr_map_cnt ) );
174 6 : layout = FD_LAYOUT_APPEND( layout, ctx_map_align(), ctx_map_footprint( lg_done_map_cnt ) );
175 6 : layout = FD_LAYOUT_APPEND( layout, freelist_align(), freelist_footprint( depth+partial_depth+1UL ) );
176 6 : layout = FD_LAYOUT_APPEND( layout, freelist_align(), freelist_footprint( complete_depth+1UL ) );
177 6 : layout = FD_LAYOUT_APPEND( layout, bmtrlist_align(), bmtrlist_footprint( depth+1UL ) );
178 6 : layout = FD_LAYOUT_APPEND( layout, FD_BMTREE_COMMIT_ALIGN, depth*footprint_per_bmtree );
179 :
180 6 : return FD_LAYOUT_FINI( layout, FD_FEC_RESOLVER_ALIGN );
181 6 : }
182 :
183 3 : FD_FN_CONST ulong fd_fec_resolver_align( void ) { return FD_FEC_RESOLVER_ALIGN; }
184 :
185 :
186 : void *
187 : fd_fec_resolver_new( void * shmem,
188 : fd_fec_resolver_sign_fn * signer,
189 : void * sign_ctx,
190 : ulong depth,
191 : ulong partial_depth,
192 : ulong complete_depth,
193 : ulong done_depth,
194 : fd_fec_set_t * sets,
195 : ushort expected_shred_version,
196 27 : ulong max_shred_idx ) {
197 27 : if( FD_UNLIKELY( (depth==0UL) | (partial_depth==0UL) | (complete_depth==0UL) | (done_depth==0UL) ) ) return NULL;
198 27 : if( FD_UNLIKELY( (depth>=(1UL<<62)-1UL) | (done_depth>=(1UL<<62)-1UL ) ) ) return NULL;
199 :
200 27 : int lg_curr_map_cnt = fd_ulong_find_msb( depth + 1UL ) + 2;
201 27 : int lg_done_map_cnt = fd_ulong_find_msb( done_depth + 1UL ) + 2;
202 :
203 27 : ulong footprint_per_bmtree = fd_bmtree_commit_footprint( FD_SHRED_MERKLE_LAYER_CNT );
204 :
205 27 : FD_SCRATCH_ALLOC_INIT( l, shmem );
206 27 : void * self = FD_SCRATCH_ALLOC_APPEND( l, FD_FEC_RESOLVER_ALIGN, sizeof(fd_fec_resolver_t) );
207 27 : void * curr = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_curr_map_cnt ) );
208 27 : void * done = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_done_map_cnt ) );
209 27 : void * free = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( depth+partial_depth+1UL ) );
210 27 : void * cmplst = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( complete_depth+1UL ) );
211 27 : void * bmfree = FD_SCRATCH_ALLOC_APPEND( l, bmtrlist_align(), bmtrlist_footprint( depth+1UL ) );
212 27 : void * bmfootprint = FD_SCRATCH_ALLOC_APPEND( l, FD_BMTREE_COMMIT_ALIGN, depth*footprint_per_bmtree );
213 27 : FD_SCRATCH_ALLOC_FINI( l, FD_FEC_RESOLVER_ALIGN );
214 :
215 27 : fd_fec_resolver_t * resolver = (fd_fec_resolver_t *)self;
216 :
217 27 : if( FD_UNLIKELY( !ctx_map_new ( curr, lg_curr_map_cnt )) ) { FD_LOG_WARNING(( "curr map_new failed" )); return NULL; }
218 27 : if( FD_UNLIKELY( !ctx_map_new ( done, lg_done_map_cnt )) ) { FD_LOG_WARNING(( "done map_new failed" )); return NULL; }
219 27 : if( FD_UNLIKELY( !freelist_new ( free, depth+partial_depth+1UL )) ) { FD_LOG_WARNING(( "freelist_new failed" )); return NULL; }
220 27 : if( FD_UNLIKELY( !freelist_new ( cmplst, complete_depth+1UL )) ) { FD_LOG_WARNING(( "freelist_new failed" )); return NULL; }
221 27 : if( FD_UNLIKELY( !bmtrlist_new ( bmfree, depth )) ) { FD_LOG_WARNING(( "bmtrlist_new failed" )); return NULL; }
222 27 : if( FD_UNLIKELY( !fd_sha512_new( (void *)resolver->sha512 )) ) { FD_LOG_WARNING(( "sha512_new failed" )); return NULL; }
223 :
224 : /* Initialize all the lists */
225 27 : fd_fec_set_t * * free_list = freelist_join( free );
226 27 : fd_fec_set_t * * complete_list = freelist_join( cmplst );
227 108 : for( ulong i=0UL; i<depth+partial_depth; i++ ) { freelist_push_tail( free_list, sets+i ); }
228 54 : for( ulong i=depth+partial_depth; i<depth+partial_depth+complete_depth; i++ ) { freelist_push_tail( complete_list, sets+i ); }
229 27 : freelist_leave( complete_list );
230 27 : freelist_leave( free_list );
231 :
232 27 : void * * bmtree_list = bmtrlist_join( bmfree );
233 81 : for( ulong i=0UL; i<depth; i++ ) { bmtrlist_push_tail( bmtree_list, (uchar *)bmfootprint + i*footprint_per_bmtree ); }
234 27 : bmtrlist_leave( bmtree_list );
235 :
236 27 : if( FD_UNLIKELY( expected_shred_version==(ushort)0 ) ) { FD_LOG_WARNING(( "expected shred version cannot be 0" )); return NULL; }
237 :
238 27 : resolver->curr_ll_sentinel->prev = resolver->curr_ll_sentinel;
239 27 : resolver->curr_ll_sentinel->next = resolver->curr_ll_sentinel;
240 27 : resolver->done_ll_sentinel->prev = resolver->done_ll_sentinel;
241 27 : resolver->done_ll_sentinel->next = resolver->done_ll_sentinel;
242 :
243 27 : resolver->depth = depth;
244 27 : resolver->partial_depth = partial_depth;
245 27 : resolver->complete_depth = complete_depth;
246 27 : resolver->done_depth = done_depth;
247 27 : resolver->expected_shred_version = expected_shred_version;
248 27 : resolver->signer = signer;
249 27 : resolver->sign_ctx = sign_ctx;
250 27 : resolver->max_shred_idx = max_shred_idx;
251 27 : return shmem;
252 27 : }
253 :
254 : fd_fec_resolver_t *
255 27 : fd_fec_resolver_join( void * shmem ) {
256 27 : fd_fec_resolver_t * resolver = (fd_fec_resolver_t *)shmem;
257 27 : ulong depth = resolver->depth;
258 27 : ulong partial_depth = resolver->partial_depth;
259 27 : ulong complete_depth = resolver->complete_depth;
260 27 : ulong done_depth = resolver->done_depth;
261 :
262 27 : int lg_curr_map_cnt = fd_ulong_find_msb( depth + 1UL ) + 2;
263 27 : int lg_done_map_cnt = fd_ulong_find_msb( done_depth + 1UL ) + 2;
264 :
265 27 : FD_SCRATCH_ALLOC_INIT( l, shmem );
266 27 : /* self */ FD_SCRATCH_ALLOC_APPEND( l, FD_FEC_RESOLVER_ALIGN, sizeof(fd_fec_resolver_t) );
267 27 : void * curr = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_curr_map_cnt ) );
268 27 : void * done = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_done_map_cnt ) );
269 27 : void * free = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( depth+partial_depth+1UL ) );
270 27 : void * cmplst = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( complete_depth+1UL ) );
271 27 : void * bmfree = FD_SCRATCH_ALLOC_APPEND( l, bmtrlist_align(), bmtrlist_footprint( depth+1UL ) );
272 27 : FD_SCRATCH_ALLOC_FINI( l, FD_FEC_RESOLVER_ALIGN );
273 :
274 27 : resolver->curr_map = ctx_map_join ( curr ); if( FD_UNLIKELY( !resolver->curr_map ) ) return NULL;
275 27 : resolver->done_map = ctx_map_join ( done ); if( FD_UNLIKELY( !resolver->done_map ) ) return NULL;
276 27 : resolver->free_list = freelist_join ( free ); if( FD_UNLIKELY( !resolver->free_list ) ) return NULL;
277 27 : resolver->complete_list = freelist_join ( cmplst ); if( FD_UNLIKELY( !resolver->complete_list ) ) return NULL;
278 27 : resolver->bmtree_free_list = bmtrlist_join ( bmfree ); if( FD_UNLIKELY( !resolver->bmtree_free_list ) ) return NULL;
279 27 : if( FD_UNLIKELY( !fd_sha512_join( resolver->sha512 ) ) ) return NULL;
280 :
281 27 : return resolver;
282 27 : }
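/* Usage sketch (hedged: everything here other than the fd_fec_resolver
   API defined in this file is a hypothetical placeholder). A caller
   sizes, creates, and joins a resolver roughly like:

     ulong depth=1024UL, partial=256UL, complete=256UL, done=2048UL;
     ulong fp   = fd_fec_resolver_footprint( depth, partial, complete, done );
     void * mem = my_aligned_alloc( fd_fec_resolver_align(), fp );  // hypothetical allocator
     // sets must point to depth+partial+complete fd_fec_set_t objects
     fd_fec_resolver_t * resolver = fd_fec_resolver_join(
         fd_fec_resolver_new( mem, NULL, NULL, depth, partial, complete, done,
                              sets, expected_shred_version, 32768UL /* max_shred_idx */ ) );

   expected_shred_version must be non-zero, and signer may be NULL when
   no retransmitter signatures are needed. */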
283 :
284 : /* Two helper functions for working with the linked lists that are
285 : threaded through maps. Use them as follows:
286 : ctx_ll_insert( <sentinel corresponding to map>, ctx_map_insert( <map>, key ) );
287 : ctx_map_remove( <map>, ctx_ll_remove( <node to remove> ) );
288 :
289 : */
290 : /* Removes r from the linked list */
291 : static set_ctx_t *
292 207 : ctx_ll_remove( set_ctx_t * r ) {
293 207 : r->next->prev = r->prev;
294 207 : r->prev->next = r->next;
295 207 : r->next = NULL;
296 207 : r->prev = NULL;
297 207 : return r;
298 207 : }
299 :
300 : /* Inserts c immediately after p. Returns c. */
301 : static set_ctx_t *
302 255 : ctx_ll_insert( set_ctx_t * p, set_ctx_t * c ) {
303 255 : c->next = p->next;
304 255 : c->prev = p;
305 255 : p->next->prev = c;
306 255 : p->next = c;
307 255 : return c;
308 255 : }
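/* Because each list is circular and threaded through a sentinel whose
   next and prev initially point at itself, neither helper needs to
   special-case an empty list. */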
309 :
310 :
311 : int fd_fec_resolver_add_shred( fd_fec_resolver_t * resolver,
312 : fd_shred_t const * shred,
313 : ulong shred_sz,
314 : uchar const * leader_pubkey,
315 : fd_fec_set_t const * * out_fec_set,
316 : fd_shred_t const * * out_shred,
317 3522 : fd_bmtree_node_t * out_merkle_root ) {
318 : /* Unpack variables */
319 3522 : ulong partial_depth = resolver->partial_depth;
320 3522 : ulong done_depth = resolver->done_depth;
321 :
322 3522 : fd_fec_set_t * * free_list = resolver->free_list;
323 3522 : fd_fec_set_t * * complete_list = resolver->complete_list;
324 3522 : void * * bmtree_free_list = resolver->bmtree_free_list;
325 3522 : set_ctx_t * curr_map = resolver->curr_map;
326 3522 : set_ctx_t * done_map = resolver->done_map;
327 :
328 3522 : fd_reedsol_t * reedsol = resolver->reedsol;
329 3522 : fd_sha512_t * sha512 = resolver->sha512;
330 :
331 3522 : set_ctx_t * curr_ll_sentinel = resolver->curr_ll_sentinel;
332 3522 : set_ctx_t * done_ll_sentinel = resolver->done_ll_sentinel;
333 :
334 : /* Invariants:
335 : * no key is in both the done map and the current map
336 : * each set pointer provided to the new function is in exactly one
337 : of curr_map, freelist, or complete_list
338 : * bmtree_free_list has exactly partial_depth fewer elements than
339 : freelist
340 : */
341 3522 : wrapped_sig_t * w_sig = (wrapped_sig_t *)shred->signature;
342 :
343 : /* Immediately reject any shred with a 0 signature. */
344 3522 : if( FD_UNLIKELY( ctx_map_key_inval( *w_sig ) ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
345 :
346 : /* Are we already done with this FEC set? */
347 3522 : int found = !!ctx_map_query( done_map, *w_sig, NULL );
348 :
349 3522 : if( found ) return FD_FEC_RESOLVER_SHRED_IGNORED; /* With no packet loss, we expect found==1 about 50% of the time */
350 :
351 3171 : set_ctx_t * ctx = ctx_map_query( curr_map, *w_sig, NULL );
352 :
353 3171 : fd_bmtree_node_t leaf[1];
354 3171 : uchar variant = shred->variant;
355 3171 : uchar shred_type = fd_shred_type( variant );
356 :
357 3171 : if( FD_UNLIKELY( (shred_type==FD_SHRED_TYPE_LEGACY_DATA) | (shred_type==FD_SHRED_TYPE_LEGACY_CODE) ) ) {
358 : /* Reject any legacy shreds */
359 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
360 0 : }
361 :
362 3171 : if( FD_UNLIKELY( shred->version!=resolver->expected_shred_version ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
363 3168 : if( FD_UNLIKELY( shred_sz<fd_shred_sz( shred ) ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
364 3168 : if( FD_UNLIKELY( shred->idx>=resolver->max_shred_idx ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
365 :
366 3156 : int is_data_shred = fd_shred_is_data( shred_type );
367 :
368 3156 : if( !is_data_shred ) { /* Roughly 50/50 branch */
369 1707 : if( FD_UNLIKELY( (shred->code.data_cnt>FD_REEDSOL_DATA_SHREDS_MAX) | (shred->code.code_cnt>FD_REEDSOL_PARITY_SHREDS_MAX) ) )
370 3 : return FD_FEC_RESOLVER_SHRED_REJECTED;
371 1704 : if( FD_UNLIKELY( (shred->code.data_cnt==0UL) | (shred->code.code_cnt==0UL) ) )
372 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
373 1704 : if( FD_UNLIKELY( (ulong)shred->fec_set_idx+(ulong)shred->code.data_cnt>=resolver->max_shred_idx ) )
374 3 : return FD_FEC_RESOLVER_SHRED_REJECTED;
375 1701 : if( FD_UNLIKELY( (ulong)shred->idx + (ulong)shred->code.code_cnt - (ulong)shred->code.idx>=resolver->max_shred_idx ) )
376 3 : return FD_FEC_RESOLVER_SHRED_REJECTED;
377 1701 : }
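/* Note: for a parity shred, shred->idx - shred->code.idx is the index
   of the first parity shred in its FEC set, so the last check above
   bounds the highest parity shred index in the set, just as the
   preceding check bounds the highest data shred index. */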
378 :
379 :
380 : /* For the purposes of the shred header, tree_depth means the number
381 : of nodes, counting the leaf but excluding the root. For bmtree,
382 : depth means the number of layers, which counts both. */
383 3147 : ulong tree_depth = fd_shred_merkle_cnt( variant ); /* In [0, 15] */
384 3147 : ulong reedsol_protected_sz = 1115UL + FD_SHRED_DATA_HEADER_SZ - FD_SHRED_SIGNATURE_SZ - FD_SHRED_MERKLE_NODE_SZ*tree_depth
385 3147 : - FD_SHRED_MERKLE_ROOT_SZ*fd_shred_is_chained ( shred_type )
386 3147 : - FD_SHRED_SIGNATURE_SZ *fd_shred_is_resigned( shred_type ); /* In [743, 1139] conservatively */
387 3147 : ulong data_merkle_protected_sz = reedsol_protected_sz + FD_SHRED_MERKLE_ROOT_SZ*fd_shred_is_chained ( shred_type );
388 3147 : ulong parity_merkle_protected_sz = reedsol_protected_sz + FD_SHRED_MERKLE_ROOT_SZ*fd_shred_is_chained ( shred_type )+FD_SHRED_CODE_HEADER_SZ-FD_ED25519_SIG_SZ;
389 3147 : ulong merkle_protected_sz = fd_ulong_if( is_data_shred, data_merkle_protected_sz, parity_merkle_protected_sz );
390 :
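/* The Merkle leaf for a shred covers everything after the leader
   signature except the Merkle proof nodes and (for resigned shreds)
   the trailing retransmitter signature; that is the region whose size
   the arithmetic above computes. */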
391 3147 : fd_bmtree_hash_leaf( leaf, (uchar const *)shred + sizeof(fd_ed25519_sig_t), merkle_protected_sz, FD_BMTREE_LONG_PREFIX_SZ );
392 :
393 : /* in_type_idx is between [0, code.data_cnt) or [0, code.code_cnt),
394 : where data_cnt <= FD_REEDSOL_DATA_SHREDS_MAX and code_cnt <=
395 : FD_REEDSOL_PARITY_SHREDS_MAX.
396 : On the other hand, shred_idx goes from [0, code.data_cnt +
397 : code.code_cnt), with all the data shreds having
398 : shred_idx < code.data_cnt and all the parity shreds having
399 : shred_idx >= code.data_cnt. */
400 3147 : ulong in_type_idx = fd_ulong_if( is_data_shred, shred->idx - shred->fec_set_idx, shred->code.idx );
401 3147 : ulong shred_idx = fd_ulong_if( is_data_shred, in_type_idx, in_type_idx + shred->code.data_cnt );
402 :
403 3147 : if( FD_UNLIKELY( in_type_idx >= fd_ulong_if( is_data_shred, FD_REEDSOL_DATA_SHREDS_MAX, FD_REEDSOL_PARITY_SHREDS_MAX ) ) )
404 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
405 : /* This, combined with the check on shred->code.data_cnt implies that
406 : shred_idx is in [0, DATA_SHREDS_MAX+PARITY_SHREDS_MAX). */
407 :
408 3147 : if( FD_UNLIKELY( tree_depth>FD_SHRED_MERKLE_LAYER_CNT-1UL ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
409 3147 : if( FD_UNLIKELY( fd_bmtree_depth( shred_idx+1UL ) > tree_depth+1UL ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
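/* fd_bmtree_depth( shred_idx+1UL ) is the number of layers required
   for a tree with shred_idx+1 leaves (counting both the leaf and root
   layers), so this rejects any shred whose index cannot fit in a tree
   of the depth implied by its variant. */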
410 :
411 3147 : if( FD_UNLIKELY( !ctx ) ) {
412 : /* This is the first shred in the FEC set */
413 132 : if( FD_UNLIKELY( freelist_cnt( free_list )<=partial_depth ) ) {
414 : /* Packet loss is really high and we have a lot of in-progress FEC
415 : sets that we haven't been able to finish. Take the resources
416 : (FEC set and bmtree) from the oldest, and send the oldest FEC
417 : set to the back of the free list. */
418 33 : set_ctx_t * victim_ctx = resolver->curr_ll_sentinel->prev;
419 :
420 : /* Add this one that we're sacrificing to the done map to
421 : prevent the possibility of thrashing. */
422 33 : ctx_ll_insert( done_ll_sentinel, ctx_map_insert( done_map, victim_ctx->sig ) );
423 33 : if( FD_UNLIKELY( ctx_map_key_cnt( done_map ) > done_depth ) ) ctx_map_remove( done_map, ctx_ll_remove( done_ll_sentinel->prev ) );
424 :
425 33 : freelist_push_tail( free_list, victim_ctx->set );
426 33 : bmtrlist_push_tail( bmtree_free_list, victim_ctx->tree );
427 :
428 : /* Remove from linked list and then from the map */
429 33 : ctx_map_remove( curr_map, ctx_ll_remove( victim_ctx ) );
430 :
431 33 : FD_MCNT_INC( SHRED, FEC_SET_SPILLED, 1UL );
432 33 : }
433 : /* Now we know |free_list|>partial_depth and |bmtree_free_list|>1 */
434 :
435 132 : fd_fec_set_t * set_to_use = freelist_pop_head( free_list );
436 132 : void * bmtree_mem = bmtrlist_pop_head( bmtree_free_list );
437 :
438 : /* Now we need to derive the root of the Merkle tree and verify the
439 : signature to prevent a DOS attack just by sending lots of invalid
440 : shreds. */
441 132 : fd_bmtree_commit_t * tree;
442 132 : tree = fd_bmtree_commit_init( bmtree_mem, FD_SHRED_MERKLE_NODE_SZ, FD_BMTREE_LONG_PREFIX_SZ, FD_SHRED_MERKLE_LAYER_CNT );
443 :
444 132 : fd_bmtree_node_t _root[1];
445 132 : fd_shred_merkle_t const * proof = fd_shred_merkle_nodes( shred );
446 132 : int rv = fd_bmtree_commitp_insert_with_proof( tree, shred_idx, leaf, (uchar const *)proof, tree_depth, _root );
447 132 : if( FD_UNLIKELY( !rv ) ) {
448 0 : freelist_push_head( free_list, set_to_use );
449 0 : bmtrlist_push_head( bmtree_free_list, bmtree_mem );
450 0 : FD_MCNT_INC( SHRED, SHRED_REJECTED_INITIAL, 1UL );
451 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
452 0 : }
453 :
454 132 : if( FD_UNLIKELY( FD_ED25519_SUCCESS != fd_ed25519_verify( _root->hash, 32UL, shred->signature, leader_pubkey, sha512 ) ) ) {
455 0 : freelist_push_head( free_list, set_to_use );
456 0 : bmtrlist_push_head( bmtree_free_list, bmtree_mem );
457 0 : FD_MCNT_INC( SHRED, SHRED_REJECTED_INITIAL, 1UL );
458 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
459 0 : }
460 :
461 : /* Copy the merkle root into the output arg. */
462 132 : if( FD_LIKELY( out_merkle_root ) ) memcpy( out_merkle_root, _root, sizeof(fd_bmtree_node_t) );
463 :
464 : /* This seems like a legitimate FEC set, so we can reserve some
465 : resources for it. */
466 132 : ctx = ctx_ll_insert( curr_ll_sentinel, ctx_map_insert( curr_map, *w_sig ) );
467 132 : ctx->set = set_to_use;
468 132 : ctx->tree = tree;
469 132 : ctx->total_rx_shred_cnt = 0UL;
470 132 : ctx->data_variant = fd_uchar_if( is_data_shred, variant, fd_shred_variant( fd_shred_swap_type( shred_type ), (uchar)tree_depth ) );
471 132 : ctx->parity_variant = fd_uchar_if( !is_data_shred, variant, fd_shred_variant( fd_shred_swap_type( shred_type ), (uchar)tree_depth ) );
472 :
473 132 : if( FD_UNLIKELY( fd_shred_is_resigned( shred_type ) & !!(resolver->signer) ) ) {
474 3 : resolver->signer( resolver->sign_ctx, ctx->retransmitter_sig.u, _root->hash );
475 129 : } else {
476 129 : fd_memset( ctx->retransmitter_sig.u, 0, 64UL );
477 129 : }
478 :
479 : /* Reset the FEC set */
480 132 : ctx->set->data_shred_cnt = SHRED_CNT_NOT_SET;
481 132 : ctx->set->parity_shred_cnt = SHRED_CNT_NOT_SET;
482 132 : d_rcvd_join( d_rcvd_new( d_rcvd_delete( d_rcvd_leave( ctx->set->data_shred_rcvd ) ) ) );
483 132 : p_rcvd_join( p_rcvd_new( p_rcvd_delete( p_rcvd_leave( ctx->set->parity_shred_rcvd ) ) ) );
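/* The leave/delete/new/join round trips above reset the data and
   parity received-shred bit sets to empty, in place. */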
484 :
485 3015 : } else {
486 : /* This is not the first shred in the set */
487 : /* First, check to make sure this is not a duplicate */
488 3015 : int shred_dup = fd_int_if( is_data_shred, d_rcvd_test( ctx->set->data_shred_rcvd, in_type_idx ),
489 3015 : p_rcvd_test( ctx->set->parity_shred_rcvd, in_type_idx ) );
490 :
491 3015 : if( FD_UNLIKELY( shred_dup ) ) return FD_FEC_RESOLVER_SHRED_IGNORED;
492 :
493 : /* Ensure that all the shreds in the FEC set have consistent
494 : variants. They all must have the same tree_depth and the same
495 : chained/not chained, resigned/not resigned bits. */
496 2802 : if( FD_UNLIKELY( variant!=fd_uchar_if( is_data_shred, ctx->data_variant, ctx->parity_variant ) ) ) {
497 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
498 0 : }
499 :
500 2802 : fd_shred_merkle_t const * proof = fd_shred_merkle_nodes( shred );
501 2802 : int rv = fd_bmtree_commitp_insert_with_proof( ctx->tree, shred_idx, leaf, (uchar const *)proof, tree_depth, NULL );
502 2802 : if( !rv ) return FD_FEC_RESOLVER_SHRED_REJECTED;
503 2802 : }
504 :
505 2928 : if( FD_UNLIKELY( (ctx->set->data_shred_cnt==SHRED_CNT_NOT_SET) & (!is_data_shred) ) ) {
506 111 : ctx->set->data_shred_cnt = shred->code.data_cnt;
507 111 : ctx->set->parity_shred_cnt = shred->code.code_cnt;
508 111 : ctx->parity_idx0 = shred->idx - in_type_idx;
509 111 : ctx->fec_set_idx = shred->fec_set_idx;
510 111 : }
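/* Only parity (coding) shreds carry data_cnt and code_cnt, so the
   counts stay at SHRED_CNT_NOT_SET until the first parity shred of the
   set arrives. Until then the completion check below cannot trigger,
   since SHRED_CNT_NOT_SET is far larger than any possible shred count. */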
511 :
512 : /* At this point, the shred has passed Merkle validation and is new.
513 : We also know that ctx is a pointer to the slot for this signature in the
514 : current map. */
515 :
516 : /* Copy the shred to memory the FEC resolver owns */
517 2928 : uchar * dst = fd_ptr_if( is_data_shred, ctx->set->data_shreds[ in_type_idx ], ctx->set->parity_shreds[ in_type_idx ] );
518 2928 : fd_memcpy( dst, shred, fd_shred_sz( shred ) );
519 :
520 : /* If the shred needs a retransmitter signature, set it */
521 2928 : if( FD_UNLIKELY( fd_shred_is_resigned( shred_type ) ) ) {
522 99 : memcpy( dst + fd_shred_retransmitter_sig_off( (fd_shred_t *)dst ), ctx->retransmitter_sig.u, 64UL );
523 99 : }
524 :
525 2928 : d_rcvd_insert_if( ctx->set->data_shred_rcvd, is_data_shred, in_type_idx );
526 2928 : p_rcvd_insert_if( ctx->set->parity_shred_rcvd, !is_data_shred, in_type_idx );
527 2928 : ctx->total_rx_shred_cnt++;
528 :
529 2928 : *out_shred = (fd_shred_t const *)dst;
530 :
531 : /* Do we have enough to begin reconstruction? */
532 2928 : if( FD_LIKELY( ctx->total_rx_shred_cnt < ctx->set->data_shred_cnt ) ) return FD_FEC_RESOLVER_SHRED_OKAY;
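/* Reed-Solomon can recover the full FEC set from any data_shred_cnt of
   the data_shred_cnt+parity_shred_cnt shreds, in any mix of data and
   parity, which is why reconstruction starts as soon as the total
   received count reaches data_shred_cnt. */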
533 :
534 : /* At this point, the FEC set is either valid or permanently invalid,
535 : so we can consider it done either way. First though, since ctx_map_remove
536 : can change what's at *ctx, unpack the values we need before we do that. */
537 90 : fd_fec_set_t * set = ctx->set;
538 90 : fd_bmtree_commit_t * tree = ctx->tree;
539 90 : ulong fec_set_idx = ctx->fec_set_idx;
540 90 : ulong parity_idx0 = ctx->parity_idx0;
541 90 : wrapped_sig_t retran_sig = ctx->retransmitter_sig;
542 90 : uchar parity_variant = ctx->parity_variant;
543 90 : uchar data_variant = ctx->data_variant;
544 :
545 90 : ctx_ll_insert( done_ll_sentinel, ctx_map_insert( done_map, ctx->sig ) );
546 90 : if( FD_UNLIKELY( ctx_map_key_cnt( done_map ) > done_depth ) ) ctx_map_remove( done_map, ctx_ll_remove( done_ll_sentinel->prev ) );
547 :
548 90 : ctx_map_remove( curr_map, ctx_ll_remove( ctx ) );
549 :
550 90 : reedsol = fd_reedsol_recover_init( (void*)reedsol, reedsol_protected_sz );
551 2832 : for( ulong i=0UL; i<set->data_shred_cnt; i++ ) {
552 2742 : uchar * rs_payload = set->data_shreds[ i ] + sizeof(fd_ed25519_sig_t);
553 2742 : if( d_rcvd_test( set->data_shred_rcvd, i ) ) fd_reedsol_recover_add_rcvd_shred ( reedsol, 1, rs_payload );
554 1419 : else fd_reedsol_recover_add_erased_shred( reedsol, 1, rs_payload );
555 2742 : }
556 2994 : for( ulong i=0UL; i<set->parity_shred_cnt; i++ ) {
557 2904 : uchar * rs_payload = set->parity_shreds[ i ] + FD_SHRED_CODE_HEADER_SZ;
558 2904 : if( p_rcvd_test( set->parity_shred_rcvd, i ) ) fd_reedsol_recover_add_rcvd_shred ( reedsol, 0, rs_payload );
559 1437 : else fd_reedsol_recover_add_erased_shred( reedsol, 0, rs_payload );
560 2904 : }
561 :
562 90 : if( FD_UNLIKELY( FD_REEDSOL_SUCCESS != fd_reedsol_recover_fini( reedsol ) ) ) {
563 : /* A few lines up, we already checked to make sure it wasn't the
564 : insufficient case, so it must be the inconsistent case. That
565 : means the leader signed a shred with an invalid Reed-Solomon FEC
566 : set. This shouldn't happen in practice, but we need to handle it
567 : for the malicious leader case. This should probably be a
568 : slash-able offense. */
569 0 : freelist_push_tail( free_list, set );
570 0 : bmtrlist_push_tail( bmtree_free_list, tree );
571 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
572 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
573 0 : }
574 :
575 90 : uchar const * chained_root = fd_ptr_if( fd_shred_is_chained( shred_type ), (uchar *)shred+fd_shred_chain_off( variant ), NULL );
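/* For chained variants, every shred in the FEC set carries the same
   chained Merkle root (the root of the previous FEC set), so we can
   take it from the shred that completed this set and stamp it into the
   shreds we are about to recover. */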
576 :
577 : /* Iterate over recovered shreds, add them to the Merkle tree,
578 : populate headers and signatures. */
579 2832 : for( ulong i=0UL; i<set->data_shred_cnt; i++ ) {
580 2742 : if( !d_rcvd_test( set->data_shred_rcvd, i ) ) {
581 1419 : fd_memcpy( set->data_shreds[i], shred, sizeof(fd_ed25519_sig_t) );
582 1419 : if( FD_LIKELY( fd_shred_is_chained( shred_type ) ) ) {
583 0 : fd_memcpy( set->data_shreds[i]+fd_shred_chain_off( data_variant ), chained_root, FD_SHRED_MERKLE_ROOT_SZ );
584 0 : }
585 1419 : fd_bmtree_hash_leaf( leaf, set->data_shreds[i]+sizeof(fd_ed25519_sig_t), data_merkle_protected_sz, FD_BMTREE_LONG_PREFIX_SZ );
586 1419 : if( FD_UNLIKELY( !fd_bmtree_commitp_insert_with_proof( tree, i, leaf, NULL, 0, NULL ) ) ) {
587 0 : freelist_push_tail( free_list, set );
588 0 : bmtrlist_push_tail( bmtree_free_list, tree );
589 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
590 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
591 0 : }
592 :
593 1419 : }
594 2742 : }
595 :
596 2994 : for( ulong i=0UL; i<set->parity_shred_cnt; i++ ) {
597 2904 : if( !p_rcvd_test( set->parity_shred_rcvd, i ) ) {
598 1437 : fd_shred_t * p_shred = (fd_shred_t *)set->parity_shreds[i]; /* We can't parse because we haven't populated the header */
599 1437 : fd_memcpy( p_shred->signature, shred->signature, sizeof(fd_ed25519_sig_t) );
600 1437 : p_shred->variant = parity_variant;
601 1437 : p_shred->slot = shred->slot;
602 1437 : p_shred->idx = (uint)(i + parity_idx0);
603 1437 : p_shred->version = shred->version;
604 1437 : p_shred->fec_set_idx = (uint)fec_set_idx;
605 1437 : p_shred->code.data_cnt = (ushort)set->data_shred_cnt;
606 1437 : p_shred->code.code_cnt = (ushort)set->parity_shred_cnt;
607 1437 : p_shred->code.idx = (ushort)i;
608 :
609 1437 : if( FD_LIKELY( fd_shred_is_chained( shred_type ) ) ) {
610 345 : fd_memcpy( set->parity_shreds[i]+fd_shred_chain_off( parity_variant ), chained_root, FD_SHRED_MERKLE_ROOT_SZ );
611 345 : }
612 :
613 1437 : fd_bmtree_hash_leaf( leaf, set->parity_shreds[i]+ sizeof(fd_ed25519_sig_t), parity_merkle_protected_sz, FD_BMTREE_LONG_PREFIX_SZ );
614 1437 : if( FD_UNLIKELY( !fd_bmtree_commitp_insert_with_proof( tree, set->data_shred_cnt + i, leaf, NULL, 0, NULL ) ) ) {
615 0 : freelist_push_tail( free_list, set );
616 0 : bmtrlist_push_tail( bmtree_free_list, tree );
617 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
618 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
619 0 : }
620 1437 : }
621 2904 : }
622 :
623 : /* Check that the whole Merkle tree is consistent. */
624 90 : if( FD_UNLIKELY( !fd_bmtree_commitp_fini( tree, set->data_shred_cnt + set->parity_shred_cnt ) ) ) {
625 0 : freelist_push_tail( free_list, set );
626 0 : bmtrlist_push_tail( bmtree_free_list, tree );
627 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
628 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
629 0 : }
630 :
631 : /* Check that all the fields that are supposed to be consistent across
632 : an FEC set actually are. */
633 90 : fd_shred_t const * base_data_shred = fd_shred_parse( set->data_shreds [ 0 ], FD_SHRED_MIN_SZ );
634 90 : fd_shred_t const * base_parity_shred = fd_shred_parse( set->parity_shreds[ 0 ], FD_SHRED_MAX_SZ );
635 90 : int reject = (!base_data_shred) | (!base_parity_shred);
636 :
637 2742 : for( ulong i=1UL; (!reject) & (i<set->data_shred_cnt); i++ ) {
638 : /* Technically, we only need to re-parse the ones we recovered with
639 : Reedsol, but parsing is pretty cheap, and the rest of the
640 : validation has to be done on all of them anyway. */
641 2652 : fd_shred_t const * parsed = fd_shred_parse( set->data_shreds[ i ], FD_SHRED_MIN_SZ );
642 2652 : if( FD_UNLIKELY( !parsed ) ) { reject = 1; break; }
643 2652 : reject |= parsed->variant != base_data_shred->variant;
644 2652 : reject |= parsed->slot != base_data_shred->slot;
645 2652 : reject |= parsed->version != base_data_shred->version;
646 2652 : reject |= parsed->fec_set_idx != base_data_shred->fec_set_idx;
647 2652 : reject |= parsed->data.parent_off != base_data_shred->data.parent_off;
648 :
649 2652 : reject |= fd_shred_is_chained( fd_shred_type( parsed->variant ) ) &&
650 2652 : !fd_memeq( (uchar *)parsed +fd_shred_chain_off( parsed->variant ),
651 183 : (uchar *)base_data_shred+fd_shred_chain_off( base_data_shred->variant ), FD_SHRED_MERKLE_ROOT_SZ );
652 2652 : }
653 2994 : for( ulong i=0UL; (!reject) & (i<set->parity_shred_cnt); i++ ) {
654 2904 : fd_shred_t const * parsed = fd_shred_parse( set->parity_shreds[ i ], FD_SHRED_MAX_SZ );
655 2904 : if( FD_UNLIKELY( !parsed ) ) { reject = 1; break; }
656 2904 : reject |= fd_shred_type( parsed->variant ) != fd_shred_swap_type( fd_shred_type( base_data_shred->variant ) );
657 2904 : reject |= fd_shred_merkle_cnt( parsed->variant ) != fd_shred_merkle_cnt( base_data_shred->variant );
658 2904 : reject |= parsed->slot != base_data_shred->slot;
659 2904 : reject |= parsed->version != base_data_shred->version;
660 2904 : reject |= parsed->fec_set_idx != base_data_shred->fec_set_idx;
661 2904 : reject |= parsed->code.data_cnt != base_parity_shred->code.data_cnt;
662 2904 : reject |= parsed->code.code_cnt != base_parity_shred->code.code_cnt;
663 2904 : reject |= parsed->code.idx != (ushort)i;
664 :
665 2904 : reject |= fd_shred_is_chained( fd_shred_type( parsed->variant ) ) &&
666 2904 : !fd_memeq( (uchar *)parsed +fd_shred_chain_off( parsed->variant ),
667 360 : (uchar *)base_data_shred+fd_shred_chain_off( base_data_shred->variant ), FD_SHRED_MERKLE_ROOT_SZ );
668 2904 : }
669 90 : if( FD_UNLIKELY( reject ) ) {
670 0 : freelist_push_tail( free_list, set );
671 0 : bmtrlist_push_tail( bmtree_free_list, tree );
672 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
673 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
674 0 : }
675 :
676 : /* Populate missing Merkle proofs */
677 2832 : for( ulong i=0UL; i<set->data_shred_cnt; i++ ) if( !d_rcvd_test( set->data_shred_rcvd, i ) )
678 1419 : fd_bmtree_get_proof( tree, set->data_shreds[i] + fd_shred_merkle_off( (fd_shred_t *)set->data_shreds[i] ), i );
679 :
680 2994 : for( ulong i=0UL; i<set->parity_shred_cnt; i++ ) if( !p_rcvd_test( set->parity_shred_rcvd, i ) )
681 1437 : fd_bmtree_get_proof( tree, set->parity_shreds[i] + fd_shred_merkle_off( (fd_shred_t *)set->parity_shreds[i] ), set->data_shred_cnt+i );
682 :
683 : /* Set the retransmitter signature for shreds that need one */
684 90 : if( FD_UNLIKELY( fd_shred_is_resigned( shred_type ) ) ) {
685 99 : for( ulong i=0UL; i<set->data_shred_cnt; i++ ) if( !d_rcvd_test( set->data_shred_rcvd, i ) )
686 0 : memcpy( set->data_shreds[i] + fd_shred_retransmitter_sig_off( (fd_shred_t *)set->data_shreds[i] ), retran_sig.u, 64UL );
687 :
688 99 : for( ulong i=0UL; i<set->parity_shred_cnt; i++ ) if( !p_rcvd_test( set->parity_shred_rcvd, i ) )
689 93 : memcpy( set->parity_shreds[i] + fd_shred_retransmitter_sig_off( (fd_shred_t *)set->parity_shreds[i] ), retran_sig.u, 64UL );
690 3 : }
691 :
692 : /* Finally... A valid FEC set. Forward it along. */
693 90 : bmtrlist_push_tail( bmtree_free_list, tree );
694 90 : freelist_push_tail( complete_list, set );
695 90 : freelist_push_tail( free_list, freelist_pop_head( complete_list ) );
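/* Rotation: the freshly completed set goes onto the tail of
   complete_list and the oldest completed set is recycled onto
   free_list. This keeps complete_list at exactly complete_depth
   elements, so the consumer has at least complete_depth further FEC
   set completions before the returned set's memory can be reused. */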
696 :
697 90 : *out_fec_set = set;
698 :
699 90 : return FD_FEC_RESOLVER_SHRED_COMPLETES;
700 90 : }
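/* Hedged usage sketch (the forward_* helpers are hypothetical): a
   caller typically feeds every received shred to the resolver and acts
   on the return code:

     fd_fec_set_t const * out_set   = NULL;
     fd_shred_t   const * out_shred = NULL;
     fd_bmtree_node_t     merkle_root[1];
     int rv = fd_fec_resolver_add_shred( resolver, shred, shred_sz, leader_pubkey,
                                         &out_set, &out_shred, merkle_root );
     if(      rv==FD_FEC_RESOLVER_SHRED_OKAY      ) forward_shred( out_shred );
     else if( rv==FD_FEC_RESOLVER_SHRED_COMPLETES ) forward_fec_set( out_set );
     // IGNORED and REJECTED shreds are simply dropped

   Note that out_merkle_root is only written when the shred is the
   first accepted shred of its FEC set. */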
701 :
702 21 : void * fd_fec_resolver_leave( fd_fec_resolver_t * resolver ) {
703 21 : fd_sha512_leave( resolver->sha512 );
704 21 : bmtrlist_leave ( resolver->bmtree_free_list );
705 21 : freelist_leave ( resolver->complete_list );
706 21 : freelist_leave ( resolver->free_list );
707 21 : ctx_map_leave ( resolver->done_map );
708 21 : ctx_map_leave ( resolver->curr_map );
709 :
710 21 : return (void *)resolver;
711 21 : }
712 :
713 21 : void * fd_fec_resolver_delete( void * shmem ) {
714 21 : fd_fec_resolver_t * resolver = (fd_fec_resolver_t *)shmem;
715 21 : ulong depth = resolver->depth;
716 21 : ulong partial_depth = resolver->partial_depth;
717 21 : ulong complete_depth = resolver->complete_depth;
718 21 : ulong done_depth = resolver->done_depth;
719 :
720 21 : int lg_curr_map_cnt = fd_ulong_find_msb( depth + 1UL ) + 2;
721 21 : int lg_done_map_cnt = fd_ulong_find_msb( done_depth + 1UL ) + 2;
722 :
723 21 : FD_SCRATCH_ALLOC_INIT( l, shmem );
724 21 : /* self */ FD_SCRATCH_ALLOC_APPEND( l, FD_FEC_RESOLVER_ALIGN, sizeof(fd_fec_resolver_t) );
725 21 : void * curr = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_curr_map_cnt ) );
726 21 : void * done = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_done_map_cnt ) );
727 21 : void * free = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( depth+partial_depth+1UL ) );
728 21 : void * cmplst = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( complete_depth+1UL ) );
729 21 : void * bmfree = FD_SCRATCH_ALLOC_APPEND( l, bmtrlist_align(), bmtrlist_footprint( depth+1UL ) );
730 21 : FD_SCRATCH_ALLOC_FINI( l, FD_FEC_RESOLVER_ALIGN );
731 :
732 21 : fd_sha512_delete( resolver->sha512 );
733 21 : bmtrlist_delete ( bmfree );
734 21 : freelist_delete ( cmplst );
735 21 : freelist_delete ( free );
736 21 : ctx_map_delete ( done );
737 21 : ctx_map_delete ( curr );
738 :
739 21 : return shmem;
740 21 : }