Line data Source code
1 : #include "../../ballet/shred/fd_shred.h"
2 : #include "../../ballet/shred/fd_fec_set.h"
3 : #include "../../ballet/sha512/fd_sha512.h"
4 : #include "../../ballet/reedsol/fd_reedsol.h"
5 : #include "../metrics/fd_metrics.h"
6 : #include "fd_fec_resolver.h"
7 :
8 :
9 : typedef union {
10 : fd_ed25519_sig_t u;
11 : ulong l;
12 : } wrapped_sig_t;
13 :
14 : struct set_ctx;
15 : typedef struct set_ctx set_ctx_t;
16 :
17 : struct __attribute__((aligned(32UL))) set_ctx {
18 : wrapped_sig_t sig;
19 : fd_fec_set_t * set;
20 : fd_bmtree_commit_t * tree;
21 : fd_bmtree_node_t root;
22 : set_ctx_t * prev;
23 : set_ctx_t * next;
24 : ulong total_rx_shred_cnt;
25 : ulong fec_set_idx;
26 : /* The shred index of the first parity shred in this FEC set */
27 : ulong parity_idx0;
28 : uchar data_variant;
29 : uchar parity_variant;
30 : /* If this FEC set has resigned shreds, this is our signature of the
31 : root of the Merkle tree */
32 : wrapped_sig_t retransmitter_sig;
33 : };
34 : typedef struct set_ctx set_ctx_t;
35 :
36 : #define DEQUE_NAME freelist
37 594 : #define DEQUE_T fd_fec_set_t *
38 : #include "../../util/tmpl/fd_deque_dynamic.c"
39 :
40 : #define DEQUE_NAME bmtrlist
41 321 : #define DEQUE_T void *
42 : #include "../../util/tmpl/fd_deque_dynamic.c"
43 :
44 : static const wrapped_sig_t null_signature = {{0}};
45 :
46 28008 : #define MAP_KEY sig
47 19812 : #define MAP_KEY_T wrapped_sig_t
48 8196 : #define MAP_KEY_NULL null_signature
49 48285 : #define MAP_KEY_EQUAL(k0,k1) (!memcmp( (k0).u, (k1).u, FD_ED25519_SIG_SZ ))
50 29442 : #define MAP_KEY_INVAL(k) MAP_KEY_EQUAL( k, MAP_KEY_NULL )
51 : #define MAP_KEY_EQUAL_IS_SLOW 1
52 19425 : #define MAP_KEY_HASH(key) ((MAP_HASH_T)fd_ulong_hash( key.l ))
53 : #define MAP_MEMOIZE 0
54 : #define MAP_NAME ctx_map
55 20256 : #define MAP_T set_ctx_t
56 : /* The prev and next fields of set_ctx_t thread a linked list through
57 : the map. The map can move elements around during a deletion though,
58 : so we need to update the links when it does. Thankfully it gives a
59 : perfect hook for doing so. */
60 6 : #define MAP_MOVE(d,s) do { \
61 6 : set_ctx_t * _d = &(d); \
62 6 : set_ctx_t * _s = &(s); \
63 6 : _s->prev->next = _d; \
64 6 : _s->next->prev = _d; \
65 6 : *_d = *_s; \
66 6 : } while( 0 )
67 : #include "../../util/tmpl/fd_map_dynamic.c"
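/* Illustrative sketch (example_sig_tag is a hypothetical helper, kept
   out of the build): the map keys entries by the full 64-byte Ed25519
   signature, but MAP_KEY_HASH above only mixes the first 8 bytes (the
   .l view of wrapped_sig_t), which are effectively random for a valid
   signature and so make a cheap 64-bit tag. */
#if 0
static inline ulong
example_sig_tag( fd_shred_t const * shred ) {
  wrapped_sig_t w;
  memcpy( w.u, shred->signature, FD_ED25519_SIG_SZ );
  return fd_ulong_hash( w.l ); /* the same fd_ulong_hash( key.l ) that MAP_KEY_HASH uses */
}
#endif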
68 :
69 :
70 : struct __attribute__((aligned(FD_FEC_RESOLVER_ALIGN))) fd_fec_resolver {
71 : /* depth stores the number of FEC sets this resolver can track
72 : simultaneously. done_depth stores the depth of the done tcache,
73 : i.e. the number of done FEC set keys that this resolver remembers.
74 : partial_depth stores the minimum size of the free FEC set list.
75 : complete_depth stores the size of the completed FEC set list. */
76 : ulong depth;
77 : ulong partial_depth;
78 : ulong complete_depth;
79 : ulong done_depth;
80 :
81 : /* expected_shred_version: discard all shreds with a shred version
82 : other than the specified value */
83 : ushort expected_shred_version;
84 :
85 : /* curr_map: A map (using fd_map_dynamic) from tags of signatures to
86 : the context object with its relevant data. This map contains at
87 : most `depth` elements at any time, but to improve query performance,
88 : we size it at 2*depth. */
89 : set_ctx_t * curr_map;
90 :
91 : /* curr_ll_sentinel: The elements of curr_map also form what is
92 : essentially a circular doubly linked list using the next and prev
93 : fields. To simplify the logic, we use a sentinel node that's
94 : stored here instead of in the map. Thus, the head (newest) and the
95 : tail (oldest) of the linked list are the next and prev pointers of
96 : this context (respectively). The other fields aren't used. */
97 : set_ctx_t curr_ll_sentinel[1];
98 :
99 : /* done_map: stores signatures of FEC sets that have recently been
100 : completed. This is like a tcache, but with a non-ulong key and
101 : using a linked list instead of a ring buffer. Any new packets
102 : matching tags in this set can be ignored. Since the data structure
103 : we need (map with linked list) is very similar to the one for curr_map, so we
104 : just use the same fd_map_dynamic instantiation. Only fields sig,
105 : prev, and next are used. */
106 : set_ctx_t * done_map;
107 :
108 : /* done_ll_sentinel: Analogous to curr_ll_sentinel, but for the done
109 : map instead of the current map. */
110 : set_ctx_t done_ll_sentinel[1];
111 :
112 : /* free_list and complete_list are deques (using fd_deque_dynamic)
113 : that store FEC sets that are not in contexts in curr_map. Similarly,
114 : bmtree_free_list stores footprints for the bmtree objects that are
115 : not in contexts in curr_map. These lists point to objects of
116 : indeterminate state and need to be cleared/reset when popped off.
117 : Invariant: at every entry and exit to fd_fec_resolver_add_shred:
118 : - free_list has between partial_depth and partial_depth+depth
119 : elements.
120 : - complete_list has complete_depth elements
121 : - bmtree_free_list has between 0 and depth elements
122 : (all these counts are inclusive). */
123 : fd_fec_set_t * * free_list;
124 : fd_fec_set_t * * complete_list;
125 : void * * bmtree_free_list;
126 :
127 : /* signer is used to sign shreds that require a retransmitter
128 : signature. sign_ctx is provided as the first argument to the
129 : function. */
130 : fd_fec_resolver_sign_fn * signer;
131 : void * sign_ctx;
132 :
133 : /* max_shred_idx is the exclusive upper bound for shred indices. We
134 : need to reject any shred with an index >= max_shred_idx, but we
135 : also want to reject anything that is part of an FEC set where the
136 : highest index of a shred in the FEC set will be >= max_shred_idx.
137 : */
138 : ulong max_shred_idx;
139 :
140 : /* sha512 and reedsol are used for calculations while adding a shred.
141 : Their state outside a call to add_shred is indeterminate. */
142 : fd_sha512_t sha512[1];
143 : fd_reedsol_t reedsol[1];
144 :
145 : /* The footprint for the objects follows the struct and is in the same
146 : order as the pointers, namely:
147 : curr_map map
148 : done_map map
149 : free_list deque
150 : complete_list deque
151 : bmtree_free_list deque
152 : Actual footprint for bmtrees */
153 : };
154 :
155 : typedef struct fd_fec_resolver fd_fec_resolver_t;
156 :
157 : FD_FN_PURE ulong
158 : fd_fec_resolver_footprint( ulong depth,
159 : ulong partial_depth,
160 : ulong complete_depth,
161 3 : ulong done_depth ) {
162 3 : if( FD_UNLIKELY( (depth==0UL) | (partial_depth==0UL) | (complete_depth==0UL) | (done_depth==0UL) ) ) return 0UL;
163 3 : if( FD_UNLIKELY( (depth>=(1UL<<62)-1UL) | (done_depth>=(1UL<<62)-1UL ) ) ) return 0UL; /* prevent overflow */
164 :
165 3 : int lg_curr_map_cnt = fd_ulong_find_msb( depth + 1UL ) + 2; /* See fd_tcache.h for the logic */
166 3 : int lg_done_map_cnt = fd_ulong_find_msb( done_depth + 1UL ) + 2; /* ... behind the + 2. */
167 :
168 3 : ulong footprint_per_bmtree = fd_bmtree_commit_footprint( FD_SHRED_MERKLE_LAYER_CNT );
169 :
170 3 : ulong layout = FD_LAYOUT_INIT;
171 3 : layout = FD_LAYOUT_APPEND( layout, FD_FEC_RESOLVER_ALIGN, sizeof(fd_fec_resolver_t) );
172 3 : layout = FD_LAYOUT_APPEND( layout, ctx_map_align(), ctx_map_footprint( lg_curr_map_cnt ) );
173 3 : layout = FD_LAYOUT_APPEND( layout, ctx_map_align(), ctx_map_footprint( lg_done_map_cnt ) );
174 3 : layout = FD_LAYOUT_APPEND( layout, freelist_align(), freelist_footprint( depth+partial_depth+1UL ) );
175 3 : layout = FD_LAYOUT_APPEND( layout, freelist_align(), freelist_footprint( complete_depth+1UL ) );
176 3 : layout = FD_LAYOUT_APPEND( layout, bmtrlist_align(), bmtrlist_footprint( depth+1UL ) );
177 3 : layout = FD_LAYOUT_APPEND( layout, FD_BMTREE_COMMIT_ALIGN, depth*footprint_per_bmtree );
178 :
179 3 : return FD_LAYOUT_FINI( layout, FD_FEC_RESOLVER_ALIGN );
180 3 : }
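/* Worked example (illustrative): with depth==1023 and done_depth==1023,
   fd_ulong_find_msb( 1023+1UL )==10, so both lg map counts are 12 and
   each fd_map_dynamic instance gets 1<<12 == 4096 slots -- roughly 4x
   the at-most-1023 live keys, which keeps probe sequences short. */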
181 :
182 0 : FD_FN_CONST ulong fd_fec_resolver_align( void ) { return FD_FEC_RESOLVER_ALIGN; }
183 :
184 :
185 : void *
186 : fd_fec_resolver_new( void * shmem,
187 : fd_fec_resolver_sign_fn * signer,
188 : void * sign_ctx,
189 : ulong depth,
190 : ulong partial_depth,
191 : ulong complete_depth,
192 : ulong done_depth,
193 : fd_fec_set_t * sets,
194 210 : ulong max_shred_idx ) {
195 210 : if( FD_UNLIKELY( (depth==0UL) | (partial_depth==0UL) | (complete_depth==0UL) | (done_depth==0UL) ) ) return NULL;
196 210 : if( FD_UNLIKELY( (depth>=(1UL<<62)-1UL) | (done_depth>=(1UL<<62)-1UL ) ) ) return NULL;
197 :
198 210 : int lg_curr_map_cnt = fd_ulong_find_msb( depth + 1UL ) + 2;
199 210 : int lg_done_map_cnt = fd_ulong_find_msb( done_depth + 1UL ) + 2;
200 :
201 210 : ulong footprint_per_bmtree = fd_bmtree_commit_footprint( FD_SHRED_MERKLE_LAYER_CNT );
202 :
203 210 : FD_SCRATCH_ALLOC_INIT( l, shmem );
204 210 : void * self = FD_SCRATCH_ALLOC_APPEND( l, FD_FEC_RESOLVER_ALIGN, sizeof(fd_fec_resolver_t) );
205 210 : void * curr = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_curr_map_cnt ) );
206 210 : void * done = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_done_map_cnt ) );
207 210 : void * free = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( depth+partial_depth+1UL ) );
208 210 : void * cmplst = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( complete_depth+1UL ) );
209 210 : void * bmfree = FD_SCRATCH_ALLOC_APPEND( l, bmtrlist_align(), bmtrlist_footprint( depth+1UL ) );
210 210 : void * bmfootprint = FD_SCRATCH_ALLOC_APPEND( l, FD_BMTREE_COMMIT_ALIGN, depth*footprint_per_bmtree );
211 210 : FD_SCRATCH_ALLOC_FINI( l, FD_FEC_RESOLVER_ALIGN );
212 :
213 210 : fd_fec_resolver_t * resolver = (fd_fec_resolver_t *)self;
214 :
215 210 : if( FD_UNLIKELY( !ctx_map_new ( curr, lg_curr_map_cnt )) ) { FD_LOG_WARNING(( "curr map_new failed" )); return NULL; }
216 210 : if( FD_UNLIKELY( !ctx_map_new ( done, lg_done_map_cnt )) ) { FD_LOG_WARNING(( "done map_new failed" )); return NULL; }
217 210 : if( FD_UNLIKELY( !freelist_new ( free, depth+partial_depth+1UL )) ) { FD_LOG_WARNING(( "freelist_new failed" )); return NULL; }
218 210 : if( FD_UNLIKELY( !freelist_new ( cmplst, complete_depth+1UL )) ) { FD_LOG_WARNING(( "freelist_new failed" )); return NULL; }
219 210 : if( FD_UNLIKELY( !bmtrlist_new ( bmfree, depth )) ) { FD_LOG_WARNING(( "bmtrlist_new failed" )); return NULL; }
220 210 : if( FD_UNLIKELY( !fd_sha512_new( (void *)resolver->sha512 )) ) { FD_LOG_WARNING(( "sha512_new failed" )); return NULL; }
221 :
222 : /* Initialize all the lists */
223 210 : fd_fec_set_t * * free_list = freelist_join( free );
224 210 : fd_fec_set_t * * complete_list = freelist_join( cmplst );
225 840 : for( ulong i=0UL; i<depth+partial_depth; i++ ) { freelist_push_tail( free_list, sets+i ); }
226 420 : for( ulong i=depth+partial_depth; i<depth+partial_depth+complete_depth; i++ ) { freelist_push_tail( complete_list, sets+i ); }
227 210 : freelist_leave( complete_list );
228 210 : freelist_leave( free_list );
229 :
230 210 : void * * bmtree_list = bmtrlist_join( bmfree );
231 630 : for( ulong i=0UL; i<depth; i++ ) { bmtrlist_push_tail( bmtree_list, (uchar *)bmfootprint + i*footprint_per_bmtree ); }
232 210 : bmtrlist_leave( bmtree_list );
233 :
234 210 : resolver->curr_ll_sentinel->prev = resolver->curr_ll_sentinel;
235 210 : resolver->curr_ll_sentinel->next = resolver->curr_ll_sentinel;
236 210 : resolver->done_ll_sentinel->prev = resolver->done_ll_sentinel;
237 210 : resolver->done_ll_sentinel->next = resolver->done_ll_sentinel;
238 :
239 210 : resolver->depth = depth;
240 210 : resolver->partial_depth = partial_depth;
241 210 : resolver->complete_depth = complete_depth;
242 210 : resolver->done_depth = done_depth;
243 210 : resolver->expected_shred_version = 0;
244 210 : resolver->signer = signer;
245 210 : resolver->sign_ctx = sign_ctx;
246 210 : resolver->max_shred_idx = max_shred_idx;
247 210 : return shmem;
248 210 : }
249 :
250 : fd_fec_resolver_t *
251 210 : fd_fec_resolver_join( void * shmem ) {
252 210 : fd_fec_resolver_t * resolver = (fd_fec_resolver_t *)shmem;
253 210 : ulong depth = resolver->depth;
254 210 : ulong partial_depth = resolver->partial_depth;
255 210 : ulong complete_depth = resolver->complete_depth;
256 210 : ulong done_depth = resolver->done_depth;
257 :
258 210 : int lg_curr_map_cnt = fd_ulong_find_msb( depth + 1UL ) + 2;
259 210 : int lg_done_map_cnt = fd_ulong_find_msb( done_depth + 1UL ) + 2;
260 :
261 210 : FD_SCRATCH_ALLOC_INIT( l, shmem );
262 210 : /* self */ FD_SCRATCH_ALLOC_APPEND( l, FD_FEC_RESOLVER_ALIGN, sizeof(fd_fec_resolver_t) );
263 210 : void * curr = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_curr_map_cnt ) );
264 210 : void * done = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_done_map_cnt ) );
265 210 : void * free = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( depth+partial_depth+1UL ) );
266 210 : void * cmplst = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( complete_depth+1UL ) );
267 210 : void * bmfree = FD_SCRATCH_ALLOC_APPEND( l, bmtrlist_align(), bmtrlist_footprint( depth+1UL ) );
268 210 : FD_SCRATCH_ALLOC_FINI( l, FD_FEC_RESOLVER_ALIGN );
269 :
270 210 : resolver->curr_map = ctx_map_join ( curr ); if( FD_UNLIKELY( !resolver->curr_map ) ) return NULL;
271 210 : resolver->done_map = ctx_map_join ( done ); if( FD_UNLIKELY( !resolver->done_map ) ) return NULL;
272 210 : resolver->free_list = freelist_join ( free ); if( FD_UNLIKELY( !resolver->free_list ) ) return NULL;
273 210 : resolver->complete_list = freelist_join ( cmplst ); if( FD_UNLIKELY( !resolver->complete_list ) ) return NULL;
274 210 : resolver->bmtree_free_list = bmtrlist_join ( bmfree ); if( FD_UNLIKELY( !resolver->bmtree_free_list ) ) return NULL;
275 210 : if( FD_UNLIKELY( !fd_sha512_join( resolver->sha512 ) ) ) return NULL;
276 :
277 210 : return resolver;
278 210 : }
279 :
280 : void
281 : fd_fec_resolver_set_shred_version( fd_fec_resolver_t * resolver,
282 210 : ushort expected_shred_version ) {
283 210 : resolver->expected_shred_version = expected_shred_version;
284 210 : }
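/* Illustrative end-to-end sketch (example_* names, the depths, and the
   shred version are assumptions, not part of this file; real callers
   typically carve the region out of a workspace rather than calling
   aligned_alloc, and error handling is elided).  It shows the intended
   lifecycle: footprint -> new -> join -> set_shred_version, then
   add_shred once per received packet. */
#if 0
#include <stdlib.h>

static fd_fec_resolver_t *
example_setup( fd_fec_set_t * sets ) { /* needs depth+partial_depth+complete_depth elements */
  ulong depth=128UL, partial_depth=2UL, complete_depth=2UL, done_depth=128UL;
  ulong footprint = fd_fec_resolver_footprint( depth, partial_depth, complete_depth, done_depth );
  void * mem = aligned_alloc( fd_fec_resolver_align(), footprint ); /* footprint is a multiple of the align */
  void * shfec = fd_fec_resolver_new( mem, NULL /* signer */, NULL /* sign_ctx */, depth,
                                      partial_depth, complete_depth, done_depth, sets,
                                      1UL<<32 /* max_shred_idx: effectively no bound, shred idx is a uint */ );
  fd_fec_resolver_t * resolver = fd_fec_resolver_join( shfec );
  fd_fec_resolver_set_shred_version( resolver, (ushort)50093 /* assumed network shred version */ );
  return resolver;
}

static void
example_rx( fd_fec_resolver_t * resolver, uchar const * pkt, ulong pkt_sz, uchar const * leader_pubkey ) {
  fd_shred_t const * shred = fd_shred_parse( pkt, pkt_sz );
  if( FD_UNLIKELY( !shred ) ) return;

  fd_fec_set_t const * out_set   = NULL;
  fd_shred_t const *   out_shred = NULL;
  fd_bmtree_node_t     merkle_root[1];
  int rv = fd_fec_resolver_add_shred( resolver, shred, pkt_sz, leader_pubkey,
                                      &out_set, &out_shred, merkle_root, NULL /* out_spilled */ );
  if( rv==FD_FEC_RESOLVER_SHRED_COMPLETES ) {
    /* out_set now holds data_shred_cnt data shreds and parity_shred_cnt
       parity shreds, all validated; the set stays owned by the resolver
       and only cycles back toward reuse through complete_list (see the
       end of add_shred). */
  } else if( rv==FD_FEC_RESOLVER_SHRED_OKAY ) {
    /* shred accepted; out_shred points to the resolver-owned copy */
  }
  /* IGNORED: duplicate or already-completed set; REJECTED: failed validation */
}
#endif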
285 :
286 : /* Two helper functions for working with the linked lists that are
287 : threaded through maps. Use them as follows:
288 : ctx_ll_insert( <sentinel corresponding to map>, ctx_map_insert( <map>, key ) );
289 : ctx_map_remove( <map>, ctx_ll_remove( <node to remove> ) );
290 :
291 : */
292 : /* Removes r from the linked list */
293 : static set_ctx_t *
294 366 : ctx_ll_remove( set_ctx_t * r ) {
295 366 : r->next->prev = r->prev;
296 366 : r->prev->next = r->next;
297 366 : r->next = NULL;
298 366 : r->prev = NULL;
299 366 : return r;
300 366 : }
301 :
302 : /* Inserts c immediately after p. Returns c. */
303 : static set_ctx_t *
304 594 : ctx_ll_insert( set_ctx_t * p, set_ctx_t * c ) {
305 594 : c->next = p->next;
306 594 : c->prev = p;
307 594 : p->next->prev = c;
308 594 : p->next = c;
309 594 : return c;
310 594 : }
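/* Illustrative sketch (example_evict_oldest is hypothetical): the
   sentinel's next pointer is the newest in-progress entry and its prev
   pointer is the oldest, so reclaiming the oldest FEC set -- which is
   what add_shred does below when the free list runs low -- looks like
   this.  Note the set and bmtree memory must go back on their free
   lists to preserve the resolver's invariants. */
#if 0
static void
example_evict_oldest( fd_fec_resolver_t * resolver ) {
  set_ctx_t * sentinel = resolver->curr_ll_sentinel;
  if( sentinel->prev==sentinel ) return; /* list empty: only the sentinel remains */

  set_ctx_t * oldest = sentinel->prev;
  freelist_push_tail( resolver->free_list,        oldest->set  );
  bmtrlist_push_tail( resolver->bmtree_free_list, oldest->tree );

  /* Unlink from the list first, then delete from the map; MAP_MOVE
     repairs the links of any element the map relocates. */
  ctx_map_remove( resolver->curr_map, ctx_ll_remove( oldest ) );
}
#endif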
311 :
312 : int
313 : fd_fec_resolver_add_shred( fd_fec_resolver_t * resolver,
314 : fd_shred_t const * shred,
315 : ulong shred_sz,
316 : uchar const * leader_pubkey,
317 : fd_fec_set_t const * * out_fec_set,
318 : fd_shred_t const * * out_shred,
319 : fd_bmtree_node_t * out_merkle_root,
320 9618 : fd_fec_resolver_spilled_t * out_spilled ) {
321 : /* Unpack variables */
322 9618 : ulong partial_depth = resolver->partial_depth;
323 9618 : ulong done_depth = resolver->done_depth;
324 :
325 9618 : fd_fec_set_t * * free_list = resolver->free_list;
326 9618 : fd_fec_set_t * * complete_list = resolver->complete_list;
327 9618 : void * * bmtree_free_list = resolver->bmtree_free_list;
328 9618 : set_ctx_t * curr_map = resolver->curr_map;
329 9618 : set_ctx_t * done_map = resolver->done_map;
330 :
331 9618 : fd_reedsol_t * reedsol = resolver->reedsol;
332 9618 : fd_sha512_t * sha512 = resolver->sha512;
333 :
334 9618 : set_ctx_t * curr_ll_sentinel = resolver->curr_ll_sentinel;
335 9618 : set_ctx_t * done_ll_sentinel = resolver->done_ll_sentinel;
336 :
337 : /* Invariants:
338 : * no key is in both the done map and the current map
339 : * each set pointer provided to the new function is in exactly one
340 : of curr_map, freelist, or complete_list
341 : * bmtree_free_list has exactly partial_depth fewer elements than
342 : freelist
343 : */
344 9618 : wrapped_sig_t * w_sig = (wrapped_sig_t *)shred->signature;
345 :
346 : /* Immediately reject any shred with a 0 signature. */
347 9618 : if( FD_UNLIKELY( ctx_map_key_inval( *w_sig ) ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
348 :
349 : /* Are we already done with this FEC set? */
350 9618 : int found = !!ctx_map_query( done_map, *w_sig, NULL );
351 :
352 9618 : if( found ) return FD_FEC_RESOLVER_SHRED_IGNORED; /* With no packet loss, we expect found==1 about 50% of the time */
353 :
354 9180 : set_ctx_t * ctx = ctx_map_query( curr_map, *w_sig, NULL );
355 :
356 9180 : fd_bmtree_node_t leaf[1];
357 9180 : uchar variant = shred->variant;
358 9180 : uchar shred_type = fd_shred_type( variant );
359 :
360 9180 : if( FD_UNLIKELY( (shred_type==FD_SHRED_TYPE_LEGACY_DATA) | (shred_type==FD_SHRED_TYPE_LEGACY_CODE) ) ) {
361 : /* Reject any legacy shreds */
362 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
363 0 : }
364 :
365 9180 : if( FD_UNLIKELY( shred->version!=resolver->expected_shred_version ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
366 9177 : if( FD_UNLIKELY( shred_sz<fd_shred_sz( shred ) ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
367 9177 : if( FD_UNLIKELY( shred->idx>=resolver->max_shred_idx ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
368 :
369 9165 : int is_data_shred = fd_shred_is_data( shred_type );
370 :
371 9165 : if( !is_data_shred ) { /* Roughly 50/50 branch */
372 4707 : if( FD_UNLIKELY( (shred->code.data_cnt>FD_REEDSOL_DATA_SHREDS_MAX) | (shred->code.code_cnt>FD_REEDSOL_PARITY_SHREDS_MAX) ) )
373 3 : return FD_FEC_RESOLVER_SHRED_REJECTED;
374 4704 : if( FD_UNLIKELY( (shred->code.data_cnt==0UL) | (shred->code.code_cnt==0UL) ) )
375 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
376 4704 : if( FD_UNLIKELY( (ulong)shred->fec_set_idx+(ulong)shred->code.data_cnt>=resolver->max_shred_idx ) )
377 3 : return FD_FEC_RESOLVER_SHRED_REJECTED;
378 4701 : if( FD_UNLIKELY( (ulong)shred->idx + (ulong)shred->code.code_cnt - (ulong)shred->code.idx>=resolver->max_shred_idx ) )
379 3 : return FD_FEC_RESOLVER_SHRED_REJECTED;
380 4701 : }
381 :
382 :
383 : /* For the purposes of the shred header, tree_depth means the number
384 : of nodes, counting the leaf but excluding the root. For bmtree,
385 : depth means the number of layers, which counts both. */
386 9156 : ulong tree_depth = fd_shred_merkle_cnt( variant ); /* In [0, 15] */
387 9156 : ulong reedsol_protected_sz = 1115UL + FD_SHRED_DATA_HEADER_SZ - FD_SHRED_SIGNATURE_SZ - FD_SHRED_MERKLE_NODE_SZ*tree_depth
388 9156 : - FD_SHRED_MERKLE_ROOT_SZ*fd_shred_is_chained ( shred_type )
389 9156 : - FD_SHRED_SIGNATURE_SZ *fd_shred_is_resigned( shred_type ); /* In [743, 1139] conservatively */
390 9156 : ulong data_merkle_protected_sz = reedsol_protected_sz + FD_SHRED_MERKLE_ROOT_SZ*fd_shred_is_chained ( shred_type );
391 9156 : ulong parity_merkle_protected_sz = reedsol_protected_sz + FD_SHRED_MERKLE_ROOT_SZ*fd_shred_is_chained ( shred_type )+FD_SHRED_CODE_HEADER_SZ-FD_ED25519_SIG_SZ;
392 9156 : ulong merkle_protected_sz = fd_ulong_if( is_data_shred, data_merkle_protected_sz, parity_merkle_protected_sz );
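/* Worked example (illustrative): for an unchained, unresigned shred with
   tree_depth==0 the formula gives the 1139 byte maximum quoted above
   (1115 + 88 byte data header - 64 byte signature); each additional
   Merkle layer removes FD_SHRED_MERKLE_NODE_SZ (20) bytes, chaining
   removes another 32, and resigning another 64, yielding the 743 byte
   minimum at tree_depth==15.  Parity shreds Merkle-protect an extra
   FD_SHRED_CODE_HEADER_SZ-FD_ED25519_SIG_SZ bytes because their coding
   header (minus the signature) is covered too. */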
393 :
394 9156 : fd_bmtree_hash_leaf( leaf, (uchar const *)shred + sizeof(fd_ed25519_sig_t), merkle_protected_sz, FD_BMTREE_LONG_PREFIX_SZ );
395 :
396 : /* in_type_idx is between [0, code.data_cnt) or [0, code.code_cnt),
397 : where data_cnt <= FD_REEDSOL_DATA_SHREDS_MAX and code_cnt <=
398 : FD_REEDSOL_PARITY_SHREDS_MAX.
399 : On the other hand, shred_idx goes from [0, code.data_cnt +
400 : code.code_cnt), with all the data shreds having
401 : shred_idx < code.data_cnt and all the parity shreds having
402 : shred_idx >= code.data_cnt. */
403 9156 : ulong in_type_idx = fd_ulong_if( is_data_shred, shred->idx - shred->fec_set_idx, shred->code.idx );
404 9156 : ulong shred_idx = fd_ulong_if( is_data_shred, in_type_idx, in_type_idx + shred->code.data_cnt );
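/* Worked example (illustrative): in an FEC set with code.data_cnt==32,
   a data shred with idx==fec_set_idx+7 has in_type_idx==7 and occupies
   Merkle leaf shred_idx==7, while a parity shred with code.idx==5 has
   in_type_idx==5 and occupies Merkle leaf shred_idx==32+5==37. */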
405 :
406 9156 : if( FD_UNLIKELY( in_type_idx >= fd_ulong_if( is_data_shred, FD_REEDSOL_DATA_SHREDS_MAX, FD_REEDSOL_PARITY_SHREDS_MAX ) ) )
407 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
408 : /* This, combined with the check on shred->code.data_cnt, implies that
409 : shred_idx is in [0, DATA_SHREDS_MAX+PARITY_SHREDS_MAX). */
410 :
411 9156 : if( FD_UNLIKELY( tree_depth>FD_SHRED_MERKLE_LAYER_CNT-1UL ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
412 9156 : if( FD_UNLIKELY( fd_bmtree_depth( shred_idx+1UL ) > tree_depth+1UL ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
413 :
414 9156 : if( FD_UNLIKELY( !ctx ) ) { /* This is the first shred in the FEC set */
415 :
416 321 : if( FD_UNLIKELY( freelist_cnt( free_list )<=partial_depth ) ) {
417 : /* Packet loss is really high and we have a lot of in-progress FEC
418 : sets that we haven't been able to finish. Take the resources
419 : (FEC set and bmtree) from the oldest, and send the oldest FEC
420 : set to the back of the free list. */
421 36 : set_ctx_t * victim_ctx = resolver->curr_ll_sentinel->prev;
422 :
423 :
424 36 : if( FD_LIKELY( out_spilled ) ) {
425 6 : fd_fec_set_t * set = victim_ctx->set;
426 :
427 : /* Find the highest data shred received in the FEC set */
428 :
429 6 : ulong max_rcvd_dshred_idx = d_rcvd_last( set->data_shred_rcvd );
430 6 : fd_shred_t const * max_shred;
431 6 : if( FD_UNLIKELY( max_rcvd_dshred_idx == ~0UL ) ) {
432 0 : max_rcvd_dshred_idx = FD_SHRED_BLK_MAX; /* No data shreds received. Use a parity shred to determine the spilled slot and fec_set_idx */
433 0 : max_shred = fd_shred_parse( set->parity_shreds[ p_rcvd_first( set->parity_shred_rcvd ) ], FD_SHRED_MAX_SZ );
434 6 : } else {
435 6 : max_shred = fd_shred_parse( set->data_shreds [ max_rcvd_dshred_idx ], FD_SHRED_MIN_SZ );
436 6 : }
437 :
438 6 : if( FD_LIKELY( out_spilled ) ) {
439 6 : out_spilled->slot = max_shred->slot;
440 6 : out_spilled->fec_set_idx = max_shred->fec_set_idx;
441 6 : out_spilled->max_dshred_idx = (uint)max_rcvd_dshred_idx;
442 6 : }
443 :
444 6 : FD_LOG_INFO(("Spilled from fec_resolver in-progress map %lu %u, data_shreds_rcvd %lu, parity_shreds_rcvd %lu, max_d_rcvd_shred_idx %lu", max_shred->slot, max_shred->fec_set_idx, d_rcvd_cnt( set->data_shred_rcvd ), p_rcvd_cnt( set->parity_shred_rcvd ), max_rcvd_dshred_idx ));
445 6 : }
446 :
447 36 : freelist_push_tail( free_list, victim_ctx->set );
448 36 : bmtrlist_push_tail( bmtree_free_list, victim_ctx->tree );
449 :
450 : /* Remove from linked list and then from the map */
451 36 : ctx_map_remove( curr_map, ctx_ll_remove( victim_ctx ) );
452 :
453 36 : FD_MCNT_INC( SHRED, FEC_SET_SPILLED, 1UL );
454 36 : }
455 : /* Now we know |free_list|>partial_depth and |bmtree_free_list|>=1 */
456 :
457 321 : fd_fec_set_t * set_to_use = freelist_pop_head( free_list );
458 321 : void * bmtree_mem = bmtrlist_pop_head( bmtree_free_list );
459 :
460 : /* Now we need to derive the root of the Merkle tree and verify the
461 : signature so that an attacker can't mount a DOS attack just by
462 : sending lots of invalid shreds. */
463 321 : fd_bmtree_commit_t * tree;
464 321 : tree = fd_bmtree_commit_init( bmtree_mem, FD_SHRED_MERKLE_NODE_SZ, FD_BMTREE_LONG_PREFIX_SZ, FD_SHRED_MERKLE_LAYER_CNT );
465 :
466 321 : fd_bmtree_node_t _root[1];
467 321 : fd_shred_merkle_t const * proof = fd_shred_merkle_nodes( shred );
468 321 : int rv = fd_bmtree_commitp_insert_with_proof( tree, shred_idx, leaf, (uchar const *)proof, tree_depth, _root );
469 321 : if( FD_UNLIKELY( !rv ) ) {
470 0 : freelist_push_head( free_list, set_to_use );
471 0 : bmtrlist_push_head( bmtree_free_list, bmtree_mem );
472 0 : FD_MCNT_INC( SHRED, SHRED_REJECTED_INITIAL, 1UL );
473 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
474 0 : }
475 :
476 321 : if( FD_UNLIKELY( FD_ED25519_SUCCESS != fd_ed25519_verify( _root->hash, 32UL, shred->signature, leader_pubkey, sha512 ) ) ) {
477 0 : freelist_push_head( free_list, set_to_use );
478 0 : bmtrlist_push_head( bmtree_free_list, bmtree_mem );
479 0 : FD_MCNT_INC( SHRED, SHRED_REJECTED_INITIAL, 1UL );
480 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
481 0 : }
482 :
483 : /* This seems like a legitimate FEC set, so we can reserve some
484 : resources for it. */
485 321 : ctx = ctx_ll_insert( curr_ll_sentinel, ctx_map_insert( curr_map, *w_sig ) );
486 321 : ctx->set = set_to_use;
487 321 : ctx->tree = tree;
488 321 : ctx->root = *_root;
489 321 : ctx->total_rx_shred_cnt = 0UL;
490 321 : ctx->data_variant = fd_uchar_if( is_data_shred, variant, fd_shred_variant( fd_shred_swap_type( shred_type ), (uchar)tree_depth ) );
491 321 : ctx->parity_variant = fd_uchar_if( !is_data_shred, variant, fd_shred_variant( fd_shred_swap_type( shred_type ), (uchar)tree_depth ) );
492 :
493 321 : if( FD_UNLIKELY( fd_shred_is_resigned( shred_type ) & !!(resolver->signer) ) ) {
494 3 : resolver->signer( resolver->sign_ctx, ctx->retransmitter_sig.u, _root->hash );
495 318 : } else {
496 318 : fd_memset( ctx->retransmitter_sig.u, 0, 64UL );
497 318 : }
498 :
499 : /* Reset the FEC set */
500 321 : ctx->set->data_shred_cnt = SHRED_CNT_NOT_SET;
501 321 : ctx->set->parity_shred_cnt = SHRED_CNT_NOT_SET;
502 321 : d_rcvd_join( d_rcvd_new( d_rcvd_delete( d_rcvd_leave( ctx->set->data_shred_rcvd ) ) ) );
503 321 : p_rcvd_join( p_rcvd_new( p_rcvd_delete( p_rcvd_leave( ctx->set->parity_shred_rcvd ) ) ) );
504 :
505 : /* Copy the merkle root into the output arg. */
506 321 : if( FD_LIKELY( out_merkle_root ) ) memcpy( out_merkle_root, ctx->root.hash, sizeof(fd_bmtree_node_t) );
507 :
508 8835 : } else {
509 : /* This is not the first shred in the set */
510 : /* First, check to make sure this is not a duplicate */
511 8835 : int shred_dup = fd_int_if( is_data_shred, d_rcvd_test( ctx->set->data_shred_rcvd, in_type_idx ),
512 8835 : p_rcvd_test( ctx->set->parity_shred_rcvd, in_type_idx ) );
513 :
514 8835 : if( FD_UNLIKELY( shred_dup ) ) return FD_FEC_RESOLVER_SHRED_IGNORED;
515 :
516 : /* Ensure that all the shreds in the FEC set have consistent
517 : variants. They all must have the same tree_depth and the same
518 : chained/not chained, resigned/not resigned bits. */
519 8622 : if( FD_UNLIKELY( variant!=fd_uchar_if( is_data_shred, ctx->data_variant, ctx->parity_variant ) ) ) {
520 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
521 0 : }
522 :
523 8622 : fd_shred_merkle_t const * proof = fd_shred_merkle_nodes( shred );
524 8622 : int rv = fd_bmtree_commitp_insert_with_proof( ctx->tree, shred_idx, leaf, (uchar const *)proof, tree_depth, out_merkle_root );
525 8622 : if( !rv ) return FD_FEC_RESOLVER_SHRED_REJECTED;
526 8622 : }
527 :
528 8937 : if( FD_UNLIKELY( (ctx->set->data_shred_cnt==SHRED_CNT_NOT_SET) & (!is_data_shred) ) ) {
529 294 : ctx->set->data_shred_cnt = shred->code.data_cnt;
530 294 : ctx->set->parity_shred_cnt = shred->code.code_cnt;
531 294 : ctx->parity_idx0 = shred->idx - in_type_idx;
532 294 : ctx->fec_set_idx = shred->fec_set_idx;
533 294 : }
534 :
535 : /* At this point, the shred has passed Merkle validation and is new.
536 : We also know that ctx is a pointer to the slot for this signature
537 : in the current map. */
538 :
539 : /* Copy the shred to memory the FEC resolver owns */
540 8937 : uchar * dst = fd_ptr_if( is_data_shred, ctx->set->data_shreds[ in_type_idx ], ctx->set->parity_shreds[ in_type_idx ] );
541 8937 : fd_memcpy( dst, shred, fd_shred_sz( shred ) );
542 :
543 : /* If the shred needs a retransmitter signature, set it */
544 8937 : if( FD_UNLIKELY( fd_shred_is_resigned( shred_type ) ) ) {
545 729 : memcpy( dst + fd_shred_retransmitter_sig_off( (fd_shred_t *)dst ), ctx->retransmitter_sig.u, 64UL );
546 729 : }
547 :
548 8937 : d_rcvd_insert_if( ctx->set->data_shred_rcvd, is_data_shred, in_type_idx );
549 8937 : p_rcvd_insert_if( ctx->set->parity_shred_rcvd, !is_data_shred, in_type_idx );
550 8937 : ctx->total_rx_shred_cnt++;
551 :
552 8937 : *out_shred = (fd_shred_t const *)dst;
553 :
554 : /* Do we have enough to begin reconstruction? */
555 8937 : if( FD_LIKELY( ctx->total_rx_shred_cnt < ctx->set->data_shred_cnt ) ) return FD_FEC_RESOLVER_SHRED_OKAY;
556 :
557 : /* At this point, the FEC set is either valid or permanently invalid,
558 : so we can consider it done either way. First though, since ctx_map_remove
559 : can change what's at *ctx, unpack the values before we do that. */
560 270 : fd_fec_set_t * set = ctx->set;
561 270 : fd_bmtree_commit_t * tree = ctx->tree;
562 270 : ulong fec_set_idx = ctx->fec_set_idx;
563 270 : ulong parity_idx0 = ctx->parity_idx0;
564 270 : wrapped_sig_t retran_sig = ctx->retransmitter_sig;
565 270 : uchar parity_variant = ctx->parity_variant;
566 270 : uchar data_variant = ctx->data_variant;
567 :
568 270 : ctx_ll_insert( done_ll_sentinel, ctx_map_insert( done_map, ctx->sig ) );
569 270 : if( FD_UNLIKELY( ctx_map_key_cnt( done_map ) > done_depth ) ) ctx_map_remove( done_map, ctx_ll_remove( done_ll_sentinel->prev ) );
570 :
571 270 : ctx_map_remove( curr_map, ctx_ll_remove( ctx ) );
572 :
573 270 : reedsol = fd_reedsol_recover_init( (void*)reedsol, reedsol_protected_sz );
574 8826 : for( ulong i=0UL; i<set->data_shred_cnt; i++ ) {
575 8556 : uchar * rs_payload = set->data_shreds[ i ] + sizeof(fd_ed25519_sig_t);
576 8556 : if( d_rcvd_test( set->data_shred_rcvd, i ) ) fd_reedsol_recover_add_rcvd_shred ( reedsol, 1, rs_payload );
577 4416 : else fd_reedsol_recover_add_erased_shred( reedsol, 1, rs_payload );
578 8556 : }
579 8988 : for( ulong i=0UL; i<set->parity_shred_cnt; i++ ) {
580 8718 : uchar * rs_payload = set->parity_shreds[ i ] + FD_SHRED_CODE_HEADER_SZ;
581 8718 : if( p_rcvd_test( set->parity_shred_rcvd, i ) ) fd_reedsol_recover_add_rcvd_shred ( reedsol, 0, rs_payload );
582 4254 : else fd_reedsol_recover_add_erased_shred( reedsol, 0, rs_payload );
583 8718 : }
584 :
585 270 : if( FD_UNLIKELY( FD_REEDSOL_SUCCESS != fd_reedsol_recover_fini( reedsol ) ) ) {
586 : /* A few lines up, we already checked to make sure it wasn't the
587 : insufficient case, so it must be the inconsistent case. That
588 : means the leader signed a shred with an invalid Reed-Solomon FEC
589 : set. This shouldn't happen in practice, but we need to handle it
590 : for the malicious leader case. This should probably be a
591 : slash-able offense. */
592 0 : freelist_push_tail( free_list, set );
593 0 : bmtrlist_push_tail( bmtree_free_list, tree );
594 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
595 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
596 0 : }
597 :
598 270 : uchar const * chained_root = fd_ptr_if( fd_shred_is_chained( shred_type ), (uchar *)shred+fd_shred_chain_off( variant ), NULL );
599 :
600 : /* Iterate over recovered shreds, add them to the Merkle tree,
601 : populate headers and signatures. */
602 8826 : for( ulong i=0UL; i<set->data_shred_cnt; i++ ) {
603 8556 : if( !d_rcvd_test( set->data_shred_rcvd, i ) ) {
604 4416 : fd_memcpy( set->data_shreds[i], shred, sizeof(fd_ed25519_sig_t) );
605 4416 : if( FD_LIKELY( fd_shred_is_chained( shred_type ) ) ) {
606 2997 : fd_memcpy( set->data_shreds[i]+fd_shred_chain_off( data_variant ), chained_root, FD_SHRED_MERKLE_ROOT_SZ );
607 2997 : }
608 4416 : fd_bmtree_hash_leaf( leaf, set->data_shreds[i]+sizeof(fd_ed25519_sig_t), data_merkle_protected_sz, FD_BMTREE_LONG_PREFIX_SZ );
609 4416 : if( FD_UNLIKELY( !fd_bmtree_commitp_insert_with_proof( tree, i, leaf, NULL, 0, NULL ) ) ) {
610 0 : freelist_push_tail( free_list, set );
611 0 : bmtrlist_push_tail( bmtree_free_list, tree );
612 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
613 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
614 0 : }
615 :
616 4416 : }
617 8556 : }
618 :
619 8988 : for( ulong i=0UL; i<set->parity_shred_cnt; i++ ) {
620 8718 : if( !p_rcvd_test( set->parity_shred_rcvd, i ) ) {
621 4254 : fd_shred_t * p_shred = (fd_shred_t *)set->parity_shreds[i]; /* We can't parse because we haven't populated the header */
622 4254 : fd_memcpy( p_shred->signature, shred->signature, sizeof(fd_ed25519_sig_t) );
623 4254 : p_shred->variant = parity_variant;
624 4254 : p_shred->slot = shred->slot;
625 4254 : p_shred->idx = (uint)(i + parity_idx0);
626 4254 : p_shred->version = shred->version;
627 4254 : p_shred->fec_set_idx = (uint)fec_set_idx;
628 4254 : p_shred->code.data_cnt = (ushort)set->data_shred_cnt;
629 4254 : p_shred->code.code_cnt = (ushort)set->parity_shred_cnt;
630 4254 : p_shred->code.idx = (ushort)i;
631 :
632 4254 : if( FD_LIKELY( fd_shred_is_chained( shred_type ) ) ) {
633 3162 : fd_memcpy( set->parity_shreds[i]+fd_shred_chain_off( parity_variant ), chained_root, FD_SHRED_MERKLE_ROOT_SZ );
634 3162 : }
635 :
636 4254 : fd_bmtree_hash_leaf( leaf, set->parity_shreds[i]+ sizeof(fd_ed25519_sig_t), parity_merkle_protected_sz, FD_BMTREE_LONG_PREFIX_SZ );
637 4254 : if( FD_UNLIKELY( !fd_bmtree_commitp_insert_with_proof( tree, set->data_shred_cnt + i, leaf, NULL, 0, NULL ) ) ) {
638 0 : freelist_push_tail( free_list, set );
639 0 : bmtrlist_push_tail( bmtree_free_list, tree );
640 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
641 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
642 0 : }
643 4254 : }
644 8718 : }
645 :
646 : /* Check that the whole Merkle tree is consistent. */
647 270 : if( FD_UNLIKELY( !fd_bmtree_commitp_fini( tree, set->data_shred_cnt + set->parity_shred_cnt ) ) ) {
648 0 : freelist_push_tail( free_list, set );
649 0 : bmtrlist_push_tail( bmtree_free_list, tree );
650 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
651 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
652 0 : }
653 :
654 : /* Check that all the fields that are supposed to be consistent across
655 : an FEC set actually are. */
656 270 : fd_shred_t const * base_data_shred = fd_shred_parse( set->data_shreds [ 0 ], FD_SHRED_MIN_SZ );
657 270 : fd_shred_t const * base_parity_shred = fd_shred_parse( set->parity_shreds[ 0 ], FD_SHRED_MAX_SZ );
658 270 : int reject = (!base_data_shred) | (!base_parity_shred);
659 :
660 8556 : for( ulong i=1UL; (!reject) & (i<set->data_shred_cnt); i++ ) {
661 : /* Technically, we only need to re-parse the ones we recovered with
662 : Reedsol, but parsing is pretty cheap and we need to do the rest
663 : of the validation on all of them anyway. */
664 8286 : fd_shred_t const * parsed = fd_shred_parse( set->data_shreds[ i ], FD_SHRED_MIN_SZ );
665 8286 : if( FD_UNLIKELY( !parsed ) ) { reject = 1; break; }
666 8286 : reject |= parsed->variant != base_data_shred->variant;
667 8286 : reject |= parsed->slot != base_data_shred->slot;
668 8286 : reject |= parsed->version != base_data_shred->version;
669 8286 : reject |= parsed->fec_set_idx != base_data_shred->fec_set_idx;
670 8286 : reject |= parsed->data.parent_off != base_data_shred->data.parent_off;
671 :
672 8286 : reject |= fd_shred_is_chained( fd_shred_type( parsed->variant ) ) &&
673 8286 : !fd_memeq( (uchar *)parsed +fd_shred_chain_off( parsed->variant ),
674 5817 : (uchar *)base_data_shred+fd_shred_chain_off( base_data_shred->variant ), FD_SHRED_MERKLE_ROOT_SZ );
675 8286 : }
676 :
677 8988 : for( ulong i=0UL; (!reject) & (i<set->parity_shred_cnt); i++ ) {
678 8718 : fd_shred_t const * parsed = fd_shred_parse( set->parity_shreds[ i ], FD_SHRED_MAX_SZ );
679 8718 : if( FD_UNLIKELY( !parsed ) ) { reject = 1; break; }
680 8718 : reject |= fd_shred_type( parsed->variant ) != fd_shred_swap_type( fd_shred_type( base_data_shred->variant ) );
681 8718 : reject |= fd_shred_merkle_cnt( parsed->variant ) != fd_shred_merkle_cnt( base_data_shred->variant );
682 8718 : reject |= parsed->slot != base_data_shred->slot;
683 8718 : reject |= parsed->version != base_data_shred->version;
684 8718 : reject |= parsed->fec_set_idx != base_data_shred->fec_set_idx;
685 8718 : reject |= parsed->code.data_cnt != base_parity_shred->code.data_cnt;
686 8718 : reject |= parsed->code.code_cnt != base_parity_shred->code.code_cnt;
687 8718 : reject |= parsed->code.idx != (ushort)i;
688 :
689 8718 : reject |= fd_shred_is_chained( fd_shred_type( parsed->variant ) ) &&
690 8718 : !fd_memeq( (uchar *)parsed +fd_shred_chain_off( parsed->variant ),
691 6174 : (uchar *)base_data_shred+fd_shred_chain_off( base_data_shred->variant ), FD_SHRED_MERKLE_ROOT_SZ );
692 8718 : }
693 270 : if( FD_UNLIKELY( reject ) ) {
694 0 : freelist_push_tail( free_list, set );
695 0 : bmtrlist_push_tail( bmtree_free_list, tree );
696 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
697 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
698 0 : }
699 :
700 : /* Populate missing Merkle proofs */
701 8826 : for( ulong i=0UL; i<set->data_shred_cnt; i++ ) if( !d_rcvd_test( set->data_shred_rcvd, i ) )
702 4416 : fd_bmtree_get_proof( tree, set->data_shreds[i] + fd_shred_merkle_off( (fd_shred_t *)set->data_shreds[i] ), i );
703 :
704 8988 : for( ulong i=0UL; i<set->parity_shred_cnt; i++ ) if( !p_rcvd_test( set->parity_shred_rcvd, i ) )
705 4254 : fd_bmtree_get_proof( tree, set->parity_shreds[i] + fd_shred_merkle_off( (fd_shred_t *)set->parity_shreds[i] ), set->data_shred_cnt+i );
706 :
707 : /* Set the retransmitter signature for shreds that need one */
708 270 : if( FD_UNLIKELY( fd_shred_is_resigned( shred_type ) ) ) {
709 747 : for( ulong i=0UL; i<set->data_shred_cnt; i++ ) if( !d_rcvd_test( set->data_shred_rcvd, i ) )
710 324 : memcpy( set->data_shreds[i] + fd_shred_retransmitter_sig_off( (fd_shred_t *)set->data_shreds[i] ), retran_sig.u, 64UL );
711 :
712 747 : for( ulong i=0UL; i<set->parity_shred_cnt; i++ ) if( !p_rcvd_test( set->parity_shred_rcvd, i ) )
713 399 : memcpy( set->parity_shreds[i] + fd_shred_retransmitter_sig_off( (fd_shred_t *)set->parity_shreds[i] ), retran_sig.u, 64UL );
714 21 : }
715 :
716 : /* Finally... A valid FEC set. Forward it along. */
717 270 : bmtrlist_push_tail( bmtree_free_list, tree );
718 270 : freelist_push_tail( complete_list, set );
719 270 : freelist_push_tail( free_list, freelist_pop_head( complete_list ) );
720 :
721 270 : *out_fec_set = set;
722 :
723 270 : return FD_FEC_RESOLVER_SHRED_COMPLETES;
724 270 : }
725 :
726 : int
727 : fd_fec_resolver_done_contains( fd_fec_resolver_t * resolver,
728 0 : fd_ed25519_sig_t const * signature ) {
729 0 : wrapped_sig_t * w_sig = (wrapped_sig_t *)signature;
730 0 : if( FD_UNLIKELY( ctx_map_key_inval( *w_sig ) ) ) return 0;
731 0 : return !!ctx_map_query( resolver->done_map, *w_sig, NULL );
732 0 : }
733 :
734 : int
735 : fd_fec_resolver_shred_query( fd_fec_resolver_t * resolver,
736 : fd_ed25519_sig_t const * signature,
737 : uint shred_idx,
738 0 : uchar * out_shred ) {
739 0 : wrapped_sig_t * w_sig = (wrapped_sig_t *)signature;
740 0 : if( FD_UNLIKELY( ctx_map_key_inval( *w_sig ) ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
741 :
742 0 : set_ctx_t * ctx = ctx_map_query( resolver->curr_map, *w_sig, NULL );
743 0 : if( FD_UNLIKELY( !ctx ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
744 :
745 0 : fd_fec_set_t * set = ctx->set;
746 0 : fd_shred_t const * data_shred = (fd_shred_t const *)fd_type_pun_const( set->data_shreds[ shred_idx ] );
747 :
748 0 : ulong sz = fd_ulong_min( fd_shred_sz( data_shred ), FD_SHRED_MIN_SZ );
749 0 : fd_memcpy( out_shred, data_shred, sz );
750 0 : return FD_FEC_RESOLVER_SHRED_OKAY;
751 0 : }
752 :
753 : /* TODO: code is copy-pasted because this function is intended to be
754 : removed as soon as the repair protocol is upgraded to support
755 : requesting coding shreds. */
756 :
757 : int
758 : fd_fec_resolver_force_complete( fd_fec_resolver_t * resolver,
759 : fd_shred_t const * last_shred,
760 : fd_fec_set_t const ** out_fec_set,
761 12 : fd_bmtree_node_t * out_merkle_root ) {
762 :
763 : /* Error if last_shred is obviously invalid... don't even
764 : try to process the associated FEC set. */
765 :
766 12 : ulong idx_in_set = last_shred->idx - last_shred->fec_set_idx;
767 :
768 12 : if( FD_UNLIKELY( idx_in_set >= FD_REEDSOL_DATA_SHREDS_MAX ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
769 :
770 : /* Error if can't find the last_shred's FEC set. */
771 :
772 12 : wrapped_sig_t * w_sig = (wrapped_sig_t *)last_shred->signature;
773 12 : if( FD_UNLIKELY( ctx_map_key_inval( *w_sig ) ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
774 :
775 : /* Error if already done. */
776 :
777 12 : int found = !!ctx_map_query( resolver->done_map, *w_sig, NULL );
778 12 : if( found ) return FD_FEC_RESOLVER_SHRED_IGNORED;
779 :
780 : /* Error if FEC associated with last_shred not found. */
781 :
782 12 : set_ctx_t * ctx = ctx_map_query( resolver->curr_map, *w_sig, NULL );
783 12 : if( FD_UNLIKELY( !ctx ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
784 :
785 : /* Error if a parity shred was already received (shred cnts are only
786 : knowable from receiving a coding shred). */
787 :
788 12 : if( FD_UNLIKELY( ctx->set->data_shred_cnt != SHRED_CNT_NOT_SET ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
789 12 : if( FD_UNLIKELY( ctx->set->parity_shred_cnt != SHRED_CNT_NOT_SET ) ) return FD_FEC_RESOLVER_SHRED_REJECTED;
790 :
791 : /* Error if there are gaps in the received data shreds up to the last
792 : data shred, which implies the FEC set is still incomplete. */
793 :
794 126 : for( ulong i=0UL; i<=idx_in_set; i++ ) if( !d_rcvd_test( ctx->set->data_shred_rcvd, i ) ) {
795 3 : return FD_FEC_RESOLVER_SHRED_REJECTED;
796 3 : }
797 :
798 : /* Error if last_shred is not in fact the last shred, i.e. the FEC
799 : resolver has seen a data shred with a higher idx. */
800 :
801 117 : for( ulong i=idx_in_set + 1; i<FD_REEDSOL_DATA_SHREDS_MAX; i++ ) if( d_rcvd_test( ctx->set->data_shred_rcvd, i ) ) {
802 6 : return FD_FEC_RESOLVER_SHRED_REJECTED;
803 6 : }
804 :
805 : /* Now that we know the caller has provided a FEC set that is not
806 : obviously incomplete or invalid, we validate the FEC set itself. */
807 :
808 3 : fd_shred_t const * base_data_shred = fd_shred_parse( ctx->set->data_shreds[0], FD_SHRED_MIN_SZ );
809 3 : int reject = (!base_data_shred);
810 :
811 96 : for( ulong i=1UL; (!reject) & (i<=idx_in_set); i++ ) {
812 :
813 : /* Casting is safe because these data shreds must all have been received
814 : from the network and not recovered. */
815 :
816 93 : fd_shred_t const * parsed = (fd_shred_t const *)fd_type_pun_const( ctx->set->data_shreds[ i ] );
817 93 : if( FD_UNLIKELY( !parsed ) ) { reject = 1; break; }
818 93 : reject |= parsed->variant != base_data_shred->variant;
819 93 : reject |= parsed->slot != base_data_shred->slot;
820 93 : reject |= parsed->version != base_data_shred->version;
821 93 : reject |= parsed->fec_set_idx != base_data_shred->fec_set_idx;
822 93 : reject |= parsed->data.parent_off != base_data_shred->data.parent_off;
823 :
824 93 : reject |= fd_shred_is_chained( fd_shred_type( parsed->variant ) ) &&
825 93 : !fd_memeq( (uchar *)parsed +fd_shred_chain_off( parsed->variant ),
826 0 : (uchar *)base_data_shred+fd_shred_chain_off( base_data_shred->variant ), FD_SHRED_MERKLE_ROOT_SZ );
827 93 : }
828 :
829 : /* Populate correct shred cnts for post-completion processing. These
830 : are normally populated by the parity shred header, but in the case
831 : of force_complete, a parity shred was never received. */
832 :
833 3 : ctx->set->data_shred_cnt = idx_in_set + 1UL;
834 3 : ctx->set->parity_shred_cnt = 0UL;
835 :
836 : /* Reject FEC set if it didn't validate and return mem to the pools */
837 3 : set_ctx_t * done_ll_sentinel = resolver->done_ll_sentinel;
838 3 : set_ctx_t * curr_map = resolver->curr_map;
839 3 : set_ctx_t * done_map = resolver->done_map;
840 3 : ulong done_depth = resolver->done_depth;
841 :
842 3 : if( FD_UNLIKELY( reject ) ) {
843 0 : freelist_push_tail( resolver->free_list, ctx->set );
844 0 : bmtrlist_push_tail( resolver->bmtree_free_list, ctx->tree );
845 0 : ctx_map_remove( curr_map, ctx_ll_remove( ctx ));
846 0 : FD_MCNT_INC( SHRED, FEC_REJECTED_FATAL, 1UL );
847 0 : return FD_FEC_RESOLVER_SHRED_REJECTED;
848 0 : }
849 :
850 : /* Copy out the merkle root. */
851 :
852 3 : if( FD_LIKELY( out_merkle_root) ) memcpy( out_merkle_root, ctx->root.hash, sizeof(fd_bmtree_node_t) );
853 :
854 : /* No need to populate Merkle proofs or retransmitter signatures:
855 : every data shred in this set was received, none were reconstructed. */
856 :
857 3 : fd_fec_set_t * set = ctx->set;
858 3 : fd_bmtree_commit_t * tree = ctx->tree;
859 :
860 3 : ctx_ll_insert( done_ll_sentinel, ctx_map_insert( done_map, ctx->sig ) );
861 3 : if( FD_UNLIKELY( ctx_map_key_cnt( done_map ) > done_depth ) ) ctx_map_remove( done_map, ctx_ll_remove( done_ll_sentinel->prev ) );
862 3 : ctx_map_remove( curr_map, ctx_ll_remove( ctx ) );
863 :
864 3 : bmtrlist_push_tail( resolver->bmtree_free_list, tree );
865 3 : freelist_push_tail( resolver->complete_list, set );
866 3 : freelist_push_tail( resolver->free_list, freelist_pop_head( resolver->complete_list ) );
867 :
868 3 : *out_fec_set = set;
869 :
870 3 : return FD_FEC_RESOLVER_SHRED_COMPLETES;
871 3 : }
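/* Illustrative sketch (example_force is hypothetical; the caller --
   e.g. a repair path -- must already know that last_shred is the final
   data shred of an FEC set for which no parity shreds will arrive): */
#if 0
static int
example_force( fd_fec_resolver_t * resolver, fd_shred_t const * last_shred ) {
  fd_fec_set_t const * out_set = NULL;
  fd_bmtree_node_t     merkle_root[1];
  int rv = fd_fec_resolver_force_complete( resolver, last_shred, &out_set, merkle_root );
  if( rv==FD_FEC_RESOLVER_SHRED_COMPLETES ) {
    /* out_set->data_shred_cnt data shreds are complete and validated;
       out_set->parity_shred_cnt is 0 since no parity shred was seen */
  }
  return rv;
}
#endif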
872 :
873 114 : void * fd_fec_resolver_leave( fd_fec_resolver_t * resolver ) {
874 114 : fd_sha512_leave( resolver->sha512 );
875 114 : bmtrlist_leave ( resolver->bmtree_free_list );
876 114 : freelist_leave ( resolver->complete_list );
877 114 : freelist_leave ( resolver->free_list );
878 114 : ctx_map_leave ( resolver->done_map );
879 114 : ctx_map_leave ( resolver->curr_map );
880 :
881 114 : return (void *)resolver;
882 114 : }
883 :
884 114 : void * fd_fec_resolver_delete( void * shmem ) {
885 114 : fd_fec_resolver_t * resolver = (fd_fec_resolver_t *)shmem;
886 114 : ulong depth = resolver->depth;
887 114 : ulong partial_depth = resolver->partial_depth;
888 114 : ulong complete_depth = resolver->complete_depth;
889 114 : ulong done_depth = resolver->done_depth;
890 :
891 114 : int lg_curr_map_cnt = fd_ulong_find_msb( depth + 1UL ) + 2;
892 114 : int lg_done_map_cnt = fd_ulong_find_msb( done_depth + 1UL ) + 2;
893 :
894 114 : FD_SCRATCH_ALLOC_INIT( l, shmem );
895 114 : /* self */ FD_SCRATCH_ALLOC_APPEND( l, FD_FEC_RESOLVER_ALIGN, sizeof(fd_fec_resolver_t) );
896 114 : void * curr = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_curr_map_cnt ) );
897 114 : void * done = FD_SCRATCH_ALLOC_APPEND( l, ctx_map_align(), ctx_map_footprint( lg_done_map_cnt ) );
898 114 : void * free = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( depth+partial_depth+1UL ) );
899 114 : void * cmplst = FD_SCRATCH_ALLOC_APPEND( l, freelist_align(), freelist_footprint( complete_depth+1UL ) );
900 114 : void * bmfree = FD_SCRATCH_ALLOC_APPEND( l, bmtrlist_align(), bmtrlist_footprint( depth+1UL ) );
901 114 : FD_SCRATCH_ALLOC_FINI( l, FD_FEC_RESOLVER_ALIGN );
902 :
903 114 : fd_sha512_delete( resolver->sha512 );
904 114 : bmtrlist_delete ( bmfree );
905 114 : freelist_delete ( cmplst );
906 114 : freelist_delete ( free );
907 114 : ctx_map_delete ( done );
908 114 : ctx_map_delete ( curr );
909 :
910 114 : return shmem;
911 114 : }
|