Line data Source code
1 : #include "../fd_choreo_base.h"
2 : #include "fd_eqvoc.h"
3 : #include "../../ballet/shred/fd_shred.h"
4 : #include "../../disco/shred/fd_fec_set.h"
5 : /* fd_eqvoc maintains four bounded maps:
6 :
7 : dup_map (capacity dup_max): maps slot -> equivocation result,
8 : recording slots we've already verified are duplicates
9 : (equivocations). LRU-evicts when at capacity: querying an
10 : entry moves it to the tail of the recency list; when an
11 : insert is needed and the map is full, the head
12 : (least-recently-used) entry is evicted.
13 :
14 : fec_map (capacity fec_max): maps (slot, fec_set_idx) -> shred,
15 : storing the first shred seen in each FEC set so it can be
16 : compared against later siblings for equivocation. Same LRU
17 : eviction policy as dup_map. Entries are also explicitly
18 : removed when equivocation is confirmed (proof constructed).
19 :
20 : prf_map (capacity per_vtr_max * vtr_max): maps (slot, voter_pubkey)
21 : -> in-progress proof ("chunks") assembly state, tracking
22 : proofs per voter per slot. Entries are LRU-evicted per
23 : voter when that voter's in-progress proof count reaches
24 : per_vtr_max. Entries are also removed when proof assembly
25 : completes (all chunks received), regardless of verification
26 : outcome, or when the corresponding voter is removed from
27 : vtr_map.
28 :
29 : vtr_map (capacity vtr_max): maps voter pubkey -> per-voter proof-
30 : assembly state. vtr entries are not evicted automatically;
31 : they are explicitly inserted and removed by
32 : fd_eqvoc_update_voters when the epoch stake set changes.
33 : Each vtr has a pre-allocated prf_dlist that tracks
34 : in-progress proofs for that voter. Each voter's in-progress
35 : proof count is bounded by per_vtr_max; when that limit is
36 : reached, the oldest proof for that voter is evicted.
37 :
38 : dup_map fec_map
39 : map[0] +--------------------+ map[0] +--------------------+
40 : | (dup_t) { | | (fec_t) { |
41 : | .slot = 1, | | .key = 5|0, |
42 : | ... | | ... |
43 : | } | | } |
44 : map[1] +--------------------+ map[1] +--------------------+
45 : | (dup_t) { | | (fec_t) { |
46 : | .slot = 2, | | .key = 6|0, |
47 : | ... | | ... |
48 : | } | | } |
49 : +--------------------+ +--------------------+
50 :
51 : vtr_map prf_map
52 : map[0] +--------------------+ map[0] +--------------------+
53 : | (vtr_t) { | | (prf_t) { |
54 : | .from = X, | | .key.slot = 5, |
55 : | ... | | .key.from = Y, |<-----+
56 : | ... | | ... | |
57 : | } | | } | |
58 : map[1] +--------------------+ map[1] +--------------------+ |
59 : | (vtr_t) { | | (prf_t) { | |
60 : | .from = Y, | | .key.slot = 6, | |
61 : | ... | | .key.from = Y, |<--+ |
62 : | .prf_dlist = + | | ... | | |
63 : | } | | | } | | |
64 : +----------------|---+ +--------------------+ | |
65 : | | |
66 : | | |
67 : | +------------------------+ |
68 : | | |
69 : V | +------------------+
70 : prf_dlist | |
71 : +---------+---------+---------+
72 : | (prf_t) | (prf_t) | (prf_t) |
73 : | ... | ... | ... |
74 : | } | } | } |
75 : +---------+---------+---------+
76 : oldest newest
77 :
78 : Each vtr_t owns a prf_dlist of in-progress proofs (prf_t).
79 : prf_t elements are also in the global prf_map for lookup by
80 : (slot, from). When prf_dlist_cnt == per_vtr_max, the oldest
81 : prf is evicted from both prf_dlist and prf_map. */
82 :
/* dup_t is an element of dup_map: it records that `slot` has been
   verified to contain equivocating (duplicate) shreds.  All links are
   intrusive: `next` chains the pool free list, `map` chains the
   map_chain bucket, and `dlist` chains the LRU recency list (head =
   least recently used, evicted first). */
typedef struct {
  ulong slot;   /* map key: slot verified to equivocate */
  ulong next;   /* pool next */
  struct {
    ulong prev;
    ulong next;
  } map;        /* map_chain links */
  struct {
    ulong prev;
    ulong next;
  } dlist;      /* LRU recency list links */
  int err; /* positive error code (FD_EQVOC_SUCCESS_{...}) if inserted */
} dup_t;

/* Pool, chained map and LRU dlist instantiations over dup_t. */

#define POOL_NAME dup_pool
#define POOL_T dup_t
#include "../../util/tmpl/fd_pool.c"

#define MAP_NAME dup_map
#define MAP_ELE_T dup_t
#define MAP_KEY slot
#define MAP_PREV map.prev
#define MAP_NEXT map.next
#define MAP_OPTIMIZE_RANDOM_ACCESS_REMOVAL 1
#include "../../util/tmpl/fd_map_chain.c"

#define DLIST_NAME dup_dlist
#define DLIST_ELE_T dup_t
#define DLIST_PREV dlist.prev
#define DLIST_NEXT dlist.next
#include "../../util/tmpl/fd_dlist.c"
114 :
/* fec_t is an element of fec_map: it stores the first shred observed
   for a given (slot, fec_set_idx) so later siblings in the same FEC
   set can be compared against it for equivocation.  Links mirror
   dup_t: pool free list, map_chain bucket, and LRU recency dlist. */
typedef struct {
  ulong key;  /* 32 msb = slot | 32 lsb = fec_set_idx */
  ulong next; /* pool next */
  struct {
    ulong prev;
    ulong next;
  } map;      /* map_chain links */
  struct {
    ulong prev;
    ulong next;
  } dlist;    /* LRU recency list links */
  union {
    fd_shred_t shred;
    uchar bytes[FD_SHRED_MAX_SZ]; /* entire shred, both header and payload */
  };
} fec_t;

/* Pool, chained map and LRU dlist instantiations over fec_t. */

#define POOL_NAME fec_pool
#define POOL_T fec_t
#include "../../util/tmpl/fd_pool.c"

#define MAP_NAME fec_map
#define MAP_ELE_T fec_t
#define MAP_PREV map.prev
#define MAP_NEXT map.next
#define MAP_OPTIMIZE_RANDOM_ACCESS_REMOVAL 1
#include "../../util/tmpl/fd_map_chain.c"

#define DLIST_NAME fec_dlist
#define DLIST_ELE_T fec_t
#define DLIST_PREV dlist.prev
#define DLIST_NEXT dlist.next
#include "../../util/tmpl/fd_dlist.c"
148 :
/* xid_t is the composite key of prf_map: the (slot, sender pubkey)
   pair identifying one in-progress DuplicateShred proof. */
typedef struct {
  ulong slot;
  fd_pubkey_t from;
} xid_t;

/* prf_t tracks the in-progress reassembly of one DuplicateShred proof
   gossiped in FD_EQVOC_CHUNK_CNT chunks.  `buf` accumulates the
   serialized proof ([ shred1_sz | shred1 | shred2_sz | shred2 ], see
   construct_proof), hence its sizing of two max shreds plus two
   length prefixes.  An element lives in the global prf_map and in the
   owning voter's per-voter prf_dlist simultaneously. */
struct prf {
  xid_t key;
  ulong next; /* pool next */
  struct {
    ulong prev;
    ulong next;
  } map;      /* map_chain links */
  struct {
    ulong prev;
    ulong next;
  } dlist;    /* links in the owning vtr's prf_dlist (oldest at head) */
  uchar idxs; /* [0, 7]. bit vec encoding which of the chunk idxs have been received (at most FD_EQVOC_CHUNK_CNT = 3). */
  ulong buf_sz;
  uchar buf[2 * FD_SHRED_MAX_SZ + 2 * sizeof(ulong)];
};
typedef struct prf prf_t;

/* Pool, chained map and dlist instantiations over prf_t.  The map is
   keyed by xid_t; equality compares slot and full pubkey, while the
   hash mixes slot with only the first 8 bytes of the pubkey (a full
   compare still disambiguates collisions). */

#define POOL_NAME prf_pool
#define POOL_T prf_t
#include "../../util/tmpl/fd_pool.c"

#define MAP_NAME prf_map
#define MAP_ELE_T prf_t
#define MAP_KEY_T xid_t
#define MAP_PREV map.prev
#define MAP_NEXT map.next
#define MAP_KEY_EQ(k0,k1) ((((k0)->slot)==((k1)->slot)) & !(memcmp(((k0)->from.uc),((k1)->from.uc),sizeof(fd_pubkey_t))))
#define MAP_KEY_HASH(key,seed) fd_ulong_hash( ((key)->slot) ^ ((key)->from.ul[0]) ^ (seed) )
#define MAP_OPTIMIZE_RANDOM_ACCESS_REMOVAL 1
#include "../../util/tmpl/fd_map_chain.c"

#define DLIST_NAME prf_dlist
#define DLIST_ELE_T prf_t
#define DLIST_PREV dlist.prev
#define DLIST_NEXT dlist.next
#include "../../util/tmpl/fd_dlist.c"
190 :
/* vtr_t is an element of vtr_map: per-voter proof-assembly state.
   Each voter owns a pre-allocated prf_dlist of its in-progress proofs
   (oldest at head); prf_dlist_cnt is bounded by per_vtr_max via
   eviction in prf_insert. */
struct vtr {
  fd_pubkey_t from; /* map key: voter pubkey */
  ulong next; /* pool next; reused as kept flag during update_voters */
  struct {
    ulong prev;
    ulong next;
  } map;      /* map_chain links */
  struct {
    ulong prev;
    ulong next;
  } dlist;    /* vtr_dlist links */
  ulong prf_dlist_cnt;        /* # in-progress proofs for this voter */
  prf_dlist_t * prf_dlist;    /* this voter's in-progress proofs, oldest first */
};
typedef struct vtr vtr_t;

/* Pool, chained map and dlist instantiations over vtr_t.  The hash
   uses only the second 8 bytes of the pubkey; MAP_KEY_EQ compares the
   full 32 bytes. */

#define POOL_NAME vtr_pool
#define POOL_T vtr_t
#include "../../util/tmpl/fd_pool.c"

#define MAP_NAME vtr_map
#define MAP_ELE_T vtr_t
#define MAP_KEY_T fd_pubkey_t
#define MAP_KEY from
#define MAP_PREV map.prev
#define MAP_NEXT map.next
#define MAP_KEY_EQ(k0,k1) (!memcmp((k0)->key,(k1)->key,sizeof(fd_pubkey_t)))
#define MAP_KEY_HASH(key,seed) ((ulong)((key)->ul[1]^(seed)))
#define MAP_OPTIMIZE_RANDOM_ACCESS_REMOVAL 1
#include "../../util/tmpl/fd_map_chain.c"

#define DLIST_NAME vtr_dlist
#define DLIST_ELE_T vtr_t
#define DLIST_PREV dlist.prev
#define DLIST_NEXT dlist.next
#include "../../util/tmpl/fd_dlist.c"
227 :
/* fd_eqvoc_t is the top-level equivocation detector.  All pointers in
   the "owned" section point into the single contiguous memory region
   laid out by fd_eqvoc_new (see fd_eqvoc_footprint). */
struct fd_eqvoc {

  /* copy (sizing parameters captured at fd_eqvoc_new, after rounding
     up to powers of two) */

  ulong dup_max;
  ulong fec_max;
  ulong per_vtr_max;
  ulong vtr_max;

  /* owned */

  fd_sha512_t * sha512;      /* scratch for sig-verify */
  void *        bmtree_mem;  /* scratch for merkle root derivation */
  dup_t *       dup_pool;
  dup_map_t *   dup_map;
  dup_dlist_t * dup_dlist;   /* dup LRU recency list */
  fec_t *       fec_pool;
  fec_map_t *   fec_map;
  fec_dlist_t * fec_dlist;   /* fec LRU recency list */
  prf_t *       prf_pool;
  prf_map_t *   prf_map;
  vtr_t *       vtr_pool;
  vtr_map_t *   vtr_map;
  vtr_dlist_t * vtr_dlist;
};
typedef struct fd_eqvoc fd_eqvoc_t;
254 :
255 : ulong
256 273 : fd_eqvoc_align( void ) {
257 273 : return 128UL;
258 273 : }
259 :
260 : ulong
261 : fd_eqvoc_footprint( ulong dup_max,
262 : ulong fec_max,
263 : ulong per_vtr_max,
264 63 : ulong vtr_max ) {
265 :
266 63 : dup_max = fd_ulong_pow2_up( dup_max );
267 63 : fec_max = fd_ulong_pow2_up( fec_max );
268 63 : per_vtr_max = fd_ulong_pow2_up( per_vtr_max );
269 63 : vtr_max = fd_ulong_pow2_up( vtr_max );
270 63 : ulong prf_max = per_vtr_max * vtr_max;
271 :
272 63 : ulong l = FD_LAYOUT_INIT;
273 63 : l = FD_LAYOUT_APPEND( l, alignof(fd_eqvoc_t), sizeof(fd_eqvoc_t) );
274 63 : l = FD_LAYOUT_APPEND( l, fd_sha512_align(), fd_sha512_footprint() );
275 63 : l = FD_LAYOUT_APPEND( l, FD_BMTREE_COMMIT_ALIGN, FD_BMTREE_COMMIT_FOOTPRINT( FD_SHRED_MERKLE_LAYER_CNT ) );
276 63 : l = FD_LAYOUT_APPEND( l, dup_pool_align(), dup_pool_footprint( dup_max ) );
277 63 : l = FD_LAYOUT_APPEND( l, dup_map_align(), dup_map_footprint( dup_map_chain_cnt_est( dup_max ) ) );
278 63 : l = FD_LAYOUT_APPEND( l, dup_dlist_align(), dup_dlist_footprint() );
279 63 : l = FD_LAYOUT_APPEND( l, fec_pool_align(), fec_pool_footprint( fec_max ) );
280 63 : l = FD_LAYOUT_APPEND( l, fec_map_align(), fec_map_footprint( fec_map_chain_cnt_est( fec_max ) ) );
281 63 : l = FD_LAYOUT_APPEND( l, fec_dlist_align(), fec_dlist_footprint() );
282 63 : l = FD_LAYOUT_APPEND( l, prf_pool_align(), prf_pool_footprint( prf_max ) );
283 63 : l = FD_LAYOUT_APPEND( l, prf_map_align(), prf_map_footprint( prf_map_chain_cnt_est( prf_max ) ) );
284 63 : l = FD_LAYOUT_APPEND( l, vtr_pool_align(), vtr_pool_footprint( vtr_max ) );
285 63 : l = FD_LAYOUT_APPEND( l, vtr_map_align(), vtr_map_footprint( vtr_map_chain_cnt_est( vtr_max ) ) );
286 63 : l = FD_LAYOUT_APPEND( l, vtr_dlist_align(), vtr_dlist_footprint() );
287 129087 : for( ulong i = 0UL; i < vtr_max; i++ ) {
288 129024 : l = FD_LAYOUT_APPEND( l, prf_dlist_align(), prf_dlist_footprint() );
289 129024 : }
290 63 : return FD_LAYOUT_FINI( l, fd_eqvoc_align() );
291 63 : }
292 :
293 : void *
294 : fd_eqvoc_new( void * shmem,
295 : ulong dup_max,
296 : ulong fec_max,
297 : ulong per_vtr_max,
298 : ulong vtr_max,
299 21 : ulong seed ) {
300 :
301 21 : if( FD_UNLIKELY( !shmem ) ) {
302 0 : FD_LOG_WARNING(( "NULL mem" ));
303 0 : return NULL;
304 0 : }
305 :
306 21 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)shmem, fd_eqvoc_align() ) ) ) {
307 0 : FD_LOG_WARNING(( "misaligned mem" ));
308 0 : return NULL;
309 0 : }
310 :
311 21 : ulong footprint = fd_eqvoc_footprint( dup_max, fec_max, per_vtr_max, vtr_max );
312 21 : if( FD_UNLIKELY( !footprint ) ) {
313 0 : FD_LOG_WARNING(( "bad dup_max (%lu), fec_max (%lu), or vtr_max (%lu)", dup_max, fec_max, vtr_max ));
314 0 : return NULL;
315 0 : }
316 :
317 21 : dup_max = fd_ulong_pow2_up( dup_max );
318 21 : fec_max = fd_ulong_pow2_up( fec_max );
319 21 : per_vtr_max = fd_ulong_pow2_up( per_vtr_max );
320 21 : vtr_max = fd_ulong_pow2_up( vtr_max );
321 21 : ulong prf_max = per_vtr_max * vtr_max;
322 :
323 21 : FD_SCRATCH_ALLOC_INIT( l, shmem );
324 21 : void * eqvoc_mem = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_eqvoc_t), sizeof(fd_eqvoc_t) );
325 21 : void * sha512 = FD_SCRATCH_ALLOC_APPEND( l, fd_sha512_align(), fd_sha512_footprint() );
326 21 : void * bmtree_mem = FD_SCRATCH_ALLOC_APPEND( l, FD_BMTREE_COMMIT_ALIGN, FD_BMTREE_COMMIT_FOOTPRINT( FD_SHRED_MERKLE_LAYER_CNT ) );
327 21 : void * dup_pool = FD_SCRATCH_ALLOC_APPEND( l, dup_pool_align(), dup_pool_footprint( dup_max ) );
328 21 : void * dup_map = FD_SCRATCH_ALLOC_APPEND( l, dup_map_align(), dup_map_footprint( dup_map_chain_cnt_est( dup_max ) ) );
329 21 : void * dup_dlist = FD_SCRATCH_ALLOC_APPEND( l, dup_dlist_align(), dup_dlist_footprint() );
330 21 : void * fec_pool = FD_SCRATCH_ALLOC_APPEND( l, fec_pool_align(), fec_pool_footprint( fec_max ) );
331 21 : void * fec_map = FD_SCRATCH_ALLOC_APPEND( l, fec_map_align(), fec_map_footprint( fec_map_chain_cnt_est( fec_max ) ) );
332 21 : void * fec_dlist = FD_SCRATCH_ALLOC_APPEND( l, fec_dlist_align(), fec_dlist_footprint() );
333 21 : void * prf_pool = FD_SCRATCH_ALLOC_APPEND( l, prf_pool_align(), prf_pool_footprint( prf_max ) );
334 21 : void * prf_map = FD_SCRATCH_ALLOC_APPEND( l, prf_map_align(), prf_map_footprint( prf_map_chain_cnt_est( prf_max ) ) );
335 21 : void * vtr_pool = FD_SCRATCH_ALLOC_APPEND( l, vtr_pool_align(), vtr_pool_footprint( vtr_max ) );
336 21 : void * vtr_map = FD_SCRATCH_ALLOC_APPEND( l, vtr_map_align(), vtr_map_footprint( vtr_map_chain_cnt_est( vtr_max ) ) );
337 21 : void * vtr_dlist = FD_SCRATCH_ALLOC_APPEND( l, vtr_dlist_align(), vtr_dlist_footprint() );
338 :
339 0 : fd_eqvoc_t * eqvoc = (fd_eqvoc_t *)eqvoc_mem;
340 21 : eqvoc->dup_max = dup_max;
341 21 : eqvoc->fec_max = fec_max;
342 21 : eqvoc->per_vtr_max = per_vtr_max;
343 21 : eqvoc->vtr_max = vtr_max;
344 :
345 21 : eqvoc->sha512 = fd_sha512_new( sha512 );
346 21 : eqvoc->bmtree_mem = bmtree_mem;
347 21 : eqvoc->dup_pool = dup_pool_new ( dup_pool, dup_max );
348 21 : eqvoc->dup_map = dup_map_new ( dup_map, dup_map_chain_cnt_est( dup_max ), seed );
349 21 : eqvoc->dup_dlist = dup_dlist_new( dup_dlist );
350 21 : eqvoc->fec_pool = fec_pool_new ( fec_pool, fec_max );
351 21 : eqvoc->fec_map = fec_map_new ( fec_map, fec_map_chain_cnt_est( fec_max ), seed );
352 21 : eqvoc->fec_dlist = fec_dlist_new( fec_dlist );
353 21 : eqvoc->prf_pool = prf_pool_new ( prf_pool, prf_max );
354 21 : eqvoc->prf_map = prf_map_new ( prf_map, prf_map_chain_cnt_est( prf_max ), seed );
355 21 : eqvoc->vtr_pool = vtr_pool_new ( vtr_pool, vtr_max );
356 21 : eqvoc->vtr_map = vtr_map_new ( vtr_map, vtr_map_chain_cnt_est( vtr_max ), seed );
357 21 : eqvoc->vtr_dlist = vtr_dlist_new( vtr_dlist );
358 :
359 21 : vtr_t * pool_join = vtr_pool_join( eqvoc->vtr_pool );
360 43029 : for( ulong i = 0UL; i < vtr_max; i++ ) {
361 43008 : void * prf_dlist = FD_SCRATCH_ALLOC_APPEND( l, prf_dlist_align(), prf_dlist_footprint() );
362 0 : pool_join[i].prf_dlist_cnt = 0;
363 43008 : pool_join[i].prf_dlist = prf_dlist_new( prf_dlist );
364 43008 : }
365 21 : FD_TEST( FD_SCRATCH_ALLOC_FINI( l, fd_eqvoc_align() )==(ulong)shmem + footprint );
366 :
367 21 : return shmem;
368 21 : }
369 :
370 : fd_eqvoc_t *
371 21 : fd_eqvoc_join( void * sheqvoc ) {
372 :
373 21 : if( FD_UNLIKELY( !sheqvoc ) ) {
374 0 : FD_LOG_WARNING(( "NULL eqvoc" ));
375 0 : return NULL;
376 0 : }
377 :
378 21 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)sheqvoc, fd_eqvoc_align() ) ) ) {
379 0 : FD_LOG_WARNING(( "misaligned eqvoc" ));
380 0 : return NULL;
381 0 : }
382 :
383 21 : fd_eqvoc_t * eqvoc = (fd_eqvoc_t *)sheqvoc;
384 21 : eqvoc->sha512 = fd_sha512_join( eqvoc->sha512 );
385 : /* bmtree */
386 21 : eqvoc->dup_pool = dup_pool_join ( eqvoc->dup_pool );
387 21 : eqvoc->dup_map = dup_map_join ( eqvoc->dup_map );
388 21 : eqvoc->dup_dlist = dup_dlist_join( eqvoc->dup_dlist );
389 21 : eqvoc->fec_pool = fec_pool_join ( eqvoc->fec_pool );
390 21 : eqvoc->fec_map = fec_map_join ( eqvoc->fec_map );
391 21 : eqvoc->fec_dlist = fec_dlist_join( eqvoc->fec_dlist );
392 21 : eqvoc->prf_pool = prf_pool_join ( eqvoc->prf_pool );
393 21 : eqvoc->prf_map = prf_map_join ( eqvoc->prf_map );
394 21 : eqvoc->vtr_pool = vtr_pool_join ( eqvoc->vtr_pool );
395 21 : eqvoc->vtr_map = vtr_map_join ( eqvoc->vtr_map );
396 21 : eqvoc->vtr_dlist = vtr_dlist_join( eqvoc->vtr_dlist );
397 43029 : for( ulong i = 0UL; i < eqvoc->vtr_max; i++ ) {
398 43008 : eqvoc->vtr_pool[i].prf_dlist = prf_dlist_join( eqvoc->vtr_pool[i].prf_dlist );
399 43008 : }
400 :
401 21 : return (fd_eqvoc_t *)sheqvoc;
402 21 : }
403 :
404 : void *
405 0 : fd_eqvoc_leave( fd_eqvoc_t const * eqvoc ) {
406 :
407 0 : if( FD_UNLIKELY( !eqvoc ) ) {
408 0 : FD_LOG_WARNING(( "NULL eqvoc" ));
409 0 : return NULL;
410 0 : }
411 :
412 0 : return (void *)eqvoc;
413 0 : }
414 :
415 : void *
416 0 : fd_eqvoc_delete( void * eqvoc ) {
417 :
418 0 : if( FD_UNLIKELY( !eqvoc ) ) {
419 0 : FD_LOG_WARNING(( "NULL eqvoc" ));
420 0 : return NULL;
421 0 : }
422 :
423 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)eqvoc, fd_eqvoc_align() ) ) ) {
424 0 : FD_LOG_WARNING(( "misaligned eqvoc" ));
425 0 : return NULL;
426 0 : }
427 :
428 0 : return eqvoc;
429 0 : }
430 :
431 : static dup_t *
432 : dup_query( fd_eqvoc_t * eqvoc,
433 0 : ulong slot ) {
434 0 : dup_t * dup = dup_map_ele_query( eqvoc->dup_map, &slot, NULL, eqvoc->dup_pool );
435 0 : if( FD_LIKELY( dup ) ) {
436 0 : dup_dlist_ele_remove( eqvoc->dup_dlist, dup, eqvoc->dup_pool );
437 0 : dup_dlist_ele_push_tail( eqvoc->dup_dlist, dup, eqvoc->dup_pool );
438 0 : }
439 0 : return dup;
440 0 : }
441 :
442 : static fec_t *
443 : fec_query( fd_eqvoc_t * eqvoc,
444 : ulong slot,
445 0 : ulong fec_set_idx ) {
446 0 : ulong key = slot << 32 | fec_set_idx;
447 0 : fec_t * fec = fec_map_ele_query( eqvoc->fec_map, &key, NULL, eqvoc->fec_pool );
448 0 : if( FD_LIKELY( fec ) ) {
449 0 : fec_dlist_ele_remove( eqvoc->fec_dlist, fec, eqvoc->fec_pool );
450 0 : fec_dlist_ele_push_tail( eqvoc->fec_dlist, fec, eqvoc->fec_pool );
451 0 : }
452 0 : return fec;
453 0 : }
454 :
455 : static prf_t *
456 : prf_query( fd_eqvoc_t * eqvoc,
457 : vtr_t * vtr,
458 0 : ulong slot ) {
459 0 : xid_t key = { .slot = slot, .from = vtr->from };
460 0 : prf_t * prf = prf_map_ele_query( eqvoc->prf_map, &key, NULL, eqvoc->prf_pool );
461 0 : if( FD_LIKELY( prf ) ) {
462 0 : prf_dlist_ele_remove( vtr->prf_dlist, prf, eqvoc->prf_pool );
463 0 : prf_dlist_ele_push_tail( vtr->prf_dlist, prf, eqvoc->prf_pool );
464 0 : }
465 0 : return prf;
466 0 : }
467 :
468 : static dup_t *
469 : dup_insert( fd_eqvoc_t * eqvoc,
470 0 : ulong slot ) {
471 :
472 : /* FIFO evict if full. Invariant: iff in dlist then in map / pool. */
473 :
474 0 : if( FD_UNLIKELY( !dup_pool_free( eqvoc->dup_pool ) ) ) {
475 0 : dup_t * dup = dup_dlist_ele_pop_head( eqvoc->dup_dlist, eqvoc->dup_pool );
476 0 : dup_map_ele_remove_fast( eqvoc->dup_map, dup, eqvoc->dup_pool );
477 0 : dup_pool_ele_release( eqvoc->dup_pool, dup );
478 0 : }
479 :
480 : /* Insert. Invariant: pool free => map / dlist free. */
481 :
482 0 : dup_t * dup = dup_pool_ele_acquire( eqvoc->dup_pool );
483 0 : dup->slot = slot;
484 0 : dup_map_ele_insert( eqvoc->dup_map, dup, eqvoc->dup_pool );
485 0 : dup_dlist_ele_push_tail( eqvoc->dup_dlist, dup, eqvoc->dup_pool );
486 0 : return dup;
487 0 : }
488 :
489 : static fec_t *
490 : fec_insert( fd_eqvoc_t * eqvoc,
491 : ulong slot,
492 0 : uint fec_set_idx ) {
493 :
494 0 : ulong key = slot << 32 | fec_set_idx;
495 :
496 : /* FIFO evict if full. Invariant: iff in dlist then in map / pool. */
497 :
498 0 : if( FD_UNLIKELY( !fec_pool_free( eqvoc->fec_pool ) ) ) {
499 0 : fec_t * fec = fec_dlist_ele_pop_head( eqvoc->fec_dlist, eqvoc->fec_pool );
500 0 : fec_map_ele_remove_fast( eqvoc->fec_map, fec, eqvoc->fec_pool );
501 0 : fec_pool_ele_release( eqvoc->fec_pool, fec );
502 0 : }
503 :
504 : /* Insert. Invariant: pool free => map / dlist free. */
505 :
506 0 : fec_t * fec = fec_pool_ele_acquire( eqvoc->fec_pool );
507 0 : fec->key = key;
508 0 : fec_map_ele_insert( eqvoc->fec_map, fec, eqvoc->fec_pool );
509 0 : fec_dlist_ele_push_tail( eqvoc->fec_dlist, fec, eqvoc->fec_pool );
510 0 : return fec;
511 0 : }
512 :
513 : static prf_t *
514 : prf_insert( fd_eqvoc_t * eqvoc,
515 : ulong slot,
516 0 : fd_pubkey_t const * from ) {
517 :
518 0 : vtr_t * vtr = vtr_map_ele_query( eqvoc->vtr_map, from, NULL, eqvoc->vtr_pool );
519 0 : FD_TEST( vtr );
520 :
521 : /* Each from pubkey in gossip is limited to per_vtr_max proofs.
522 : If we receive more than per_vtr_max from one pubkey, FIFO evict.
523 : We group by pubkey to prevent a single pubkey from spamming
524 : junk proofs. */
525 :
526 0 : if( FD_UNLIKELY( vtr->prf_dlist_cnt==eqvoc->per_vtr_max ) ) {
527 0 : prf_t * evict = prf_dlist_ele_pop_head( vtr->prf_dlist, eqvoc->prf_pool );
528 0 : prf_map_ele_remove_fast( eqvoc->prf_map, evict, eqvoc->prf_pool );
529 0 : prf_pool_ele_release( eqvoc->prf_pool, evict );
530 0 : vtr->prf_dlist_cnt--;
531 0 : }
532 :
533 0 : xid_t key = { .slot = slot, .from = *from };
534 0 : prf_t * prf = prf_pool_ele_acquire( eqvoc->prf_pool );
535 0 : prf->key = key;
536 0 : prf->idxs = 0;
537 0 : prf->buf_sz = 0;
538 0 : prf_map_ele_insert( eqvoc->prf_map, prf, eqvoc->prf_pool );
539 0 : prf_dlist_ele_push_tail( vtr->prf_dlist, prf, eqvoc->prf_pool );
540 0 : vtr->prf_dlist_cnt++;
541 0 : return prf;
542 0 : }
543 :
544 : static int
545 0 : is_last_shred( fd_shred_t const * shred ) {
546 0 : return fd_shred_is_data( fd_shred_type( shred->variant ) ) && shred->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE;
547 0 : }
548 :
/* construct_proof constructs a DuplicateShred proof from shred1 and
   shred2.  Assumes shred1 and shred2 have already been verified via
   verify_proof.  On return, chunks_out will be populated with the
   serialized format of the proof:

   [ shred1_sz (8 bytes) | shred1 | shred2_sz (8 bytes) | shred2 ]

   split across FD_EQVOC_CHUNK_CNT (= 3) gossip chunks of
   FD_EQVOC_CHUNK_SZ bytes each (the last chunk may be short).

   Caller supplies `chunks_out`, which is an array that MUST contain
   FD_EQVOC_CHUNK_CNT elements.

   NOTE(review): the offset arithmetic below assumes each shred
   straddles the chunk boundaries exactly as laid out above (e.g.
   fd_shred_sz( shred1 ) >= FD_EQVOC_CHUNK_SZ - sizeof(ulong));
   otherwise shred1_rem / shred2_rem would underflow -- confirm the
   size invariants guaranteed by callers. */

static void
construct_proof( fd_shred_t const * shred1,
                 fd_shred_t const * shred2,
                 fd_gossip_duplicate_shred_t chunks_out[static FD_EQVOC_CHUNK_CNT] ) {

  /* Common gossip header fields for every chunk. */

  for (uchar i = 0; i < FD_EQVOC_CHUNK_CNT; i++ ) {
    chunks_out[i].index = i;
    chunks_out[i].slot = shred1->slot;
    chunks_out[i].num_chunks = FD_EQVOC_CHUNK_CNT;
    chunks_out[i].chunk_index = i;
  }

  ulong shred1_sz = fd_shred_sz( shred1 );
  ulong shred2_sz = fd_shred_sz( shred2 );

  /* Populate chunk0: shred1_sz prefix followed by the head of shred1. */

  FD_STORE( ulong, chunks_out[0].chunk, shred1_sz );
  memcpy( chunks_out[0].chunk + sizeof(ulong), shred1, FD_EQVOC_CHUNK_SZ - sizeof(ulong) );
  chunks_out[0].chunk_len = FD_EQVOC_CHUNK_SZ;

  /* Populate chunk1: remainder of shred1, then shred2_sz prefix, then
     the head of shred2. */

  ulong shred1_off = FD_EQVOC_CHUNK_SZ - sizeof(ulong); /* bytes of shred1 already emitted in chunk0 */
  ulong shred1_rem = shred1_sz - shred1_off;
  memcpy( chunks_out[1].chunk, (uchar *)shred1 + shred1_off, shred1_rem );
  FD_STORE( ulong, chunks_out[1].chunk + shred1_rem, shred2_sz );
  ulong chunk1_off = shred1_rem + sizeof(ulong);
  ulong chunk1_rem = FD_EQVOC_CHUNK_SZ - chunk1_off;
  memcpy( chunks_out[1].chunk + chunk1_off, shred2, chunk1_rem );
  chunks_out[1].chunk_len = FD_EQVOC_CHUNK_SZ;

  /* Populate chunk2: the remainder of shred2 (short final chunk). */

  ulong shred2_off = chunk1_rem; /* bytes of shred2 already emitted in chunk1 */
  ulong shred2_rem = shred2_sz - shred2_off;
  memcpy( chunks_out[2].chunk, (uchar *)shred2 + shred2_off, shred2_rem );
  chunks_out[2].chunk_len = shred2_rem;
}
598 :
/* verify_proof verifies that the two shreds contained in `proof` do in
   fact equivocate.  If leader_schedule is NULL, then both shreds have
   already been sig-verified to have originated from the correct leader
   for that slot.

   Returns: FD_EQVOC_SUCCESS if the shreds do not prove equivocation
            FD_EQVOC_SUCCESS_{...} if they do
            FD_EQVOC_ERR_{...} if the shreds were not valid inputs

   The implementation mirrors the Agave version very closely. See: https://github.com/anza-xyz/agave/blob/v3.1/gossip/src/duplicate_shred.rs#L137-L142

   Two shreds equivocate if they satisfy any of the following:

   1. Both shreds specify the same index and shred type, however their
      payloads differ.
   2. Both shreds specify the same FEC set, however their merkle roots
      differ.
   3. Both shreds specify the same FEC set and are coding shreds,
      however their erasure configs conflict.
   4. The shreds specify different FEC sets, the lower index shred is a
      coding shred, and its erasure meta indicates an FEC set overlap.
   5. The shreds specify different FEC sets, the lower index shred has a
      merkle root that is not equal to the chained merkle root of the
      higher index shred.
   6. The shreds are data shreds with different indices and the shred
      with the lower index has the LAST_SHRED_IN_SLOT flag set.

   Ref: https://github.com/solana-foundation/solana-improvement-documents/blob/main/proposals/0204-slashable-event-verification.md#proof-verification

   Note: two shreds are in the same FEC set if they have the same slot
   and FEC set index.

   To prevent false positives, this function also performs the following
   input validation on the shreds:

   1. shred1 and shred2 are for the same slot.
   2. shred1 and shred2 are both the expected shred_version.
   3. shred1 and shred2 are either chained merkle or chained resigned
      merkle variants.
   4. shred1 and shred2 contain valid signatures signed by the same
      producer pubkey.

   If any of the above input validation fails, this function returns
   FD_EQVOC_ERR_{...}.

   The validation does duplicate some of what's in the shred tile, but
   because this proof is sourced from gossip (which doesn't go through
   shred) we have to also do it. */

static int
verify_proof( fd_eqvoc_t const * eqvoc,
              ushort shred_version,
              fd_epoch_leaders_t const * leader_schedule,
              fd_shred_t const * shred1,
              fd_shred_t const * shred2 ) {

  /* A valid duplicate proof must have shreds for the same slot. */

  if( FD_UNLIKELY( shred1->slot != shred2->slot ) ) return FD_EQVOC_ERR_SLOT;

  /* We only process proofs for the current shred version. */

  if( FD_UNLIKELY( shred1->version != shred_version ) ) return FD_EQVOC_ERR_VERSION;
  if( FD_UNLIKELY( shred2->version != shred_version ) ) return FD_EQVOC_ERR_VERSION;

  /* Dropping non-CMR shreds has been activated on mainnet, so we ignore
     any proofs containing non-CMR shreds. Currently Agave does not have
     an equivalent check. */

  if( FD_UNLIKELY( !fd_shred_is_chained ( fd_shred_type( shred1->variant ) ) &&
                   !fd_shred_is_resigned( fd_shred_type( shred1->variant ) ) ) ) {
    return FD_EQVOC_ERR_TYPE;
  }
  if( FD_UNLIKELY( !fd_shred_is_chained ( fd_shred_type( shred2->variant ) ) &&
                   !fd_shred_is_resigned( fd_shred_type( shred2->variant ) ) ) ) {
    return FD_EQVOC_ERR_TYPE;
  }

  /* Check both shreds contain valid signatures from the assigned leader
     for that slot. This requires deriving the merkle root and
     sig-verifying it, because the leader signs the merkle root for
     merkle shreds. */

  fd_bmtree_node_t root1;
  if( FD_UNLIKELY( !fd_shred_merkle_root( shred1, eqvoc->bmtree_mem, &root1 ) ) ) return FD_EQVOC_ERR_MERKLE;

  fd_bmtree_node_t root2;
  if( FD_UNLIKELY( !fd_shred_merkle_root( shred2, eqvoc->bmtree_mem, &root2 ) ) ) return FD_EQVOC_ERR_MERKLE;

  if( FD_UNLIKELY( leader_schedule ) ) {
    fd_pubkey_t const * leader = fd_epoch_leaders_get( leader_schedule, shred1->slot );
    if( FD_UNLIKELY( !leader ) ) return FD_EQVOC_ERR_SIG;

    /* Stack-local sha512 so this path stays safe even if eqvoc->sha512
       is in use elsewhere. */

    fd_sha512_t _sha512[1];
    fd_sha512_t * sha512 = fd_sha512_join( fd_sha512_new( _sha512 ) );
    if( FD_UNLIKELY( FD_ED25519_SUCCESS != fd_ed25519_verify( root1.hash, 32UL, shred1->signature, leader->uc, sha512 ) ||
                     FD_ED25519_SUCCESS != fd_ed25519_verify( root2.hash, 32UL, shred2->signature, leader->uc, sha512 ) ) ) {
      return FD_EQVOC_ERR_SIG;
    }
  }

  /* If both are data shreds, then check if one is marked the last shred
     in the slot and the other is a higher shred idx than that one
     (condition 6 above). */

  if( FD_LIKELY( fd_shred_is_data( fd_shred_type( shred1->variant ) ) && fd_shred_is_data( fd_shred_type( shred2->variant ) ) ) ) {
    if( FD_LIKELY( ( shred1->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE && shred2->idx > shred1->idx ) ||
                   ( shred2->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE && shred1->idx > shred2->idx ) ) ) {
      return FD_EQVOC_SUCCESS_LAST;
    }
  }

  if( FD_UNLIKELY( shred1->fec_set_idx != shred2->fec_set_idx ) ) {

    /* Different FEC set index checks. Lower FEC set index shred must be a
       coding shred.

       NOTE(review): the coding-shred requirement stated above is not
       actually enforced here -- only the chained merkle root check
       (condition 5) is implemented. Confirm whether conditions 3/4 are
       intentionally deferred. */

    fd_shred_t const * lo = fd_ptr_if( shred1->fec_set_idx < shred2->fec_set_idx, shred1, shred2 );
    fd_shred_t const * hi = fd_ptr_if( shred1->fec_set_idx > shred2->fec_set_idx, shred1, shred2 );

    /* Test for conflicting chained merkle roots when shred1 and shred2
       are in adjacent FEC sets. We know the FEC sets are adjacent if
       the lower FEC set index + FD_FEC_SHRED_CNT is the higher FEC set
       index. */

    if( FD_UNLIKELY( lo->fec_set_idx+FD_FEC_SHRED_CNT == hi->fec_set_idx ) ) {
      uchar * merkle_hash = fd_ptr_if( shred1->fec_set_idx < shred2->fec_set_idx,
                                       (uchar *)root1.hash,
                                       (uchar *)root2.hash );
      uchar * chained_hash = fd_ptr_if( shred1->fec_set_idx > shred2->fec_set_idx,
                                        (uchar *)shred1 + fd_shred_chain_off( shred1->variant ),
                                        (uchar *)shred2 + fd_shred_chain_off( shred2->variant ) );
      if( FD_LIKELY( 0!=memcmp( merkle_hash, chained_hash, FD_SHRED_MERKLE_ROOT_SZ ) ) ) {
        return FD_EQVOC_SUCCESS_CHAINED;
      }
    }
    return FD_EQVOC_SUCCESS; /* these shreds in different FEC sets do not prove equivocation */
  }

  /* At this point, the two shreds are in the same FEC set. */

  /* If two shreds in the same FEC set have different merkle roots, they
     equivocate (condition 2). */

  if( FD_LIKELY( 0!=memcmp( root1.hash, root2.hash, sizeof(root1.hash)) ) ) {
    return FD_EQVOC_SUCCESS_MERKLE;
  }

  /* Remaining checks require the two shreds to be the same type. */

  if( FD_UNLIKELY( fd_shred_type( shred1->variant )!=fd_shred_type( shred2->variant ) ) ) {
    return FD_EQVOC_SUCCESS;
  }

  /* Agave does a payload comparison if two shreds have the same index,
     but it's not necessary for us to do the same because we only
     process merkle shreds (see first conditional in this function).
     You can't generate the same merkle root from different payloads for
     the same leaf in the tree. */

  if( FD_UNLIKELY( shred1->idx==shred2->idx ) ) {
    return FD_EQVOC_SUCCESS;
  }

  /* Shreds do not prove equivocation. */

  return FD_EQVOC_SUCCESS;
}
766 :
/* fd_eqvoc_shred_insert processes a turbine/repair shred: it records
   the shred for future comparison and checks it against previously
   seen shreds for equivocation.  Shreds for slots before `root` are
   ignored (FD_EQVOC_ERR_IGNORED_SLOT).  Returns FD_EQVOC_SUCCESS_{...}
   (and populates chunks_out with a serialized proof) when new
   equivocation is detected, FD_EQVOC_SUCCESS otherwise, or an
   FD_EQVOC_ERR_{...} from verify_proof on invalid input. */

int
fd_eqvoc_shred_insert( fd_eqvoc_t * eqvoc,
                       ushort shred_version,
                       ulong root,
                       fd_shred_t const * shred,
                       fd_gossip_duplicate_shred_t chunks_out[static FD_EQVOC_CHUNK_CNT] ) {

  if( FD_UNLIKELY( shred->slot < root ) ) return FD_EQVOC_ERR_IGNORED_SLOT;

  /* Short-circuit if we already know this shred equivocates. */

  ulong slot = shred->slot;
  dup_t * dup = dup_query( eqvoc, slot );
  if( FD_UNLIKELY( dup && dup->err > FD_EQVOC_SUCCESS ) ) return FD_EQVOC_SUCCESS; /* already verified this slot equivocates */

  /* For FD_EQVOC_SUCCESS_LAST we specially index a key for the last
     shred in a slot (sentinel fec_set_idx UINT_MAX). If we get two
     shreds for the same slot but different index that are both marked
     last, that is a conflict. */

  if( FD_UNLIKELY( is_last_shred( shred ) ) ) {
    fec_t * last = fec_query( eqvoc, shred->slot, UINT_MAX );
    if( FD_LIKELY( !last ) ) {
      last = fec_insert( eqvoc, slot, UINT_MAX );
      fd_memcpy( &last->shred, shred, fd_shred_sz( shred ) ); /* shred is already validated */
    }
    if( FD_UNLIKELY( shred->idx!=last->shred.idx ) ) {
      construct_proof( shred, &last->shred, chunks_out );
      dup_t * dup = dup_insert( eqvoc, slot );
      dup->err = FD_EQVOC_SUCCESS_LAST;
      return FD_EQVOC_SUCCESS_LAST;
    }
  }

  /* Every other equivocation check except FD_EQVOC_SUCCESS_LAST above
     is based on conflicts between two shreds within the same FEC set,
     so we index shreds by a composite key of 32 msb slot and 32 lsb
     fec_set_idx to compare sibling shreds in the same FEC set. */

  fec_t * fec = fec_query( eqvoc, shred->slot, shred->fec_set_idx );
  if( FD_UNLIKELY( !fec ) ) { /* no sibling yet, so nothing more to do */
    fec = fec_insert( eqvoc, shred->slot, shred->fec_set_idx );
    fd_memcpy( &fec->shred, shred, fd_shred_sz( shred ) ); /* shred is already validated */
    return FD_EQVOC_SUCCESS;
  }

  /* Verify if the shred equivocates and construct a proof if so.
     leader_schedule is NULL here because turbine/repair shreds are
     already sig-verified upstream (see verify_proof contract).  On
     confirmed equivocation, record the slot in dup_map and drop the
     fec entry (no longer needed for comparison). */

  int err = verify_proof( eqvoc, shred_version, NULL, &fec->shred, shred );
  if( FD_UNLIKELY( err>FD_EQVOC_SUCCESS ) ) {
    construct_proof( &fec->shred, shred, chunks_out );
    dup_t * dup = dup_insert( eqvoc, slot );
    dup->err = err;
    fec_dlist_ele_remove( eqvoc->fec_dlist, fec, eqvoc->fec_pool );
    fec_map_ele_remove_fast( eqvoc->fec_map, fec, eqvoc->fec_pool );
    fec_pool_ele_release( eqvoc->fec_pool, fec );
  }
  return err;
}
825 :
826 : /* fd_eqvoc_chunk_insert ingests one gossip DuplicateShred chunk from
      voter `from` and, once all FD_EQVOC_CHUNK_CNT chunks for a
      (slot, from) proof have arrived, deserializes the two embedded
      shreds and verifies the equivocation proof.

      Returns FD_EQVOC_ERR_IGNORED_SLOT / _IGNORED_FROM /
      _CHUNK_CNT / _CHUNK_IDX / _CHUNK_LEN on validation failures,
      FD_EQVOC_SUCCESS while the proof is still incomplete (or the
      slot is already a known duplicate, or the chunk is a repeat),
      FD_EQVOC_ERR_SERDE if the assembled buffer fails to parse, and
      a value > FD_EQVOC_SUCCESS when the proof verifies (chunks_out
      is then populated for re-gossip).  The in-progress prf entry is
      released once assembly completes, regardless of outcome, per the
      file-header contract. */
826 : int
827 : fd_eqvoc_chunk_insert( fd_eqvoc_t *                        eqvoc,
828 :                        ushort                              shred_version,
829 :                        ulong                               root,
830 :                        fd_epoch_leaders_t const *          leader_schedule,
831 :                        fd_pubkey_t const *                 from,
832 :                        fd_gossip_duplicate_shred_t const * chunk,
833 0 :                        fd_gossip_duplicate_shred_t chunks_out[static FD_EQVOC_CHUNK_CNT] ) {
834 :
835 0 :   if( FD_UNLIKELY( !leader_schedule || chunk->slot < root ) ) return FD_EQVOC_ERR_IGNORED_SLOT;
836 :
      /* Only accept proofs from voters tracked in vtr_map (installed
         by fd_eqvoc_update_voters from the epoch stake set). */
837 0 :   vtr_t * vtr = vtr_map_ele_query( eqvoc->vtr_map, from, NULL, eqvoc->vtr_pool );
838 0 :   if( FD_UNLIKELY( !vtr ) ) return FD_EQVOC_ERR_IGNORED_FROM;
839 :
840 0 :   if( FD_UNLIKELY( chunk->num_chunks !=FD_EQVOC_CHUNK_CNT ) ) return FD_EQVOC_ERR_CHUNK_CNT;
841 0 :   if( FD_UNLIKELY( chunk->chunk_index>=FD_EQVOC_CHUNK_CNT ) ) return FD_EQVOC_ERR_CHUNK_IDX;
842 :
      /* Chunks 0 and 1 have fixed lengths; chunk 2's length depends on
         the (code/data) type combination of the two embedded shreds. */
843 0 :   if( FD_UNLIKELY( chunk->chunk_index==0 && chunk->chunk_len!=FD_EQVOC_CHUNK0_LEN ) ) return FD_EQVOC_ERR_CHUNK_LEN;
844 0 :   if( FD_UNLIKELY( chunk->chunk_index==1 && chunk->chunk_len!=FD_EQVOC_CHUNK1_LEN ) ) return FD_EQVOC_ERR_CHUNK_LEN;
845 0 :   if( FD_UNLIKELY( chunk->chunk_index==2 && chunk->chunk_len!=FD_EQVOC_CHUNK2_LEN_CC &&
846 0 :                    chunk->chunk_len!=FD_EQVOC_CHUNK2_LEN_DD &&
847 0 :                    chunk->chunk_len!=FD_EQVOC_CHUNK2_LEN_DC &&
848 0 :                    chunk->chunk_len!=FD_EQVOC_CHUNK2_LEN_CD ) ) return FD_EQVOC_ERR_CHUNK_LEN;
849 :
850 0 :   dup_t * dup = dup_query( eqvoc, chunk->slot );
851 0 :   if( FD_UNLIKELY( dup && dup->err > FD_EQVOC_SUCCESS ) ) return FD_EQVOC_SUCCESS; /* already verified an equivocation proof for this slot */
852 :
853 0 :   prf_t * prf = prf_query( eqvoc, vtr, chunk->slot );
854 0 :   if( FD_UNLIKELY( !prf ) ) prf = prf_insert( eqvoc, chunk->slot, from );
855 0 :   if( FD_UNLIKELY( fd_uchar_extract_bit( prf->idxs, chunk->chunk_index ) ) ) return FD_EQVOC_SUCCESS; /* already processed chunk */
      /* NOTE(review): chunks are placed at fixed stride
         chunk_index*FD_EQVOC_CHUNK_SZ while buf_sz accumulates actual
         chunk_len, and the serde below reads buf linearly up to
         buf_sz.  This is only consistent if every chunk except the
         last has chunk_len == FD_EQVOC_CHUNK_SZ (i.e. CHUNK0_LEN ==
         CHUNK1_LEN == FD_EQVOC_CHUNK_SZ) — confirm against the
         constants' definitions. */
856 0 :   fd_memcpy( prf->buf + chunk->chunk_index * FD_EQVOC_CHUNK_SZ, chunk->chunk, chunk->chunk_len );
857 0 :   prf->buf_sz += chunk->chunk_len;
858 0 :   prf->idxs = fd_uchar_set_bit( prf->idxs, chunk->chunk_index );
859 0 :   if( FD_UNLIKELY( prf->idxs!=(1 << FD_EQVOC_CHUNK_CNT) - 1 ) ) return FD_EQVOC_SUCCESS; /* not all chunks received yet */
860 :
      /* All chunks received.  Deserialize the assembled buffer:
         [shred1_sz (8B)][shred1][shred2_sz (8B)][shred2], with strict
         bounds checks at every step.  Any malformed layout falls
         through to cleanup with FD_EQVOC_ERR_SERDE. */
861 0 :   int err = FD_EQVOC_ERR_SERDE; ulong off = 0;
862 :
863 0 :   if( FD_UNLIKELY( prf->buf_sz - off < sizeof(ulong) ) ) goto cleanup;
864 0 :   ulong shred1_sz = fd_ulong_load_8( prf->buf );
865 0 :   off += sizeof(ulong);
866 :
867 0 :   if( FD_UNLIKELY( prf->buf_sz - off < shred1_sz ) ) goto cleanup;
868 0 :   fd_shred_t const * shred1 = fd_shred_parse( prf->buf + off, shred1_sz );
869 0 :   if( FD_UNLIKELY( !shred1 || fd_shred_sz( shred1 )!=shred1_sz ) ) goto cleanup; /* check the sz matches parsed shred's type */
870 0 :   off += shred1_sz;
871 :
872 0 :   if( FD_UNLIKELY( prf->buf_sz - off < sizeof(ulong) ) ) goto cleanup;
873 0 :   ulong shred2_sz = fd_ulong_load_8( prf->buf + off );
874 0 :   off += sizeof(ulong);
875 :
876 0 :   if( FD_UNLIKELY( prf->buf_sz - off < shred2_sz ) ) goto cleanup;
877 0 :   fd_shred_t const * shred2 = fd_shred_parse( prf->buf + off, shred2_sz );
878 0 :   if( FD_UNLIKELY( !shred2 || fd_shred_sz( shred2 )!=shred2_sz ) ) goto cleanup; /* check the sz matches parsed shred's type */
879 0 :   off += shred2_sz;
880 :
      /* Reject trailing garbage after the second shred. */
881 0 :   if( FD_UNLIKELY( off!=prf->buf_sz ) ) goto cleanup;
882 :
883 0 :   err = verify_proof( eqvoc, shred_version, leader_schedule, shred1, shred2 );
884 0 :   if( FD_UNLIKELY( err > FD_EQVOC_SUCCESS ) ) {
885 0 :     construct_proof( shred1, shred2, chunks_out );
886 0 :     dup_t * dup = dup_insert( eqvoc, chunk->slot );
887 0 :     dup->err = err;
888 0 :   }
889 :
      /* Assembly finished (successfully or not): retire the prf entry
         — dlist unlink, map remove, pool release — and decrement the
         voter's in-progress proof count. */
890 0 :   cleanup:;
891 0 :   prf_dlist_ele_remove( vtr->prf_dlist, prf, eqvoc->prf_pool );
892 0 :   prf_map_ele_remove_fast( eqvoc->prf_map, prf, eqvoc->prf_pool );
893 0 :   prf_pool_ele_release( eqvoc->prf_pool, prf );
894 0 :   vtr->prf_dlist_cnt--;
895 0 :   return err;
896 0 : }
897 :
898 : int
899 : fd_eqvoc_query( fd_eqvoc_t * eqvoc,
900 0 : ulong slot ) {
901 0 : return !!dup_query( eqvoc, slot );
902 0 : }
903 :
904 : /* fd_eqvoc_update_voters replaces the tracked voter set with the
      `cnt` pubkeys in `id_keys` (called when the epoch stake set
      changes).  Three phases:
        1. mark every currently tracked voter for removal;
        2. append/unmark every voter in the new set at the dlist tail
           (re-linking existing voters, pool-acquiring new ones);
        3. sweep marked voters off the dlist head, releasing each
           removed voter's in-progress prf entries.
      Voters present in both the old and new sets survive with their
      proof state intact. */
904 : void
905 : fd_eqvoc_update_voters( fd_eqvoc_t *        eqvoc,
906 :                         fd_pubkey_t const * id_keys,
907 21 :                         ulong               cnt ) {
908 :
      /* Phase 1: mark every tracked voter for removal.
         NOTE(review): this repurposes the element's `next` field as a
         boolean mark while the element sits in the dlist — assumes
         `next` is not live as a dlist/map link field during this
         window; confirm against vtr_t's template declarations. */
909 21 :   for( vtr_dlist_iter_t iter = vtr_dlist_iter_fwd_init( eqvoc->vtr_dlist, eqvoc->vtr_pool );
910 21 :        !vtr_dlist_iter_done( iter, eqvoc->vtr_dlist, eqvoc->vtr_pool );
911 21 :        iter = vtr_dlist_iter_fwd_next( iter, eqvoc->vtr_dlist, eqvoc->vtr_pool ) ) {
912 0 :     eqvoc->vtr_pool[iter].next = 1; /* mark for removal */
913 0 :   }
914 :
915 :   /* Phase 2: move all voters in the new voter set to the tail of
916 :      the dlist, clearing their removal mark (next = 0).  Voters not
917 :      previously tracked are acquired from the pool and inserted
918 :      into the map; existing ones are unlinked and re-appended. */
919 :
918 2121 :   for( ulong i=0UL; i<cnt; i++ ) {
919 2100 :     fd_pubkey_t const * id = &id_keys[i];
920 2100 :     vtr_t * vtr = vtr_map_ele_query( eqvoc->vtr_map, id, NULL, eqvoc->vtr_pool );
921 2100 :     if( FD_UNLIKELY( !vtr ) ) {
922 2100 :       vtr = vtr_pool_ele_acquire( eqvoc->vtr_pool );
923 2100 :       vtr->from = *id;
924 2100 :       vtr->prf_dlist_cnt = 0;
925 2100 :       vtr_map_ele_insert( eqvoc->vtr_map, vtr, eqvoc->vtr_pool );
926 2100 :     } else {
927 0 :       vtr_dlist_ele_remove( eqvoc->vtr_dlist, vtr, eqvoc->vtr_pool );
928 0 :     }
929 2100 :     vtr->next = 0; /* unmark for removal */
930 2100 :     vtr_dlist_ele_push_tail( eqvoc->vtr_dlist, vtr, eqvoc->vtr_pool );
931 2100 :   }
932 :
933 :   /* Phase 3: pop unwanted voters from the head until we hit a kept
934 :      voter.  Each removed voter's in-progress proofs are released
935 :      first, per the file-header contract for prf_map entries. */
936 :
935 21 :   while( FD_LIKELY( !vtr_dlist_is_empty( eqvoc->vtr_dlist, eqvoc->vtr_pool ) ) ) {
936 21 :     vtr_t * vtr = vtr_dlist_ele_pop_head( eqvoc->vtr_dlist, eqvoc->vtr_pool );
937 21 :     if( FD_UNLIKELY( !vtr->next ) ) { /* can short-circuit since all the existing and new voters were appended */
938 21 :       vtr_dlist_ele_push_tail( eqvoc->vtr_dlist, vtr, eqvoc->vtr_pool );
939 21 :       break;
940 21 :     }
941 0 :     while( FD_LIKELY( !prf_dlist_is_empty( vtr->prf_dlist, eqvoc->prf_pool ) ) ) {
942 0 :       prf_t * prf = prf_dlist_ele_pop_head( vtr->prf_dlist, eqvoc->prf_pool );
943 0 :       prf_map_ele_remove_fast( eqvoc->prf_map, prf, eqvoc->prf_pool );
944 0 :       prf_pool_ele_release( eqvoc->prf_pool, prf );
945 0 :     }
946 0 :     vtr_map_ele_remove_fast( eqvoc->vtr_map, vtr, eqvoc->vtr_pool );
947 0 :     vtr_pool_ele_release( eqvoc->vtr_pool, vtr );
948 0 :   }
949 21 : }
|