Line data Source code
1 : #include "fd_eqvoc.h"
2 :
3 : void *
4 0 : fd_eqvoc_new( void * shmem, ulong key_max, ulong seed ) {
5 :
6 0 : if( FD_UNLIKELY( !shmem ) ) {
7 0 : FD_LOG_WARNING(( "NULL mem" ));
8 0 : return NULL;
9 0 : }
10 :
11 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)shmem, fd_eqvoc_align() ) ) ) {
12 0 : FD_LOG_WARNING(( "misaligned mem" ));
13 0 : return NULL;
14 0 : }
15 :
16 0 : FD_SCRATCH_ALLOC_INIT( l, shmem );
17 0 : fd_eqvoc_t * eqvoc = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_eqvoc_t), sizeof(fd_eqvoc_t) );
18 0 : void * pool = FD_SCRATCH_ALLOC_APPEND( l, fd_eqvoc_pool_align(), fd_eqvoc_pool_footprint( key_max ) );
19 0 : void * map = FD_SCRATCH_ALLOC_APPEND( l, fd_eqvoc_map_align(), fd_eqvoc_map_footprint( key_max ) );
20 0 : void * sha512 = FD_SCRATCH_ALLOC_APPEND( l, fd_sha512_align(), fd_sha512_footprint() );
21 0 : void * bmtree_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_bmtree_commit_align(), fd_bmtree_commit_footprint( FD_SHRED_MERKLE_LAYER_CNT ) );
22 0 : FD_SCRATCH_ALLOC_FINI( l, fd_eqvoc_align() );
23 :
24 0 : eqvoc->key_max = key_max;
25 0 : fd_eqvoc_pool_new( pool, key_max );
26 0 : fd_eqvoc_map_new( map, key_max, seed );
27 0 : fd_sha512_new( sha512 );
28 0 : (void)bmtree_mem; /* does not require `new` */
29 :
30 0 : return shmem;
31 0 : }
32 :
33 : fd_eqvoc_t *
34 0 : fd_eqvoc_join( void * sheqvoc ) {
35 :
36 0 : if( FD_UNLIKELY( !sheqvoc ) ) {
37 0 : FD_LOG_WARNING(( "NULL eqvoc" ));
38 0 : return NULL;
39 0 : }
40 :
41 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)sheqvoc, fd_eqvoc_align() ) ) ) {
42 0 : FD_LOG_WARNING(( "misaligned eqvoc" ));
43 0 : return NULL;
44 0 : }
45 :
46 0 : FD_SCRATCH_ALLOC_INIT( l, sheqvoc );
47 0 : fd_eqvoc_t * eqvoc = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_eqvoc_t), sizeof(fd_eqvoc_t) );
48 0 : void * pool = FD_SCRATCH_ALLOC_APPEND( l, fd_eqvoc_pool_align(), fd_eqvoc_pool_footprint( eqvoc->key_max ) );
49 0 : void * map = FD_SCRATCH_ALLOC_APPEND( l, fd_eqvoc_map_align(), fd_eqvoc_map_footprint( eqvoc->key_max ) );
50 0 : void * sha512 = FD_SCRATCH_ALLOC_APPEND( l, fd_sha512_align(), fd_sha512_footprint() );
51 0 : void * bmtree_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_bmtree_commit_align(), fd_bmtree_commit_footprint( FD_SHRED_MERKLE_LAYER_CNT ) );
52 0 : FD_SCRATCH_ALLOC_FINI( l, fd_eqvoc_align() );
53 :
54 0 : (void)eqvoc; /* does not require `join` */
55 0 : eqvoc->pool = fd_eqvoc_pool_join( pool );
56 0 : eqvoc->map = fd_eqvoc_map_join( map );
57 0 : eqvoc->sha512 = fd_sha512_join( sha512 );
58 0 : eqvoc->bmtree_mem = bmtree_mem;
59 :
60 0 : return eqvoc;
61 0 : }
62 :
63 : void *
64 0 : fd_eqvoc_leave( fd_eqvoc_t const * eqvoc ) {
65 :
66 0 : if( FD_UNLIKELY( !eqvoc ) ) {
67 0 : FD_LOG_WARNING(( "NULL eqvoc" ));
68 0 : return NULL;
69 0 : }
70 :
71 0 : return (void *)eqvoc;
72 0 : }
73 :
74 : void *
75 0 : fd_eqvoc_delete( void * eqvoc ) {
76 :
77 0 : if( FD_UNLIKELY( !eqvoc ) ) {
78 0 : FD_LOG_WARNING(( "NULL eqvoc" ));
79 0 : return NULL;
80 0 : }
81 :
82 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)eqvoc, fd_eqvoc_align() ) ) ) {
83 0 : FD_LOG_WARNING(( "misaligned eqvoc" ));
84 0 : return NULL;
85 0 : }
86 :
87 0 : return eqvoc;
88 0 : }
89 :
90 : void
91 0 : fd_eqvoc_insert( fd_eqvoc_t * eqvoc, fd_shred_t const * shred ) {
92 0 : fd_eqvoc_key_t key = { shred->slot, shred->fec_set_idx };
93 0 : fd_eqvoc_entry_t * entry = fd_eqvoc_map_ele_query( eqvoc->map, &key, NULL, eqvoc->pool );
94 0 : if( FD_UNLIKELY( !entry ) ) {
95 :
96 : /* TODO eviction logic */
97 :
98 0 : entry = fd_eqvoc_pool_ele_acquire( eqvoc->pool );
99 0 : entry->key.slot = shred->slot;
100 0 : entry->key.fec_set_idx = shred->fec_set_idx;
101 0 : entry->code_cnt = 0;
102 0 : entry->data_cnt = 0;
103 0 : entry->last_idx = FD_SHRED_IDX_NULL;
104 0 : memcpy( entry->sig, shred->signature, FD_ED25519_SIG_SZ );
105 0 : }
106 :
107 0 : if( FD_LIKELY( fd_shred_is_code( fd_shred_type( shred->variant ) ) ) ) { /* optimize for coding shreds (code_cnt >= data_cnt) */
108 0 : entry->code_cnt = shred->code.code_cnt;
109 0 : entry->data_cnt = shred->code.data_cnt;
110 0 : } else if( FD_UNLIKELY( shred->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE ) ) {
111 0 : entry->last_idx = shred->idx;
112 0 : }
113 :
114 : /* This cannot fail */
115 0 : fd_eqvoc_map_ele_insert( eqvoc->map, entry, eqvoc->pool );
116 0 : }
117 :
/* fd_eqvoc_search checks `shred` against previously inserted metadata
   for evidence of equivocation.  Returns the first conflicting entry
   found, or NULL if no conflict is detected.  Nothing is inserted or
   modified. */
fd_eqvoc_entry_t const *
fd_eqvoc_search( fd_eqvoc_t const * eqvoc, fd_shred_t const * shred ) {
  fd_eqvoc_entry_t const * entry = fd_eqvoc_query( eqvoc, shred->slot, shred->fec_set_idx );

  /* If we've already seen a shred in this FEC set */

  if( FD_LIKELY( entry ) ) {

    /* Make sure the signature matches. Every merkle shred in the FEC
       set must have the same signature. */

    if( FD_UNLIKELY( 0 != memcmp( entry->sig, shred->signature, FD_ED25519_SIG_SZ ) ) ) {
      return entry;
    }

    /* Check if this shred's idx is higher than another shred that claimed
       to be the last_idx. This indicates equivocation. */

    if( FD_UNLIKELY( shred->idx > entry->last_idx ) ) {
      return entry;
    }
  }

  /* Look backward FEC_MAX idxs for overlap: an earlier FEC set whose
     data shred range (key.fec_set_idx + data_cnt, exclusive) extends
     past this shred's fec_set_idx claims overlapping indices.  The
     `shred->fec_set_idx >= i` bound prevents uint underflow. */

  for( uint i = 1; shred->fec_set_idx >= i && i < FD_EQVOC_FEC_MAX; i++ ) {
    fd_eqvoc_entry_t const * conflict = fd_eqvoc_query( eqvoc, shred->slot, shred->fec_set_idx - i );
    if( FD_UNLIKELY( conflict &&
                     conflict->data_cnt > 0 &&
                     conflict->key.fec_set_idx + conflict->data_cnt > shred->fec_set_idx ) ) {
      return conflict;
    }
  }

  /* Look forward data_cnt idxs for overlap.  NOTE(review): entry->data_cnt
     is only nonzero once a coding shred for this FEC set has been
     inserted; the loop is skipped when entry is NULL or data_cnt is 0. */

  for( uint i = 1; entry && i < entry->data_cnt; i++ ) {
    fd_eqvoc_entry_t const * conflict = fd_eqvoc_query( eqvoc, shred->slot, shred->fec_set_idx + i );
    if( FD_UNLIKELY( conflict ) ) return conflict;
  }

  return NULL; /* No conflicts */
}
161 :
/* shred_merkle_root derives the merkle root for `shred` by hashing the
   shred's merkle-protected region into a leaf and replaying the
   inclusion proof embedded in the shred via the eqvoc's bmtree scratch.
   On success the root is written to root_out.  Returns the result of
   fd_bmtree_commitp_insert_with_proof. */
int
shred_merkle_root( fd_eqvoc_t const * eqvoc, fd_shred_t const * shred, fd_bmtree_node_t * root_out ) {
  fd_bmtree_commit_t * tree = fd_bmtree_commit_init( eqvoc->bmtree_mem,
                                                     FD_SHRED_MERKLE_NODE_SZ,
                                                     FD_BMTREE_LONG_PREFIX_SZ,
                                                     FD_SHRED_MERKLE_LAYER_CNT );

  /* Leaf index within the FEC set: data shreds occupy leaves
     [0, data_cnt); parity shreds follow at data_cnt + code.idx. */

  uchar shred_type  = fd_shred_type( shred->variant );
  int   is_data_shred = fd_shred_is_data( shred_type );
  ulong in_type_idx = fd_ulong_if( is_data_shred, shred->idx - shred->fec_set_idx, shred->code.idx );
  ulong shred_idx   = fd_ulong_if( is_data_shred, in_type_idx, in_type_idx + shred->code.data_cnt );

  /* Size of the region covered by the merkle commitment.  NOTE(review):
     1115UL appears to be the merkle data shred payload capacity -- TODO
     confirm against the shred layout constants in fd_shred.h. */

  ulong tree_depth           = fd_shred_merkle_cnt( shred->variant ); /* In [0, 15] */
  ulong reedsol_protected_sz = 1115UL + FD_SHRED_DATA_HEADER_SZ - FD_SHRED_SIGNATURE_SZ - FD_SHRED_MERKLE_NODE_SZ*tree_depth
                                      - FD_SHRED_MERKLE_ROOT_SZ*fd_shred_is_chained ( shred_type )
                                      - FD_SHRED_SIGNATURE_SZ *fd_shred_is_resigned( shred_type); /* In [743, 1139] conservatively*/
  ulong data_merkle_protected_sz   = reedsol_protected_sz + FD_SHRED_MERKLE_ROOT_SZ*fd_shred_is_chained ( shred_type );
  ulong parity_merkle_protected_sz = reedsol_protected_sz + FD_SHRED_MERKLE_ROOT_SZ*fd_shred_is_chained ( shred_type )+FD_SHRED_CODE_HEADER_SZ-FD_ED25519_SIG_SZ;
  ulong merkle_protected_sz        = fd_ulong_if( is_data_shred, data_merkle_protected_sz, parity_merkle_protected_sz );

  /* Hash everything after the signature into the leaf, then splice the
     leaf plus its proof nodes into the (sparse) commit to recover the
     root. */

  fd_bmtree_node_t leaf;
  fd_bmtree_hash_leaf( &leaf, (uchar const *)shred + sizeof(fd_ed25519_sig_t), merkle_protected_sz, FD_BMTREE_LONG_PREFIX_SZ );

  return fd_bmtree_commitp_insert_with_proof( tree, shred_idx, &leaf, (uchar const *)fd_shred_merkle_nodes( shred ), fd_shred_merkle_cnt( shred->variant ), root_out );
}
186 :
187 : /* https://github.com/anza-xyz/agave/blob/v2.0.3/gossip/src/duplicate_shred.rs#L107-L177 */
188 : int
189 0 : fd_eqvoc_test( fd_eqvoc_t const * eqvoc, fd_shred_t * shred1, fd_shred_t * shred2 ) {
190 :
191 : /* Optimize for valid equivocation proof */
192 :
193 0 : if( FD_UNLIKELY( shred1->slot != shred2->slot ) ) {
194 0 : return 0;
195 0 : }
196 :
197 0 : if( FD_UNLIKELY( shred1->version != eqvoc->shred_version ) ) {
198 0 : return 0;
199 0 : }
200 :
201 0 : if( FD_UNLIKELY( shred2->version != eqvoc->shred_version ) ) {
202 0 : return 0;
203 0 : }
204 :
205 : /* Verify both shreds contain valid signatures for the leader of their
206 : slot, which requires deriving the merkle root and sig-verifying it
207 : because the leader signs the merkle root for merkle shreds. */
208 :
209 0 : fd_pubkey_t const * leader = fd_epoch_leaders_get( eqvoc->leaders, shred1->slot );
210 0 : fd_bmtree_node_t root1;
211 0 : if( FD_UNLIKELY( !shred_merkle_root( eqvoc, shred1, &root1 ) ) ) {
212 0 : return 0;
213 0 : }
214 0 : fd_bmtree_node_t root2;
215 0 : if( FD_UNLIKELY( !shred_merkle_root( eqvoc, shred2, &root2 ) ) ) {
216 0 : return 0;
217 0 : }
218 0 : if( FD_UNLIKELY( FD_ED25519_SUCCESS != fd_ed25519_verify( root1.hash,
219 0 : 32UL,
220 0 : shred1->signature,
221 0 : leader->uc,
222 0 : eqvoc->sha512 ) ||
223 0 : FD_ED25519_SUCCESS != fd_ed25519_verify( root2.hash,
224 0 : 32UL,
225 0 : shred2->signature,
226 0 : leader->uc,
227 0 : eqvoc->sha512 ) ) ) {
228 0 : return 0;
229 0 : }
230 :
231 0 : if( FD_UNLIKELY( shred1->fec_set_idx == shred2->fec_set_idx
232 0 : && 0 != memcmp( &root1, &root2, sizeof( fd_bmtree_node_t ) ) ) ) {
233 0 : return 1;
234 0 : }
235 :
236 0 : if( FD_UNLIKELY( fd_shred_type( shred1->variant ) != fd_shred_type( shred2->variant ) ) ) {
237 0 : return 0;
238 0 : }
239 :
240 0 : if( FD_UNLIKELY( shred1->idx == shred2->idx ) ) {
241 0 : if( FD_LIKELY( 0 != memcmp( shred1->signature, shred2->signature, FD_ED25519_SIG_SZ ) ) ) {
242 0 : return 1;
243 0 : }
244 0 : return 0;
245 0 : }
246 :
247 0 : if( FD_UNLIKELY( fd_shred_is_data( fd_shred_type( shred1->variant ) ) ) ) {
248 0 : if( FD_UNLIKELY( ( shred1->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE && shred2->idx > shred1->idx ) )
249 0 : || ( shred2->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE && shred1->idx > shred2->idx ) ) {
250 0 : return 1;
251 0 : }
252 0 : }
253 :
254 0 : fd_eqvoc_entry_t const * entry1 = fd_eqvoc_query( eqvoc, shred1->slot, shred1->fec_set_idx );
255 0 : fd_eqvoc_entry_t const * entry2 = fd_eqvoc_query( eqvoc, shred2->slot, shred2->fec_set_idx );
256 :
257 : /* If the FEC set idx is the same but any metadata is different, mark
258 : it as equivocating. */
259 :
260 0 : if( FD_UNLIKELY( shred1->fec_set_idx == shred2->fec_set_idx &&
261 0 : ( entry1->code_cnt != entry2->code_cnt ||
262 0 : entry1->data_cnt != entry2->data_cnt ||
263 0 : entry1->last_idx != entry2->last_idx ) ) ) {
264 0 : return 1;
265 0 : }
266 :
267 : /* This is only reachable if shred1 and shred2 are in different FEC
268 : sets, so check for overlap. */
269 :
270 0 : ulong lo = fd_ulong_min( shred1->fec_set_idx, shred2->fec_set_idx );
271 0 : ulong hi = fd_ulong_max( shred1->fec_set_idx, shred2->fec_set_idx );
272 :
273 0 : fd_eqvoc_entry_t const * lo_entry = fd_ptr_if( shred1->fec_set_idx < shred2->fec_set_idx, entry1, entry2 );
274 0 : FD_LOG_NOTICE(("lo %lu hi %lu data_cnt %lu %lu", lo, hi, lo_entry->data_cnt, lo + lo_entry->data_cnt ));
275 :
276 : /* The FEC sets must overlap in data shred indices if the lower FEC
277 : set index crosses into the higher FEC set index based on the data
278 : shred count. */
279 :
280 0 : if ( FD_UNLIKELY( lo_entry && lo_entry->data_cnt > 0 && lo + lo_entry->data_cnt >= hi ) ) {
281 0 : return 1;
282 0 : }
283 :
284 0 : return 0;
285 0 : }
286 :
/* fd_eqvoc_from_chunks reassembles two shreds from a DuplicateShred
   gossip chunk sequence.  The chunks carry the concatenation of
   shred1 || shred2 split into chunk_len-sized pieces; chunks may arrive
   in any order (chunk_index gives the position).  shred1_out/shred2_out
   must be large enough to hold a max-size shred.  NOTE(review): shred
   sizes are derived on the fly from already-copied header bytes via
   fd_shred_sz once the variant byte is available -- assumes chunk_len >
   FD_SHRED_VARIANT_OFF so the first chunk contains the variant; TODO
   confirm with callers. */
void
fd_eqvoc_from_chunks( FD_PARAM_UNUSED fd_eqvoc_t const * eqvoc,
                      fd_gossip_duplicate_shred_t * chunks,
                      fd_shred_t * shred1_out,
                      fd_shred_t * shred2_out ) {
  /* FIXME add validation */

  uchar * shred1_bytes = (uchar *)shred1_out;
  uchar * shred2_bytes = (uchar *)shred2_out;

  /* All chunks share num_chunks / chunk_len; read them from the first. */

  ulong chunk_cnt = chunks[0].num_chunks;
  ulong chunk_len = chunks[0].chunk_len;

  ulong off       = 0;  /* write offset into the logical shred1||shred2 stream */
  ulong shred1_sz = 0;  /* 0 until enough of shred1's header is copied to size it */
  ulong shred2_sz = 0;  /* 0 until enough of shred2's header is copied to size it */
  for( ulong i = 0; i < chunk_cnt; i++ ) {
    for( ulong j = 0; j < chunk_cnt; j++ ) {

      /* FIXME O(n^2). DOS for small chunks */

      /* Process chunks in chunk_index order regardless of array order. */

      if( chunks[j].chunk_index == i ) {

        /* Once the variant byte of shred1 (resp. shred2) has been
           copied, its true size becomes computable. */

        if( FD_LIKELY( off > FD_SHRED_VARIANT_OFF ) ) {
          shred1_sz = fd_shred_sz( shred1_out );
        }

        if( FD_LIKELY( off > shred1_sz + FD_SHRED_VARIANT_OFF ) ) {
          shred2_sz = fd_shred_sz( shred2_out );
        }

        if( !shred1_sz || off + chunk_len <= shred1_sz ) {

          /* copy from chunk into shred1 */

          fd_memcpy( shred1_bytes + off, chunks[j].chunk, chunk_len );
          off += chunk_len;

        } else if( off < shred1_sz ) {

          /* copy prefix of chunk into shred1 and suffix of chunk into shred2 */

          ulong len = shred1_sz - off;
          fd_memcpy( shred1_bytes + off, chunks[j].chunk, len );
          off += len;

          fd_memcpy( shred2_bytes + off - shred1_sz, chunks[j].chunk + len, chunk_len - len );
          off += chunk_len - len;

        } else {

          /* copy from chunk into shred2 */

          /* Clamp to shred2's size when known, so trailing padding in
             the final chunk is not copied; off still advances by the
             full chunk_len to stay in sync with chunk boundaries. */

          ulong len = fd_ulong_min( chunk_len,
                                    fd_ulong_if( (int)shred2_sz,
                                                 shred2_sz - ( off - shred1_sz ),
                                                 chunk_len ) );
          fd_memcpy( shred2_bytes + off - shred1_sz, chunks[j].chunk, len );
          off += chunk_len;
        }
      }
    }
  }
}
351 :
352 : void
353 : fd_eqvoc_to_chunks( FD_PARAM_UNUSED fd_eqvoc_t const * eqvoc,
354 : fd_shred_t const * shred1,
355 : fd_shred_t const * shred2,
356 : ulong chunk_len,
357 0 : fd_gossip_duplicate_shred_t * chunks_out ) {
358 0 : uchar * shred1_bytes = (uchar *)shred1;
359 0 : uchar * shred2_bytes = (uchar *)shred2;
360 :
361 0 : ulong off = 0;
362 0 : while( FD_LIKELY( off < fd_shred_sz( shred1 ) + fd_shred_sz( shred2 ) ) ) {
363 0 : ulong chunk_idx = off / chunk_len;
364 :
365 0 : if( off + chunk_len < fd_shred_sz( shred1 ) ) {
366 :
367 : /* copy from shred1 into chunk */
368 :
369 0 : fd_memcpy( chunks_out[chunk_idx].chunk, shred1_bytes + off, chunk_len );
370 0 : off += chunk_len;
371 :
372 0 : } else if( off < fd_shred_sz( shred1 ) ) {
373 :
374 : /* copy suffix of shred1 and prefix of shred2 into chunk */
375 :
376 0 : ulong suffix = fd_shred_sz( shred1 ) - off;
377 0 : fd_memcpy( chunks_out[chunk_idx].chunk, shred1_bytes + off, suffix );
378 0 : off += suffix;
379 :
380 0 : ulong prefix = chunk_len - suffix;
381 0 : fd_memcpy( chunks_out[chunk_idx].chunk + suffix, shred2_bytes, prefix );
382 0 : off += prefix;
383 :
384 0 : } else {
385 :
386 : /* copy from shred2 into chunk */
387 :
388 0 : ulong len = fd_ulong_min( chunk_len,
389 0 : fd_shred_sz( shred2 ) - ( off - fd_shred_sz( shred1 ) ) );
390 0 : fd_memcpy( chunks_out[chunk_idx].chunk, shred2_bytes + off - fd_shred_sz( shred1 ), len );
391 0 : off += len;
392 0 : }
393 0 : }
394 0 : ulong sz = fd_shred_sz( shred1 ) + fd_shred_sz( shred2 );
395 0 : ulong cnt = sz / chunk_len;
396 0 : cnt = fd_ulong_if( (int)( sz % chunk_len ), cnt + 1, cnt );
397 0 : }
|