Line data Source code
1 : #include <stdio.h>
2 : #include <string.h>
3 :
4 : #include "fd_tower.h"
5 : #include "../../flamenco/txn/fd_txn_generate.h"
6 : #include "../../flamenco/runtime/fd_system_ids.h"
7 :
8 0 : #define THRESHOLD_DEPTH (8)
9 0 : #define THRESHOLD_RATIO (2.0 / 3.0)
10 : #define SWITCH_RATIO (0.38)
11 :
12 : static fd_vote_acc_vote_t const *
13 0 : v4_off( fd_vote_acc_t const * voter ) {
14 0 : return (fd_vote_acc_vote_t const *)( voter->v4.bls_pubkey_compressed + voter->v4.has_bls_pubkey_compressed * sizeof(voter->v4.bls_pubkey_compressed) + sizeof(ulong) );
15 0 : }
16 :
17 : /* expiration_slot calculates the expiration slot of a vote given its
18 :    slot and confirmation count. */
19 :
20 : static inline ulong
21 270 : expiration_slot( fd_tower_t const * vote ) {
22 270 : ulong lockout = 1UL << vote->conf;
23 270 : return vote->slot + lockout;
24 270 : }
25 :
26 : /* simulate_vote simulates voting for slot, popping all votes from the
27 : top that would be consecutively expired by voting for slot. */
28 :
29 : static ulong
30 : simulate_vote( fd_tower_t const * tower,
31 282 : ulong slot ) {
32 282 : ulong cnt = fd_tower_cnt( tower );
33 300 : while( cnt ) {
34 270 : fd_tower_t const * top_vote = fd_tower_peek_index_const( tower, cnt - 1 );
35 270 : if( FD_LIKELY( expiration_slot( top_vote ) >= slot ) ) break; /* expire only if consecutive */
36 18 : cnt--;
37 18 : }
38 282 : return cnt;
39 282 : }
40 :
41 : /* push_vote pushes a new vote for slot onto the tower. Pops and
42 : returns the new root (bottom of the tower) if it reaches max lockout
43 : as a result of the new vote. Otherwise, returns ULONG_MAX.
44 :
45 :    Max lockout is equivalent to 1 << (FD_TOWER_VOTE_MAX + 1) (which
46 : implies confirmation count is FD_TOWER_VOTE_MAX + 1). As a result,
47 : fd_tower_vote also maintains the invariant that the tower contains at
48 : most FD_TOWER_VOTE_MAX votes, because (in addition to vote expiry)
49 : there will always be a pop before reaching FD_TOWER_VOTE_MAX + 1. */
50 :
static ulong
push_vote( fd_tower_t * tower,
           ulong        slot ) {

  /* Sanity check: slot should always be greater than previous vote slot in tower. */

  fd_tower_t const * vote = fd_tower_peek_tail_const( tower );
  if( FD_UNLIKELY( vote && slot <= vote->slot ) ) FD_LOG_CRIT(( "[%s] slot %lu <= vote->slot %lu", __func__, slot, vote->slot ));

  /* Use simulate_vote to determine how many expired votes to pop. */

  ulong cnt = simulate_vote( tower, slot );

  /* Pop everything that got expired. */

  while( FD_LIKELY( fd_tower_cnt( tower ) > cnt ) ) {
    fd_tower_pop_tail( tower );
  }

  /* If the tower is still full after expiring, then pop and return the
     bottom vote slot as the new root because this vote has incremented
     it to max lockout.  Otherwise this is a no-op and there is no new
     root (ULONG_MAX). */

  ulong root = ULONG_MAX;
  if( FD_LIKELY( fd_tower_full( tower ) ) ) { /* optimize for full tower */
    root = fd_tower_pop_head( tower ).slot;
  }

  /* Increment confirmations (double lockouts) for consecutive
     confirmations in prior votes.  Walking from the tail (most recent
     vote), the k-th vote back must have conf == k for the new vote to
     deepen its lockout; the first gap in confirmation counts stops the
     walk (see the tower diagrams above lockout_check). */

  ulong prev_conf = 0;
  for( fd_tower_iter_t iter = fd_tower_iter_init_rev( tower );
       !fd_tower_iter_done_rev( tower, iter );
       iter = fd_tower_iter_prev ( tower, iter ) ) {
    fd_tower_t * vote = fd_tower_iter_ele( tower, iter );
    if( FD_UNLIKELY( vote->conf != ++prev_conf ) ) break;
    vote->conf++;
  }

  /* Add the new vote to the tower. */

  fd_tower_push_tail( tower, (fd_tower_t){ .slot = slot, .conf = 1 } );

  /* Return the new root (ULONG_MAX if there is none). */

  return root;
}
100 :
101 : /* lockout_check checks if we are locked out from voting for slot.
102 : Returns 1 if we can vote for slot without violating lockout, 0
103 : otherwise.
104 :
105 : After voting for a slot n, we are locked out for 2^k slots, where k
106 : is the confirmation count of that vote. Once locked out, we cannot
107 : vote for a different fork until that previously-voted fork expires at
108 : slot n+2^k. This implies the earliest slot in which we can switch
109 : from the previously-voted fork is (n+2^k)+1. We use `ghost` to
110 : determine whether `slot` is on the same or different fork as previous
111 : vote slots.
112 :
113 : In the case of the tower, every vote has its own expiration slot
114 : depending on confirmations. The confirmation count is the max number
115 : of consecutive votes that have been pushed on top of the vote, and
116 : not necessarily its current height in the tower.
117 :
118 : For example, the following is a diagram of a tower pushing and
119 : popping with each vote:
120 :
121 :
122 : slot | confirmation count
123 : -----|-------------------
124 : 4 | 1 <- vote
125 : 3 | 2
126 : 2 | 3
127 : 1 | 4
128 :
129 :
130 : slot | confirmation count
131 : -----|-------------------
132 : 9 | 1 <- vote
133 : 2 | 3
134 : 1 | 4
135 :
136 :
137 : slot | confirmation count
138 : -----|-------------------
139 : 10 | 1 <- vote
140 : 9 | 2
141 : 2 | 3
142 : 1 | 4
143 :
144 :
145 : slot | confirmation count
146 : -----|-------------------
147 : 11 | 1 <- vote
148 : 10 | 2
149 : 9 | 3
150 : 2 | 4
151 : 1 | 5
152 :
153 :
154 : slot | confirmation count
155 : -----|-------------------
156 : 18 | 1 <- vote
157 : 2 | 4
158 : 1 | 5
159 :
160 :
161 : In the final tower, note the gap in confirmation counts between slot
162 : 18 and slot 2, even though slot 18 is directly above slot 2. */
163 :
164 : static int
165 : lockout_check( fd_tower_t const * tower,
166 : fd_tower_blocks_t * forks,
167 0 : ulong slot ) {
168 :
169 0 : if( FD_UNLIKELY( fd_tower_empty( tower ) ) ) return 1; /* always not locked out if we haven't voted. */
170 0 : if( FD_UNLIKELY( slot <= fd_tower_peek_tail_const( tower )->slot ) ) return 0; /* always locked out from voting for slot <= last vote slot */
171 :
172 : /* Simulate a vote to pop off all the votes that would be expired by
173 : voting for slot. Then check if the newly top-of-tower vote is on
174 : the same fork as slot (if so this implies we can vote for it). */
175 :
176 0 : ulong cnt = simulate_vote( tower, slot ); /* pop off votes that would be expired */
177 0 : if( FD_UNLIKELY( !cnt ) ) return 1; /* tower is empty after popping expired votes */
178 :
179 0 : fd_tower_t const * vote = fd_tower_peek_index_const( tower, cnt - 1 ); /* newly top-of-tower */
180 0 : int lockout = fd_tower_blocks_is_slot_descendant( forks, vote->slot, slot ); /* check if on same fork */
181 0 : FD_LOG_INFO(( "[%s] lockout? %d. last_vote_slot: %lu. slot: %lu", __func__, lockout, vote->slot, slot ));
182 0 : return lockout;
183 0 : }
184 :
185 : /* switch_check checks if we can switch to the fork of `slot`. Returns
186 : 1 if we can switch, 0 otherwise. Assumes tower is non-empty.
187 :
188 : There are two forks of interest: our last vote fork ("vote fork") and
189 : the fork we want to switch to ("switch fork"). The switch fork is on
190 : the fork of `slot`.
191 :
192 : In order to switch, FD_TOWER_SWITCH_PCT of stake must have voted for
193 : a slot that satisfies the following conditions: the
194 : GCA(slot, last_vote) is an ancestor of the switch_slot
195 :
196 : Recall from the lockout check a validator is locked out from voting
197 : for our last vote slot when their last vote slot is on a different
198 : fork, and that vote's expiration slot > our last vote slot.
199 :
200 : The following pseudocode describes the algorithm:
201 :
202 : ```
203 : for every fork f in the fork tree, take the most recently executed
204 : slot `s` (the leaf of the fork).
205 :
206 : Take the greatest common ancestor of the `s` and the our last vote
207 : slot. If the switch_slot is a descendant of this GCA, then votes for
208 : `s` can count towards the switch threshold.
209 :
210 : query banks(`s`) for vote accounts in `s`
211 : for all vote accounts v in `s`
212 : if v's locked out[1] from voting for our latest vote slot
213 : add v's stake to switch stake
214 :
215 : return switch stake >= FD_TOWER_SWITCH_PCT
216 : ```
217 :
218 : The switch check is used to safeguard optimistic confirmation.
219 : Specifically: FD_TOWER_OPT_CONF_PCT + FD_TOWER_SWITCH_PCT >= 1. */
220 :
221 : static int
222 : is_purged( fd_tower_blocks_t * blocks,
223 489 : fd_ghost_blk_t * blk ) {
224 489 : fd_tower_blk_t * tower_blk = fd_tower_blocks_query( blocks, blk->slot );
225 489 : return tower_blk->confirmed && memcmp( &tower_blk->confirmed_block_id, &blk->id, sizeof(fd_hash_t) );
226 489 : }
227 :
static int
switch_check( fd_tower_t const *  tower,
              fd_ghost_t *        ghost,
              fd_tower_blocks_t * blocks,
              fd_tower_lockos_t * lockos,
              fd_tower_stakes_t * stakes,
              ulong               total_stake,
              ulong               switch_slot ) {

  ulong switch_stake   = 0;
  ulong last_vote_slot = fd_tower_peek_tail_const( tower )->slot; /* tower assumed non-empty (see doc above) */
  ulong root_slot      = fd_tower_peek_head_const( tower )->slot;

  /* BFS the ghost tree starting from the root.  Each visited blk is
     temporarily removed from the ghost map and chained into an
     intrusive queue via its `next` field, then re-inserted when it is
     popped.  Every exit path below must drain the remaining queue back
     into the map so ghost is left intact. */

  ulong null = fd_ghost_blk_idx_null( ghost );
  fd_ghost_blk_t * head = fd_ghost_blk_map_remove( ghost, fd_ghost_root( ghost ) );
  fd_ghost_blk_t * tail = head;
  head->next = null;

  while( FD_LIKELY( head ) ) {
    fd_ghost_blk_t * blk = head; /* guaranteed to not be purged */

    /* Because agave has particular behavior where if they replay an
       equivocating version of a slot and then the correct version, the
       original version and all of its children get purged from all
       structures.  None of the nodes on this subtree can be considered
       for the switch proof.  Note that this means as we BFS, a node
       can be considered a "valid leaf" if either it has no children,
       or if all of its children are purged/superseded slots.  We
       detect this by comparing against tower_blocks confirmed. */

    int is_valid_leaf = 1;
    fd_ghost_blk_t * child = fd_ghost_blk_child( ghost, head );
    while( FD_LIKELY( child ) ) {
      if( FD_LIKELY( !is_purged( blocks, child ) ) ) {
        fd_ghost_blk_map_remove( ghost, child );
        tail->next = fd_ghost_blk_idx( ghost, child );
        tail = child;
        tail->next = null;
        is_valid_leaf = 0;
      }
      child = fd_ghost_blk_sibling( ghost, child );
    }

    head = fd_ghost_blk_next( ghost, blk ); /* pop queue head */
    fd_ghost_blk_map_insert( ghost, blk );  /* re-insert into map */

    if( FD_UNLIKELY( !is_valid_leaf ) ) continue; /* not a real candidate */

    ulong candidate_slot = blk->slot;
    ulong lca = fd_tower_blocks_lowest_common_ancestor( blocks, candidate_slot, last_vote_slot );
    if( FD_UNLIKELY( candidate_slot == last_vote_slot ) ) continue;
    if( FD_UNLIKELY( lca==ULONG_MAX ) ) continue; /* unlikely but this leaf is an already pruned minority fork */

    if( FD_UNLIKELY( fd_tower_blocks_is_slot_descendant( blocks, lca, switch_slot ) ) ) {

      /* This candidate slot may be considered for the switch proof, if
         it passes the following conditions:

         https://github.com/anza-xyz/agave/blob/c7b97bc77addacf03b229c51b47c18650d909576/core/src/consensus.rs#L1117

         Now for this candidate slot, look at the lockouts that were
         created at the time that we processed the bank for this
         candidate slot. */

      for( fd_tower_lockos_slot_t const * slot = fd_tower_lockos_slot_map_ele_query_const( lockos->slot_map, &candidate_slot, NULL, lockos->slot_pool );
           slot;
           slot = fd_tower_lockos_slot_map_ele_next_const ( slot, NULL, lockos->slot_pool ) ) {
        ulong interval_end = slot->interval_end;
        ulong key = fd_tower_lockos_interval_key( candidate_slot, interval_end );

        /* Intervals are keyed by the end of the interval. If the end of
           the interval is < the last vote slot, then these vote
           accounts with this particular lockout are NOT locked out from
           voting for the last vote slot, which means we can skip this
           set of intervals. */

        if( FD_LIKELY( interval_end < last_vote_slot ) ) continue;

        /* At this point we can actually query for the intervals by
           end interval to get the vote accounts. */

        for( fd_tower_lockos_interval_t const * interval = fd_tower_lockos_interval_map_ele_query_const( lockos->interval_map, &key, NULL, lockos->interval_pool );
             interval;
             interval = fd_tower_lockos_interval_map_ele_next_const( interval, NULL, lockos->interval_pool ) ) {
          ulong vote_slot = interval->start;
          fd_hash_t const * vote_acc = &interval->addr;

          if( FD_UNLIKELY( !fd_tower_blocks_is_slot_descendant( blocks, vote_slot, last_vote_slot ) && vote_slot > root_slot ) ) {

            /* NOTE(review): this `key` shadows the ulong `key` of the
               enclosing loop (-Wshadow); consider renaming. */

            fd_tower_stakes_vtr_xid_t key = { .addr = *vote_acc, .slot = switch_slot };
            fd_tower_stakes_vtr_t const * voter_stake = fd_tower_stakes_vtr_map_ele_query_const( stakes->vtr_map, &key, NULL, stakes->vtr_pool );
            if( FD_UNLIKELY( !voter_stake ) ) {
              FD_BASE58_ENCODE_32_BYTES( vote_acc->key, vote_acc_b58 );
              FD_LOG_CRIT(( "missing voter stake for vote account %s on slot %lu. Is this an error?", vote_acc_b58, switch_slot ));
            }
            ulong voter_idx = fd_tower_stakes_vtr_pool_idx( stakes->vtr_pool, voter_stake );
            if( FD_UNLIKELY( fd_used_acc_scratch_test( stakes->used_acc_scratch, voter_idx ) ) ) continue; /* exclude already counted voters */
            fd_used_acc_scratch_insert( stakes->used_acc_scratch, voter_idx );
            switch_stake += voter_stake->stake;
            if( FD_LIKELY( (double)switch_stake >= (double)total_stake * SWITCH_RATIO ) ) {
              fd_used_acc_scratch_null( stakes->used_acc_scratch );
              FD_LOG_INFO(( "[%s] switch? 1. last_vote_slot: %lu. switch_slot: %lu. pct: %.0lf%%", __func__, last_vote_slot, switch_slot, (double)switch_stake / (double)total_stake * 100.0 ));
              while( FD_LIKELY( head ) ) { /* cleanup: re-insert remaining BFS queue into map */
                fd_ghost_blk_t * next = fd_ghost_blk_next( ghost, head );
                fd_ghost_blk_map_insert( ghost, head );
                head = next;
              }
              return 1;
            }
          }
        }
      }
    }
  }
  fd_used_acc_scratch_null( stakes->used_acc_scratch );
  FD_LOG_INFO(( "[%s] switch? 0. last_vote_slot: %lu. switch_slot: %lu. pct: %.0lf%%", __func__, last_vote_slot, switch_slot, (double)switch_stake / (double)total_stake * 100.0 ));
  return 0;
}
345 :
346 : /* threshold_check checks if we pass the threshold required to vote for
347 : `slot`. Returns 1 if we pass the threshold check, 0 otherwise.
348 :
349 :    The following pseudocode describes the algorithm:
350 :
351 : ```
352 : simulate that we have voted for `slot`
353 :
354 : for all vote accounts in the current epoch
355 :
356 : simulate that the vote account has voted for `slot`
357 :
358 : pop all votes expired by that simulated vote
359 :
360 : if the validator's latest tower vote after expiry >= our threshold
361 : slot ie. our vote from THRESHOLD_DEPTH back also after simulating,
362 : then add validator's stake to threshold_stake.
363 :
364 :    return threshold_stake / total_stake > THRESHOLD_RATIO
365 : ```
366 :
367 : The threshold check simulates voting for the current slot to expire
368 : stale votes. This is to prevent validators that haven't voted in a
369 : long time from counting towards the threshold stake. */
370 :
371 : static int
372 : threshold_check( fd_tower_t const * tower,
373 : fd_tower_voters_t const * accts,
374 : ulong total_stake,
375 0 : ulong slot ) {
376 :
377 : /* First, simulate a vote on our tower, popping off everything that
378 : would be expired by voting for slot. */
379 :
380 0 : ulong cnt = simulate_vote( tower, slot );
381 :
382 : /* We can always vote if our tower is not at least THRESHOLD_DEPTH
383 : deep after simulating. */
384 :
385 0 : if( FD_UNLIKELY( cnt < THRESHOLD_DEPTH ) ) return 1;
386 :
387 : /* Get the vote slot from THRESHOLD_DEPTH back. Note THRESHOLD_DEPTH
388 : is the 8th index back _including_ the simulated vote at index 0. */
389 :
390 0 : ulong threshold_slot = fd_tower_peek_index_const( tower, cnt - THRESHOLD_DEPTH )->slot;
391 0 : ulong threshold_stake = 0;
392 0 : for( fd_tower_voters_iter_t iter = fd_tower_voters_iter_init( accts );
393 0 : !fd_tower_voters_iter_done( accts, iter );
394 0 : iter = fd_tower_voters_iter_next( accts, iter ) ) {
395 0 : fd_tower_voters_t const * acct = fd_tower_voters_iter_ele_const( accts, iter );
396 :
397 0 : ulong cnt = simulate_vote( acct->tower, slot ); /* expire votes */
398 0 : if( FD_UNLIKELY( !cnt ) ) continue; /* no votes left after expiry */
399 :
400 : /* Count their stake towards the threshold check if their last vote
401 : slot >= our threshold slot.
402 :
403 : We know these votes are for our own fork because towers are
404 : sourced from vote _accounts_, not vote _transactions_.
405 :
406 : Because we are iterating vote accounts on the same fork that we
407 : we want to vote for, we know these slots must all occur along the
408 : same fork ancestry.
409 :
410 : Therefore, if their latest vote slot >= our threshold slot, we
411 : know that vote must be for the threshold slot itself or one of
412 : threshold slot's descendants. */
413 :
414 0 : ulong last_vote = fd_tower_peek_index_const( acct->tower, cnt - 1 )->slot;
415 0 : if( FD_LIKELY( last_vote >= threshold_slot ) ) threshold_stake += acct->stake;
416 0 : }
417 :
418 0 : double threshold_pct = (double)threshold_stake / (double)total_stake;
419 0 : int threshold = threshold_pct > THRESHOLD_RATIO;
420 0 : FD_LOG_INFO(( "[%s] threshold? %d. top: %lu. threshold: %lu. pct: %.0lf%%.", __func__, threshold, fd_tower_peek_tail_const( tower )->slot, threshold_slot, threshold_pct * 100.0 ));
421 0 : return threshold;
422 0 : }
423 :
424 : static int
425 : propagated_check( fd_tower_t * tower FD_PARAM_UNUSED,
426 : fd_tower_blocks_t * blocks,
427 0 : ulong slot ) {
428 :
429 0 : fd_tower_blk_t * blk = fd_tower_blocks_query( blocks, slot );
430 0 : if( FD_UNLIKELY( !blk ) ) return 1;
431 :
432 0 : if( FD_LIKELY( blk->leader ) ) return 1; /* can always vote for slot in which we're leader */
433 0 : if( FD_LIKELY( blk->prev_leader_slot==ULONG_MAX ) ) return 1; /* haven't been leader yet */
434 :
435 0 : fd_tower_blk_t * prev_leader = fd_tower_blocks_query( blocks, blk->prev_leader_slot );
436 0 : if( FD_LIKELY( !prev_leader ) ) return 1; /* already pruned / rooted */
437 :
438 0 : return prev_leader->propagated;
439 0 : }
440 :
/* fd_tower_vote_and_reset runs one round of Tower BFT fork choice:
   given the current ghost tree and our local tower, it decides which
   block to reset PoH onto (always returned) and whether we may also
   vote for it (out.vote_slot is ULONG_MAX when we cannot).  If the vote
   advances the tower root, out.root_slot / out.root_block_id are set as
   well.  flags records which decision path was taken.  Mirrors Agave's
   select_vote_and_reset_forks (links to the specific cases inline). */

fd_tower_out_t
fd_tower_vote_and_reset( fd_tower_t *        tower,
                         fd_tower_blocks_t * blocks,
                         fd_tower_lockos_t * lockos,
                         fd_tower_stakes_t * stakes,
                         fd_tower_voters_t * voters,
                         fd_ghost_t *        ghost,
                         fd_votes_t *        votes FD_PARAM_UNUSED ) {

  uchar flags = 0;
  fd_ghost_blk_t const * best_blk  = fd_ghost_best( ghost, fd_ghost_root( ghost ) );
  fd_ghost_blk_t const * reset_blk = NULL;
  fd_ghost_blk_t const * vote_blk  = NULL;

  /* Case 0: if we haven't voted yet then we can always vote and reset
     to ghost_best.

     https://github.com/anza-xyz/agave/blob/v2.3.7/core/src/consensus.rs#L933-L935 */

  if( FD_UNLIKELY( fd_tower_empty( tower ) ) ) {
    fd_tower_blk_t * fork = fd_tower_blocks_query( blocks, best_blk->slot );
    fork->voted = 1;
    fork->voted_block_id = best_blk->id;
    return (fd_tower_out_t){
      .flags          = flags,
      .reset_slot     = best_blk->slot,
      .reset_block_id = best_blk->id,
      .vote_slot      = best_blk->slot,
      .vote_block_id  = best_blk->id,
      .root_slot      = push_vote( tower, best_blk->slot )
    };
  }

  ulong prev_vote_slot = fd_tower_peek_tail_const( tower )->slot;
  fd_tower_blk_t * prev_vote_fork = fd_tower_blocks_query( blocks, prev_vote_slot ); /* must exist */

  fd_hash_t * prev_vote_block_id = &prev_vote_fork->voted_block_id;
  fd_ghost_blk_t * prev_vote_blk = fd_ghost_query( ghost, prev_vote_block_id );

  /* Case 1: if an ancestor of our prev vote (including prev vote
     itself) is an unconfirmed duplicate, then our prev vote was on a
     duplicate fork.

     There are two subcases to check.  Note subcase 1b can overwrite
     subcase 1a's reset_blk; both choose best_blk so the result is the
     same either way. */

  int invalid_ancestor = !!fd_ghost_invalid_ancestor( ghost, prev_vote_blk );
  if( FD_UNLIKELY( invalid_ancestor ) ) { /* do we have an invalid ancestor? */

    /* Case 1a: ghost_best is an ancestor of prev vote. This means
       ghost_best is rolling back to an ancestor that precedes the
       duplicate ancestor on the same fork as our prev vote. In this
       case, we can't vote on our ancestor, but we do reset to that
       ancestor.

       https://github.com/anza-xyz/agave/blob/v2.3.7/core/src/consensus.rs#L1016-L1019 */

    int ancestor_rollback = prev_vote_blk != best_blk && !!fd_ghost_ancestor( ghost, prev_vote_blk, &best_blk->id );
    if( FD_LIKELY( ancestor_rollback ) ) {
      flags = fd_uchar_set_bit( flags, FD_TOWER_FLAG_ANCESTOR_ROLLBACK );
      reset_blk = best_blk;
    }

    /* Case 1b: ghost_best is not an ancestor, but prev_vote is a
       duplicate and we've confirmed its duplicate sibling. In this
       case, we allow switching to ghost_best without a switch proof.

       Example: slot 5 is a duplicate. We first receive, replay and
       vote for block 5, so that is our prev vote. We later receive
       block 5' and observe that it is duplicate confirmed. ghost_best
       now returns block 5' and we both vote and reset to block 5'
       regardless of the switch check.

       https://github.com/anza-xyz/agave/blob/v2.3.7/core/src/consensus.rs#L1021-L1024 */

    int sibling_confirmed = prev_vote_fork->confirmed && 0!=memcmp( &prev_vote_fork->voted_block_id, &prev_vote_fork->confirmed_block_id, sizeof(fd_hash_t) );
    if( FD_LIKELY( sibling_confirmed ) ) {
      flags = fd_uchar_set_bit( flags, FD_TOWER_FLAG_SIBLING_CONFIRMED );
      reset_blk = best_blk;
      vote_blk  = best_blk;
    }

    /* At this point our prev vote was on a duplicate fork but didn't
       match either of the above subcases.

       In this case, we have to pass the switch check to reset to a
       different fork from prev vote (same as non-duplicate case).
       reset_blk stays NULL here and is picked in Case 4 below. */
  }

  /* Case 2: if our prev vote slot is an ancestor of the best slot, then
     they are on the same fork and we can both reset to it. We can also
     vote for it if we pass the can_vote checks.

     https://github.com/anza-xyz/agave/blob/v2.3.7/core/src/consensus.rs#L1057 */

  else if( FD_LIKELY( best_blk->slot == prev_vote_slot || fd_tower_blocks_is_slot_ancestor( blocks, best_blk->slot, prev_vote_slot ) ) ) {
    flags = fd_uchar_set_bit( flags, FD_TOWER_FLAG_SAME_FORK );
    reset_blk = best_blk;
    vote_blk  = best_blk;
  }

  /* Case 3: if our prev vote is not an ancestor of the best block, then
     it is on a different fork. If we pass the switch check, we can
     reset to it. If we additionally pass the lockout check, we can
     also vote for it.

     https://github.com/anza-xyz/agave/blob/v2.3.7/core/src/consensus.rs#L1208-L1215

     Note also Agave uses the best blk's total stake for checking the
     threshold.

     https://github.com/anza-xyz/agave/blob/v2.3.7/core/src/consensus/fork_choice.rs#L443-L445 */

  else if( FD_LIKELY( switch_check( tower, ghost, blocks, lockos, stakes, best_blk->total_stake, best_blk->slot ) ) ) {
    flags = fd_uchar_set_bit( flags, FD_TOWER_FLAG_SWITCH_PASS );
    reset_blk = best_blk;
    vote_blk  = best_blk;
  }

  /* Case 4: same as case 3 but we didn't pass the switch check. In
     this case we reset to either ghost_best or ghost_deepest beginning
     from our prev vote blk.

     We must reset to a block beginning from our prev vote fork to
     ensure votes get a chance to propagate. Because in order for votes
     to land, someone needs to build a block on that fork.

     We reset to ghost_best or ghost_deepest depending on whether our
     prev vote is valid. When it's invalid we use ghost_deepest instead
     of ghost_best, because ghost_best won't be able to return a valid
     block beginning from our prev_vote because by definition the entire
     subtree will be invalid.

     When our prev vote fork is not a duplicate, we want to propagate
     votes that might allow others to switch to our fork. In addition,
     if our prev vote fork is a duplicate, we want to propagate votes
     that might "duplicate confirm" that block (reach 52% of stake).

     See top-level documentation in fd_tower.h for more details on vote
     propagation. */

  else {

    /* Case 4a: failed switch check and last vote's fork is invalid.

       https://github.com/anza-xyz/agave/blob/v2.3.7/core/src/consensus/heaviest_subtree_fork_choice.rs#L1187 */

    if( FD_UNLIKELY( invalid_ancestor ) ) {
      flags = fd_uchar_set_bit( flags, FD_TOWER_FLAG_SWITCH_FAIL );
      reset_blk = fd_ghost_deepest( ghost, prev_vote_blk );
    }

    /* Case 4b: failed switch check and last vote's fork is valid.

       https://github.com/anza-xyz/agave/blob/v2.3.7/core/src/consensus/fork_choice.rs#L200 */

    else {
      flags = fd_uchar_set_bit( flags, FD_TOWER_FLAG_SWITCH_FAIL );
      reset_blk = fd_ghost_best( ghost, prev_vote_blk );
    }
  }

  /* If there is a block to vote for, there are a few additional checks
     to make sure we can actually vote for it.

     Specifically, we need to make sure we're not locked out, pass the
     threshold check and that our previous leader block has propagated
     (reached the prop threshold according to fd_votes).

     https://github.com/firedancer-io/agave/blob/master/core/src/consensus/fork_choice.rs#L382-L385

     Agave uses the total stake on the fork being threshold checked
     (vote_blk) for determining whether it meets the stake threshold. */

  if( FD_LIKELY( vote_blk ) ) {
    if ( FD_UNLIKELY( !lockout_check( tower, blocks, vote_blk->slot ) ) ) {
      flags = fd_uchar_set_bit( flags, FD_TOWER_FLAG_LOCKOUT_FAIL );
      vote_blk = NULL;
    }
    else if( FD_UNLIKELY( !threshold_check( tower, voters, vote_blk->total_stake, vote_blk->slot ) ) ) {
      flags = fd_uchar_set_bit( flags, FD_TOWER_FLAG_THRESHOLD_FAIL );
      vote_blk = NULL;
    }
    else if( FD_UNLIKELY( !propagated_check( tower, blocks, vote_blk->slot ) ) ) {
      flags = fd_uchar_set_bit( flags, FD_TOWER_FLAG_PROPAGATED_FAIL );
      vote_blk = NULL;
    }
  }

  FD_TEST( reset_blk ); /* always a reset_blk */
  fd_tower_out_t out = {
    .flags          = flags,
    .reset_slot     = reset_blk->slot,
    .reset_block_id = reset_blk->id,
    .vote_slot      = ULONG_MAX,
    .root_slot      = ULONG_MAX
  };

  /* Finally, if our vote passed all the checks, we actually push the
     vote onto the tower. */

  if( FD_LIKELY( vote_blk ) ) {
    out.vote_slot     = vote_blk->slot;
    out.vote_block_id = vote_blk->id;
    out.root_slot     = push_vote( tower, vote_blk->slot );

    /* Query our tower fork for this slot we're voting for. Note this
       can never be NULL because we record tower forks as we replay, and
       we should never be voting on something we haven't replayed. */

    fd_tower_blk_t * fork = fd_tower_blocks_query( blocks, vote_blk->slot );
    fork->voted = 1;
    fork->voted_block_id = vote_blk->id;

    /* Query the root slot's block id from tower forks. This block id
       may not necessarily be confirmed, because confirmation requires
       votes on the block itself (vs. block and its descendants).

       So if we have a confirmed block id, we return that. Otherwise
       we return our own vote block id for that slot, which we assume
       is the cluster converged on by the time we're rooting it.

       The only way it is possible for us to root the wrong version of
       a block (ie. not the one the cluster confirmed) is if there is
       mass equivocation (>2/3 of threshold check stake has voted for
       two versions of a block). This exceeds the equivocation safety
       threshold and we would eventually detect this via a bank hash
       mismatch and error out. */

    if( FD_LIKELY( out.root_slot!=ULONG_MAX ) ) {
      fd_tower_blk_t * root_fork = fd_tower_blocks_query( blocks, out.root_slot );
      out.root_block_id = *fd_ptr_if( root_fork->confirmed, &root_fork->confirmed_block_id, &root_fork->voted_block_id );
    }
  }

  FD_BASE58_ENCODE_32_BYTES( out.reset_block_id.uc, reset_block_id );
  FD_BASE58_ENCODE_32_BYTES( out.vote_block_id.uc,  vote_block_id  );
  FD_BASE58_ENCODE_32_BYTES( out.root_block_id.uc,  root_block_id  );
  FD_LOG_INFO(( "[%s] flags: %d. reset_slot: %lu (%s). vote_slot: %lu (%s). root_slot: %lu (%s).", __func__, out.flags, out.reset_slot, reset_block_id, out.vote_slot, vote_block_id, out.root_slot, root_block_id ));
  return out;
}
681 :
682 : /* fd_tower_reconcile reconciles our local tower with the on-chain
683 : tower inside our vote account. Mirrors what Agave does. Also
684 : updates tower_blocks vote metadata to match the updated tower.
685 :
686 : It's possible prev_vote_fork->voted is not set even though we popped
687 : it from the top of our tower. This can happen when there are
688 : multiple nodes operating with the same vote account.
689 :
690 : In a typical setup involving a primary staked node and backup
691 : unstaked node, the two nodes' towers will usually be identical
692 :    but occasionally diverge when one node observes a minority fork
693 : the other doesn't. As a result, one node might be locked out
694 : from voting for a fork that the other node is not.
695 :
696 : This becomes a problem in our primary-backup setup when the
697 : unstaked node is locked out but the staked node is not. The
698 : staked node ultimately lands the vote into the on-chain vote
699 : account, so it's possible when the unstaked node reads back their
700 : on-chain vote account to diff against their local tower, there
701 : are votes in there they themselves did not vote for due to
702 : lockout (fd_tower_reconcile).
703 :
704 : As a result, `voted_block_id` will not be set for slots in their
705 : tower, which normally would be an invariant violation because the
706 : node must have set this value when they voted for the slot (and
707 : pushed it to their tower).
708 :
709 : So here we manually set the voted_block_id to most recent
710 : replayed_block_id if not already set. We know we must have replayed
711 : it, because to observe the on-chain tower you must have replayed all
712 : the slots in the tower.
713 :
714 : When there is equivocation, the backup may have replayed a different
715 : version of a block than what the main validator voted for. In a
716 : regular equivocation scenario, this is a non-issue: two equivocating
717 : blocks will cause different bank hashes, so the vote program will
718 : ensure that we will only see a vote for a certain version of a block
719 : if we have also replayed that version. Thus even though the vote
720 : account only stores slot numbers (not block_ids), reconcile can
721 : assume the version the main validator voted for was the last replayed
722 : version.
723 :
724 : This degenerates in the case where two equivocating blocks have the
725 : same bank hash (rare but possible, see fd_txncache.h for more
726 : details). In this case, the vote program does not distinguish the
727 : difference between votes for the two blocks for absolutely no good
728 : reason.
729 :
730 : Example:
731 :
732 : 2
733 : / \
734 : 3 3' (confirmed)
735 : | |
736 : 5 4
737 :
738 : Assume 3 and 3' have the same bank hash. Both slots 4 and 5 can
739 : contain votes for 3 and 3'. There are two cases depending on whether
740 : the backup is on the duplicate confirmed (DC) fork:
741 :
742 : Case 1: the backup is on the DC fork (replayed 3' first), the main
743 : validator replayed 3 first and voted on it.
744 :
745 : After replaying 4, reconcile sets voted_block_id for slot 3 to
746 : block_id(3'), which is the DC version. This is correct because the
747 : main validator will eventually switch to the DC fork as well.
748 :
749 : Case 2: the backup is on the non DC fork (replayed 3 first), the main
750 : validator replayed 3' first and voted on it.
751 :
752 : Reconcile initially sets voted_block_id for slot 3 to block_id(3), the
753 : non-DC version. After the backup later replays the DC version
754 : (3') and its descendants, voted_block_id will still point to the
755 : non-DC version because no new reconcile is triggered. This is
756 : handled by case 1b in fd_tower_vote_and_reset (sibling confirmed):
757 : voted_block_id != confirmed_block_id triggers a free switch to
758 : the DC fork without needing a switch proof. This is the same
759 : codepath a staked validator uses when it votes for a non-DC
760 : version and later observes duplicate confirmation of the sibling.
761 :
762 : Unsupported case: the main validator votes down a minority fork
763 : that the backup never observes. The backup could cast conflicting
764 : votes that violate lockout. Handling this would require additional
765 : codepaths (tower file, communication channel between backup and
766 : main, or gossip vote tracking) that are fully removed with
767 : Alpenglow, so we do not handle this case. */
768 : void
769 : fd_tower_reconcile( fd_tower_t * tower,
770 : ulong tower_root,
771 : uchar const * vote_account_data,
772 6 : fd_tower_blocks_t * tower_blocks ) {
773 6 : ulong on_chain_vote = fd_vote_acc_vote_slot( vote_account_data );
774 6 : ulong on_chain_root = fd_vote_acc_root_slot( vote_account_data );
775 :
776 6 : fd_tower_vote_t const * last_vote = fd_tower_peek_tail_const( tower );
777 6 : ulong last_vote_slot = last_vote ? last_vote->slot : ULONG_MAX;
778 :
779 6 : if( FD_UNLIKELY( ( on_chain_vote==ULONG_MAX && last_vote_slot==ULONG_MAX ) ) ) return;
780 6 : if( FD_LIKELY ( ( on_chain_vote!=ULONG_MAX && last_vote_slot!=ULONG_MAX
781 6 : && on_chain_vote <= last_vote_slot ) ) ) return;
782 :
783 : /* At this point our local tower is older than the on-chain tower, and
784 : we need to replace it with our on-chain tower. This mirrors the
785 : Agave logic:
786 : https://github.com/firedancer-io/agave/blob/master/core/src/replay_stage.rs#L3690-L3719
787 :
788 : TODO: pass on_chain_tower instead of raw vote account data to
789 : mirror agave logic one to one. */
790 :
791 24 : while( FD_LIKELY( !fd_tower_empty( tower ) ) ) {
792 18 : fd_tower_vote_t old_vote = fd_tower_pop_head( tower );
793 18 : fd_tower_blk_t * vote_blk = fd_tower_blocks_query( tower_blocks, old_vote.slot );
794 18 : if( FD_LIKELY( vote_blk ) ) vote_blk->voted = 0;
795 18 : }
796 :
797 6 : fd_vote_acc_t const * voter = (fd_vote_acc_t const *)fd_type_pun_const( vote_account_data );
798 6 : uint kind = fd_uint_load_4_fast( vote_account_data ); /* skip node_pubkey */
799 114 : for( ulong i=0; i<fd_vote_acc_vote_cnt( vote_account_data ); i++ ) {
800 108 : switch( kind ) {
801 0 : case FD_VOTE_ACC_V4: fd_tower_push_tail( tower, (fd_tower_vote_t){ .slot = v4_off( voter )[i].slot, .conf = v4_off( voter )[i].conf } ); break;
802 108 : case FD_VOTE_ACC_V3: fd_tower_push_tail( tower, (fd_tower_vote_t){ .slot = voter->v3.votes[i].slot, .conf = voter->v3.votes[i].conf } ); break;
803 0 : case FD_VOTE_ACC_V2: fd_tower_push_tail( tower, (fd_tower_vote_t){ .slot = voter->v2.votes[i].slot, .conf = voter->v2.votes[i].conf } ); break;
804 0 : default: FD_LOG_ERR(( "unknown kind: %u", kind ));
805 108 : }
806 108 : fd_tower_vote_t * new_vote = fd_tower_peek_tail( tower );
807 108 : fd_tower_blk_t * vote_blk = fd_tower_blocks_query( tower_blocks, new_vote->slot );
808 :
809 108 : if( FD_LIKELY( vote_blk ) ) {
810 108 : vote_blk->voted = 1;
811 108 : vote_blk->voted_block_id = vote_blk->replayed_block_id;
812 108 : }
813 108 : }
814 :
815 : /* It's possible our local root is newer than the on-chain root
816 : (even though the tower is older). The most likely reason is
817 : booting from a snapshot where snapshot slot > on-chain root.
818 : Filter out stale votes <= local root.
819 :
820 : After filtering, the tower may be empty if all on-chain votes
821 : were below our local root. In Agave this is handled by falling
822 : back to local_root as the last_voted_slot. Here it is fine to
823 : leave the tower empty because vote_and_reset handles an empty
824 : tower (case 0).
825 :
826 : There's no need to clear voted metadata in tower_blocks, which
827 : breaks our invariant that voted==1 iff slot is in tower. This is
828 : fine because pruning from tower_blocks is asynchronous with the
829 : tower. */
830 :
831 6 : if( FD_LIKELY( on_chain_root == ULONG_MAX || tower_root > on_chain_root ) ) {
832 3 : while( FD_LIKELY( !fd_tower_empty( tower ) ) ) {
833 3 : fd_tower_t const * vote = fd_tower_peek_head_const( tower );
834 3 : if( FD_LIKELY( vote->slot > tower_root ) ) break;
835 0 : fd_tower_pop_head( tower );
836 0 : }
837 3 : }
838 : /* Note its also possible the on_chain_root is ahead of our local
839 : root. In this case, our local root is technically !voted now, since
840 : we have updated our tower to match the on-chain tower. But this is
841 : not a problem because the next vote we make will pop the
842 : on_chain_root which we set above voted=1. */
843 6 : }
844 :
845 : void
846 : fd_tower_from_vote_acc( fd_tower_t * tower,
847 : ulong * root,
848 132 : uchar const * vote_acc ) {
849 132 : fd_vote_acc_t const * voter = (fd_vote_acc_t const *)fd_type_pun_const( vote_acc );
850 132 : uint kind = fd_uint_load_4_fast( vote_acc ); /* skip node_pubkey */
851 444 : for( ulong i=0; i<fd_vote_acc_vote_cnt( vote_acc ); i++ ) {
852 312 : switch( kind ) {
853 0 : case FD_VOTE_ACC_V4: fd_tower_push_tail( tower, (fd_tower_vote_t){ .slot = v4_off( voter )[i].slot, .conf = v4_off( voter )[i].conf } ); break;
854 219 : case FD_VOTE_ACC_V3: fd_tower_push_tail( tower, (fd_tower_vote_t){ .slot = voter->v3.votes[i].slot, .conf = voter->v3.votes[i].conf } ); break;
855 93 : case FD_VOTE_ACC_V2: fd_tower_push_tail( tower, (fd_tower_vote_t){ .slot = voter->v2.votes[i].slot, .conf = voter->v2.votes[i].conf } ); break;
856 0 : default: FD_LOG_ERR(( "unsupported voter account version: %u", kind ));
857 312 : }
858 312 : }
859 132 : *root = fd_vote_acc_root_slot( vote_acc );
860 132 : }
861 :
862 : ulong
863 : fd_tower_with_lat_from_vote_acc( fd_vote_acc_vote_t tower[ static FD_TOWER_VOTE_MAX ],
864 0 : uchar const * vote_acc ) {
865 0 : fd_vote_acc_t const * voter = (fd_vote_acc_t const *)fd_type_pun_const( vote_acc );
866 0 : uint kind = fd_uint_load_4_fast( vote_acc ); /* skip node_pubkey */
867 0 : for( ulong i=0; i<fd_vote_acc_vote_cnt( vote_acc ); i++ ) {
868 0 : switch( kind ) {
869 0 : case FD_VOTE_ACC_V4: tower[ i ] = (fd_vote_acc_vote_t){ .latency = v4_off( voter )[i].latency, .slot = v4_off( voter )[i].slot, .conf = v4_off( voter )[i].conf }; break;
870 0 : case FD_VOTE_ACC_V3: tower[ i ] = (fd_vote_acc_vote_t){ .latency = voter->v3.votes[i].latency, .slot = voter->v3.votes[i].slot, .conf = voter->v3.votes[i].conf }; break;
871 0 : case FD_VOTE_ACC_V2: tower[ i ] = (fd_vote_acc_vote_t){ .latency = UCHAR_MAX, .slot = voter->v2.votes[i].slot, .conf = voter->v2.votes[i].conf }; break;
872 0 : default: FD_LOG_ERR(( "unsupported voter account version: %u", kind ));
873 0 : }
874 0 : }
875 :
876 0 : return fd_vote_acc_vote_cnt( vote_acc );
877 0 : }
878 :
/* fd_tower_to_vote_txn serializes tower (plus root, bank_hash and
   block_id) into a signed-shell TowerSync vote transaction written to
   vote_txn.  The transaction uses one signer when validator_identity
   and vote_authority are the same key, two signers otherwise.  The
   caller is responsible for actually signing the payload. */

void
fd_tower_to_vote_txn( fd_tower_t const *  tower,
                      ulong               root,
                      fd_hash_t const *   bank_hash,
                      fd_hash_t const *   block_id,
                      fd_hash_t const *   recent_blockhash,
                      fd_pubkey_t const * validator_identity,
                      fd_pubkey_t const * vote_authority,
                      fd_pubkey_t const * vote_acc,
                      fd_txn_p_t *        vote_txn ) {

  FD_TEST( fd_tower_cnt( tower )<=FD_TOWER_VOTE_MAX );
  fd_compact_tower_sync_serde_t tower_sync_serde = {
    .root = fd_ulong_if( root == ULONG_MAX, 0UL, root ), /* compact encoding has no "no root": use 0 */
    .lockouts_cnt = (ushort)fd_tower_cnt( tower ),
    /* .lockouts populated below */
    .hash = *bank_hash,
    .timestamp_option = 1,
    .timestamp = fd_log_wallclock() / (long)1e9, /* seconds */
    .block_id = *block_id
  };

  /* Compact encoding: each lockout stores its slot as an offset from
     the previous entry (the first is offset from the root). */

  ulong i = 0UL;
  ulong prev = tower_sync_serde.root;
  for( fd_tower_iter_t iter = fd_tower_iter_init( tower );
       !fd_tower_iter_done( tower, iter );
       iter = fd_tower_iter_next( tower, iter ) ) {
    fd_tower_t const * vote = fd_tower_iter_ele_const( tower, iter );
    tower_sync_serde.lockouts[i].offset             = vote->slot - prev;
    tower_sync_serde.lockouts[i].confirmation_count = (uchar)vote->conf;
    prev = vote->slot;
    i++;
  }

  uchar * txn_out      = vote_txn->payload;
  uchar * txn_meta_out = vote_txn->_;

  /* The account table layout depends on whether the vote authority is
     the validator identity (one signer) or a separate key (two
     signers).  The instruction account indices below must match the
     layout chosen here. */

  int same_addr = !memcmp( validator_identity, vote_authority, sizeof(fd_pubkey_t) );
  if( FD_LIKELY( same_addr ) ) {

    /* 0: validator identity
       1: vote account address
       2: vote program */

    fd_txn_accounts_t votes;
    votes.signature_cnt         = 1;
    votes.readonly_signed_cnt   = 0;
    votes.readonly_unsigned_cnt = 1;
    votes.acct_cnt              = 3;
    votes.signers_w             = validator_identity;
    votes.signers_r             = NULL;
    votes.non_signers_w         = vote_acc;
    votes.non_signers_r         = &fd_solana_vote_program_id;
    FD_TEST( fd_txn_base_generate( txn_meta_out, txn_out, votes.signature_cnt, &votes, recent_blockhash->uc ) );

  } else {

    /* 0: validator identity
       1: vote authority
       2: vote account address
       3: vote program */

    fd_txn_accounts_t votes;
    votes.signature_cnt         = 2;
    votes.readonly_signed_cnt   = 1;
    votes.readonly_unsigned_cnt = 1;
    votes.acct_cnt              = 4;
    votes.signers_w             = validator_identity;
    votes.signers_r             = vote_authority;
    votes.non_signers_w         = vote_acc;
    votes.non_signers_r         = &fd_solana_vote_program_id;
    FD_TEST( fd_txn_base_generate( txn_meta_out, txn_out, votes.signature_cnt, &votes, recent_blockhash->uc ) );
  }

  /* Add the vote instruction to the transaction.  Instruction data is
     the 4-byte TowerSync discriminant followed by the compact
     serialization built above. */

  uchar vote_ix_buf[FD_TXN_MTU];
  ulong vote_ix_sz = 0;
  FD_STORE( uint, vote_ix_buf, FD_VOTE_IX_KIND_TOWER_SYNC );
  FD_TEST( 0==fd_compact_tower_sync_ser( &tower_sync_serde, vote_ix_buf + sizeof(uint), FD_TXN_MTU - sizeof(uint), &vote_ix_sz ) ); // cannot fail if fd_tower_cnt( tower ) <= FD_TOWER_VOTE_MAX
  vote_ix_sz += sizeof(uint);
  uchar program_id;
  uchar ix_accs[2];
  if( FD_LIKELY( same_addr ) ) {
    ix_accs[0] = 1; /* vote account address */
    ix_accs[1] = 0; /* vote authority */
    program_id = 2; /* vote program */
  } else {
    ix_accs[0] = 2; /* vote account address */
    ix_accs[1] = 1; /* vote authority */
    program_id = 3; /* vote program */
  }
  vote_txn->payload_sz = fd_txn_add_instr( txn_meta_out, txn_out, program_id, ix_accs, 2, vote_ix_buf, vote_ix_sz );
}
973 :
974 : int
975 0 : fd_tower_verify( fd_tower_t const * tower ) {
976 0 : if( FD_UNLIKELY( fd_tower_cnt( tower )>=FD_TOWER_VOTE_MAX ) ) {
977 0 : FD_LOG_WARNING(( "[%s] invariant violation: cnt %lu >= FD_TOWER_VOTE_MAX %lu", __func__, fd_tower_cnt( tower ), (ulong)FD_TOWER_VOTE_MAX ));
978 0 : return -1;
979 0 : }
980 :
981 0 : fd_tower_t const * prev = NULL;
982 0 : for( fd_tower_iter_t iter = fd_tower_iter_init( tower );
983 0 : !fd_tower_iter_done( tower, iter );
984 0 : iter = fd_tower_iter_next( tower, iter ) ) {
985 0 : fd_tower_t const * vote = fd_tower_iter_ele_const( tower, iter );
986 0 : if( FD_UNLIKELY( prev && ( vote->slot < prev->slot || vote->conf < prev->conf ) ) ) {
987 0 : FD_LOG_WARNING(( "[%s] invariant violation: vote (slot:%lu conf:%lu) prev (slot:%lu conf:%lu)", __func__, vote->slot, vote->conf, prev->slot, prev->conf ));
988 0 : return -1;
989 0 : }
990 0 : prev = vote;
991 0 : }
992 0 : return 0;
993 0 : }
994 :
995 : static void
996 0 : to_cstr( fd_tower_t const * tower, ulong root, char * s, ulong len ) {
997 0 : ulong off = 0;
998 0 : int n;
999 :
1000 0 : n = snprintf( s + off, len - off, "[Tower]\n\n" );
1001 0 : if( FD_UNLIKELY( n < 0 )) FD_LOG_CRIT(( "snprintf: %d", n ));
1002 0 : off += (ulong)n;
1003 :
1004 0 : if( FD_UNLIKELY( fd_tower_empty( tower ) ) ) return;
1005 :
1006 0 : ulong max_slot = 0;
1007 :
1008 : /* Determine spacing. */
1009 :
1010 0 : for( fd_tower_iter_t iter = fd_tower_iter_init_rev( tower );
1011 0 : !fd_tower_iter_done_rev( tower, iter );
1012 0 : iter = fd_tower_iter_prev ( tower, iter ) ) {
1013 0 : max_slot = fd_ulong_max( max_slot, fd_tower_iter_ele_const( tower, iter )->slot );
1014 0 : }
1015 :
1016 : /* Calculate the number of digits in the maximum slot value. */
1017 :
1018 :
1019 0 : int digit_cnt = (int)fd_ulong_base10_dig_cnt( max_slot );
1020 :
1021 : /* Print the column headers. */
1022 :
1023 0 : if( off < len ) {
1024 0 : n = snprintf( s + off, len - off, "slot%*s | %s\n", digit_cnt - (int)strlen("slot"), "", "confirmation count" );
1025 0 : if( FD_UNLIKELY( n < 0 )) FD_LOG_CRIT(( "snprintf: %d", n ));
1026 0 : off += (ulong)n;
1027 0 : }
1028 :
1029 : /* Print the divider line. */
1030 :
1031 0 : for( int i = 0; i < digit_cnt && off < len; i++ ) {
1032 0 : s[off++] = '-';
1033 0 : }
1034 0 : if( off < len ) {
1035 0 : n = snprintf( s + off, len - off, " | " );
1036 0 : if( FD_UNLIKELY( n < 0 )) FD_LOG_CRIT(( "snprintf: %d", n ));
1037 0 : off += (ulong)n;
1038 0 : }
1039 0 : for( ulong i = 0; i < strlen( "confirmation count" ) && off < len; i++ ) {
1040 0 : s[off++] = '-';
1041 0 : }
1042 0 : if( off < len ) {
1043 0 : s[off++] = '\n';
1044 0 : }
1045 :
1046 : /* Print each vote as a table. */
1047 :
1048 0 : for( fd_tower_iter_t iter = fd_tower_iter_init_rev( tower );
1049 0 : !fd_tower_iter_done_rev( tower, iter );
1050 0 : iter = fd_tower_iter_prev ( tower, iter ) ) {
1051 0 : fd_tower_t const * vote = fd_tower_iter_ele_const( tower, iter );
1052 0 : if( off < len ) {
1053 0 : n = snprintf( s + off, len - off, "%*lu | %lu\n", digit_cnt, vote->slot, vote->conf );
1054 0 : if( FD_UNLIKELY( n < 0 )) FD_LOG_CRIT(( "snprintf: %d", n ));
1055 0 : off += (ulong)n;
1056 0 : }
1057 0 : }
1058 :
1059 0 : if( FD_UNLIKELY( root == ULONG_MAX ) ) {
1060 0 : if( off < len ) {
1061 0 : n = snprintf( s + off, len - off, "%*s | root\n", digit_cnt, "NULL" );
1062 0 : if( FD_UNLIKELY( n < 0 )) FD_LOG_CRIT(( "snprintf: %d", n ));
1063 0 : off += (ulong)n;
1064 0 : }
1065 0 : } else {
1066 0 : if( off < len ) {
1067 0 : n = snprintf( s + off, len - off, "%*lu | root\n", digit_cnt, root );
1068 0 : if( FD_UNLIKELY( n < 0 )) FD_LOG_CRIT(( "snprintf: %d", n ));
1069 0 : off += (ulong)n;
1070 0 : }
1071 0 : }
1072 :
1073 : /* Ensure null termination */
1074 0 : if( off < len ) {
1075 0 : s[off] = '\0';
1076 0 : } else {
1077 0 : s[len - 1] = '\0';
1078 0 : }
1079 0 : }
1080 :
1081 : char *
1082 : fd_tower_to_cstr( fd_tower_t const * tower,
1083 : ulong root,
1084 0 : char * cstr ) {
1085 0 : to_cstr( tower, root, cstr, FD_TOWER_CSTR_MIN );
1086 0 : return cstr;
1087 0 : }
|