Line data Source code
1 : #define _GNU_SOURCE
2 :
3 : /* Let's say there was a computer, the "leader" computer, that acted as
4 : a bank. Users could send it messages saying they wanted to deposit
5 : money, or transfer it to someone else.
6 :
7 : That's how, for example, Bank of America works but there are problems
8 : with it. One simple problem is: the bank can set your balance to
9 : zero if they don't like you.
10 :
11 : You could try to fix this by having the bank periodically publish the
12 : list of all account balances and transactions. If the customers add
13 : unforgeable signatures to their deposit slips and transfers, then
14 : the bank cannot zero a balance without it being obvious to everyone.
15 :
16 : There's still problems. The bank can't lie about your balance now or
17 : take your money, but it can just not accept deposits on your behalf
18 : by ignoring you.
19 :
20 : You could fix this by getting a few independent banks together, let's
21 : say Bank of America, Bank of England, and Westpac, and having them
22 : rotate who operates the leader computer periodically. If one bank
23 : ignores your deposits, you can just wait and send them to the next
24 : one.
25 :
26 : This is Solana.
27 :
28 : There's still problems of course but they are largely technical. How
29 : do the banks agree who is leader? How do you recover if a leader
30 : misbehaves? How do customers verify the transactions aren't forged?
31 : How do banks receive and publish and verify each others work quickly?
32 : These are the main technical innovations that enable Solana to work
33 : well.
34 :
35 : What about Proof of History?
36 :
37 : One particular niche problem is about the leader schedule. When the
38 : leader computer is moving from one bank to another, the new bank must
39 : wait for the old bank to say it's done and provide a final list of
40 : balances that it can start working off of. But: what if the computer
41 : at the old bank crashes and never says it's done?
42 :
43 : Does the new leader just take over at some point? What if the new
44 : leader is malicious, and says the past thousand leaders crashed, and
45 : there have been no transactions for days? How do you check?
46 :
47 : This is what Proof of History solves. Each bank in the network must
48 : constantly do a lot of busywork (compute hashes), even when it is not
49 : leader.
50 :
51 : If the prior thousand leaders crashed, and no transactions happened
52 : in an hour, the new leader would have to show they did about an hour
53 : of busywork for everyone else to believe them.
54 :
55 : A better name for this is proof of skipping. If a leader is skipping
56 : slots (building off of a slot that is not the direct parent), it must
57 : prove that it waited a good amount of time to do so.
58 :
59 : It's not a perfect solution. For one thing, some banks have really
60 : fast computers and can compute a lot of busywork in a short amount of
61 : time, allowing them to skip prior slot(s) anyway. But: there is a
62 : social component that prevents validators from skipping the prior
63 : leader slot. It is easy to detect when this happens and the network
64 : could respond by ignoring their votes or stake.
65 :
66 : You could come up with other schemes: for example, the network could
67 : just use wall clock time. If a new leader publishes a block without
68 : waiting 400 milliseconds for the prior slot to complete, then there
69 : is no "proof of skipping" and the nodes ignore the slot.
70 :
71 : These schemes have a problem in that they are not deterministic
72 : across the network (different computers have different clocks), and
73 : so they will cause frequent forks which are very expensive to
74 : resolve. Even though the proof of history scheme is not perfect,
75 : it is better than any alternative which is not deterministic.
76 :
77 : With all that background, we can now describe at a high level what
78 : this PoH tile actually does:
79 :
80 : (1) Whenever any other leader in the network finishes a slot, and
81 : the slot is determined to be the best one to build off of, this
82 : tile gets "reset" onto that block, the so called "reset slot".
83 :
84 : (2) The tile is constantly doing busy work, hash(hash(hash(...))) on
85 : top of the last reset slot, even when it is not leader.
86 :
87 : (3) When the tile becomes leader, it continues hashing from where it
88 : was. Typically, the prior leader finishes their slot, so the
89 : reset slot will be the parent one, and this tile only publishes
90 : hashes for its own slot. But if prior slots were skipped, then
91 : there might be a whole chain already waiting.
92 :
93 : That's pretty much it. When we are leader, in addition to doing
94 : busywork, we publish ticks and microblocks to the shred tile. A
95 : microblock is a non-empty group of transactions whose hashes are
96 : mixed-in to the chain, while a tick is a periodic stamp of the
97 : current hash, with no transactions (nothing mixed in). We need
98 : to send both to the shred tile, as ticks are important for other
99 : validators to verify in parallel.
100 :
101 : As well, the tile should never become leader for a slot that it has
102 : published anything for, otherwise it may create a duplicate block.
103 :
104 : Some particularly common misunderstandings:
105 :
106 : - PoH is critical to security.
107 :
108 : This largely isn't true. The target hash rate of the network is
109 : so slow (1 hash per 500 nanoseconds) that a malicious leader can
110 : easily catch up if they start from an old hash, and the only
111 : practical attack prevented is the proof of skipping. Most of the
112 : long range attacks in the Solana whitepaper are not relevant.
113 :
114 : - PoH keeps passage of time.
115 :
116 : This is also not true. The way the network keeps time so it can
117 : decide who is leader is that, each leader uses their operating
118 : system clock to time 400 milliseconds and publishes their block
119 : when this timer expires.
120 :
121 : If a leader just hashed as fast as they could, they could publish
122 : a block in tens of milliseconds, and the rest of the network
123 : would happily accept it. This is why the Solana "clock" as
124 : determined by PoH is not accurate and drifts over time.
125 :
126 : - PoH prevents transaction reordering by the leader.
127 :
128 : The leader can, in theory, wait until the very end of their
129 : leader slot to publish anything at all to the network. They can,
130 : in particular, hold all received transactions for 400
131 : milliseconds and then reorder and publish some right at the end
132 : to advantage certain transactions.
133 :
134 : You might be wondering... if all the PoH chain is helping us do is
135 : prove that slots were skipped correctly, why do we need to "mix in"
136 : transactions to the hash value? Or do anything at all for slots
137 : where we don't skip the prior slot?
138 :
139 : It's a good question, and the answer is that this behavior is not
140 : necessary. An ideal implementation of PoH would have no concept of ticks
141 : or mixins, and would not be part of the TPU pipeline at all.
142 : Instead, there would be a simple field "skip_proof" on the last
143 : shred we send for a slot, the hash(hash(...)) value. This field
144 : would only be filled in (and only verified by replayers) in cases
145 : where the slot actually skipped a parent.
146 :
147 : Then what is the "clock"? In Solana, time is constructed as follows:
148 :
149 : HASHES
150 :
151 : The base unit of time is a hash. Hereafter, any values whose
152 : units are in hashes are called a "hashcnt" to distinguish them
153 : from actual hashed values.
154 :
155 : Agave generally defines a constant duration for each tick
156 : (see below) and then varies the number of hashcnt per tick, but
157 : as we consider the hashcnt the base unit of time, Firedancer and
158 : this PoH implementation defines everything in terms of hashcnt
159 : duration instead.
160 :
161 : In mainnet-beta, testnet, and devnet the hashcnt ticks over
162 : (increments) every 100 nanoseconds. The hashcnt rate is
163 : specified as 500 nanoseconds according to the genesis, but there
164 : are several features which increase the number of hashes per
165 : tick while keeping tick duration constant, which make the time
166 : per hashcnt lower. These features up to and including the
167 : `update_hashes_per_tick6` feature are activated on mainnet-beta,
168 : devnet, and testnet, and are described in the TICKS section
169 : below.
170 :
171 : Other chains and development environments might have a different
172 : hashcnt rate in the genesis, or they might not have activated
173 : the features which increase the rate yet, which we also support.
174 :
175 : In practice, although each validator follows a hashcnt rate of
176 : 100 nanoseconds, the overall observed hashcnt rate of the
177 : network is a little slower than once every 100 nanoseconds,
178 : mostly because there are gaps and clock synchronization issues
179 : during handoff between leaders. This is referred to as clock
180 : drift.
181 :
182 : TICKS
183 :
184 : The leader needs to periodically checkpoint the hash value
185 : associated with a given hashcnt so that they can publish it to
186 : other nodes for verification.
187 :
188 : On mainnet-beta, testnet, and devnet this occurs once every
189 : 62,500 hashcnts, or approximately once every 6.4 microseconds.
190 : This value is determined at genesis time, and according to the
191 : features below, and could be different in development
192 : environments or on other chains which we support.
193 :
194 : Due to protocol limitations, when mixing in transactions to the
195 : proof-of-history chain, it cannot occur on a tick boundary (but
196 : can occur at any other hashcnt).
197 :
198 : Ticks exist mainly so that verification can happen in parallel.
199 : A verifier computer, rather than needing to do hash(hash(...))
200 : all in sequence to verify a proof-of-history chain, can do,
201 :
202 : Core 0: hash(hash(...))
203 : Core 1: hash(hash(...))
204 : Core 2: hash(hash(...))
205 : Core 3: hash(hash(...))
206 : ...
207 :
208 : Between each pair of tick boundaries.
209 :
210 : Solana sometimes calls the current tick the "tick height",
211 : although it makes more sense to think of it as a counter from
212 : zero, it's just the number of ticks since the genesis hash.
213 :
214 : There is a set of features which increase the number of hashcnts
215 : per tick. These are all deployed on mainnet-beta, devnet, and
216 : testnet.
217 :
218 : name: update_hashes_per_tick
219 : id: 3uFHb9oKdGfgZGJK9EHaAXN4USvnQtAFC13Fh5gGFS5B
220 : hashes per tick: 12,500
221 : hashcnt duration: 500 nanos
222 :
223 : name: update_hashes_per_tick2
224 : id: EWme9uFqfy1ikK1jhJs8fM5hxWnK336QJpbscNtizkTU
225 : hashes per tick: 17,500
226 : hashcnt duration: 357.142857143 nanos
227 :
228 : name: update_hashes_per_tick3
229 : id: 8C8MCtsab5SsfammbzvYz65HHauuUYdbY2DZ4sznH6h5
230 : hashes per tick: 27,500
231 : hashcnt duration: 227.272727273 nanos
232 :
233 : name: update_hashes_per_tick4
234 : id: 8We4E7DPwF2WfAN8tRTtWQNhi98B99Qpuj7JoZ3Aikgg
235 : hashes per tick: 47,500
236 : hashcnt duration: 131.578947368 nanos
237 :
238 : name: update_hashes_per_tick5
239 : id: BsKLKAn1WM4HVhPRDsjosmqSg2J8Tq5xP2s2daDS6Ni4
240 : hashes per tick: 57,500
241 : hashcnt duration: 108.695652174 nanos
242 :
243 : name: update_hashes_per_tick6
244 : id: FKu1qYwLQSiehz644H6Si65U5ZQ2cp9GxsyFUfYcuADv
245 : hashes per tick: 62,500
246 : hashcnt duration: 100 nanos
247 :
248 : In development environments, there is a way to configure the
249 : hashcnt per tick to be "none" during genesis, for a so-called
250 : "low power" tick producer. The idea is not to spin cores during
251 : development. This is equivalent to setting the hashcnt per tick
252 : to be 1, and increasing the hashcnt duration to the desired tick
253 : duration.
254 :
255 : SLOTS
256 :
257 : Each leader needs to be leader for a fixed amount of time, which
258 : is called a slot. During a slot, a leader has an opportunity to
259 : receive transactions and produce a block for the network,
260 : although they may miss ("skip") the slot if they are offline or
261 : not behaving.
262 :
263 : In mainnet-beta, testnet, and devnet a slot is 64 ticks, or
264 : 4,000,000 hashcnts, or approximately 400 milliseconds.
265 :
266 : Due to the way the leader schedule is constructed, each leader
267 : is always given at least four (4) consecutive slots in the
268 : schedule. This means when becoming leader you will be leader
269 : for at least 4 slots, or 1.6 seconds.
270 :
271 : It is rare, although can happen that a leader gets more than 4
272 : consecutive slots (eg, 8, or 12), if they are lucky with the
273 : leader schedule generation.
274 :
275 : The number of ticks in a slot is fixed at genesis time, and
276 : could be different for development or other chains, which we
277 : support. There is nothing special about 4 leader slots in a
278 : row, and this might be changed in future, and the proof of
279 : history makes no assumptions that this is the case.
280 :
281 : EPOCHS
282 :
283 : Infrequently, the network needs to do certain housekeeping,
284 : mainly things like collecting rent and deciding on the leader
285 : schedule. The length of an epoch is fixed on mainnet-beta,
286 : devnet and testnet at 420,000 slots, or around ~2 (1.94) days.
287 : This value is fixed at genesis time, and could be different for
288 : other chains including development, which we support. Typically
289 : in development, epochs are every 8,192 slots, or around ~1 hour
290 : (54.61 minutes), although it depends on the number of ticks per
291 : slot and the target hashcnt rate of the genesis as well.
292 :
293 : In development, epochs need not be a fixed length either. There
294 : is a "warmup" option, where epochs start short and grow, which
295 : is useful for quickly warming up stake during development.
296 :
297 : The epoch is important because it is the only time the leader
298 : schedule is updated. The leader schedule is a list of which
299 : leader is leader for which slot, and is generated by a special
300 : algorithm that is deterministic and known to all nodes.
301 :
302 : The leader schedule is computed one epoch in advance, so that
303 : at slot T, we always know who will be leader up until the end
304 : of slot T+EPOCH_LENGTH. Specifically, the leader schedule for
305 : epoch N is computed during the epoch boundary crossing from
306 : N-2 to N-1. For mainnet-beta, the slots per epoch is fixed and
307 : will always be 420,000. */
308 :
309 : #include "../../disco/tiles.h"
310 : #include "../../disco/fd_txn_m.h"
311 : #include "../../disco/bundle/fd_bundle_crank.h"
312 : #include "../../disco/pack/fd_pack.h"
313 : #include "../../disco/pack/fd_pack_cost.h"
314 : #include "../../ballet/sha256/fd_sha256.h"
315 : #include "../../disco/metrics/fd_metrics.h"
316 : #include "../../util/pod/fd_pod.h"
317 : #include "../../disco/shred/fd_shredder.h"
318 : #include "../../disco/keyguard/fd_keyload.h"
319 : #include "../../disco/keyguard/fd_keyswitch.h"
320 : #include "../../disco/plugin/fd_plugin.h"
321 : #include "../../flamenco/leaders/fd_multi_epoch_leaders.h"
322 :
323 : #include <string.h>
324 :
/* The maximum number of microblocks that pack is allowed to pack into a
   single slot.  This is not consensus critical, and pack could, if we
   let it, produce as many microblocks as it wants, and the slot would
   still be valid.

   We have this here instead so that PoH can estimate slot completion,
   and keep the hashcnt up to date as pack progresses through packing
   the slot.  If this upper bound was not enforced, PoH could tick to
   the last hash of the slot and have no hashes left to mixin incoming
   microblocks from pack, so this upper bound is a coordination
   mechanism so that PoH can progress hashcnts while the slot is active,
   and know that pack will not need those hashcnts later to do mixins. */
#define MAX_MICROBLOCKS_PER_SLOT (131072UL)

/* When we are hashing in the background in case a prior leader skips
   their slot, we need to store the result of each tick hash so we can
   publish them when we become leader.  The network requires at least
   one leader slot to publish in each epoch for the leader schedule to
   generate, so in the worst case we might need two full epochs of slots
   to store the hashes.  (Eg, if epoch T only had a published slot in
   position 0 and epoch T+1 only had a published slot right at the end).

   There is a tighter bound: the block data limit of mainnet-beta is
   currently FD_PACK_MAX_DATA_PER_BLOCK, or 27,332,342 bytes per slot.
   At 48 bytes per tick, it is not possible to publish a slot that skips
   569,424 or more prior slots. */
#define MAX_SKIPPED_TICKS (1UL+(FD_PACK_MAX_DATA_PER_BLOCK/48UL))

/* Tags identifying the kind of each input link.  Stored per input link
   in fd_pohh_tile_t.in_kind (see below). */
#define IN_KIND_BANK (0)
#define IN_KIND_PACK (1)
#define IN_KIND_EPOCH (2)
356 :
357 :
/* Per-input-link state: the workspace backing the link's dcache and the
   compact chunk bounds for frags arriving on that link. */
struct fd_pohh_in {
  fd_wksp_t * mem;    /* workspace containing the link's dcache */
  ulong chunk0;       /* lowest valid chunk index on the link */
  ulong wmark;        /* highest valid chunk index (watermark) */
};

typedef struct fd_pohh_in fd_pohh_in_t;
365 :
/* Per-output-link state: the link's index plus the dcache chunk bounds
   and the current write position within the dcache. */
struct fd_pohh_out {
  ulong idx;          /* index of this out link in the tile topology */
  fd_wksp_t * mem;    /* workspace containing the link's dcache */
  ulong chunk0;       /* lowest valid chunk index on the link */
  ulong wmark;        /* highest valid chunk index (watermark) */
  ulong chunk;        /* next chunk to write a frag payload into */
};

typedef struct fd_pohh_out fd_pohh_out_t;
375 :
/* All state for the PoH tile.  See the long comment at the top of this
   file for the overall model. */
struct fd_pohh_tile {
  fd_stem_context_t * stem; /* stem context used to publish frags downstream */

  /* Static configuration determined at genesis creation time.  See
     long comment above for more information. */
  ulong tick_duration_ns;
  ulong hashcnt_per_tick;
  ulong ticks_per_slot;

  /* Derived from the above configuration, but we precompute it. */
  double slot_duration_ns;
  double hashcnt_duration_ns;
  ulong hashcnt_per_slot;

  /* The maximum number of real microblocks that the pack tile is
     allowed to publish in each slot.

     While we are leader, PoH internally treats this limit as having
     one extra phantom "microblock" reserved for the done_packing
     message, so that PoH does not finish the slot before pack
     confirms it is done.  Pack itself is configured with the
     un-inflated limit and never publishes more than this many real
     microblocks per slot. */
  ulong max_microblocks_per_slot;

  /* Consensus-critical slot cost limits. */
  struct {
    ulong slot_max_cost;
    ulong slot_max_vote_cost;
    ulong slot_max_write_cost_per_acct;
  } limits;

  /* The current slot and hashcnt within that slot of the proof of
     history, including hashes we have been producing in the background
     while waiting for our next leader slot. */
  ulong slot;
  ulong hashcnt;
  ulong cus_used; /* NOTE(review): presumably compute units consumed in
                     the current slot, tracked against the limits above;
                     updates are not visible in this chunk -- confirm */

  /* When we send a microblock on to the shred tile, we need to tell
     it how many hashes there have been since the last microblock, so
     this tracks the hashcnt of the last published microblock.

     If we are skipping slots prior to our leader slot, the last_slot
     will be quite old, and potentially much larger than the number of
     hashcnts in one slot. */
  ulong last_slot;
  ulong last_hashcnt;

  /* If we have published a tick or a microblock for a particular slot
     to the shred tile, we should never become leader for that slot
     again, otherwise we could publish a duplicate block.

     This value tracks the max slot that we have published a tick or
     microblock for so we can prevent this. */
  ulong highwater_leader_slot;

  /* See how this field is used below.  If we have sequential leader
     slots, we don't reset the expected slot end time between the two,
     to prevent clock drift.  If we didn't do this, our 2nd slot would
     end 400ms + `time_for_replay_to_move_slot_and_reset_poh` after
     our 1st, rather than just strictly 400ms. */
  int lagged_consecutive_leader_start;
  ulong expect_sequential_leader_slot;

  /* There's a race condition ... let's say two banks A and B, bank A
     processes some transactions, then releases the account locks, and
     sends the microblock to PoH to be stamped.  Pack now re-packs the
     same accounts with a new microblock, sends to bank B, bank B
     executes and sends the microblock to PoH, and this all happens fast
     enough that PoH picks the 2nd block to stamp before the 1st.  The
     accounts database changes now are misordered with respect to PoH so
     replay could fail.

     To prevent this race, we order all microblocks and only process
     them in PoH in the order they are produced by pack.  This is a
     little bit over-strict, we just need to ensure that microblocks
     with conflicting accounts execute in order, but this is easiest to
     implement for now. */
  uint expect_pack_idx;

  /* Pack and bank tiles need a reference to the bank object with a
     slightly different lifetime than current_leader_bank, particularly
     when we switch forks in the middle of a leader slot.  We need to
     make sure we don't free the last reference to the bank while the
     pack or bank tiles are still using it.  The strange thing is that
     bank tiles have no concept of the current slot, but we know they're
     done with the bank object when pack's inter-slot bank draining
     process is complete.  Pack notifies PoH by a frag with
     sig==ULONG_MAX on the pack_poh link when the banks are drained, and
     the PoH tile must then free the reference on behalf of pack.

     pack_leader_bank is non-NULL when the reference we're holding on
     behalf of the pack tile is acquired, and NULL when it is not
     acquired. */
  void const * pack_leader_bank;

  /* The PoH tile must never drop microblocks that get committed by the
     bank, so it needs to always be able to mixin a microblock hash.
     Mixing in requires incrementing the hashcnt, so we need to ensure
     at all times that there is enough hashcnts left in the slot to
     mixin whatever future microblocks pack might produce for it.

     This value tracks that.  At any time, max_microblocks_per_slot
     - microblocks_lower_bound is an upper bound on the maximum number
     of microblocks that might still be received in this slot. */
  ulong microblocks_lower_bound;

  /* The hash we were last reset onto and the current running PoH hash,
     respectively.  32 byte SHA-256 values, kept 32 byte aligned. */
  uchar __attribute__((aligned(32UL))) reset_hash[ 32 ];
  uchar __attribute__((aligned(32UL))) hash[ 32 ];

  /* When we are not leader, we need to save the hashes that were
     produced in case the prior leader skips.  If they skip, we will
     replay these skipped hashes into our next leader bank so that
     the slot hashes sysvar can be updated correctly, and also publish
     them to peer nodes as part of our outgoing shreds. */
  uchar skipped_tick_hashes[ MAX_SKIPPED_TICKS ][ 32 ];

  /* The timestamp in nanoseconds of when the reset slot was received.
     This is the timestamp we are building on top of to determine when
     our next leader slot starts. */
  long reset_slot_start_ns;

  /* The timestamp in nanoseconds of when we got the bank for the
     current leader slot. */
  long leader_bank_start_ns;

  /* The hashcnt corresponding to the start of the current reset slot. */
  ulong reset_slot;

  /* The hashcnt at which our next leader slot begins, or ULONG max if
     we have no known next leader slot.

     NOTE(review): the two comments above say "hashcnt" but the field
     names (and the comparison with `slot` elsewhere) suggest these hold
     slot numbers -- confirm and reword whichever is wrong. */
  ulong next_leader_slot;

  /* If an in progress frag should be skipped */
  int skip_frag;

  ulong max_active_descendant; /* NOTE(review): semantics not visible in
                                  this chunk; appears to track the
                                  highest active descendant slot
                                  reported by replay -- confirm */

  /* If we currently are the leader according to the clock AND we have
     received the leader bank for the slot from the replay stage,
     this value will be non-NULL.

     Note that we might be inside our leader slot, but not have a bank
     yet, in which case this will still be NULL.

     It will be NULL for a brief race period between consecutive leader
     slots, as we ping-pong back to replay stage waiting for a new bank.

     Agave refers to this as the "working bank". */
  void const * current_leader_bank;

  fd_sha256_t * sha256; /* SHA-256 state used to extend the PoH chain */

  fd_multi_epoch_leaders_t * mleaders; /* leader schedules spanning
                                          multiple epochs, used to find
                                          our leader slots */

  /* The last sequence number of an outgoing fragment to the shred tile,
     or ULONG max if no such fragment.  See fd_keyswitch.h for details
     of how this is used. */
  ulong shred_seq;

  int halted_switching_key; /* non-zero while the tile is halted for an
                              identity key switch -- see fd_keyswitch.h */

  fd_keyswitch_t * keyswitch;  /* identity key switch coordination object */
  fd_pubkey_t identity_key;    /* current validator identity public key */

  /* We need a few pieces of information to compute the right addresses
     for bundle crank information that we need to send to pack. */
  struct {
    int enabled;
    fd_pubkey_t vote_account;
    fd_bundle_crank_gen_t gen[1];
  } bundle;


  /* The Agave client needs to be notified when the leader changes,
     so that they can resume the replay stage if it was suspended waiting. */
  void * signal_leader_change;

  /* These are temporarily set in during_frag so they can be used in
     after_frag once the frag has been validated as not overrun. */
  uchar _txns[ USHORT_MAX ];
  fd_microblock_trailer_t _microblock_trailer[ 1 ];

  int in_kind[ 64 ];      /* IN_KIND_* tag for each input link */
  fd_pohh_in_t in[ 64 ];  /* chunk bounds for each input link */

  fd_pohh_out_t shred_out[ 1 ];   /* ticks and microblocks to shred tile */
  fd_pohh_out_t pack_out[ 1 ];    /* messages to the pack tile */
  fd_pohh_out_t plugin_out[ 1 ];  /* optional plugin notifications */

  /* Metrics histograms for leader transition timings. */
  fd_histf_t begin_leader_delay[ 1 ];
  fd_histf_t first_microblock_delay[ 1 ];
  fd_histf_t slot_done_delay[ 1 ];
  fd_histf_t bundle_init_delay[ 1 ];

  /* Feature activation state received from the epoch link; avail tracks
     which entries are populated -- NOTE(review): exact encoding not
     visible in this chunk, confirm against the producer. */
  ulong features_activation_avail;
  fd_shred_features_activation_t features_activation[1];

  /* Parent slot and its block id -- presumably the parent this tile is
     building on; updates not visible in this chunk. */
  ulong parent_slot;
  uchar parent_block_id[ 32 ];

  /* Backing memory for mleaders above. */
  uchar __attribute__((aligned(FD_MULTI_EPOCH_LEADERS_ALIGN))) mleaders_mem[ FD_MULTI_EPOCH_LEADERS_FOOTPRINT ];
};

typedef struct fd_pohh_tile fd_pohh_tile_t;
582 :
583 : /* The PoH recorder is implemented in Firedancer but for now needs to
584 : work with Agave, so we have a locking scheme for them to
585 : co-operate.
586 :
587 : This is because the PoH tile lives in the Agave memory address
588 : space and their version of concurrency is locking the PoH recorder
589 : and reading arbitrary fields.
590 :
591 : So we allow them to lock the PoH tile, although with a very bad (for
592 : them) locking scheme. By default, the tile has full and exclusive
593 : access to the data. If part of Agave wishes to read/write they
594 : can either,
595 :
596 : 1. Rewrite their concurrency to message passing based on mcache
597 : (preferred, but not feasible).
598 : 2. Signal to the tile they wish to acquire the lock, by setting
599 : fd_poh_waiting_lock to 1.
600 :
601 : During after_credit, the tile will check if the waiting lock is set
602 : to 1, and if so, set the returned lock to 1, indicating to the waiter
603 : that they may now proceed.
604 :
605 : When the waiter is done reading and writing, they restore the
606 : returned lock value back to zero, and the POH tile continues with its
607 : day. */
608 :
/* The single global tile context, handed to Agave threads by
   fd_ext_poh_write_lock below. */
static fd_pohh_tile_t * fd_pohh_global_ctx;

/* The two words of the hand-rolled lock described above.  Each is
   given its own 128 byte alignment, presumably so the two words do not
   share a cache line and ping-pong between the tile and the waiter. */
static volatile ulong fd_poh_waiting_lock __attribute__((aligned(128UL)));
static volatile ulong fd_poh_returned_lock __attribute__((aligned(128UL)));
613 :
614 : /* Agave also needs to write to some mcaches, so we trampoline
615 : that via. the PoH tile as well. */
616 :
/* State for one outgoing mcache/dcache pair that Agave publishes to via
   the trampoline below.  Producer-side flow control state included. */
struct poh_link {
  fd_frag_meta_t * mcache; /* non-NULL marks the link initialized;
                              poh_link_publish spins until it is set */
  ulong depth;             /* mcache depth in frags */
  ulong tx_seq;            /* next sequence number to publish */

  void * mem;              /* workspace for chunk addressing; NULL means
                              the link is not enabled */
  void * dcache;           /* payload dcache */
  ulong chunk0;            /* lowest valid chunk index */
  ulong wmark;             /* highest valid chunk index (watermark) */
  ulong chunk;             /* next chunk to write */

  ulong cr_avail;          /* flow control credits currently available */
  ulong rx_cnt;            /* number of reliable consumers (max 32) */
  ulong * rx_fseqs[ 32UL ]; /* each reliable consumer's fseq */
};

typedef struct poh_link poh_link_t;
634 :
/* The set of links Agave writes to through the PoH tile trampoline. */
static poh_link_t gossip_dedup;
static poh_link_t stake_out;
static poh_link_t crds_shred;
static poh_link_t replay_resolh;
static poh_link_t executed_txn;

/* Plugin links; presumably optional (publish returns early when a
   link's mem is NULL, see poh_link_publish). */
static poh_link_t replay_plugin;
static poh_link_t gossip_plugin;
static poh_link_t start_progress_plugin;
static poh_link_t vote_listener_plugin;
static poh_link_t validator_info_plugin;
646 :
647 : static void
648 0 : poh_link_wait_credit( poh_link_t * link ) {
649 0 : if( FD_LIKELY( link->cr_avail ) ) return;
650 :
651 0 : while( 1 ) {
652 0 : ulong cr_query = ULONG_MAX;
653 0 : for( ulong i=0UL; i<link->rx_cnt; i++ ) {
654 0 : ulong const * _rx_seq = link->rx_fseqs[ i ];
655 0 : ulong rx_seq = FD_VOLATILE_CONST( *_rx_seq );
656 0 : ulong rx_cr_query = (ulong)fd_long_max( (long)link->depth - fd_long_max( fd_seq_diff( link->tx_seq, rx_seq ), 0L ), 0L );
657 0 : cr_query = fd_ulong_min( rx_cr_query, cr_query );
658 0 : }
659 0 : if( FD_LIKELY( cr_query>0UL ) ) {
660 0 : link->cr_avail = cr_query;
661 0 : break;
662 0 : }
663 0 : FD_SPIN_PAUSE();
664 0 : }
665 0 : }
666 :
/* poh_link_publish copies data_sz bytes of data into the link's dcache
   and publishes a frag with signature sig, consuming one flow control
   credit and advancing the link's chunk and sequence number.

   The first spin waits for poh_link_init to store a non-NULL mcache
   (initialization complete); only then is mem checked to see if the
   link is actually enabled.  NOTE(review): a link whose init never
   runs would spin here forever -- assumed init always runs, even for
   disabled links, so that mcache becomes non-NULL. */
static void
poh_link_publish( poh_link_t * link,
                  ulong sig,
                  uchar const * data,
                  ulong data_sz ) {
  while( FD_UNLIKELY( !FD_VOLATILE_CONST( link->mcache ) ) ) FD_SPIN_PAUSE();
  if( FD_UNLIKELY( !link->mem ) ) return; /* link not enabled, don't publish */
  poh_link_wait_credit( link ); /* blocks until a credit is available */

  uchar * dst = (uchar *)fd_chunk_to_laddr( link->mem, link->chunk );
  fd_memcpy( dst, data, data_sz );
  ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
  fd_mcache_publish( link->mcache, link->depth, link->tx_seq, sig, link->chunk, data_sz, 0UL, 0UL, tspub );
  link->chunk = fd_dcache_compact_next( link->chunk, data_sz, link->chunk0, link->wmark );
  link->cr_avail--;
  link->tx_seq++;
}
684 :
/* poh_link_init wires link to the topo link at tile->out_link_id[
   out_idx ]: records the dcache chunk bounds, resets the tx sequence
   and credits, and collects the fseq of every reliable consumer of the
   link (for flow control in poh_link_wait_credit).  The mcache pointer
   is stored last, between compiler fences, because a non-NULL mcache
   is how poh_link_publish detects that initialization is complete --
   everything else must be visible before it. */
static void
poh_link_init( poh_link_t * link,
               fd_topo_t * topo,
               fd_topo_tile_t * tile,
               ulong out_idx ) {
  fd_topo_link_t * topo_link = &topo->links[ tile->out_link_id[ out_idx ] ];
  fd_topo_wksp_t * wksp = &topo->workspaces[ topo->objs[ topo_link->dcache_obj_id ].wksp_id ];

  link->mem = wksp->wksp;
  link->depth = fd_mcache_depth( topo_link->mcache );
  link->tx_seq = 0UL;
  link->dcache = topo_link->dcache;
  link->chunk0 = fd_dcache_compact_chunk0( wksp->wksp, topo_link->dcache );
  link->wmark = fd_dcache_compact_wmark ( wksp->wksp, topo_link->dcache, topo_link->mtu );
  link->chunk = link->chunk0;
  link->cr_avail = 0UL;
  link->rx_cnt = 0UL;
  /* Scan every tile for reliable consumers of this link and remember
     their fseqs so the publisher can flow control against them. */
  for( ulong i=0UL; i<topo->tile_cnt; i++ ) {
    fd_topo_tile_t * _tile = &topo->tiles[ i ];
    for( ulong j=0UL; j<_tile->in_cnt; j++ ) {
      if( _tile->in_link_id[ j ]==topo_link->id && _tile->in_link_reliable[ j ] ) {
        FD_TEST( link->rx_cnt<32UL ); /* rx_fseqs capacity */
        link->rx_fseqs[ link->rx_cnt++ ] = _tile->in_link_fseq[ j ];
        break;
      }
    }
  }
  FD_COMPILER_MFENCE();
  link->mcache = topo_link->mcache; /* marks the link ready for publish */
  FD_COMPILER_MFENCE();
  FD_TEST( link->mcache );
}
717 :
718 : /* To help show correctness, functions that might be called from
719 : Rust, either directly or indirectly, have this fake "attribute"
720 : CALLED_FROM_RUST, which is actually nothing. Calls from Rust
721 : typically execute on threads did not call fd_boot, so they do not
722 : have the typical FD_TL variables. In particular, they cannot use
723 : normal metrics, and their log messages don't have full context.
724 : Additionally, Rust functions marked CALLED_FROM_RUST cannot call back
725 : into a C fd_ext function without causing a deadlock (although the
726 : other Rust fd_ext functions have a similar problem).
727 :
728 : To prevent annotation from polluting the whole codebase, calls to
729 : functions outside this file are manually checked and marked as being
730 : safe at each call rather than annotated. */
731 : #define CALLED_FROM_RUST
732 :
/* fd_ext_poh_write_lock blocks the calling thread (a non-tile thread,
   typically on the Agave side) until it has exclusive write access to
   the shared PoH tile context, and returns a pointer to it.  The
   handshake has two phases: the caller first CAS-acquires
   fd_poh_waiting_lock to announce itself as the single pending writer,
   then spins until fd_poh_returned_lock is raised to grant access.
   Release with fd_ext_poh_write_unlock.  NOTE(review): the waiting
   lock is not cleared here -- presumably the PoH tile clears it when
   it grants access; confirm against the tile's housekeeping loop. */
static CALLED_FROM_RUST fd_pohh_tile_t *
fd_ext_poh_write_lock( void ) {
  for(;;) {
    /* Acquire the waiter lock to make sure we are the first writer in the queue. */
    if( FD_LIKELY( !FD_ATOMIC_CAS( &fd_poh_waiting_lock, 0UL, 1UL) ) ) break;
    FD_SPIN_PAUSE();
  }
  FD_COMPILER_MFENCE();
  for(;;) {
    /* Now wait for the tile to tell us we can proceed. */
    if( FD_LIKELY( FD_VOLATILE_CONST( fd_poh_returned_lock ) ) ) break;
    FD_SPIN_PAUSE();
  }
  /* Fence so reads/writes under the lock are not hoisted above the
     acquire spin. */
  FD_COMPILER_MFENCE();
  return fd_pohh_global_ctx;
}
749 :
/* fd_ext_poh_write_unlock releases the write lock taken by
   fd_ext_poh_write_lock by lowering fd_poh_returned_lock, letting the
   PoH tile resume.  The fence ensures every write made while holding
   the lock is visible before the release store. */
static CALLED_FROM_RUST void
fd_ext_poh_write_unlock( void ) {
  FD_COMPILER_MFENCE();
  FD_VOLATILE( fd_poh_returned_lock ) = 0UL;
}
755 :
756 : /* The PoH tile needs to interact with the Agave address space to
757 : do certain operations that Firedancer hasn't reimplemented yet, a.k.a
758 : transaction execution. We have Agave export some wrapper
759 : functions that we call into during regular tile execution. These do
760 : not need any locking, since they are called serially from the single
761 : PoH tile. */
762 :
763 : extern CALLED_FROM_RUST void fd_ext_bank_acquire( void const * bank );
764 : extern CALLED_FROM_RUST void fd_ext_bank_release( void const * bank );
765 : extern CALLED_FROM_RUST void fd_ext_poh_signal_leader_change( void * sender );
766 : extern void fd_ext_poh_register_tick( void const * bank, uchar const * hash );
767 :
768 : /* fd_ext_poh_initialize is called by Agave on startup to
769 : initialize the PoH tile with some static configuration, and the
770 : initial reset slot and hash which it retrieves from a snapshot.
771 :
772 : This function is called by some random Agave thread, but
773 : it blocks booting of the PoH tile. The tile will spin until it
774 : determines that this initialization has happened.
775 :
776 : signal_leader_change is an opaque Rust object that is used to
777 : tell the replay stage that the leader has changed. It is a
778 : Box::into_raw(Arc::increment_strong(crossbeam::Sender)), so it
779 : has infinite lifetime unless this C code releases the refcnt.
780 :
781 : It can be used with `fd_ext_poh_signal_leader_change` which
782 : will just issue a nonblocking send on the channel. */
783 :
CALLED_FROM_RUST void
fd_ext_poh_initialize( ulong         tick_duration_ns, /* See clock comments above, will be 6.4 microseconds for mainnet-beta. */
                       ulong         hashcnt_per_tick, /* See clock comments above, will be 62,500 for mainnet-beta. */
                       ulong         ticks_per_slot,   /* See clock comments above, will almost always be 64. */
                       ulong         tick_height,      /* The counter (height) of the tick to start hashing on top of. */
                       uchar const * last_entry_hash,  /* Points to start of a 32 byte region of memory, the hash itself at the tick height. */
                       void *        signal_leader_change /* See comment above. */ ) {
  FD_COMPILER_MFENCE();
  for(;;) {
    /* Make sure the ctx is initialized before trying to take the lock. */
    if( FD_LIKELY( FD_VOLATILE_CONST( fd_pohh_global_ctx ) ) ) break;
    FD_SPIN_PAUSE();
  }
  fd_pohh_tile_t * ctx = fd_ext_poh_write_lock();

  /* Derive the starting slot from the snapshot tick height; hashing
     resumes from the top of that slot. */
  ctx->slot                = tick_height/ticks_per_slot;
  ctx->hashcnt             = 0UL;
  ctx->cus_used            = 0UL;
  ctx->last_slot           = ctx->slot;
  ctx->last_hashcnt        = 0UL;
  ctx->reset_slot          = ctx->slot;
  ctx->reset_slot_start_ns = fd_log_wallclock(); /* safe to call from Rust */

  /* Seed both the reset hash and the working hash from the snapshot's
     last entry hash (32 bytes). */
  memcpy( ctx->reset_hash, last_entry_hash, 32UL );
  memcpy( ctx->hash, last_entry_hash, 32UL );

  /* Opaque crossbeam sender, see the comment above this function. */
  ctx->signal_leader_change = signal_leader_change;

  /* Static configuration about the clock. */
  ctx->tick_duration_ns = tick_duration_ns;
  ctx->hashcnt_per_tick = hashcnt_per_tick;
  ctx->ticks_per_slot   = ticks_per_slot;

  /* Recompute derived information about the clock. */
  ctx->slot_duration_ns    = (double)ticks_per_slot*(double)tick_duration_ns;
  ctx->hashcnt_duration_ns = (double)tick_duration_ns/(double)hashcnt_per_tick;
  ctx->hashcnt_per_slot    = ticks_per_slot*hashcnt_per_tick;

  if( FD_UNLIKELY( ctx->hashcnt_per_tick==1UL ) ) {
    /* Low power producer, maximum of one microblock per tick in the slot */
    ctx->max_microblocks_per_slot = ctx->ticks_per_slot;
  } else {
    /* See the long comment in after_credit for this limit */
    ctx->max_microblocks_per_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ctx->ticks_per_slot*(ctx->hashcnt_per_tick-1UL) );
  }

  fd_ext_poh_write_unlock();
}
832 :
833 : /* fd_ext_poh_acquire_bank gets the current leader bank if there is one
834 : currently active. PoH might think we are leader without having a
835 : leader bank if the replay stage has not yet noticed we are leader.
836 :
   The bank that is returned is owned by the caller, and must be converted
838 : to an Arc<Bank> by calling Arc::from_raw() on it. PoH increments the
839 : reference count before returning the bank, so that it can also keep
840 : its internal copy.
841 :
842 : If there is no leader bank, NULL is returned. In this case, the
843 : caller should not call `Arc::from_raw()`. */
844 :
845 : CALLED_FROM_RUST void const *
846 0 : fd_ext_poh_acquire_leader_bank( void ) {
847 0 : fd_pohh_tile_t * ctx = fd_ext_poh_write_lock();
848 0 : void const * bank = NULL;
849 0 : if( FD_LIKELY( ctx->current_leader_bank ) ) {
850 : /* Clone refcount before we release the lock. */
851 0 : fd_ext_bank_acquire( ctx->current_leader_bank );
852 0 : bank = ctx->current_leader_bank;
853 0 : }
854 0 : fd_ext_poh_write_unlock();
855 0 : return bank;
856 0 : }
857 :
858 : /* fd_ext_poh_reset_slot returns the slot height one above the last good
859 : (unskipped) slot we are building on top of. This is always a good
860 : known value, and will not be ULONG_MAX. */
861 :
862 : CALLED_FROM_RUST ulong
863 0 : fd_ext_poh_reset_slot( void ) {
864 0 : fd_pohh_tile_t * ctx = fd_ext_poh_write_lock();
865 0 : ulong reset_slot = ctx->reset_slot;
866 0 : fd_ext_poh_write_unlock();
867 0 : return reset_slot;
868 0 : }
869 :
870 : CALLED_FROM_RUST void
871 0 : fd_ext_poh_update_active_descendant( ulong max_active_descendant ) {
872 0 : fd_pohh_tile_t * ctx = fd_ext_poh_write_lock();
873 0 : ctx->max_active_descendant = max_active_descendant;
874 0 : fd_ext_poh_write_unlock();
875 0 : }
876 :
877 : /* fd_ext_poh_reached_leader_slot returns 1 if we have reached a slot
878 : where we are leader. This is used by the replay stage to determine
879 : if it should create a new leader bank descendant of the prior reset
880 : slot block.
881 :
882 : Sometimes, even when we reach our slot we do not return 1, as we are
883 : giving a grace period to the prior leader to finish publishing their
884 : block.
885 :
886 : out_leader_slot is the slot height of the leader slot we reached, and
887 : reset_slot is the slot height of the last good (unskipped) slot we
888 : are building on top of. */
889 :
CALLED_FROM_RUST int
fd_ext_poh_reached_leader_slot( ulong * out_leader_slot,
                                ulong * out_reset_slot ) {
  fd_pohh_tile_t * ctx = fd_ext_poh_write_lock();

  /* The outputs are written unconditionally, even on the early-return
     paths below that report 0. */
  *out_leader_slot = ctx->next_leader_slot;
  *out_reset_slot  = ctx->reset_slot;

  if( FD_UNLIKELY( ctx->next_leader_slot==ULONG_MAX ||
                   ctx->slot<ctx->next_leader_slot ) ) {
    /* Didn't reach our leader slot yet. */
    fd_ext_poh_write_unlock();
    return 0;
  }

  if( FD_UNLIKELY( ctx->halted_switching_key ) ) {
    /* Reached our leader slot, but the leader pipeline is halted
       because we are switching identity key. */
    fd_ext_poh_write_unlock();
    return 0;
  }

  if( FD_LIKELY( ctx->reset_slot==ctx->next_leader_slot ) ) {
    /* We were reset onto our leader slot, because the prior leader
       completed theirs, so we should start immediately, no need for a
       grace period. */
    fd_ext_poh_write_unlock();
    return 1;
  }

  fd_pubkey_t const * reset_leader = fd_multi_epoch_leaders_get_leader_for_slot( ctx->mleaders, ctx->reset_slot );
  if( FD_UNLIKELY( reset_leader && fd_memeq( reset_leader, ctx->identity_key.uc, 32UL ) ) ) {
    /* Surprisingly, in some rare cases where we're skipping ourselves,
       the following can occur:
       Reset onto n-1
       Tick into slot n, become leader for slot n, skipping slot n-1
       Prior leader start publishing slot n-1
       max_active_descendant is set to n
       Switch forks, abandon slot n, reset onto slot n
       In this case, next_leader_slot is n+1 because we can't become
       leader again for slot n. We don't want to give ourselves any
       grace time though; we want to start n+1 as soon as the hashing
       is ready. */
    fd_ext_poh_write_unlock();
    return 1;
  }

  long now_ns = fd_log_wallclock();
  long expected_start_time_ns = ctx->reset_slot_start_ns + (long)((double)(ctx->next_leader_slot-ctx->reset_slot)*ctx->slot_duration_ns);

  /* Now we're faced with the question of how much grace to give the
     prior leader before trying to skip them. If they are still in the
     process of publishing their slot, delay ours to let them finish ...
     unless they are so delayed that we risk getting skipped by the
     leader following us. 1.2 seconds is a reasonable default here,
     although any value between 0 and 1.6 seconds could be considered
     reasonable. If they haven't started their last block, but we're
     reset on their second to last block, we'll give them an extra
     400ms. This is arbitrary and chosen due to intuition. */

  long start_time_with_grace_ns = expected_start_time_ns;

  if( FD_UNLIKELY( ctx->max_active_descendant>=ctx->next_leader_slot ) ) {
    /* If the max_active_descendant is >= next_leader_slot, we waited
       too long and a leader after us started publishing to try and skip
       us. Just start our leader slot immediately, we might win ... */
    start_time_with_grace_ns = now_ns;
  } else if( FD_LIKELY( ctx->max_active_descendant>=ctx->reset_slot ) ) {
    /* If one of the leaders between the reset slot and our leader
       slot is in the process of publishing (they have a descendant
       bank that is in progress of being replayed), then keep waiting.
       We probably wouldn't get a leader slot out before they
       finished. */
    start_time_with_grace_ns += (long)(3.0*ctx->slot_duration_ns); /* ~1.2 s with 400ms slots */
  } else if( FD_LIKELY( ctx->next_leader_slot==ctx->reset_slot+1UL ) ) {
    /* We finished replaying the slot two before ours, which means the
       prior leader is probably online, but they haven't started
       publishing the slot immediately prior to ours. Give the prior
       leader a little more time. */
    start_time_with_grace_ns += (long)(1.0*ctx->slot_duration_ns); /* the extra ~400ms mentioned above */
  }


  if( FD_UNLIKELY( now_ns<start_time_with_grace_ns ) ) {
    /* Still inside the grace window; tell replay to keep waiting. */
    fd_ext_poh_write_unlock();
    return 0;
  }

  /* Grace exhausted (or not needed): go ahead and take the slot. */
  fd_ext_poh_write_unlock();
  return 1;
}
981 :
982 : CALLED_FROM_RUST static inline void
983 : publish_plugin_slot_start( fd_pohh_tile_t * ctx,
984 : ulong slot,
985 0 : ulong parent_slot ) {
986 0 : if( FD_UNLIKELY( !ctx->plugin_out->mem ) ) return;
987 :
988 0 : fd_plugin_msg_slot_start_t * slot_start = (fd_plugin_msg_slot_start_t *)fd_chunk_to_laddr( ctx->plugin_out->mem, ctx->plugin_out->chunk );
989 0 : *slot_start = (fd_plugin_msg_slot_start_t){ .slot = slot, .parent_slot = parent_slot };
990 0 : fd_stem_publish( ctx->stem, ctx->plugin_out->idx, FD_PLUGIN_MSG_SLOT_START, ctx->plugin_out->chunk, sizeof(fd_plugin_msg_slot_start_t), 0UL, 0UL, 0UL );
991 0 : ctx->plugin_out->chunk = fd_dcache_compact_next( ctx->plugin_out->chunk, sizeof(fd_plugin_msg_slot_start_t), ctx->plugin_out->chunk0, ctx->plugin_out->wmark );
992 0 : }
993 :
994 : CALLED_FROM_RUST static inline void
995 : publish_plugin_slot_end( fd_pohh_tile_t * ctx,
996 : ulong slot,
997 0 : ulong cus_used ) {
998 0 : if( FD_UNLIKELY( !ctx->plugin_out->mem ) ) return;
999 :
1000 0 : fd_plugin_msg_slot_end_t * slot_end = (fd_plugin_msg_slot_end_t *)fd_chunk_to_laddr( ctx->plugin_out->mem, ctx->plugin_out->chunk );
1001 0 : *slot_end = (fd_plugin_msg_slot_end_t){ .slot = slot, .cus_used = cus_used };
1002 0 : fd_stem_publish( ctx->stem, ctx->plugin_out->idx, FD_PLUGIN_MSG_SLOT_END, ctx->plugin_out->chunk, sizeof(fd_plugin_msg_slot_end_t), 0UL, 0UL, 0UL );
1003 0 : ctx->plugin_out->chunk = fd_dcache_compact_next( ctx->plugin_out->chunk, sizeof(fd_plugin_msg_slot_end_t), ctx->plugin_out->chunk0, ctx->plugin_out->wmark );
1004 0 : }
1005 :
1006 : extern int
1007 : fd_ext_bank_load_account( void const * bank,
1008 : int fixed_root,
1009 : uchar const * addr,
1010 : uchar * owner,
1011 : uchar * data,
1012 : ulong * data_sz );
1013 :
/* publish_became_leader tells the pack tile that we have become leader
   for `slot` in `epoch`: it computes the slot's start/end wallclock
   times, optionally snapshots the tip-payment accounts for the bundle
   crank, publishes a fd_became_leader_t frag on the pack_out link, and
   takes an extra bank refcount on pack's behalf.  Called while holding
   the PoH write lock (see fd_ext_poh_begin_leader). */
CALLED_FROM_RUST static void
publish_became_leader( fd_pohh_tile_t * ctx,
                       ulong            slot,
                       ulong            epoch ) {
  /* Metric: how long after the reset did replay take to begin our
     leader slot. */
  double tick_per_ns = fd_tempo_tick_per_ns( NULL );
  fd_histf_sample( ctx->begin_leader_delay, (ulong)((double)(fd_log_wallclock()-ctx->reset_slot_start_ns)/tick_per_ns) );

  if( FD_UNLIKELY( ctx->lagged_consecutive_leader_start ) ) {
    /* If we are mirroring Agave behavior, the wall clock gets reset
       here so we don't count time spent waiting for a bank to freeze
       or replay stage to actually start the slot towards our 400ms.

       See extended comments in the config file on this option. */
    ctx->reset_slot_start_ns = fd_log_wallclock() - (long)((double)(slot-ctx->reset_slot)*ctx->slot_duration_ns);
  }

  /* Zero-initialized so the bundle crank can distinguish "not found /
     uninitialized" from real data (see comment below). */
  fd_bundle_crank_tip_payment_config_t config[1] = { 0 };
  fd_acct_addr_t tip_receiver_owner[1] = { 0 };

  if( FD_UNLIKELY( ctx->bundle.enabled ) ) {
    long bundle_time = -fd_tickcount();
    fd_acct_addr_t tip_payment_config[1];
    fd_acct_addr_t tip_receiver[1];
    fd_bundle_crank_get_addresses( ctx->bundle.gen, epoch, tip_payment_config, tip_receiver );

    fd_acct_addr_t _dummy[1];
    uchar dummy[1];

    void const * bank = ctx->current_leader_bank;

    /* Calling rust from a C function that is CALLED_FROM_RUST risks
       deadlock. In this case, I checked the load_account function and
       ensured it never calls any C functions that acquire the lock. */
    ulong sz1 = sizeof(config), sz2 = 1UL;
    int found1 = fd_ext_bank_load_account( bank, 0, tip_payment_config->b, _dummy->b, (uchar *)config, &sz1 );
    int found2 = fd_ext_bank_load_account( bank, 0, tip_receiver->b, tip_receiver_owner->b, dummy, &sz2 );
    /* The bundle crank code detects whether the accounts were found by
       whether they have non-zero values (since found and uninitialized
       should be treated the same), so we actually don't really care
       about the value of found{1,2}. */
    (void)found1; (void)found2;
    bundle_time += fd_tickcount();
    fd_histf_sample( ctx->bundle_init_delay, (ulong)bundle_time );
  }

  long slot_start_ns = ctx->reset_slot_start_ns + (long)((double)(slot-ctx->reset_slot)*ctx->slot_duration_ns);

  /* No need to check flow control: there are always credits available
     when we are leader since we will not "become" leader again until
     we are done, so at most one frag is in flight at a time. */

  uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->pack_out->mem, ctx->pack_out->chunk );

  fd_became_leader_t * leader = (fd_became_leader_t *)dst;
  leader->slot_start_ns = slot_start_ns;
  leader->slot_end_ns = (long)((double)slot_start_ns + ctx->slot_duration_ns);
  leader->bank = ctx->current_leader_bank;
  leader->max_microblocks_in_slot = ctx->max_microblocks_per_slot;
  leader->ticks_per_slot = ctx->ticks_per_slot;
  leader->total_skipped_ticks = ctx->ticks_per_slot*(slot-ctx->reset_slot);
  leader->epoch = epoch;
  leader->bundle->config[0] = config[0];
  leader->slot = slot;

  leader->limits.slot_max_cost = ctx->limits.slot_max_cost;
  leader->limits.slot_max_vote_cost = ctx->limits.slot_max_vote_cost;
  leader->limits.slot_max_write_cost_per_acct = ctx->limits.slot_max_write_cost_per_acct;

  memcpy( leader->bundle->last_blockhash, ctx->reset_hash, 32UL );
  memcpy( leader->bundle->tip_receiver_owner, tip_receiver_owner, 32UL );

  /* Hard stop: our own ticks plus the skipped ticks we must produce
     would exceed the protocol bound. */
  if( FD_UNLIKELY( leader->ticks_per_slot+leader->total_skipped_ticks>=MAX_SKIPPED_TICKS ) )
    FD_LOG_ERR(( "Too many skipped ticks %lu for slot %lu, chain must halt", leader->ticks_per_slot+leader->total_skipped_ticks, slot ));

  ulong sig = fd_disco_poh_sig( slot, POH_PKT_TYPE_BECAME_LEADER, 0UL );
  fd_stem_publish( ctx->stem, ctx->pack_out->idx, sig, ctx->pack_out->chunk, sizeof(fd_became_leader_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
  ctx->pack_out->chunk = fd_dcache_compact_next( ctx->pack_out->chunk, sizeof(fd_became_leader_t), ctx->pack_out->chunk0, ctx->pack_out->wmark );

  /* increment refcount for pack's reference to the current leader bank */
  if( FD_UNLIKELY( ctx->current_leader_bank ) ) {
    ctx->pack_leader_bank = ctx->current_leader_bank;
    fd_ext_bank_acquire( ctx->pack_leader_bank );
  }
}
1098 :
1099 : /* The PoH tile knows when it should become leader by waiting for its
1100 : leader slot (with the operating system clock). This function is so
1101 : that when it becomes the leader, it can be told what the leader bank
1102 : is by the replay stage. See the notes in the long comment above for
1103 : more on how this works. */
1104 :
/* See the comment above: replay calls this to hand us the leader bank
   once we have actually become leader.  `bank` is the opaque Agave
   leader bank; `slot`/`epoch` identify the leader slot; the remaining
   arguments carry clock and CU-limit configuration from the bank. */
CALLED_FROM_RUST void
fd_ext_poh_begin_leader( void const * bank,
                         ulong        slot,
                         ulong        epoch,
                         ulong        hashcnt_per_tick,
                         ulong        cus_block_limit,
                         ulong        cus_vote_cost_limit,
                         ulong        cus_account_cost_limit ) {
  fd_pohh_tile_t * ctx = fd_ext_poh_write_lock();

  /* Replay must not give us a bank while one is already active. */
  FD_TEST( !ctx->current_leader_bank );

  if( FD_UNLIKELY( slot!=ctx->slot ) ) FD_LOG_ERR(( "Trying to begin leader slot %lu but we are now on slot %lu", slot, ctx->slot ));
  if( FD_UNLIKELY( slot!=ctx->next_leader_slot ) ) FD_LOG_ERR(( "Trying to begin leader slot %lu but next leader slot is %lu", slot, ctx->next_leader_slot ));

  if( FD_UNLIKELY( ctx->hashcnt_per_tick!=hashcnt_per_tick ) ) {
    FD_LOG_WARNING(( "hashes per tick changed from %lu to %lu", ctx->hashcnt_per_tick, hashcnt_per_tick ));

    /* Recompute derived information about the clock. */
    ctx->hashcnt_duration_ns = (double)ctx->tick_duration_ns/(double)hashcnt_per_tick;
    ctx->hashcnt_per_slot = ctx->ticks_per_slot*hashcnt_per_tick;
    ctx->hashcnt_per_tick = hashcnt_per_tick;

    /* Discard any ticks we might have done in the interim. They will
       have the wrong number of hashes per tick. We can just catch back
       up quickly if not too many slots were skipped and hopefully
       publish on time. Note that tick production and verification of
       skipped slots is done for the eventual bank that publishes a
       slot, for example:

        Reset Slot:            998
        Epoch Transition Slot: 1000
        Leader Slot:           1002

       In this case, if a feature changing the hashcnt_per_tick is
       activated in slot 1000, and we are publishing empty ticks for
       slots 998, 999, 1000, and 1001, they should all have the new
       hashes_per_tick number of hashes, rather than the older one, or
       some combination. */

    FD_TEST( ctx->last_slot==ctx->reset_slot );
    FD_TEST( !ctx->last_hashcnt );
    ctx->slot = ctx->reset_slot;
    ctx->hashcnt = 0UL;
  }

  if( FD_UNLIKELY( ctx->hashcnt_per_tick==1UL ) ) {
    /* Low power producer, maximum of one microblock per tick in the slot */
    ctx->max_microblocks_per_slot = ctx->ticks_per_slot;
  } else {
    /* See the long comment in after_credit for this limit */
    ctx->max_microblocks_per_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ctx->ticks_per_slot*(ctx->hashcnt_per_tick-1UL) );
  }

  ctx->current_leader_bank = bank;
  ctx->microblocks_lower_bound = 0UL;
  ctx->cus_used = 0UL;

  /* Adopt the bank's CU limits, then clamp them below. */
  ctx->limits.slot_max_cost = cus_block_limit;
  ctx->limits.slot_max_vote_cost = cus_vote_cost_limit;
  ctx->limits.slot_max_write_cost_per_acct = cus_account_cost_limit;

  /* clamp and warn if we are underutilizing CUs */
  if( FD_UNLIKELY( ctx->limits.slot_max_cost > FD_PACK_MAX_COST_PER_BLOCK_UPPER_BOUND ) ) {
    FD_LOG_WARNING(( "Underutilizing protocol slot CU limit. protocol_limit=%lu validator_limit=%lu", ctx->limits.slot_max_cost, FD_PACK_MAX_COST_PER_BLOCK_UPPER_BOUND ));
    ctx->limits.slot_max_cost = FD_PACK_MAX_COST_PER_BLOCK_UPPER_BOUND;
  }
  if( FD_UNLIKELY( ctx->limits.slot_max_vote_cost > FD_PACK_MAX_VOTE_COST_PER_BLOCK_UPPER_BOUND ) ) {
    FD_LOG_WARNING(( "Underutilizing protocol vote CU limit. protocol_limit=%lu validator_limit=%lu", ctx->limits.slot_max_vote_cost, FD_PACK_MAX_VOTE_COST_PER_BLOCK_UPPER_BOUND ));
    ctx->limits.slot_max_vote_cost = FD_PACK_MAX_VOTE_COST_PER_BLOCK_UPPER_BOUND;
  }
  if( FD_UNLIKELY( ctx->limits.slot_max_write_cost_per_acct > FD_PACK_MAX_WRITE_COST_PER_ACCT_UPPER_BOUND ) ) {
    FD_LOG_WARNING(( "Underutilizing protocol write CU limit. protocol_limit=%lu validator_limit=%lu", ctx->limits.slot_max_write_cost_per_acct, FD_PACK_MAX_WRITE_COST_PER_ACCT_UPPER_BOUND ));
    ctx->limits.slot_max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT_UPPER_BOUND;
  }

  /* We are about to start publishing to the shred tile for this slot
     so update the highwater mark so we never republish in this slot
     again. Also check that the leader slot is greater than the
     highwater, which should have been ensured earlier. */

  FD_TEST( ctx->highwater_leader_slot==ULONG_MAX || slot>=ctx->highwater_leader_slot );
  ctx->highwater_leader_slot = fd_ulong_max( fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot ), slot );

  publish_became_leader( ctx, slot, epoch );

  /* PoH ends the slot once it "ticks" through all of the hashes, but
     we only want that to happen if we received a done packing message
     from pack, so we always reserve an empty microblock at the end so
     the tick advance will not end the slot without being told.

     This should be after publish_became_leader so that pack receives
     the original (un-inflated) max_microblocks_per_slot. */
  ctx->max_microblocks_per_slot += 1UL;

  FD_LOG_INFO(( "fd_ext_poh_begin_leader(slot=%lu, highwater_leader_slot=%lu, last_slot=%lu, last_hashcnt=%lu)", slot, ctx->highwater_leader_slot, ctx->last_slot, ctx->last_hashcnt ));

  fd_ext_poh_write_unlock();
}
1204 :
/* Determine the next slot in the leader schedule in which we are
   leader, including the current slot.  If we are not leader in what
   remains of the current and next epoch, return ULONG_MAX. */
1208 :
1209 : static inline CALLED_FROM_RUST ulong
1210 0 : next_leader_slot( fd_pohh_tile_t * ctx ) {
1211 : /* If we have published anything in a particular slot, then we
1212 : should never become leader for that slot again. */
1213 0 : ulong min_leader_slot = fd_ulong_max( ctx->slot, fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot ) );
1214 0 : return fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, min_leader_slot, &ctx->identity_key );
1215 0 : }
1216 :
1217 : extern int
1218 : fd_ext_admin_rpc_set_identity( uchar const * identity_keypair,
1219 : int require_tower );
1220 :
/* maybe_change_identity services the keyswitch state machine.  Returns
   1 exactly when a previously-halted key switch is unhalted (caller
   may want to recompute leader state); returns 0 otherwise, including
   when a pending switch is performed or refused.  Pass
   definitely_not_leader=1 when the caller knows we cannot currently be
   leader, to skip the is_leader guard. */
static inline int FD_FN_SENSITIVE
maybe_change_identity( fd_pohh_tile_t * ctx,
                       int              definitely_not_leader ) {
  /* Unhalt path: a completed switch was waiting for the go-ahead. */
  if( FD_UNLIKELY( ctx->halted_switching_key && fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_UNHALT_PENDING ) ) {
    ctx->halted_switching_key = 0;
    fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
    return 1;
  }

  /* Cannot change identity while in the middle of a leader slot, else
     poh state machine would become corrupt. */

  int is_leader = !definitely_not_leader && ctx->next_leader_slot!=ULONG_MAX && ctx->slot>=ctx->next_leader_slot;
  if( FD_UNLIKELY( is_leader ) ) return 0;

  if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
    int failed = fd_ext_admin_rpc_set_identity( ctx->keyswitch->bytes, fd_keyswitch_param_query( ctx->keyswitch )==1 );
    /* Scrub the private key material regardless of outcome; the fence
       keeps the zeroize from being reordered past the state update. */
    fd_memzero_explicit( ctx->keyswitch->bytes, 32UL );
    FD_COMPILER_MFENCE();
    if( FD_UNLIKELY( failed==-1 ) ) {
      fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_FAILED );
      return 0;
    }

    /* Adopt the new public identity key (stored after the private
       key bytes in the keyswitch buffer). */
    memcpy( ctx->identity_key.uc, ctx->keyswitch->bytes+32UL, 32UL );

    /* When we switch key, we might have ticked part way through a slot
       that we are now leader in. This violates the contract of the
       tile, that when we become leader, we have not ticked in that slot
       at all. To see why this would be bad, consider the case where we
       have ticked almost to the end, and there isn't enough space left
       to reserve the minimum amount of microblocks needed by pack.

       To resolve this, we just reset PoH back to the reset slot, and
       let it try to catch back up quickly. This is OK since the network
       rarely skips. */
    ctx->slot = ctx->reset_slot;
    ctx->hashcnt = 0UL;
    memcpy( ctx->hash, ctx->reset_hash, 32UL );

    /* Halt the leader pipeline until we are told to unhalt (see the
       unhalt path above and the halted_switching_key checks
       elsewhere). */
    ctx->halted_switching_key = 1;
    ctx->keyswitch->result = ctx->shred_seq;
    fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
  }

  return 0;
}
1268 :
/* no_longer_leader tears down leader state: releases our refcount on
   the current leader bank (if any), bumps the highwater mark so we can
   never republish into the abandoned slot, processes any pending
   identity switch, recomputes the next leader slot, and finally
   signals the replay stage that leadership changed.  Callers must
   update ctx state (e.g. ctx->hashcnt) before calling; see the
   ordering note at the call site in fd_ext_poh_reset. */
static CALLED_FROM_RUST void
no_longer_leader( fd_pohh_tile_t * ctx ) {
  if( FD_UNLIKELY( ctx->current_leader_bank ) ) fd_ext_bank_release( ctx->current_leader_bank );
  /* If we stop being leader in a slot, we can never become leader in
     that slot again, and all in-flight microblocks for that slot
     should be dropped. */
  ctx->highwater_leader_slot = fd_ulong_max( fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot ), ctx->slot );
  ctx->current_leader_bank = NULL;
  /* definitely_not_leader=1: we just gave up leadership. */
  int identity_changed = maybe_change_identity( ctx, 1 );
  ctx->next_leader_slot = next_leader_slot( ctx );
  if( FD_UNLIKELY( identity_changed ) ) {
    FD_LOG_INFO(( "fd_poh_identity_changed(next_leader_slot=%lu)", ctx->next_leader_slot ));
  }

  /* Make all the state changes above visible before notifying the
     replay stage over the crossbeam channel. */
  FD_COMPILER_MFENCE();
  fd_ext_poh_signal_leader_change( ctx->signal_leader_change );
  FD_LOG_INFO(( "no_longer_leader(next_leader_slot=%lu)", ctx->next_leader_slot ));
}
1287 :
1288 : /* fd_ext_poh_reset is called by the Agave client when a slot on
1289 : the active fork has finished a block and we need to reset our PoH to
1290 : be ticking on top of the block it produced. */
1291 :
1292 : CALLED_FROM_RUST void
1293 : fd_ext_poh_reset( ulong completed_bank_slot, /* The slot that successfully produced a block */
1294 : uchar const * reset_blockhash, /* The hash of the last tick in the produced block */
1295 : ulong hashcnt_per_tick, /* The hashcnt per tick of the bank that completed */
1296 : uchar const * parent_block_id, /* The block id of the parent block */
1297 0 : ulong const * features_activation /* The activation slot of shred-tile features */ ) {
1298 0 : fd_pohh_tile_t * ctx = fd_ext_poh_write_lock();
1299 :
1300 0 : ulong slot_before_reset = ctx->slot;
1301 0 : int leader_before_reset = ctx->slot>=ctx->next_leader_slot;
1302 0 : if( FD_UNLIKELY( leader_before_reset && ctx->current_leader_bank ) ) {
    /* If we were in the middle of a leader slot that we notified pack
       to start packing for, we can never publish into that slot
       again; mark all in-flight microblocks to be dropped. */
1306 0 : ctx->highwater_leader_slot = fd_ulong_max( fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot ), 1UL+ctx->slot );
1307 0 : }
1308 :
1309 0 : ctx->leader_bank_start_ns = fd_log_wallclock(); /* safe to call from Rust */
1310 0 : if( FD_UNLIKELY( ctx->expect_sequential_leader_slot==(completed_bank_slot+1UL) ) ) {
1311 : /* If we are being reset onto a slot, it means some block was fully
1312 : processed, so we reset to build on top of it. Typically we want
1313 : to update the reset_slot_start_ns to the current time, because
1314 : the network will give the next leader 400ms to publish,
1315 : regardless of how long the prior leader took.
1316 :
1317 : But: if we were leader in the prior slot, and the block was our
1318 : own we can do better. We know that the next slot should start
1319 : exactly 400ms after the prior one started, so we can use that as
1320 : the reset slot start time instead. */
1321 0 : ctx->reset_slot_start_ns = ctx->reset_slot_start_ns + (long)((double)((completed_bank_slot+1UL)-ctx->reset_slot)*ctx->slot_duration_ns);
1322 0 : } else {
1323 0 : ctx->reset_slot_start_ns = ctx->leader_bank_start_ns;
1324 0 : }
1325 0 : ctx->expect_sequential_leader_slot = ULONG_MAX;
1326 :
1327 0 : memcpy( ctx->reset_hash, reset_blockhash, 32UL );
1328 0 : memcpy( ctx->hash, reset_blockhash, 32UL );
1329 0 : if( FD_LIKELY( parent_block_id!=NULL ) ) {
1330 0 : ctx->parent_slot = completed_bank_slot;
1331 0 : memcpy( ctx->parent_block_id, parent_block_id, 32UL );
1332 0 : }
1333 0 : ctx->slot = completed_bank_slot+1UL;
1334 0 : ctx->hashcnt = 0UL;
1335 0 : ctx->last_slot = ctx->slot;
1336 0 : ctx->last_hashcnt = 0UL;
1337 0 : ctx->reset_slot = ctx->slot;
1338 :
1339 0 : if( FD_UNLIKELY( ctx->hashcnt_per_tick!=hashcnt_per_tick ) ) {
1340 0 : FD_LOG_WARNING(( "hashes per tick changed from %lu to %lu", ctx->hashcnt_per_tick, hashcnt_per_tick ));
1341 :
1342 : /* Recompute derived information about the clock. */
1343 0 : ctx->hashcnt_duration_ns = (double)ctx->tick_duration_ns/(double)hashcnt_per_tick;
1344 0 : ctx->hashcnt_per_slot = ctx->ticks_per_slot*hashcnt_per_tick;
1345 0 : ctx->hashcnt_per_tick = hashcnt_per_tick;
1346 0 : }
1347 :
1348 0 : if( FD_UNLIKELY( ctx->hashcnt_per_tick==1UL ) ) {
1349 : /* Low power producer, maximum of one microblock per tick in the slot */
1350 0 : ctx->max_microblocks_per_slot = ctx->ticks_per_slot;
1351 0 : } else {
1352 : /* See the long comment in after_credit for this limit */
1353 0 : ctx->max_microblocks_per_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ctx->ticks_per_slot*(ctx->hashcnt_per_tick-1UL) );
1354 0 : }
1355 :
1356 : /* When we reset, we need to allow PoH to tick freely again rather
1357 : than being constrained. If we are leader after the reset, this
1358 : is OK because we won't tick until we get a bank, and the lower
1359 : bound will be reset with the value from the bank. */
1360 0 : ctx->microblocks_lower_bound = ctx->max_microblocks_per_slot;
1361 :
1362 0 : if( FD_UNLIKELY( leader_before_reset ) ) {
1363 : /* No longer have a leader bank if we are reset. Replay stage will
1364 : call back again to give us a new one if we should become leader
1365 : for the reset slot.
1366 :
1367 : The order is important here, ctx->hashcnt must be updated before
1368 : calling no_longer_leader. */
1369 0 : no_longer_leader( ctx );
1370 0 : }
1371 0 : ctx->next_leader_slot = next_leader_slot( ctx );
1372 0 : FD_LOG_INFO(( "fd_ext_poh_reset(slot=%lu,next_leader_slot=%lu)", ctx->reset_slot, ctx->next_leader_slot ));
1373 :
1374 0 : if( FD_UNLIKELY( ctx->slot>=ctx->next_leader_slot ) ) {
1375 : /* We are leader after the reset... two cases: */
1376 0 : if( FD_LIKELY( ctx->slot==slot_before_reset ) ) {
1377 : /* 1. We are reset onto the same slot we are already leader on.
1378 : This is a common case when we have two leader slots in a
1379 : row, replay stage will reset us to our own slot. No need to
1380 : do anything here, we already sent a SLOT_START. */
1381 0 : FD_TEST( leader_before_reset );
1382 0 : } else {
1383 : /* 2. We are reset onto a different slot. If we were leader
1384 : before, we should first end that slot, then begin the new
1385 : one if we are newly leader now. */
1386 0 : if( FD_LIKELY( leader_before_reset ) ) publish_plugin_slot_end( ctx, slot_before_reset, ctx->cus_used );
1387 0 : else publish_plugin_slot_start( ctx, ctx->next_leader_slot, ctx->reset_slot );
1388 0 : }
1389 0 : } else {
1390 0 : if( FD_UNLIKELY( leader_before_reset ) ) publish_plugin_slot_end( ctx, slot_before_reset, ctx->cus_used );
1391 0 : }
1392 :
1393 : /* There is a subset of FD_SHRED_FEATURES_ACTIVATION_... slots that
1394 : the shred tile needs to be aware of. Since their computation
1395 : requires the bank, we are forced (so far) to receive them here
1396 : from the Rust side, before forwarding them to the shred tile as
1397 : POH_PKT_TYPE_FEAT_ACT_SLOT. This is not elegant, and it should
1398 : be revised in the future (TODO), but it provides a "temporary"
1399 : working solution to handle features activation. */
1400 0 : if( FD_UNLIKELY( !fd_memeq( ctx->features_activation->slots, features_activation, sizeof(fd_shred_features_activation_t) ) ) ) {
1401 0 : fd_memcpy( ctx->features_activation->slots, features_activation, sizeof(fd_shred_features_activation_t) );
1402 0 : ctx->features_activation_avail = 1UL;
1403 0 : }
1404 :
1405 0 : fd_ext_poh_write_unlock();
1406 0 : }
1407 :
1408 : /* Since it can't easily return an Option<Pubkey>, return 1 for Some and
1409 : 0 for None. */
1410 : CALLED_FROM_RUST int
1411 : fd_ext_poh_get_leader_after_n_slots( ulong n,
1412 0 : uchar out_pubkey[ static 32 ] ) {
1413 0 : fd_pohh_tile_t * ctx = fd_ext_poh_write_lock();
1414 0 : ulong slot = ctx->slot + n;
1415 0 : fd_pubkey_t const * leader = fd_multi_epoch_leaders_get_leader_for_slot( ctx->mleaders, slot );
1416 :
1417 0 : int copied = 0;
1418 0 : if( FD_LIKELY( leader ) ) {
1419 0 : memcpy( out_pubkey, leader, 32UL );
1420 0 : copied = 1;
1421 0 : }
1422 0 : fd_ext_poh_write_unlock();
1423 0 : return copied;
1424 0 : }
1425 :
1426 : FD_FN_CONST static inline ulong
1427 0 : scratch_align( void ) {
1428 0 : return 128UL;
1429 0 : }
1430 :
1431 : FD_FN_PURE static inline ulong
1432 0 : scratch_footprint( fd_topo_tile_t const * tile ) {
1433 0 : (void)tile;
1434 0 : ulong l = FD_LAYOUT_INIT;
1435 0 : l = FD_LAYOUT_APPEND( l, alignof( fd_pohh_tile_t ), sizeof( fd_pohh_tile_t ) );
1436 0 : l = FD_LAYOUT_APPEND( l, FD_SHA256_ALIGN, FD_SHA256_FOOTPRINT );
1437 0 : return FD_LAYOUT_FINI( l, scratch_align() );
1438 0 : }
1439 :
/* publish_tick sends one tick (an empty entry) for the in-progress slot
   to the shred tile via shred_out.  The tick covers all hashes since
   ctx->last_hashcnt up to the next tick boundary.  If is_skipped is
   non-zero, the tick belongs to a skipped slot we are replaying while
   catching up, so the reference tick and block-complete flag are forced
   to zero.  Advances ctx->last_slot/last_hashcnt to account for the
   published tick. */
static void
publish_tick( fd_pohh_tile_t * ctx,
              fd_stem_context_t * stem,
              uchar hash[ static 32 ],
              int is_skipped ) {
  /* Hashcnt of the next tick boundary at or above last_hashcnt. */
  ulong hashcnt = ctx->hashcnt_per_tick*(1UL+(ctx->last_hashcnt/ctx->hashcnt_per_tick));

  uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->shred_out->mem, ctx->shred_out->chunk );

  FD_TEST( ctx->last_slot>=ctx->reset_slot );
  fd_entry_batch_meta_t * meta = (fd_entry_batch_meta_t *)dst;
  if( FD_UNLIKELY( is_skipped ) ) {
    /* We are publishing ticks for a skipped slot, the reference tick
       and block complete flags should always be zero. */
    meta->reference_tick = 0UL;
    meta->block_complete = 0;
  } else {
    meta->reference_tick = hashcnt/ctx->hashcnt_per_tick;
    meta->block_complete = hashcnt==ctx->hashcnt_per_slot;
  }

  /* If this tick completes the block, ctx->slot has conceptually moved
     on to the next slot; the tick itself still belongs to slot-1. */
  ulong slot = fd_ulong_if( meta->block_complete, ctx->slot-1UL, ctx->slot );
  meta->parent_offset = 1UL+slot-ctx->reset_slot;

  /* From poh_reset we received the block_id for ctx->parent_slot.
     Now we're telling shred tile to build on parent: (slot-meta->parent_offset).
     The block_id that we're passing is valid iff the two are the same,
     i.e. ctx->parent_slot == (slot-meta->parent_offset). */
  meta->parent_block_id_valid = ctx->parent_slot == (slot-meta->parent_offset);
  if( FD_LIKELY( meta->parent_block_id_valid ) ) {
    fd_memcpy( meta->parent_block_id, ctx->parent_block_id, 32UL );
  }

  FD_TEST( hashcnt>ctx->last_hashcnt );
  ulong hash_delta = hashcnt-ctx->last_hashcnt;

  /* The tick entry itself: hash delta since the previous entry, the
     PoH hash at the boundary, and zero transactions. */
  dst += sizeof(fd_entry_batch_meta_t);
  fd_entry_batch_header_t * tick = (fd_entry_batch_header_t *)dst;
  tick->hashcnt_delta = hash_delta;
  fd_memcpy( tick->hash, hash, 32UL );
  tick->txn_cnt = 0UL;

  ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
  ulong sz = sizeof(fd_entry_batch_meta_t)+sizeof(fd_entry_batch_header_t);
  ulong sig = fd_disco_poh_sig( slot, POH_PKT_TYPE_MICROBLOCK, 0UL );
  fd_stem_publish( stem, ctx->shred_out->idx, sig, ctx->shred_out->chunk, sz, 0UL, 0UL, tspub );
  ctx->shred_seq = stem->seqs[ ctx->shred_out->idx ];
  ctx->shred_out->chunk = fd_dcache_compact_next( ctx->shred_out->chunk, sz, ctx->shred_out->chunk0, ctx->shred_out->wmark );

  /* Roll last_slot/last_hashcnt forward past the published tick. */
  if( FD_UNLIKELY( hashcnt==ctx->hashcnt_per_slot ) ) {
    ctx->last_slot++;
    ctx->last_hashcnt = 0UL;
  } else {
    ctx->last_hashcnt = hashcnt;
  }
}
1496 :
1497 : static inline void
1498 : publish_features_activation( fd_pohh_tile_t * ctx,
1499 0 : fd_stem_context_t * stem ) {
1500 0 : uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->shred_out->mem, ctx->shred_out->chunk );
1501 0 : fd_shred_features_activation_t * act_data = (fd_shred_features_activation_t *)dst;
1502 0 : fd_memcpy( act_data, ctx->features_activation, sizeof(fd_shred_features_activation_t) );
1503 :
1504 0 : ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
1505 0 : ulong sz = sizeof(fd_shred_features_activation_t);
1506 0 : ulong sig = fd_disco_poh_sig( ctx->slot, POH_PKT_TYPE_FEAT_ACT_SLOT, 0UL );
1507 0 : fd_stem_publish( stem, ctx->shred_out->idx, sig, ctx->shred_out->chunk, sz, 0UL, 0UL, tspub );
1508 0 : ctx->shred_seq = stem->seqs[ ctx->shred_out->idx ];
1509 0 : ctx->shred_out->chunk = fd_dcache_compact_next( ctx->shred_out->chunk, sz, ctx->shred_out->chunk0, ctx->shred_out->wmark );
1510 0 : }
1511 :
/* after_credit is the tile's idle-loop hook: it services the external
   Rust-side lock handshake, flushes any pending features-activation
   update, replays skipped ticks if we became leader late, and then
   advances the PoH hash chain toward the current wallclock, publishing
   at most one tick per call and performing leader-transition state
   changes at tick/slot boundaries. */
static inline void
after_credit( fd_pohh_tile_t * ctx,
              fd_stem_context_t * stem,
              int * opt_poll_in,
              int * charge_busy ) {
  ctx->stem = stem;

  /* Cooperative lock handshake with fd_ext_poh_write_lock: if a caller
     is waiting, grant the lock and spin until it is returned before
     doing anything else this iteration. */
  FD_COMPILER_MFENCE();
  if( FD_UNLIKELY( fd_poh_waiting_lock ) ) {
    FD_VOLATILE( fd_poh_returned_lock ) = 1UL;
    FD_COMPILER_MFENCE();
    for(;;) {
      if( FD_UNLIKELY( !FD_VOLATILE_CONST( fd_poh_returned_lock ) ) ) break;
      FD_SPIN_PAUSE();
    }
    FD_COMPILER_MFENCE();
    FD_VOLATILE( fd_poh_waiting_lock ) = 0UL;
    *opt_poll_in = 0;
    *charge_busy = 1;
    return;
  }
  FD_COMPILER_MFENCE();

  if( FD_UNLIKELY( ctx->features_activation_avail ) ) {
    /* If we have received an update on features_activation, then
       forward them to the shred tile. In principle, this should
       happen at most once per slot. */
    publish_features_activation( ctx, stem );
    ctx->features_activation_avail = 0UL;
  }

  int is_leader = ctx->next_leader_slot!=ULONG_MAX && ctx->slot>=ctx->next_leader_slot;
  if( FD_UNLIKELY( is_leader && !ctx->current_leader_bank ) ) {
    /* If we are the leader, but we didn't yet learn what the leader
       bank object is from the replay stage, do not do any hashing.

       This is not ideal, but greatly simplifies the control flow. */
    return;
  }

  /* If we have skipped ticks pending because we skipped some slots to
     become leader, register them now one at a time. */
  if( FD_UNLIKELY( is_leader && ctx->last_slot<ctx->slot ) ) {
    ulong publish_hashcnt = ctx->last_hashcnt+ctx->hashcnt_per_tick;
    ulong tick_idx = (ctx->last_slot*ctx->ticks_per_slot+publish_hashcnt/ctx->hashcnt_per_tick)%MAX_SKIPPED_TICKS;

    fd_ext_poh_register_tick( ctx->current_leader_bank, ctx->skipped_tick_hashes[ tick_idx ] );
    publish_tick( ctx, stem, ctx->skipped_tick_hashes[ tick_idx ], 1 );

    /* If we are catching up now and publishing a bunch of skipped
       ticks, we do not want to process any incoming microblocks until
       all the skipped ticks have been published out; otherwise we would
       intersperse skipped tick messages with microblocks. */
    *opt_poll_in = 0;
    *charge_busy = 1;
    return;
  }

  int low_power_mode = ctx->hashcnt_per_tick==1UL;

  /* If we are the leader, always leave enough capacity in the slot so
     that we can mixin any potential microblocks still coming from the
     pack tile for this slot. */
  ulong max_remaining_microblocks = ctx->max_microblocks_per_slot - ctx->microblocks_lower_bound;

  /* With hashcnt_per_tick hashes per tick, we actually get
     hashcnt_per_tick-1 chances to mixin a microblock. For each tick
     span that we need to reserve, we also need to reserve the hashcnt
     for the tick, hence the +
     max_remaining_microblocks/(hashcnt_per_tick-1) rounded up.

     However, if hashcnt_per_tick is 1 because we're in low power mode,
     this should probably just be max_remaining_microblocks. */
  ulong max_remaining_ticks_or_microblocks = max_remaining_microblocks;
  if( FD_LIKELY( !low_power_mode ) ) max_remaining_ticks_or_microblocks += (max_remaining_microblocks+ctx->hashcnt_per_tick-2UL)/(ctx->hashcnt_per_tick-1UL);

  ulong restricted_hashcnt = fd_ulong_if( ctx->hashcnt_per_slot>=max_remaining_ticks_or_microblocks, ctx->hashcnt_per_slot-max_remaining_ticks_or_microblocks, 0UL );

  ulong min_hashcnt = ctx->hashcnt;

  if( FD_LIKELY( !low_power_mode ) ) {
    /* Recall that there are two kinds of events that will get published
       to the shredder,

       (a) Ticks. These occur every 62,500 (hashcnt_per_tick) hashcnts,
           and there will be 64 (ticks_per_slot) of them in each slot.

           Ticks must not have any transactions mixed into the hash.
           This is not strictly needed in theory, but is required by the
           current consensus protocol. They get published here in
           after_credit.

       (b) Microblocks. These can occur at any other hashcnt, as long
           as it is not a tick. Microblocks cannot be empty, and must
           have at least one transaction mixed in. These get
           published in after_frag.

       If hashcnt_per_tick is 1, then we are in low power mode and the
       following does not apply, since we can mix in transactions at any
       time.

       In the normal, non-low-power mode, though, we have to be careful
       to make sure that we do not publish microblocks on tick
       boundaries. To do that, we need to obey two rules:
         (i)  after_credit must not leave hashcnt one before a tick
              boundary
         (ii) if after_credit begins one before a tick boundary, it must
              advance hashcnt and publish the tick

       There's some interplay between min_hashcnt and restricted_hashcnt
       here, and we need to show that there's always a value of
       target_hashcnt we can pick such that
           min_hashcnt <= target_hashcnt <= restricted_hashcnt.
       We'll prove this by induction for current_slot==0 and
       is_leader==true, since all other slots should be the same.

       Let m_j and r_j be the min_hashcnt and restricted_hashcnt
       (respectively) for the jth call to after_credit in a slot. We
       want to show that for all values of j, it's possible to pick a
       value h_j, the value of target_hashcnt for the jth call to
       after_credit (which is also the value of hashcnt after
       after_credit has completed) such that m_j<=h_j<=r_j.

       Additionally, let T be hashcnt_per_tick and N be ticks_per_slot.

       Starting with the base case, j==0. m_j=0, and
          r_0 = N*T - max_microblocks_per_slot
                    - ceil(max_microblocks_per_slot/(T-1)).

       This is monotonic decreasing in max_microblocks_per_slot, so it
       achieves its minimum when max_microblocks_per_slot is its
       maximum.
           r_0 >= N*T - N*(T-1) - ceil( (N*(T-1))/(T-1))
                = N*T - N*(T-1)-N = 0.
       Thus, m_0 <= r_0, as desired.



       Then, for the inductive step, assume there exists h_j such that
       m_j<=h_j<=r_j, and we want to show that there exists h_{j+1},
       which is the same as showing m_{j+1}<=r_{j+1}.

       Let a_j be 1 if we had a microblock immediately following the jth
       call to after_credit, and 0 otherwise. Then hashcnt at the start
       of the (j+1)th call to after_frag is h_j+a_j.
       Also, set b_{j+1}=1 if we are in the case covered by rule (ii)
       above during the (j+1)th call to after_credit, i.e. if
       (h_j+a_j)%T==T-1. Thus, m_{j+1} = h_j + a_j + b_{j+1}.

       If we received an additional microblock, then
       max_remaining_microblocks goes down by 1, and
       max_remaining_ticks_or_microblocks goes down by either 1 or 2,
       which means restricted_hashcnt goes up by either 1 or 2. In
       particular, it goes up by 2 if the new value of
       max_remaining_microblocks (at the start of the (j+1)th call to
       after_credit) is congruent to 0 mod T-1. Let b'_{j+1} be 1 if
       this condition is met and 0 otherwise. If we receive a
       done_packing message, restricted_hashcnt can go up by more, but
       we can ignore that case, since it is less restrictive.
       Thus, r_{j+1}=r_j+a_j+b'_{j+1}.

       If h_j < r_j (strictly less), then h_j+a_j < r_j+a_j. And thus,
       since b_{j+1}<=b'_{j+1}+1, just by virtue of them both being
       binary,
             h_j + a_j + b_{j+1} < r_j + a_j + b'_{j+1} + 1,
       which is the same (for integers) as
             h_j + a_j + b_{j+1} <= r_j + a_j + b'_{j+1},
             m_{j+1} <= r_{j+1}

       On the other hand, if h_j==r_j, this is easy unless b_{j+1}==1,
       which can also only happen if a_j==1. Then (h_j+a_j)%T==T-1,
       which means there's an integer k such that

           h_j+a_j==(ticks_per_slot-k)*T-1
           h_j    ==ticks_per_slot*T - k*(T-1)-1 - k-1
                  ==ticks_per_slot*T - (k*(T-1)+1) - ceil( (k*(T-1)+1)/(T-1) )

       Since h_j==r_j in this case, and
       r_j==(ticks_per_slot*T) - max_remaining_microblocks_j - ceil(max_remaining_microblocks_j/(T-1)),
       we can see that the value of max_remaining_microblocks at the
       start of the jth call to after_credit is k*(T-1)+1. Again, since
       a_j==1, then the value of max_remaining_microblocks at the start
       of the j+1th call to after_credit decreases by 1 to k*(T-1),
       which means b'_{j+1}=1.

       Thus, h_j + a_j + b_{j+1} == r_j + a_j + b'_{j+1}, so, in
       particular, h_{j+1}<=r_{j+1} as desired. */
    min_hashcnt += (ulong)(min_hashcnt%ctx->hashcnt_per_tick == (ctx->hashcnt_per_tick-1UL)); /* add b_{j+1}, enforcing rule (ii) */
  }
  /* Now figure out how many hashes are needed to "catch up" the hash
     count to the current system clock, and clamp it to the allowed
     range. */
  long now = fd_log_wallclock();
  ulong target_hashcnt;
  if( FD_LIKELY( !is_leader ) ) {
    target_hashcnt = (ulong)((double)(now - ctx->reset_slot_start_ns) / ctx->hashcnt_duration_ns) - (ctx->slot-ctx->reset_slot)*ctx->hashcnt_per_slot;
  } else {
    /* We might have gotten very behind on hashes, but if we are leader
       we want to catch up gradually over the remainder of our leader
       slot, not all at once right now. This helps keep the tile from
       being oversubscribed and taking a long time to process incoming
       microblocks. */
    long expected_slot_start_ns = ctx->reset_slot_start_ns + (long)((double)(ctx->slot-ctx->reset_slot)*ctx->slot_duration_ns);
    double actual_slot_duration_ns = ctx->slot_duration_ns<(double)(ctx->leader_bank_start_ns - expected_slot_start_ns) ? 0.0 : ctx->slot_duration_ns - (double)(ctx->leader_bank_start_ns - expected_slot_start_ns);
    double actual_hashcnt_duration_ns = actual_slot_duration_ns / (double)ctx->hashcnt_per_slot;
    target_hashcnt = actual_hashcnt_duration_ns==0.0 ? restricted_hashcnt : (ulong)((double)(now - ctx->leader_bank_start_ns) / actual_hashcnt_duration_ns);
  }
  /* Clamp to [min_hashcnt, restricted_hashcnt] as above */
  target_hashcnt = fd_ulong_max( fd_ulong_min( target_hashcnt, restricted_hashcnt ), min_hashcnt );

  /* The above proof showed that it was always possible to pick a value
     of target_hashcnt, but we still have a lot of freedom in how to
     pick it. It simplifies the code a lot if we don't keep going after
     a tick in this function. In particular, we want to publish at most
     1 tick in this call, since otherwise we could consume infinite
     credits to publish here. The credits are set so that we should
     only ever publish one tick during this loop. Also, all the extra
     stuff (leader transitions, publishing ticks, etc.) we have to do
     happens at tick boundaries, so this lets us consolidate all those
     cases.

     Mathematically, since the current value of hashcnt is h_j+a_j, the
     next tick (advancing a full tick if we're currently at a tick) is
     t_{j+1} = T*(floor( (h_j+a_j)/T )+1). We need to show that if we set
     h'_{j+1} = min( h_{j+1}, t_{j+1} ), it is still valid.

     First, h'_{j+1} <= h_{j+1} <= r_{j+1}, so we're okay in that
     direction.

     Next, observe that t_{j+1}>=h_j + a_j + 1, and recall that b_{j+1}
     is 0 or 1. So then,
         t_{j+1} >= h_j+a_j+b_{j+1} = m_{j+1}.

     We know h_{j+1} >= m_{j+1} from before, so then h'_{j+1} >=
     m_{j+1}, as desired. */

  ulong next_tick_hashcnt = ctx->hashcnt_per_tick * (1UL+(ctx->hashcnt/ctx->hashcnt_per_tick));
  target_hashcnt = fd_ulong_min( target_hashcnt, next_tick_hashcnt );

  /* We still need to enforce rule (i). We know that min_hashcnt%T !=
     T-1 because of rule (ii). That means that if target_hashcnt%T ==
     T-1 at this point, target_hashcnt > min_hashcnt (notice the
     strict), so target_hashcnt-1 >= min_hashcnt and is thus still a
     valid choice for target_hashcnt. */
  target_hashcnt -= (ulong)( (!low_power_mode) & ((target_hashcnt%ctx->hashcnt_per_tick)==(ctx->hashcnt_per_tick-1UL)) );

  FD_TEST( target_hashcnt >= ctx->hashcnt );
  FD_TEST( target_hashcnt >= min_hashcnt );
  FD_TEST( target_hashcnt <= restricted_hashcnt );

  if( FD_UNLIKELY( ctx->hashcnt==target_hashcnt ) ) return; /* Nothing to do, don't publish a tick twice */

  *charge_busy = 1;

  if( FD_LIKELY( ctx->hashcnt<target_hashcnt ) ) {
    fd_sha256_hash_32_repeated( ctx->hash, ctx->hash, target_hashcnt-ctx->hashcnt );
    ctx->hashcnt = target_hashcnt;
  }

  if( FD_UNLIKELY( ctx->hashcnt==ctx->hashcnt_per_slot ) ) {
    ctx->slot++;
    ctx->hashcnt = 0UL;
  }

  if( FD_UNLIKELY( !is_leader && !(ctx->hashcnt%ctx->hashcnt_per_tick ) ) ) {
    /* We finished a tick while not leader... save the current hash so
       it can be played back into the bank when we become the leader. */
    ulong tick_idx = (ctx->slot*ctx->ticks_per_slot+ctx->hashcnt/ctx->hashcnt_per_tick)%MAX_SKIPPED_TICKS;
    fd_memcpy( ctx->skipped_tick_hashes[ tick_idx ], ctx->hash, 32UL );

    ulong initial_tick_idx = (ctx->last_slot*ctx->ticks_per_slot+ctx->last_hashcnt/ctx->hashcnt_per_tick)%MAX_SKIPPED_TICKS;
    if( FD_UNLIKELY( tick_idx==initial_tick_idx ) ) FD_LOG_ERR(( "Too many skipped ticks from slot %lu to slot %lu, chain must halt", ctx->last_slot, ctx->slot ));
  }

  if( FD_UNLIKELY( is_leader && !(ctx->hashcnt%ctx->hashcnt_per_tick) ) ) {
    /* We ticked while leader... tell the leader bank. */
    fd_ext_poh_register_tick( ctx->current_leader_bank, ctx->hash );

    /* And send an empty microblock (a tick) to the shred tile. */
    publish_tick( ctx, stem, ctx->hash, 0 );
  }

  if( FD_UNLIKELY( !is_leader && ctx->slot>=ctx->next_leader_slot ) ) {
    /* We ticked while not leader and are now leader... transition
       the state machine. */
    publish_plugin_slot_start( ctx, ctx->next_leader_slot, ctx->reset_slot );
    FD_LOG_INFO(( "fd_poh_ticked_into_leader(slot=%lu, reset_slot=%lu)", ctx->next_leader_slot, ctx->reset_slot ));
  }

  if( FD_UNLIKELY( is_leader && ctx->slot>ctx->next_leader_slot ) ) {
    /* We ticked while leader and are no longer leader... transition
       the state machine. */
    FD_TEST( !max_remaining_microblocks );
    publish_plugin_slot_end( ctx, ctx->next_leader_slot, ctx->cus_used );
    FD_LOG_INFO(( "fd_poh_ticked_outof_leader(slot=%lu)", ctx->next_leader_slot ));

    no_longer_leader( ctx );
    ctx->expect_sequential_leader_slot = ctx->slot;

    double tick_per_ns = fd_tempo_tick_per_ns( NULL );
    fd_histf_sample( ctx->slot_done_delay, (ulong)((double)(fd_log_wallclock()-ctx->reset_slot_start_ns)*tick_per_ns) );
    ctx->next_leader_slot = next_leader_slot( ctx );

    if( FD_UNLIKELY( ctx->slot>=ctx->next_leader_slot ) ) {
      /* We finished a leader slot, and are immediately leader for the
         following slot... transition. */
      publish_plugin_slot_start( ctx, ctx->next_leader_slot, ctx->next_leader_slot-1UL );
      FD_LOG_INFO(( "fd_poh_ticked_into_leader(slot=%lu, reset_slot=%lu)", ctx->next_leader_slot, ctx->next_leader_slot-1UL ));
    }
  }
}
1823 :
1824 : static inline void
1825 0 : during_housekeeping( fd_pohh_tile_t * ctx ) {
1826 0 : if( FD_UNLIKELY( maybe_change_identity( ctx, 0 ) ) ) {
1827 0 : ctx->next_leader_slot = next_leader_slot( ctx );
1828 0 : FD_LOG_INFO(( "fd_poh_identity_changed(next_leader_slot=%lu)", ctx->next_leader_slot ));
1829 :
1830 : /* Signal replay to check if we are leader again, in-case it's stuck
1831 : because everything already replayed. */
1832 0 : FD_COMPILER_MFENCE();
1833 0 : fd_ext_poh_signal_leader_change( ctx->signal_leader_change );
1834 0 : }
1835 0 : }
1836 :
1837 : static inline void
1838 0 : metrics_write( fd_pohh_tile_t * ctx ) {
1839 0 : FD_MHIST_COPY( POHH, BEGIN_LEADER_DELAY_SECONDS, ctx->begin_leader_delay );
1840 0 : FD_MHIST_COPY( POHH, FIRST_MICROBLOCK_DELAY_SECONDS, ctx->first_microblock_delay );
1841 0 : FD_MHIST_COPY( POHH, SLOT_DONE_DELAY_SECONDS, ctx->slot_done_delay );
1842 0 : FD_MHIST_COPY( POHH, BUNDLE_INITIALIZE_DELAY_SECONDS, ctx->bundle_init_delay );
1843 0 : }
1844 :
1845 : static int
1846 : before_frag( fd_pohh_tile_t * ctx,
1847 : ulong in_idx,
1848 : ulong seq,
1849 0 : ulong sig ) {
1850 0 : (void)seq;
1851 :
1852 0 : if( FD_LIKELY( ctx->in_kind[ in_idx ]!=IN_KIND_BANK && ctx->in_kind[ in_idx ]!=IN_KIND_PACK ) ) return 0;
1853 :
1854 0 : if( FD_UNLIKELY( sig==ULONG_MAX ) ) {
1855 : /* Banks are drained, release pack's owenership of the current bank */
1856 0 : if( FD_UNLIKELY( ctx->pack_leader_bank ) ) fd_ext_bank_release( ctx->pack_leader_bank );
1857 0 : ctx->pack_leader_bank = NULL;
1858 0 : return 1; /* discard */
1859 0 : }
1860 :
1861 0 : uint pack_idx = (uint)fd_disco_execle_sig_pack_idx( sig );
1862 0 : FD_TEST( ((int)(pack_idx-ctx->expect_pack_idx))>=0L );
1863 0 : if( FD_UNLIKELY( pack_idx!=ctx->expect_pack_idx ) ) return -1;
1864 0 : ctx->expect_pack_idx++;
1865 :
1866 0 : return 0;
1867 0 : }
1868 :
/* during_frag copies an incoming frag's payload into tile-local storage
   before after_frag processes it.  EPOCH frags initialize a stake
   message; PACK frags carry a done_packing notification which is fully
   consumed here (skip_frag is set so after_frag ignores it); BANK frags
   have their transactions and microblock trailer copied into ctx->_txns
   / ctx->_microblock_trailer.  Frags for a prior leader slot are marked
   to be skipped. */
static inline void
during_frag( fd_pohh_tile_t * ctx,
             ulong in_idx,
             ulong seq FD_PARAM_UNUSED,
             ulong sig,
             ulong chunk,
             ulong sz,
             ulong ctl FD_PARAM_UNUSED ) {
  ctx->skip_frag = 0;

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EPOCH ) ) {
    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
                   ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

    /* Stake message: start parsing it; after_frag finalizes it. */
    uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
    fd_multi_epoch_leaders_stake_msg_init( ctx->mleaders, fd_type_pun_const( dcache_entry ) );
    return;
  }

  /* Only BANK and PACK frags carry a slot in their sig. */
  ulong slot;
  switch( ctx->in_kind[ in_idx ] ) {
    case IN_KIND_BANK:
    case IN_KIND_PACK: {
      slot = fd_disco_execle_sig_slot( sig );
      break;
    }
    default:
      FD_LOG_ERR(( "unexpected in_kind %d", ctx->in_kind[ in_idx ] ));
  }

  /* The following sequence is possible...

      1. We become leader in slot 10
      2. While leader, we switch to a fork that is on slot 8, where
         we are leader
      3. We get the in-flight microblocks for slot 10

     These in-flight microblocks need to be dropped, so we check
     against the high water mark (highwater_leader_slot) rather than
     the current hashcnt here when determining what to drop.

     We know if the slot is lower than the high water mark it's from a stale
     leader slot, because we will not become leader for the same slot twice
     even if we are reset back in time (to prevent duplicate blocks). */
  int is_frag_for_prior_leader_slot = slot<ctx->highwater_leader_slot;

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_PACK ) ) {
    /* We now know the real amount of microblocks published, so set an
       exact bound for once we receive them. */
    ctx->skip_frag = 1;
    if( FD_UNLIKELY( is_frag_for_prior_leader_slot ) ) return;
    fd_done_packing_t const * done_packing = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );

    FD_TEST( ctx->microblocks_lower_bound<=ctx->max_microblocks_per_slot );
    FD_TEST( done_packing->microblocks_in_slot<=ctx->max_microblocks_per_slot-1UL );
    FD_LOG_INFO(( "done_packing(slot=%lu,seen_microblocks=%lu,microblocks_in_slot=%lu)",
                  ctx->slot,
                  ctx->microblocks_lower_bound,
                  done_packing->microblocks_in_slot ));

    /* Credit back the unused microblock budget so after_credit's
       capacity reservation becomes exact. */
    ctx->microblocks_lower_bound += 1UL /* done_packing as a phantom "microblock"*/
                                  + (ctx->max_microblocks_per_slot-1UL) /* the canonical microblock limit */
                                  - done_packing->microblocks_in_slot /* the actual microblock count */;
    return;
  } else {
    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>USHORT_MAX ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

    uchar * src = (uchar *)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );

    /* Payload is transactions followed by a trailer; split them into
       the tile's staging buffers for after_frag. */
    fd_memcpy( ctx->_txns, src, sz-sizeof(fd_microblock_trailer_t) );
    fd_memcpy( ctx->_microblock_trailer, src+sz-sizeof(fd_microblock_trailer_t), sizeof(fd_microblock_trailer_t) );

    ctx->skip_frag = is_frag_for_prior_leader_slot;
  }
}
1946 :
/* publish_microblock sends one microblock (entry with transactions) for
   the given slot to the shred tile.  Transactions were staged into
   ctx->_txns by during_frag; only those flagged EXECUTE_SUCCESS are
   included.  hashcnt_delta is the number of PoH hashes since the
   previous published entry. */
static void
publish_microblock( fd_pohh_tile_t * ctx,
                    fd_stem_context_t * stem,
                    ulong slot,
                    ulong hashcnt_delta,
                    ulong txn_cnt ) {
  uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->shred_out->mem, ctx->shred_out->chunk );
  FD_TEST( slot>=ctx->reset_slot );
  fd_entry_batch_meta_t * meta = (fd_entry_batch_meta_t *)dst;
  meta->parent_offset = 1UL+slot-ctx->reset_slot;
  meta->reference_tick = (ctx->hashcnt/ctx->hashcnt_per_tick) % ctx->ticks_per_slot;
  /* hashcnt wraps to 0 exactly when the slot's last hash was consumed,
     i.e. this microblock completes the block. */
  meta->block_complete = !ctx->hashcnt;

  /* Refer to publish_tick() for details on meta->parent_block_id_valid. */
  meta->parent_block_id_valid = ctx->parent_slot == (slot-meta->parent_offset);
  if( FD_LIKELY( meta->parent_block_id_valid ) ) {
    fd_memcpy( meta->parent_block_id, ctx->parent_block_id, 32UL );
  }

  dst += sizeof(fd_entry_batch_meta_t);
  fd_entry_batch_header_t * header = (fd_entry_batch_header_t *)dst;
  header->hashcnt_delta = hashcnt_delta;
  fd_memcpy( header->hash, ctx->hash, 32UL );

  /* Pack the payloads of successfully executed transactions back to
     back after the header; failed ones are dropped from the entry. */
  dst += sizeof(fd_entry_batch_header_t);
  ulong payload_sz = 0UL;
  ulong included_txn_cnt = 0UL;
  for( ulong i=0UL; i<txn_cnt; i++ ) {
    fd_txn_p_t * txn = (fd_txn_p_t *)(ctx->_txns + i*sizeof(fd_txn_p_t));
    if( FD_UNLIKELY( !(txn->flags & FD_TXN_P_FLAGS_EXECUTE_SUCCESS) ) ) continue;

    fd_memcpy( dst, txn->payload, txn->payload_sz );
    payload_sz += txn->payload_sz;
    dst += txn->payload_sz;
    included_txn_cnt++;
  }
  header->txn_cnt = included_txn_cnt;

  /* We always have credits to publish here, because we have a burst
     value of 3 credits, and at most we will publish_tick() once and
     then publish_became_leader() once, leaving one credit here to
     publish the microblock. */
  ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
  ulong sz = sizeof(fd_entry_batch_meta_t)+sizeof(fd_entry_batch_header_t)+payload_sz;
  ulong new_sig = fd_disco_poh_sig( slot, POH_PKT_TYPE_MICROBLOCK, 0UL );
  fd_stem_publish( stem, ctx->shred_out->idx, new_sig, ctx->shred_out->chunk, sz, 0UL, 0UL, tspub );
  ctx->shred_seq = stem->seqs[ ctx->shred_out->idx ];
  ctx->shred_out->chunk = fd_dcache_compact_next( ctx->shred_out->chunk, sz, ctx->shred_out->chunk0, ctx->shred_out->wmark );
}
1996 :
1997 : static inline void
1998 : after_frag( fd_pohh_tile_t * ctx,
1999 : ulong in_idx,
2000 : ulong seq,
2001 : ulong sig,
2002 : ulong sz,
2003 : ulong tsorig,
2004 : ulong tspub,
2005 0 : fd_stem_context_t * stem ) {
2006 0 : (void)in_idx;
2007 0 : (void)seq;
2008 0 : (void)tsorig;
2009 0 : (void)tspub;
2010 :
2011 0 : if( FD_UNLIKELY( ctx->skip_frag ) ) return;
2012 :
2013 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EPOCH ) ) {
2014 0 : fd_multi_epoch_leaders_stake_msg_fini( ctx->mleaders );
2015 : /* It might seem like we do not need to do state transitions in and
2016 : out of being the leader here, since leader schedule updates are
2017 : always one epoch in advance (whether we are leader or not would
2018 : never change for the currently executing slot) but this is not
2019 : true for new ledgers when the validator first boots. We will
2020 : likely be the leader in slot 1, and get notified of the leader
2021 : schedule for that slot while we are still in it.
2022 :
2023 : For safety we just handle both transitions, in and out, although
2024 : the only one possible should be into leader. */
2025 0 : ulong next_leader_slot_after_frag = next_leader_slot( ctx );
2026 :
2027 0 : int currently_leader = ctx->slot>=ctx->next_leader_slot;
2028 0 : int leader_after_frag = ctx->slot>=next_leader_slot_after_frag;
2029 :
2030 0 : FD_LOG_INFO(( "stake_update(before_leader=%lu,after_leader=%lu)",
2031 0 : ctx->next_leader_slot,
2032 0 : next_leader_slot_after_frag ));
2033 :
2034 0 : ctx->next_leader_slot = next_leader_slot_after_frag;
2035 0 : if( FD_UNLIKELY( currently_leader && !leader_after_frag ) ) {
2036 : /* Shouldn't ever happen, otherwise we need to do a state
2037 : transition out of being leader. */
2038 0 : FD_LOG_ERR(( "stake update caused us to no longer be leader in an active slot" ));
2039 0 : }
2040 :
2041 : /* Nothing to do if we transition into being leader, since it
2042 : will just get picked up by the regular tick loop. */
2043 0 : if( FD_UNLIKELY( !currently_leader && leader_after_frag ) ) {
2044 0 : publish_plugin_slot_start( ctx, next_leader_slot_after_frag, ctx->reset_slot );
2045 0 : }
2046 :
2047 0 : return;
2048 0 : }
2049 :
2050 0 : if( FD_UNLIKELY( !ctx->microblocks_lower_bound ) ) {
2051 0 : double tick_per_ns = fd_tempo_tick_per_ns( NULL );
2052 0 : fd_histf_sample( ctx->first_microblock_delay, (ulong)((double)(fd_log_wallclock()-ctx->reset_slot_start_ns)/tick_per_ns) );
2053 0 : }
2054 :
2055 0 : ulong target_slot = fd_disco_execle_sig_slot( sig );
2056 :
2057 0 : if( FD_UNLIKELY( target_slot!=ctx->next_leader_slot || target_slot!=ctx->slot ) ) {
2058 0 : FD_LOG_ERR(( "packed too early or late target_slot=%lu, current_slot=%lu. highwater_leader_slot=%lu",
2059 0 : target_slot, ctx->slot, ctx->highwater_leader_slot ));
2060 0 : }
2061 :
2062 0 : FD_TEST( ctx->current_leader_bank );
2063 0 : FD_TEST( ctx->microblocks_lower_bound<ctx->max_microblocks_per_slot );
2064 0 : ctx->microblocks_lower_bound += 1UL;
2065 :
2066 0 : ulong txn_cnt = (sz-sizeof(fd_microblock_trailer_t))/sizeof(fd_txn_p_t);
2067 0 : fd_txn_p_t * txns = (fd_txn_p_t *)(ctx->_txns);
2068 0 : ulong executed_txn_cnt = 0UL;
2069 0 : ulong cus_used = 0UL;
2070 0 : for( ulong i=0UL; i<txn_cnt; i++ ) {
2071 : /* It's important that we check if a transaction is included in the
2072 : block with FD_TXN_P_FLAGS_EXECUTE_SUCCESS since
2073 : actual_consumed_cus may have a nonzero value for excluded
2074 : transactions used for monitoring purposes */
2075 0 : if( FD_LIKELY( txns[ i ].flags & FD_TXN_P_FLAGS_EXECUTE_SUCCESS ) ) {
2076 0 : executed_txn_cnt++;
2077 0 : cus_used += txns[ i ].execle_cu.actual_consumed_cus;
2078 0 : }
2079 0 : }
2080 :
2081 : /* We don't publish transactions that fail to execute. If all the
2082 : transactions failed to execute, the microblock would be empty,
2083 : causing agave to think it's a tick and complain. Instead, we just
2084 : skip the microblock and don't hash or update the hashcnt. */
2085 0 : if( FD_UNLIKELY( !executed_txn_cnt ) ) return;
2086 :
2087 0 : uchar data[ 64 ];
2088 0 : fd_memcpy( data, ctx->hash, 32UL );
2089 0 : fd_memcpy( data+32UL, ctx->_microblock_trailer->hash, 32UL );
2090 0 : fd_sha256_hash( data, 64UL, ctx->hash );
2091 :
2092 0 : ctx->hashcnt++;
2093 0 : FD_TEST( ctx->hashcnt>ctx->last_hashcnt );
2094 0 : ulong hashcnt_delta = ctx->hashcnt - ctx->last_hashcnt;
2095 :
2096 : /* The hashing loop above will never leave us exactly one away from
2097 : crossing a tick boundary, so this increment will never cause the
2098 : current tick (or the slot) to change, except in low power mode
2099 : for development, in which case we do need to register the tick
2100 : with the leader bank. We don't need to publish the tick since
2101 : sending the microblock below is the publishing action. */
2102 0 : if( FD_UNLIKELY( !(ctx->hashcnt%ctx->hashcnt_per_slot ) ) ) {
2103 0 : ctx->slot++;
2104 0 : ctx->hashcnt = 0UL;
2105 0 : }
2106 :
2107 0 : ctx->last_slot = ctx->slot;
2108 0 : ctx->last_hashcnt = ctx->hashcnt;
2109 :
2110 0 : ctx->cus_used += cus_used;
2111 :
2112 0 : if( FD_UNLIKELY( !(ctx->hashcnt%ctx->hashcnt_per_tick ) ) ) {
2113 0 : fd_ext_poh_register_tick( ctx->current_leader_bank, ctx->hash );
2114 0 : if( FD_UNLIKELY( ctx->slot>ctx->next_leader_slot ) ) {
2115 : /* We ticked while leader and are no longer leader... transition
2116 : the state machine. */
2117 0 : publish_plugin_slot_end( ctx, ctx->next_leader_slot, ctx->cus_used );
2118 :
2119 0 : no_longer_leader( ctx );
2120 :
2121 0 : if( FD_UNLIKELY( ctx->slot>=ctx->next_leader_slot ) ) {
2122 : /* We finished a leader slot, and are immediately leader for the
2123 : following slot... transition. */
2124 0 : publish_plugin_slot_start( ctx, ctx->next_leader_slot, ctx->next_leader_slot-1UL );
2125 0 : }
2126 0 : }
2127 0 : }
2128 :
2129 0 : publish_microblock( ctx, stem, target_slot, hashcnt_delta, txn_cnt );
2130 0 : }
2131 :
2132 : static void
2133 : privileged_init( fd_topo_t * topo,
2134 0 : fd_topo_tile_t * tile ) {
2135 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
2136 :
2137 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
2138 0 : fd_pohh_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_pohh_tile_t ), sizeof( fd_pohh_tile_t ) );
2139 :
2140 0 : if( FD_UNLIKELY( !strcmp( tile->pohh.identity_key_path, "" ) ) )
2141 0 : FD_LOG_ERR(( "identity_key_path not set" ));
2142 :
2143 0 : const uchar * identity_key = fd_keyload_load( tile->pohh.identity_key_path, /* pubkey only: */ 1 );
2144 0 : fd_memcpy( ctx->identity_key.uc, identity_key, 32UL );
2145 :
2146 0 : if( FD_UNLIKELY( !tile->pohh.bundle.vote_account_path[0] ) ) {
2147 0 : tile->pohh.bundle.enabled = 0;
2148 0 : }
2149 0 : if( FD_UNLIKELY( tile->pohh.bundle.enabled ) ) {
2150 0 : if( FD_UNLIKELY( !fd_base58_decode_32( tile->pohh.bundle.vote_account_path, ctx->bundle.vote_account.uc ) ) ) {
2151 0 : const uchar * vote_key = fd_keyload_load( tile->pohh.bundle.vote_account_path, /* pubkey only: */ 1 );
2152 0 : fd_memcpy( ctx->bundle.vote_account.uc, vote_key, 32UL );
2153 0 : }
2154 0 : }
2155 0 : }
2156 :
/* The Agave client needs to communicate to the shred tile what
   the shred version is on boot, but the shred tile does not live in
   the same address space, so have the PoH tile pass the value through
   via a shared-memory ulong. */
2161 :
2162 : static volatile ulong * fd_shred_version;
2163 :
2164 : void
2165 0 : fd_ext_shred_set_shred_version( ulong shred_version ) {
2166 0 : while( FD_UNLIKELY( !fd_shred_version ) ) FD_SPIN_PAUSE();
2167 0 : *fd_shred_version = shred_version;
2168 0 : }
2169 :
2170 : void
2171 : fd_ext_poh_publish_gossip_vote( uchar * data,
2172 : ulong data_len,
2173 : uint source_ipv4,
2174 0 : uchar * pubkey ) {
2175 0 : (void)pubkey;
2176 0 : uchar txn_with_header[ FD_TPU_RAW_MTU ];
2177 0 : fd_txn_m_t * txnm = (fd_txn_m_t *)txn_with_header;
2178 0 : *txnm = (fd_txn_m_t) { 0UL };
2179 0 : txnm->payload_sz = (ushort)data_len;
2180 0 : txnm->source_ipv4 = source_ipv4;
2181 0 : txnm->source_tpu = FD_TXN_M_TPU_SOURCE_GOSSIP;
2182 0 : fd_memcpy(txn_with_header + sizeof(fd_txn_m_t), data, data_len);
2183 0 : poh_link_publish( &gossip_dedup, 1UL, txn_with_header, fd_txn_m_realized_footprint( txnm, 0, 0 ) );
2184 0 : }
2185 :
/* fd_ext_poh_publish_leader_schedule: entry point exported to the
   Agave client.  Publishes a serialized leader schedule on the
   stake_out link with sig 2. */
void
fd_ext_poh_publish_leader_schedule( uchar * data,
                                    ulong data_len ) {
  poh_link_publish( &stake_out, 2UL, data, data_len );
}
2191 :
/* fd_ext_poh_publish_cluster_info: entry point exported to the Agave
   client.  Publishes cluster (CRDS) info on the crds_shred link with
   sig 2. */
void
fd_ext_poh_publish_cluster_info( uchar * data,
                                 ulong data_len ) {
  poh_link_publish( &crds_shred, 2UL, data, data_len );
}
2197 :
/* fd_ext_poh_publish_executed_txn: entry point exported to the Agave
   client.  Publishes a fixed 64-byte executed-transaction record on
   the executed_txn link with sig 0.  Unlike the other publishers this
   may be called concurrently, so callers are serialized with a simple
   CAS spin lock around the (non-concurrent) link publisher. */
void
fd_ext_poh_publish_executed_txn( uchar const * data ) {
  static int lock = 0;

  /* Need to lock since the link publisher is not concurrent, and replay
     happens on a thread pool. */
  for(;;) {
    if( FD_LIKELY( FD_ATOMIC_CAS( &lock, 0, 1 )==0 ) ) break;
    FD_SPIN_PAUSE();
  }

  /* Fences order the publish between lock acquire and release. */
  FD_COMPILER_MFENCE();
  poh_link_publish( &executed_txn, 0UL, data, 64UL );
  FD_COMPILER_MFENCE();

  FD_VOLATILE(lock) = 0;
}
2215 :
/* fd_ext_plugin_publish_replay_stage: entry point exported to the
   Agave client.  Forwards a replay-stage plugin message (caller
   supplies the sig) onto the replay_plugin link. */
void
fd_ext_plugin_publish_replay_stage( ulong sig,
                                    uchar * data,
                                    ulong data_len ) {
  poh_link_publish( &replay_plugin, sig, data, data_len );
}
2222 :
/* fd_ext_plugin_publish_genesis_hash: entry point exported to the
   Agave client.  Publishes the genesis hash message on the same
   replay_plugin link as replay-stage messages, distinguished by the
   caller-supplied sig. */
void
fd_ext_plugin_publish_genesis_hash( ulong sig,
                                    uchar * data,
                                    ulong data_len ) {
  poh_link_publish( &replay_plugin, sig, data, data_len );
}
2229 :
/* fd_ext_plugin_publish_start_progress: entry point exported to the
   Agave client.  Forwards startup-progress plugin messages onto the
   start_progress_plugin link. */
void
fd_ext_plugin_publish_start_progress( ulong sig,
                                      uchar * data,
                                      ulong data_len ) {
  poh_link_publish( &start_progress_plugin, sig, data, data_len );
}
2236 :
/* fd_ext_plugin_publish_vote_listener: entry point exported to the
   Agave client.  Forwards vote-listener plugin messages onto the
   vote_listener_plugin link. */
void
fd_ext_plugin_publish_vote_listener( ulong sig,
                                     uchar * data,
                                     ulong data_len ) {
  poh_link_publish( &vote_listener_plugin, sig, data, data_len );
}
2243 :
/* fd_ext_plugin_publish_validator_info: entry point exported to the
   Agave client.  Forwards validator-info (config) plugin messages
   onto the validator_info_plugin link. */
void
fd_ext_plugin_publish_validator_info( ulong sig,
                                      uchar * data,
                                      ulong data_len ) {
  poh_link_publish( &validator_info_plugin, sig, data, data_len );
}
2250 :
/* fd_ext_plugin_publish_periodic: entry point exported to the Agave
   client.  Forwards periodic gossip plugin messages onto the
   gossip_plugin link. */
void
fd_ext_plugin_publish_periodic( ulong sig,
                                uchar * data,
                                ulong data_len ) {
  poh_link_publish( &gossip_plugin, sig, data, data_len );
}
2257 :
/* fd_ext_resolv_publish_root_bank: entry point exported to the Agave
   client.  Publishes a root bank update on the replay_resolh link;
   sig 0 distinguishes it from completed-blockhash messages (sig 1)
   on the same link. */
void
fd_ext_resolv_publish_root_bank( uchar * data,
                                 ulong data_len ) {
  poh_link_publish( &replay_resolh, 0UL, data, data_len );
}
2263 :
/* fd_ext_resolv_publish_completed_blockhash: entry point exported to
   the Agave client.  Publishes a completed blockhash on the
   replay_resolh link; sig 1 distinguishes it from root-bank messages
   (sig 0) on the same link. */
void
fd_ext_resolv_publish_completed_blockhash( uchar * data,
                                           ulong data_len ) {
  poh_link_publish( &replay_resolh, 1UL, data, data_len );
}
2269 :
2270 : static inline fd_pohh_out_t
2271 : out1( fd_topo_t const * topo,
2272 : fd_topo_tile_t const * tile,
2273 0 : char const * name ) {
2274 0 : ulong idx = ULONG_MAX;
2275 :
2276 0 : for( ulong i=0UL; i<tile->out_cnt; i++ ) {
2277 0 : fd_topo_link_t const * link = &topo->links[ tile->out_link_id[ i ] ];
2278 0 : if( !strcmp( link->name, name ) ) {
2279 0 : if( FD_UNLIKELY( idx!=ULONG_MAX ) ) FD_LOG_ERR(( "tile %s:%lu had multiple output links named %s but expected one", tile->name, tile->kind_id, name ));
2280 0 : idx = i;
2281 0 : }
2282 0 : }
2283 :
2284 0 : if( FD_UNLIKELY( idx==ULONG_MAX ) ) FD_LOG_ERR(( "tile %s:%lu had no output link named %s", tile->name, tile->kind_id, name ));
2285 :
2286 0 : void * mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ idx ] ].dcache_obj_id ].wksp_id ].wksp;
2287 0 : ulong chunk0 = fd_dcache_compact_chunk0( mem, topo->links[ tile->out_link_id[ idx ] ].dcache );
2288 0 : ulong wmark = fd_dcache_compact_wmark ( mem, topo->links[ tile->out_link_id[ idx ] ].dcache, topo->links[ tile->out_link_id[ idx ] ].mtu );
2289 :
2290 0 : return (fd_pohh_out_t){ .idx = idx, .mem = mem, .chunk0 = chunk0, .wmark = wmark, .chunk = chunk0 };
2291 0 : }
2292 :
/* unprivileged_init: set up the tile's runtime state after privileges
   have been dropped: join shared objects, initialize PoH bookkeeping,
   wire up the input and output links, and perform the boot handshake
   with the Agave client, which supplies the initial PoH state
   (reset_slot etc.) before this function returns. */
static void
unprivileged_init( fd_topo_t * topo,
                   fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_pohh_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_pohh_tile_t ), sizeof( fd_pohh_tile_t ) );
  void * sha256 = FD_SCRATCH_ALLOC_APPEND( l, FD_SHA256_ALIGN, FD_SHA256_FOOTPRINT );

  /* NONNULL: evaluate x once; fatal error if it is NULL. */
#define NONNULL( x ) (__extension__({ \
      __typeof__((x)) __x = (x); \
      if( FD_UNLIKELY( !__x ) ) FD_LOG_ERR(( #x " was unexpectedly NULL" )); \
      __x; }))

  ctx->mleaders = NONNULL( fd_multi_epoch_leaders_join( fd_multi_epoch_leaders_new( ctx->mleaders_mem ) ) );
  ctx->sha256 = NONNULL( fd_sha256_join( fd_sha256_new( sha256 ) ) );
  ctx->current_leader_bank = NULL;
  ctx->pack_leader_bank = NULL;
  ctx->signal_leader_change = NULL;

  ctx->shred_seq = ULONG_MAX;
  ctx->halted_switching_key = 0;
  ctx->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->id_keyswitch_obj_id ) );
  FD_TEST( ctx->keyswitch );

  /* PoH chain bookkeeping; sentinel ULONG_MAX means "not yet known"
     until the Agave handshake below fills in real values. */
  ctx->slot = 0UL;
  ctx->hashcnt = 0UL;
  ctx->last_hashcnt = 0UL;
  ctx->highwater_leader_slot = ULONG_MAX;
  ctx->next_leader_slot = ULONG_MAX;
  ctx->reset_slot = ULONG_MAX;

  ctx->lagged_consecutive_leader_start = tile->pohh.lagged_consecutive_leader_start;
  ctx->expect_sequential_leader_slot = ULONG_MAX;

  ctx->expect_pack_idx = 0U;
  ctx->microblocks_lower_bound = 0UL;

  ctx->max_active_descendant = 0UL;

  if( FD_UNLIKELY( tile->pohh.bundle.enabled ) ) {
    ctx->bundle.enabled = 1;
    NONNULL( fd_bundle_crank_gen_init( ctx->bundle.gen, (fd_acct_addr_t const *)tile->pohh.bundle.tip_distribution_program_addr,
                                       (fd_acct_addr_t const *)tile->pohh.bundle.tip_payment_program_addr,
                                       (fd_acct_addr_t const *)ctx->bundle.vote_account.uc,
                                       (fd_acct_addr_t const *)ctx->bundle.vote_account.uc, "NAN", 0UL ) ); /* last three arguments are properly bogus */
  } else {
    ctx->bundle.enabled = 0;
  }

  /* Join the shared fseq used by fd_ext_shred_set_shred_version to
     pass the shred version from the Agave client to the shred tile. */
  ulong pohh_shred_obj_id = fd_pod_query_ulong( topo->props, "pohh_shred", ULONG_MAX );
  FD_TEST( pohh_shred_obj_id!=ULONG_MAX );

  fd_shred_version = fd_fseq_join( fd_topo_obj_laddr( topo, pohh_shred_obj_id ) );
  FD_TEST( fd_shred_version );

  /* Initialize the out-of-band links used by the fd_ext_* publishers. */
  poh_link_init( &gossip_dedup, topo, tile, out1( topo, tile, "gossip_dedup" ).idx );
  poh_link_init( &stake_out, topo, tile, out1( topo, tile, "stake_out" ).idx );
  poh_link_init( &crds_shred, topo, tile, out1( topo, tile, "crds_shred" ).idx );
  poh_link_init( &replay_resolh, topo, tile, out1( topo, tile, "replay_resol" ).idx );
  poh_link_init( &executed_txn, topo, tile, out1( topo, tile, "executed_txn" ).idx );

  if( FD_LIKELY( tile->pohh.plugins_enabled ) ) {
    poh_link_init( &replay_plugin, topo, tile, out1( topo, tile, "replay_plugi" ).idx );
    poh_link_init( &gossip_plugin, topo, tile, out1( topo, tile, "gossip_plugi" ).idx );
    poh_link_init( &start_progress_plugin, topo, tile, out1( topo, tile, "startp_plugi" ).idx );
    poh_link_init( &vote_listener_plugin, topo, tile, out1( topo, tile, "votel_plugin" ).idx );
    poh_link_init( &validator_info_plugin, topo, tile, out1( topo, tile, "valcfg_plugi" ).idx );
  } else {
    /* Mark these mcaches as "available", so the system boots, but the
       memory is not set so nothing will actually get published via
       the links. */
    FD_COMPILER_MFENCE();
    replay_plugin.mcache = (fd_frag_meta_t*)1;
    gossip_plugin.mcache = (fd_frag_meta_t*)1;
    start_progress_plugin.mcache = (fd_frag_meta_t*)1;
    vote_listener_plugin.mcache = (fd_frag_meta_t*)1;
    validator_info_plugin.mcache = (fd_frag_meta_t*)1;
    FD_COMPILER_MFENCE();
  }

  /* Boot handshake with the Agave client: expose ctx, wait for the
     client to signal via fd_poh_waiting_lock, hand control back via
     fd_poh_returned_lock, then wait for the client to finish
     initializing our state.  The order of stores/loads here is
     deliberate; do not reorder. */
  FD_LOG_INFO(( "PoH waiting to be initialized by Agave client... %lu %lu", fd_poh_waiting_lock, fd_poh_returned_lock ));
  FD_VOLATILE( fd_pohh_global_ctx ) = ctx;
  FD_COMPILER_MFENCE();
  for(;;) {
    if( FD_LIKELY( FD_VOLATILE_CONST( fd_poh_waiting_lock ) ) ) break;
    FD_SPIN_PAUSE();
  }
  FD_VOLATILE( fd_poh_waiting_lock ) = 0UL;
  FD_VOLATILE( fd_poh_returned_lock ) = 1UL;
  FD_COMPILER_MFENCE();
  for(;;) {
    if( FD_UNLIKELY( !FD_VOLATILE_CONST( fd_poh_returned_lock ) ) ) break;
    FD_SPIN_PAUSE();
  }
  FD_COMPILER_MFENCE();

  if( FD_UNLIKELY( ctx->reset_slot==ULONG_MAX ) ) FD_LOG_ERR(( "PoH was not initialized by Agave client" ));

  /* Metrics histograms. */
  fd_histf_join( fd_histf_new( ctx->begin_leader_delay, FD_MHIST_SECONDS_MIN( POHH, BEGIN_LEADER_DELAY_SECONDS ),
                               FD_MHIST_SECONDS_MAX( POHH, BEGIN_LEADER_DELAY_SECONDS ) ) );
  fd_histf_join( fd_histf_new( ctx->first_microblock_delay, FD_MHIST_SECONDS_MIN( POHH, FIRST_MICROBLOCK_DELAY_SECONDS ),
                               FD_MHIST_SECONDS_MAX( POHH, FIRST_MICROBLOCK_DELAY_SECONDS ) ) );
  fd_histf_join( fd_histf_new( ctx->slot_done_delay, FD_MHIST_SECONDS_MIN( POHH, SLOT_DONE_DELAY_SECONDS ),
                               FD_MHIST_SECONDS_MAX( POHH, SLOT_DONE_DELAY_SECONDS ) ) );

  fd_histf_join( fd_histf_new( ctx->bundle_init_delay, FD_MHIST_SECONDS_MIN( POHH, BUNDLE_INITIALIZE_DELAY_SECONDS ),
                               FD_MHIST_SECONDS_MAX( POHH, BUNDLE_INITIALIZE_DELAY_SECONDS ) ) );

  /* Classify each input link by name so before/during/after_frag can
     dispatch on ctx->in_kind. */
  for( ulong i=0UL; i<tile->in_cnt; i++ ) {
    fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
    fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];

    ctx->in[ i ].mem = link_wksp->wksp;
    ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
    ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );

    if( !strcmp( link->name, "stake_out" ) ) {
      ctx->in_kind[ i ] = IN_KIND_EPOCH;
    } else if( !strcmp( link->name, "pack_pohh" ) ) {
      ctx->in_kind[ i ] = IN_KIND_PACK;
    } else if( !strcmp( link->name, "bank_pohh" ) ) {
      ctx->in_kind[ i ] = IN_KIND_BANK;
    } else {
      FD_LOG_ERR(( "unexpected input link name %s", link->name ));
    }
  }

  *ctx->shred_out = out1( topo, tile, "pohh_shred" );
  *ctx->pack_out = out1( topo, tile, "pohh_pack" );
  ctx->plugin_out->mem = NULL;
  if( FD_LIKELY( tile->pohh.plugins_enabled ) ) {
    *ctx->plugin_out = out1( topo, tile, "pohh_plugin" );
  }

  /* Feature activation slots start disabled until updated at runtime. */
  ctx->features_activation_avail = 0UL;
  for( ulong i=0UL; i<FD_SHRED_FEATURES_ACTIVATION_SLOT_CNT; i++ )
    ctx->features_activation->slots[i] = FD_SHRED_FEATURES_ACTIVATION_SLOT_DISABLED;

  ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
  if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
    FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
}
2436 :
2437 : /* One tick, one microblock, one plugin slot end, one plugin slot start,
2438 : one leader update, and one features activation. */
2439 0 : #define STEM_BURST (6UL)
2440 :
2441 : /* See explanation in fd_pack */
2442 0 : #define STEM_LAZY (128L*3000L)
2443 :
2444 0 : #define STEM_CALLBACK_CONTEXT_TYPE fd_pohh_tile_t
2445 0 : #define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_pohh_tile_t)
2446 :
2447 0 : #define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
2448 0 : #define STEM_CALLBACK_METRICS_WRITE metrics_write
2449 0 : #define STEM_CALLBACK_AFTER_CREDIT after_credit
2450 0 : #define STEM_CALLBACK_BEFORE_FRAG before_frag
2451 0 : #define STEM_CALLBACK_DURING_FRAG during_frag
2452 0 : #define STEM_CALLBACK_AFTER_FRAG after_frag
2453 :
2454 : #include "../../disco/stem/fd_stem.c"
2455 :
/* Tile descriptor registering the PoH tile with the topology runner.
   No seccomp policy or extra allowed file descriptors are installed;
   the tile runs the generic stem loop configured above. */
fd_topo_run_tile_t fd_tile_pohh = {
  .name                     = "pohh",
  .populate_allowed_seccomp = NULL,
  .populate_allowed_fds     = NULL,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};
|