Line data Source code
1 : #include "../tiles.h"
2 :
3 : #include "generated/fd_pack_tile_seccomp.h"
4 :
5 : #include "../../util/pod/fd_pod_format.h"
6 : #include "../../discof/replay/fd_replay_tile.h" // layering violation
7 : #include "../fd_txn_m.h"
8 : #include "../keyguard/fd_keyload.h"
9 : #include "../keyguard/fd_keyswitch.h"
10 : #include "../keyguard/fd_keyguard.h"
11 : #include "../metrics/fd_metrics.h"
12 : #include "../pack/fd_pack.h"
13 : #include "../pack/fd_pack_cost.h"
14 : #include "../pack/fd_pack_pacing.h"
15 :
16 : #include <linux/unistd.h>
17 : #include <string.h>
18 :
19 : /* fd_pack is responsible for taking verified transactions, and
20 : arranging them into "microblocks" (groups) of transactions to
21 : be executed serially. It can try to do clever things so that
22 : multiple microblocks can execute in parallel, if they don't
23 : write to the same accounts. */
24 :
25 6504 : #define IN_KIND_RESOLV (0UL)
26 147 : #define IN_KIND_POH (1UL)
27 138 : #define IN_KIND_BANK (2UL)
28 30 : #define IN_KIND_SIGN (3UL)
29 0 : #define IN_KIND_REPLAY (4UL)
30 30 : #define IN_KIND_EXECUTED_TXN (5UL)
31 :
32 : /* Pace microblocks, but only slightly. This helps keep performance
33 : more stable. This limit is 2,000 microblocks/second/bank. At 31
34 : transactions/microblock, that's 62k txn/sec/bank. */
35 30 : #define MICROBLOCK_DURATION_NS (0L)
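 : /* Note: with MICROBLOCK_DURATION_NS at 0, the bank_ready_at pacing
 :    check in after_credit short-circuits, so microblocks are not paced
 :    per bank.  The 2,000 microblocks/second/bank figure above
 :    corresponds to a 500,000 ns duration (1e9/500,000 = 2,000, and
 :    2,000*31 = 62k txn/sec). */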
36 :
37 : /* There are 151 accepted blockhashes, but those don't include skips.
38 : This check is neither precise nor accurate, but just good enough.
39 : The bank tile does the final check. We give a little margin for a
40 : few percent skip rate. */
41 117 : #define TRANSACTION_LIFETIME_SLOTS 160UL
42 :
43 : /* Time is normally a long, but pack expects a ulong. Add -LONG_MIN to
44 : the time values so that LONG_MIN maps to 0, LONG_MAX maps to
45 : ULONG_MAX, and everything in between maps linearly with a slope of 1.
46 : Just subtracting LONG_MIN results in signed integer overflow, which
47 : is U.B. */
48 : #define TIME_OFFSET 0x8000000000000000UL
49 : FD_STATIC_ASSERT( (ulong)LONG_MIN+TIME_OFFSET==0UL, time_offset );
50 : FD_STATIC_ASSERT( (ulong)LONG_MAX+TIME_OFFSET==ULONG_MAX, time_offset );
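 : /* For example, a time value t (a long) is mapped to the ulong
 :    (ulong)t + TIME_OFFSET, computed in unsigned arithmetic so the
 :    wraparound is well defined: t==0 lands at 2^63, and negative
 :    times land below it. */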
51 :
52 : /* Optionally allow a larger limit for benchmarking */
53 0 : #define LARGER_MAX_COST_PER_BLOCK (18UL*FD_PACK_MAX_COST_PER_BLOCK_LOWER_BOUND)
54 :
55 : /* 1.6 M cost units, enough for 1 max size transaction */
56 : const ulong CUS_PER_MICROBLOCK = 1600000UL;
57 :
58 : #define SMALL_MICROBLOCKS 1
59 :
60 : #if SMALL_MICROBLOCKS
61 : const float VOTE_FRACTION = 1.0f; /* schedule all available votes first */
62 93 : #define EFFECTIVE_TXN_PER_MICROBLOCK 1UL
63 : #else
64 : const float VOTE_FRACTION = 0.75f; /* TODO: Is this the right value? */
65 : #define EFFECTIVE_TXN_PER_MICROBLOCK MAX_TXN_PER_MICROBLOCK
66 : #endif
67 :
68 : /* There's overhead associated with each microblock the bank tile tries
69 : to execute, so the optimal strategy is not to produce a microblock
70 : with a single transaction as soon as we receive it. Basically, if we
71 : have less than 31 transactions, we want to wait a little to see if we
72 : receive additional transactions before we schedule a microblock. We
73 : can model the optimum amount of time to wait, but the equation is
74 : complicated enough that we want to compute it before compile time.
75 : wait_duration[i] for i in [0, 31] gives the time in nanoseconds pack
76 : should wait after receiving its most recent transaction before
77 : scheduling if it has i transactions available. Unsurprisingly,
78 : wait_duration[31] is 0. wait_duration[0] is ULONG_MAX, so we'll
79 : always wait if we have 0 transactions. */
80 : FD_IMPORT( wait_duration, "src/disco/pack/pack_delay.bin", ulong, 6, "" );
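 : /* wait_duration holds the 32 precomputed values described above
 :    (indices 0 through MAX_TXN_PER_MICROBLOCK).  The values are in
 :    nanoseconds; the tick-scaled copies live in wait_duration_ticks
 :    in fd_pack_ctx_t below. */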
81 :
82 :
83 :
84 : #if FD_PACK_USE_EXTRA_STORAGE
85 : /* When we are done being leader for a slot and we are leader in the
86 : very next slot, it can still take some time to transition. This is
87 : because the bank has to be finalized, a hash calculated, and various
88 : other things done in the replay stage to create the new child bank.
89 :
90 : During that time, pack cannot send transactions to banks so it needs
91 : to be able to buffer. Typically, these so-called "leader
92 : transitions" are short (<15 millis), so a low value here would
93 : suffice. However, in some cases when there is memory pressure on the
94 : NUMA node or when the operating system context switches relevant
95 : threads out, it can take significantly longer.
96 :
97 : To prevent drops in these cases and because we assume banks are fast
98 : enough to drain this buffer once we do become leader, we set this
99 : buffer size to be quite large. */
100 :
101 : #define DEQUE_NAME extra_txn_deq
102 : #define DEQUE_T fd_txn_e_t
103 : #define DEQUE_MAX (128UL*1024UL)
104 : #include "../../util/tmpl/fd_deque.c"
105 :
106 : #endif
107 :
108 : /* Sync with src/app/shared/fd_config.c */
109 1320 : #define FD_PACK_STRATEGY_PERF 0
110 0 : #define FD_PACK_STRATEGY_BALANCED 1
111 0 : #define FD_PACK_STRATEGY_BUNDLE 2
112 :
113 : static char const * const schedule_strategy_strings[3] = { "PRF", "BAL", "BUN" };
114 :
115 :
116 : typedef struct {
117 : fd_acct_addr_t commission_pubkey[1];
118 : ulong commission;
119 : } block_builder_info_t;
120 :
121 : typedef struct {
122 : fd_wksp_t * mem;
123 : ulong chunk0;
124 : ulong wmark;
125 : } fd_pack_in_ctx_t;
126 :
127 : typedef struct {
128 : fd_pack_t * pack;
129 : fd_txn_e_t * cur_spot;
130 : int is_bundle; /* is the current transaction a bundle */
131 :
132 : uchar executed_txn_sig[ 64UL ];
133 :
134 : /* One of the FD_PACK_STRATEGY_* values defined above */
135 : int strategy;
136 :
137 : /* The value passed to fd_pack_new, etc. */
138 : ulong max_pending_transactions;
139 :
140 : /* The leader slot we are currently packing for, or ULONG_MAX if we
141 : are not the leader. */
142 : ulong leader_slot;
143 : void const * leader_bank;
144 : ulong leader_bank_idx;
145 :
146 : fd_became_leader_t _became_leader[1];
147 :
148 : /* The number of microblocks we have packed for the current leader
149 : slot. Will always be <= slot_max_microblocks. We must track
150 : this so that when we are done we can tell the PoH tile how many
151 : microblocks to expect in the slot. */
152 : ulong slot_microblock_cnt;
153 :
154 : /* Counter which increments when we've finished packing for a slot */
155 : uint pack_idx;
156 :
157 : ulong pack_txn_cnt; /* total num transactions packed since startup */
158 :
159 : /* The maximum number of microblocks that can be packed in this slot.
160 : Provided by the PoH tile when we become leader.*/
161 : ulong slot_max_microblocks;
162 :
163 : /* Cap (in bytes) of the amount of transaction data we produce in each
164 : block to avoid hitting the shred limits. See where this is set for
165 : more explanation. */
166 : ulong slot_max_data;
167 : int larger_shred_limits_per_block;
168 :
169 : /* Consensus critical slot cost limits. */
170 : struct {
171 : ulong slot_max_cost;
172 : ulong slot_max_vote_cost;
173 : ulong slot_max_write_cost_per_acct;
174 : } limits;
175 :
176 : /* If drain_banks is non-zero, then the pack tile must wait until all
177 : banks are idle before scheduling any more microblocks. This is
178 : primarily helpful in irregular leader transitions, e.g. while being
179 : leader for slot N, we switch forks to a slot M (!=N+1) in which we
180 : are also leader. We don't want to execute microblocks for
181 : different slots concurrently. */
182 : int drain_banks;
183 :
184 : /* Updated during housekeeping and used only for checking if the
185 : leader slot has ended. Might be off by one housekeeping duration,
186 : but that should be small relative to a slot duration. */
187 : long approx_wallclock_ns;
188 :
189 : /* approx_tickcount is updated in during_housekeeping() with
190 : fd_tickcount() and will match approx_wallclock_ns. This is done
191 : because we need to include an accurate nanosecond timestamp in
192 : every fd_txn_p_t but don't want to have to call the expensive
193 : fd_log_wallclock() in the critical path. We can use
194 : fd_tempo_tick_per_ns() to convert from ticks to nanoseconds over
195 : small periods of time. */
196 : long approx_tickcount;
197 :
198 : fd_rng_t * rng;
199 :
200 : /* The end wallclock time of the leader slot we are currently packing
201 : for, if we are currently packing for a slot.*/
202 : long slot_end_ns;
203 :
204 : /* pacer and ticks_per_ns are used for pacing CUs through the slot,
205 : i.e. deciding when to schedule a microblock given the number of CUs
206 : that have been consumed so far. pacer is an opaque pacing object,
207 : which is initialized when the pack tile is packing a slot.
208 : ticks_per_ns is the cached value from tempo. */
209 : fd_pack_pacing_t pacer[1];
210 : double ticks_per_ns;
211 :
212 : /* last_successful_insert stores the tickcount of the last
213 : successful transaction insert. */
214 : long last_successful_insert;
215 :
216 : /* highest_observed_slot stores the highest slot number we've seen
217 : from any transaction coming from the resolv tile. When this
218 : increases, we expire old transactions. */
219 : ulong highest_observed_slot;
220 :
221 : /* microblock_duration_ns and wait_duration,
222 : respectively, scaled to be in ticks instead of nanoseconds */
223 : ulong microblock_duration_ticks;
224 : ulong wait_duration_ticks[ MAX_TXN_PER_MICROBLOCK+1UL ];
225 :
226 : #if FD_PACK_USE_EXTRA_STORAGE
227 : /* In addition to the available transactions that pack knows about, we
228 : also store a larger ring buffer for handling cases when pack is
229 : full. This is an fd_deque. */
230 : fd_txn_e_t * extra_txn_deq;
231 : int insert_to_extra; /* whether the last insert was into pack or the extra deq */
232 : #endif
233 :
234 : fd_pack_in_ctx_t in[ 32 ];
235 : int in_kind[ 32 ];
236 :
237 : ulong bank_cnt;
238 : ulong bank_idle_bitset; /* bit i is 1 if we've observed *bank_current[i]==bank_expect[i] */
239 : int poll_cursor; /* in [0, bank_cnt), the next bank to poll */
240 : int use_consumed_cus;
241 : long skip_cnt;
242 : ulong * bank_current[ FD_PACK_MAX_BANK_TILES ];
243 : ulong bank_expect[ FD_PACK_MAX_BANK_TILES ];
244 : /* bank_ready_at[x] means don't check bank x until tickcount is at
245 : least bank_ready_at[x]. */
246 : long bank_ready_at[ FD_PACK_MAX_BANK_TILES ];
247 :
248 : fd_wksp_t * bank_out_mem;
249 : ulong bank_out_chunk0;
250 : ulong bank_out_wmark;
251 : ulong bank_out_chunk;
252 :
253 : fd_wksp_t * poh_out_mem;
254 : ulong poh_out_chunk0;
255 : ulong poh_out_wmark;
256 : ulong poh_out_chunk;
257 :
258 : ulong insert_result[ FD_PACK_INSERT_RETVAL_CNT ];
259 : fd_histf_t schedule_duration[ 1 ];
260 : fd_histf_t no_sched_duration[ 1 ];
261 : fd_histf_t insert_duration [ 1 ];
262 : fd_histf_t complete_duration[ 1 ];
263 :
264 : struct {
265 : uint metric_state;
266 : long metric_state_begin;
267 : long metric_timing[ 16 ];
268 : };
269 :
270 : struct {
271 : long time;
272 : ulong all[ FD_METRICS_TOTAL_SZ ];
273 : } last_sched_metrics[1];
274 :
275 : struct {
276 : ulong id;
277 : ulong txn_cnt;
278 : ulong txn_received;
279 : ulong min_blockhash_slot;
280 : fd_txn_e_t * _txn[ FD_PACK_MAX_TXN_PER_BUNDLE ];
281 : fd_txn_e_t * const * bundle; /* points to _txn when non-NULL */
282 : } current_bundle[1];
283 :
284 : block_builder_info_t blk_engine_cfg[1];
285 :
286 : struct {
287 : int enabled;
288 : int ib_inserted; /* in this slot */
289 : fd_acct_addr_t vote_pubkey[1];
290 : fd_acct_addr_t identity_pubkey[1];
291 : fd_bundle_crank_gen_t gen[1];
292 : fd_acct_addr_t tip_receiver_owner[1];
293 : ulong epoch;
294 : fd_bundle_crank_tip_payment_config_t prev_config[1]; /* as of start of slot, then updated */
295 : uchar recent_blockhash[32];
296 : fd_ed25519_sig_t last_sig[1];
297 :
298 : fd_keyswitch_t * keyswitch;
299 : fd_keyguard_client_t keyguard_client[1];
300 :
301 : ulong metrics[4];
302 : } crank[1];
303 :
304 :
305 : /* Used between during_frag and after_frag */
306 : ulong pending_rebate_sz;
307 : union{ fd_pack_rebate_t rebate[1]; uchar footprint[USHORT_MAX]; } rebate[1];
308 : } fd_pack_ctx_t;
309 :
310 60 : #define BUNDLE_META_SZ 40UL
311 : FD_STATIC_ASSERT( sizeof(block_builder_info_t)==BUNDLE_META_SZ, blk_engine_cfg );
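 : /* i.e. the per-bundle metadata blob that pack is configured with in
 :    fd_pack_new below must be exactly the size of block_builder_info_t,
 :    since blk_engine_cfg is what gets handed to
 :    fd_pack_insert_bundle_fini. */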
312 :
313 4659 : #define FD_PACK_METRIC_STATE_TRANSACTIONS 0
314 1332 : #define FD_PACK_METRIC_STATE_BANKS 1
315 51 : #define FD_PACK_METRIC_STATE_LEADER 2
316 1332 : #define FD_PACK_METRIC_STATE_MICROBLOCKS 3
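 : /* The metric state is a bitmask with one bit per
 :    FD_PACK_METRIC_STATE_* value above, giving 2^4 = 16 possible
 :    states; metric_timing[ 16 ] accumulates the time spent in each. */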
317 :
318 : /* Updates one component of the metric state. If the state has changed,
319 : records the change. */
320 : static inline void
321 : update_metric_state( fd_pack_ctx_t * ctx,
322 : long effective_as_of,
323 : int type,
324 7374 : int status ) {
325 7374 : uint current_state = fd_uint_insert_bit( ctx->metric_state, type, status );
326 7374 : if( FD_UNLIKELY( current_state!=ctx->metric_state ) ) {
327 192 : ctx->metric_timing[ ctx->metric_state ] += effective_as_of - ctx->metric_state_begin;
328 192 : ctx->metric_state_begin = effective_as_of;
329 192 : ctx->metric_state = current_state;
330 192 : }
331 7374 : }
332 :
333 : static inline void
334 12 : remove_ib( fd_pack_ctx_t * ctx ) {
335 : /* It's likely the initializer bundle has long since been scheduled, but we want to
336 : try deleting it just in case. */
337 12 : if( FD_UNLIKELY( ctx->crank->enabled & ctx->crank->ib_inserted ) ) {
338 0 : ulong deleted = fd_pack_delete_transaction( ctx->pack, (fd_ed25519_sig_t const *)ctx->crank->last_sig );
339 0 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
340 0 : }
341 12 : ctx->crank->ib_inserted = 0;
342 12 : }
343 :
344 :
345 : FD_FN_CONST static inline ulong
346 69 : scratch_align( void ) {
347 69 : return 4096UL;
348 69 : }
349 :
350 : FD_FN_PURE static inline ulong
351 33 : scratch_footprint( fd_topo_tile_t const * tile ) {
352 33 : fd_pack_limits_t limits[1] = {{
353 33 : .max_cost_per_block = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK_UPPER_BOUND,
354 33 : .max_vote_cost_per_block = FD_PACK_MAX_VOTE_COST_PER_BLOCK_UPPER_BOUND,
355 33 : .max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT_UPPER_BOUND,
356 33 : .max_data_bytes_per_block = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
357 33 : .max_txn_per_microblock = EFFECTIVE_TXN_PER_MICROBLOCK,
358 33 : .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
359 33 : }};
360 :
361 33 : ulong l = FD_LAYOUT_INIT;
362 33 : l = FD_LAYOUT_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
363 33 : l = FD_LAYOUT_APPEND( l, fd_rng_align(), fd_rng_footprint() );
364 33 : l = FD_LAYOUT_APPEND( l, fd_pack_align(), fd_pack_footprint( tile->pack.max_pending_transactions,
365 33 : BUNDLE_META_SZ,
366 33 : tile->pack.bank_tile_count,
367 33 : limits ) );
368 : #if FD_PACK_USE_EXTRA_STORAGE
369 : l = FD_LAYOUT_APPEND( l, extra_txn_deq_align(), extra_txn_deq_footprint() );
370 : #endif
371 33 : return FD_LAYOUT_FINI( l, scratch_align() );
372 33 : }
373 :
374 : static inline void
375 : log_end_block_metrics( fd_pack_ctx_t * ctx,
376 : long now,
377 12 : char const * reason ) {
378 12 : #define DELTA( m ) (fd_metrics_tl[ MIDX(COUNTER, PACK, TRANSACTION_SCHEDULE_##m) ] - ctx->last_sched_metrics->all[ MIDX(COUNTER, PACK, TRANSACTION_SCHEDULE_##m) ])
379 12 : #define AVAIL( m ) (fd_metrics_tl[ MIDX(GAUGE, PACK, AVAILABLE_TRANSACTIONS_##m) ])
380 12 : FD_LOG_INFO(( "pack_end_block(slot=%lu,%s,%lx,ticks_since_last_schedule=%ld,reasons=%lu,%lu,%lu,%lu,%lu,%lu,%lu;remaining=%lu+%lu+%lu+%lu;smallest=%lu;cus=%lu->%lu)",
381 12 : ctx->leader_slot, reason, ctx->bank_idle_bitset, now-ctx->last_sched_metrics->time,
382 12 : DELTA( TAKEN ), DELTA( CU_LIMIT ), DELTA( FAST_PATH ), DELTA( BYTE_LIMIT ), DELTA( WRITE_COST ), DELTA( SLOW_PATH ), DELTA( DEFER_SKIP ),
383 12 : AVAIL(REGULAR), AVAIL(VOTES), AVAIL(BUNDLES), AVAIL(CONFLICTING),
384 12 : (fd_metrics_tl[ MIDX(GAUGE, PACK, SMALLEST_PENDING_TRANSACTION) ]),
385 12 : (ctx->last_sched_metrics->all[ MIDX(GAUGE, PACK, CUS_CONSUMED_IN_BLOCK) ]),
386 12 : (fd_metrics_tl [ MIDX(GAUGE, PACK, CUS_CONSUMED_IN_BLOCK) ])
387 12 : ));
388 12 : #undef AVAIL
389 12 : #undef DELTA
390 12 : }
391 :
392 : static inline void
393 0 : metrics_write( fd_pack_ctx_t * ctx ) {
394 0 : FD_MCNT_ENUM_COPY( PACK, TRANSACTION_INSERTED, ctx->insert_result );
395 0 : FD_MCNT_ENUM_COPY( PACK, METRIC_TIMING, ((ulong*)ctx->metric_timing) );
396 0 : FD_MCNT_ENUM_COPY( PACK, BUNDLE_CRANK_STATUS, ctx->crank->metrics );
397 0 : FD_MHIST_COPY( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS, ctx->schedule_duration );
398 0 : FD_MHIST_COPY( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS, ctx->no_sched_duration );
399 0 : FD_MHIST_COPY( PACK, INSERT_TRANSACTION_DURATION_SECONDS, ctx->insert_duration );
400 0 : FD_MHIST_COPY( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS, ctx->complete_duration );
401 :
402 0 : fd_pack_metrics_write( ctx->pack );
403 0 : }
404 :
405 : static inline void
406 6360 : during_housekeeping( fd_pack_ctx_t * ctx ) {
407 6360 : ctx->approx_wallclock_ns = fd_log_wallclock();
408 6360 : ctx->approx_tickcount = fd_tickcount();
409 :
410 6360 : if( FD_UNLIKELY( ctx->crank->enabled && fd_keyswitch_state_query( ctx->crank->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
411 0 : fd_memcpy( ctx->crank->identity_pubkey, ctx->crank->keyswitch->bytes, 32UL );
412 0 : fd_keyswitch_state( ctx->crank->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
413 0 : }
414 6360 : }
415 :
416 : static inline void
417 : before_credit( fd_pack_ctx_t * ctx,
418 : fd_stem_context_t * stem,
419 7308 : int * charge_busy ) {
420 7308 : (void)stem;
421 :
422 7308 : if( FD_UNLIKELY( (ctx->cur_spot!=NULL) & !ctx->is_bundle ) ) {
423 6 : *charge_busy = 1;
424 :
425 : /* If we were overrun while processing a frag from an in, then
426 : cur_spot is left dangling and not cleaned up, so clean it up here
427 : (by returning the slot to the pool of free slots). If the last
428 : transaction was a bundle, then we don't want to return it. When
429 : we try to process the first transaction in the next bundle, we'll
430 : see we never got the full bundle and cancel the whole last
431 : bundle, returning all the storage to the pool. */
432 : #if FD_PACK_USE_EXTRA_STORAGE
433 : if( FD_LIKELY( !ctx->insert_to_extra ) ) fd_pack_insert_txn_cancel( ctx->pack, ctx->cur_spot );
434 : else extra_txn_deq_remove_tail( ctx->extra_txn_deq );
435 : #else
436 6 : fd_pack_insert_txn_cancel( ctx->pack, ctx->cur_spot );
437 6 : #endif
438 6 : ctx->cur_spot = NULL;
439 6 : }
440 7308 : }
441 :
442 : #if FD_PACK_USE_EXTRA_STORAGE
443 : /* insert_from_extra: helper method to pop the transaction at the head
444 : off the extra txn deque and insert it into pack. Requires that
445 : ctx->extra_txn_deq is non-empty, but it's okay to call it if pack is
446 : full. Returns the result of fd_pack_insert_txn_fini. */
447 : static inline int
448 : insert_from_extra( fd_pack_ctx_t * ctx ) {
449 : fd_txn_e_t * spot = fd_pack_insert_txn_init( ctx->pack );
450 : fd_txn_e_t const * insert = extra_txn_deq_peek_head( ctx->extra_txn_deq );
451 : fd_txn_t const * insert_txn = TXN(insert->txnp);
452 : fd_memcpy( spot->txnp->payload, insert->txnp->payload, insert->txnp->payload_sz );
453 : fd_memcpy( TXN(spot->txnp), insert_txn, fd_txn_footprint( insert_txn->instr_cnt, insert_txn->addr_table_lookup_cnt ) );
454 : fd_memcpy( spot->alt_accts, insert->alt_accts, insert_txn->addr_table_adtl_cnt*sizeof(fd_acct_addr_t) );
455 : spot->txnp->payload_sz = insert->txnp->payload_sz;
456 : spot->txnp->source_tpu = insert->txnp->source_tpu;
457 : spot->txnp->source_ipv4 = insert->txnp->source_ipv4;
458 : spot->txnp->scheduler_arrival_time_nanos = insert->txnp->scheduler_arrival_time_nanos;
459 : extra_txn_deq_remove_head( ctx->extra_txn_deq );
460 :
461 : ulong blockhash_slot = insert->txnp->blockhash_slot;
462 :
463 : ulong deleted;
464 : long insert_duration = -fd_tickcount();
465 : int result = fd_pack_insert_txn_fini( ctx->pack, spot, blockhash_slot, &deleted );
466 : insert_duration += fd_tickcount();
467 :
468 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
469 : ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ]++;
470 : fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
471 : FD_MCNT_INC( PACK, TRANSACTION_INSERTED_FROM_EXTRA, 1UL );
472 : return result;
473 : }
474 : #endif
475 :
476 : static inline void
477 : after_credit( fd_pack_ctx_t * ctx,
478 : fd_stem_context_t * stem,
479 : int * opt_poll_in,
480 7308 : int * charge_busy ) {
481 7308 : (void)opt_poll_in;
482 :
483 7308 : if( FD_UNLIKELY( (ctx->skip_cnt--)>0L ) ) return; /* It would take ages for this to hit LONG_MIN */
484 :
485 4674 : long now = fd_tickcount();
486 :
487 4674 : int pacing_bank_cnt = (int)fd_pack_pacing_enabled_bank_cnt( ctx->pacer, now );
488 :
489 4674 : ulong bank_cnt = ctx->bank_cnt;
490 :
491 :
492 : /* If any banks are busy, check one of the busy ones to see if it is
493 : still busy. */
494 4674 : if( FD_LIKELY( ctx->bank_idle_bitset!=fd_ulong_mask_lsb( (int)bank_cnt ) ) ) {
495 1305 : int poll_cursor = ctx->poll_cursor;
496 1305 : ulong busy_bitset = (~ctx->bank_idle_bitset) & fd_ulong_mask_lsb( (int)bank_cnt );
497 :
498 : /* Suppose bank_cnt is 4 and idle_bitset looks something like this
499 : (pretending it's a uchar):
500 : 0000 1001
501 : ^ busy cursor is 1
502 : Then busy_bitset is
503 : 0000 0110
504 : Rotate it right by 2 bits
505 : 1000 0001
506 : Find lsb returns 0, so busy cursor remains 2, and we poll bank 2.
507 :
508 : If instead idle_bitset were
509 : 0000 1110
510 : ^
511 : The rotated version would be
512 : 0100 0000
513 : Find lsb will return 6, so busy cursor would be set to 0, and
514 : we'd poll bank 0, which is the right one. */
515 1305 : poll_cursor++;
516 1305 : poll_cursor = (poll_cursor + fd_ulong_find_lsb( fd_ulong_rotate_right( busy_bitset, (poll_cursor&63) ) )) & 63;
517 :
518 1305 : if( FD_UNLIKELY(
519 : /* if microblock duration is 0, bypass the bank_ready_at check
520 : to avoid a potential cache miss. Can't use an ifdef here
521 : because FD_UNLIKELY is a macro, but the compiler should
522 : eliminate the check easily. */
523 1305 : ( (MICROBLOCK_DURATION_NS==0L) || (ctx->bank_ready_at[poll_cursor]<now) ) &&
524 1305 : (fd_fseq_query( ctx->bank_current[poll_cursor] )==ctx->bank_expect[poll_cursor]) ) ) {
525 1305 : *charge_busy = 1;
526 1305 : ctx->bank_idle_bitset |= 1UL<<poll_cursor;
527 :
528 1305 : long complete_duration = -fd_tickcount();
529 1305 : int completed = fd_pack_microblock_complete( ctx->pack, (ulong)poll_cursor );
530 1305 : complete_duration += fd_tickcount();
531 1305 : if( FD_LIKELY( completed ) ) fd_histf_sample( ctx->complete_duration, (ulong)complete_duration );
532 1305 : }
533 :
534 1305 : ctx->poll_cursor = poll_cursor;
535 1305 : }
536 :
537 :
538 : /* If we time out on our slot, then stop being leader. This can only
539 : happen in the first after_credit after a housekeeping. */
540 4674 : if( FD_UNLIKELY( ctx->approx_wallclock_ns>=ctx->slot_end_ns && ctx->leader_slot!=ULONG_MAX ) ) {
541 12 : *charge_busy = 1;
542 :
543 12 : fd_done_packing_t * done_packing = fd_chunk_to_laddr( ctx->poh_out_mem, ctx->poh_out_chunk );
544 12 : done_packing->microblocks_in_slot = ctx->slot_microblock_cnt;
545 :
546 12 : fd_stem_publish( stem, 1UL, fd_disco_bank_sig( ctx->leader_slot, ctx->pack_idx ), ctx->poh_out_chunk, sizeof(fd_done_packing_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
547 12 : ctx->poh_out_chunk = fd_dcache_compact_next( ctx->poh_out_chunk, sizeof(fd_done_packing_t), ctx->poh_out_chunk0, ctx->poh_out_wmark );
548 12 : ctx->pack_idx++;
549 :
550 12 : log_end_block_metrics( ctx, now, "time" );
551 12 : ctx->drain_banks = 1;
552 12 : ctx->leader_slot = ULONG_MAX;
553 12 : ctx->slot_microblock_cnt = 0UL;
554 12 : fd_pack_end_block( ctx->pack );
555 12 : remove_ib( ctx );
556 :
557 12 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_LEADER, 0 );
558 12 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_BANKS, 0 );
559 12 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, 0 );
560 12 : return;
561 12 : }
562 :
563 : /* Am I leader? If not, see about inserting at most one transaction
564 : from extra storage. It's important not to insert too many
565 : transactions here, or we won't end up servicing dedup_pack enough.
566 : If extra storage is empty or pack is full, do nothing. */
567 4662 : if( FD_UNLIKELY( ctx->leader_slot==ULONG_MAX ) ) {
568 : #if FD_PACK_USE_EXTRA_STORAGE
569 : if( FD_UNLIKELY( !extra_txn_deq_empty( ctx->extra_txn_deq ) &&
570 : fd_pack_avail_txn_cnt( ctx->pack )<ctx->max_pending_transactions ) ) {
571 : *charge_busy = 1;
572 :
573 : int result = insert_from_extra( ctx );
574 : if( FD_LIKELY( result>=0 ) ) ctx->last_successful_insert = now;
575 : }
576 : #endif
577 3282 : return;
578 3282 : }
579 :
580 : /* Am I in drain mode? If so, check if I can exit it */
581 1380 : if( FD_UNLIKELY( ctx->drain_banks ) ) {
582 12 : if( FD_LIKELY( ctx->bank_idle_bitset==fd_ulong_mask_lsb( (int)bank_cnt ) ) ) {
583 12 : ctx->drain_banks = 0;
584 :
585 : /* Pack notifies poh when banks are drained so that poh can
586 : relinquish pack's ownership over the slot bank (by decrementing
587 : its Arc). We do this by sending a ULONG_MAX sig over the
588 : pack_poh mcache.
589 :
590 : TODO: This is only needed for Frankendancer, not Firedancer,
591 : which manages bank lifetimes differently. */
592 12 : fd_stem_publish( stem, 1UL, ULONG_MAX, 0UL, 0UL, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
593 12 : } else {
594 0 : return;
595 0 : }
596 12 : }
597 :
598 : /* Have I sent the max allowed microblocks? Nothing to do. */
599 1380 : if( FD_UNLIKELY( ctx->slot_microblock_cnt>=ctx->slot_max_microblocks ) ) return;
600 :
601 : /* Do I have enough transactions and/or have I waited enough time? */
602 1380 : if( FD_UNLIKELY( (ulong)(now-ctx->last_successful_insert) <
603 1380 : ctx->wait_duration_ticks[ fd_ulong_min( fd_pack_avail_txn_cnt( ctx->pack ), MAX_TXN_PER_MICROBLOCK ) ] ) ) {
604 60 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, 0 );
605 60 : return;
606 60 : }
607 :
608 1320 : int any_ready = 0;
609 1320 : int any_scheduled = 0;
610 :
611 1320 : *charge_busy = 1;
612 :
613 1320 : if( FD_LIKELY( ctx->crank->enabled ) ) {
614 1320 : block_builder_info_t const * top_meta = fd_pack_peek_bundle_meta( ctx->pack );
615 1320 : if( FD_UNLIKELY( top_meta ) ) {
616 : /* Have bundles, in a reasonable state to crank. */
617 :
618 15 : fd_txn_e_t * _bundle[ 1UL ];
619 15 : fd_txn_e_t * const * bundle = fd_pack_insert_bundle_init( ctx->pack, _bundle, 1UL );
620 :
621 15 : ulong txn_sz = fd_bundle_crank_generate( ctx->crank->gen, ctx->crank->prev_config, top_meta->commission_pubkey,
622 15 : ctx->crank->identity_pubkey, ctx->crank->tip_receiver_owner, ctx->crank->epoch, top_meta->commission,
623 15 : bundle[0]->txnp->payload, TXN( bundle[0]->txnp ) );
624 :
625 15 : if( FD_LIKELY( txn_sz==0UL ) ) { /* Everything in good shape! */
626 6 : fd_pack_insert_bundle_cancel( ctx->pack, bundle, 1UL );
627 6 : fd_pack_set_initializer_bundles_ready( ctx->pack );
628 6 : ctx->crank->metrics[ 0 ]++; /* BUNDLE_CRANK_STATUS_NOT_NEEDED */
629 6 : }
630 9 : else if( FD_LIKELY( txn_sz<ULONG_MAX ) ) {
631 9 : bundle[0]->txnp->payload_sz = (ushort)txn_sz;
632 9 : bundle[0]->txnp->source_tpu = FD_TXN_M_TPU_SOURCE_BUNDLE;
633 9 : bundle[0]->txnp->source_ipv4 = 0; /* not applicable */
634 9 : bundle[0]->txnp->scheduler_arrival_time_nanos = ctx->approx_wallclock_ns + (long)((double)(fd_tickcount() - ctx->approx_tickcount) / ctx->ticks_per_ns);
635 9 : memcpy( bundle[0]->txnp->payload+TXN(bundle[0]->txnp)->recent_blockhash_off, ctx->crank->recent_blockhash, 32UL );
636 :
637 9 : fd_keyguard_client_sign( ctx->crank->keyguard_client, bundle[0]->txnp->payload+1UL,
638 9 : bundle[0]->txnp->payload+65UL, txn_sz-65UL, FD_KEYGUARD_SIGN_TYPE_ED25519 );
639 :
640 9 : memcpy( ctx->crank->last_sig, bundle[0]->txnp->payload+1UL, 64UL );
641 :
642 9 : ctx->crank->ib_inserted = 1;
643 9 : ulong deleted;
644 9 : int retval = fd_pack_insert_bundle_fini( ctx->pack, bundle, 1UL, ctx->leader_slot-1UL, 1, NULL, &deleted );
645 9 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
646 9 : ctx->insert_result[ retval + FD_PACK_INSERT_RETVAL_OFF ]++;
647 9 : if( FD_UNLIKELY( retval<0 ) ) {
648 0 : ctx->crank->metrics[ 3 ]++; /* BUNDLE_CRANK_STATUS_INSERTION_FAILED */
649 0 : FD_LOG_WARNING(( "inserting initializer bundle returned %i", retval ));
650 9 : } else {
651 : /* Update the cached copy of the on-chain state. This seems a
652 : little dangerous, since we're updating it as if the bundle
653 : succeeded without knowing if that's true, but here's why
654 : it's safe:
655 :
656 : From now until we get the rebate call for this initializer
657 : bundle (which lets us know if it succeeded or failed), pack
658 : will be in [Pending] state, which means peek_bundle_meta
659 : will return NULL, so we won't read this state.
660 :
661 : Then, if the initializer bundle failed, we'll go into
662 : [Failed] IB state until the end of the block, which will
663 : cause top_meta to remain NULL so we don't read these values
664 : again.
665 :
666 : Otherwise, the initializer bundle succeeded, which means
667 : that these are the right values to use. */
668 9 : fd_bundle_crank_apply( ctx->crank->gen, ctx->crank->prev_config, top_meta->commission_pubkey,
669 9 : ctx->crank->tip_receiver_owner, ctx->crank->epoch, top_meta->commission );
670 9 : ctx->crank->metrics[ 1 ]++; /* BUNDLE_CRANK_STATUS_INSERTED */
671 9 : }
672 9 : } else {
673 : /* Already logged a warning in this case */
674 0 : fd_pack_insert_bundle_cancel( ctx->pack, bundle, 1UL );
675 0 : ctx->crank->metrics[ 2 ]++; /* BUNDLE_CRANK_STATUS_CREATION_FAILED */
676 0 : }
677 15 : }
678 1320 : }
679 :
680 : /* Try to schedule the next microblock. */
681 1320 : if( FD_LIKELY( ctx->bank_idle_bitset ) ) { /* Optimize for schedule */
682 1320 : any_ready = 1;
683 :
684 1320 : int i = fd_ulong_find_lsb( ctx->bank_idle_bitset );
685 :
686 1320 : int flags;
687 :
688 1320 : switch( ctx->strategy ) {
689 0 : default:
690 1320 : case FD_PACK_STRATEGY_PERF:
691 1320 : flags = FD_PACK_SCHEDULE_VOTE | FD_PACK_SCHEDULE_BUNDLE | FD_PACK_SCHEDULE_TXN;
692 1320 : break;
693 0 : case FD_PACK_STRATEGY_BALANCED:
694 : /* We want to exempt votes from pacing, so we always allow
695 : scheduling votes. It doesn't really make much sense to pace
696 : bundles, because they get scheduled in FIFO order. However,
697 : we keep pacing for normal transactions. For example, if
698 : pacing_bank_cnt is 0, then pack won't schedule normal
699 : transactions to any bank tile. */
700 0 : flags = FD_PACK_SCHEDULE_VOTE | fd_int_if( i==0, FD_PACK_SCHEDULE_BUNDLE, 0 )
701 0 : | fd_int_if( i<pacing_bank_cnt, FD_PACK_SCHEDULE_TXN, 0 );
702 0 : break;
703 0 : case FD_PACK_STRATEGY_BUNDLE:
704 0 : flags = FD_PACK_SCHEDULE_VOTE | FD_PACK_SCHEDULE_BUNDLE
705 0 : | fd_int_if( ctx->slot_end_ns - ctx->approx_wallclock_ns<50000000L, FD_PACK_SCHEDULE_TXN, 0 );
706 0 : break;
707 1320 : }
708 :
709 1320 : fd_txn_p_t * microblock_dst = fd_chunk_to_laddr( ctx->bank_out_mem, ctx->bank_out_chunk );
710 1320 : long schedule_duration = -fd_tickcount();
711 1320 : ulong schedule_cnt = fd_pack_schedule_next_microblock( ctx->pack, CUS_PER_MICROBLOCK, VOTE_FRACTION, (ulong)i, flags, microblock_dst );
712 1320 : schedule_duration += fd_tickcount();
713 1320 : fd_histf_sample( (schedule_cnt>0UL) ? ctx->schedule_duration : ctx->no_sched_duration, (ulong)schedule_duration );
714 :
715 1320 : if( FD_LIKELY( schedule_cnt ) ) {
716 1320 : any_scheduled = 1;
717 1320 : long now2 = fd_tickcount();
718 1320 : ulong tsorig = (ulong)fd_frag_meta_ts_comp( now ); /* A bound on when we observed bank was idle */
719 1320 : ulong tspub = (ulong)fd_frag_meta_ts_comp( now2 );
720 1320 : ulong chunk = ctx->bank_out_chunk;
721 1320 : ulong msg_sz = schedule_cnt*sizeof(fd_txn_p_t);
722 1320 : fd_microblock_bank_trailer_t * trailer = (fd_microblock_bank_trailer_t*)(microblock_dst+schedule_cnt);
723 1320 : trailer->bank = ctx->leader_bank;
724 1320 : trailer->bank_idx = ctx->leader_bank_idx;
725 1320 : trailer->microblock_idx = ctx->slot_microblock_cnt;
726 1320 : trailer->pack_idx = ctx->pack_idx;
727 1320 : trailer->pack_txn_idx = ctx->pack_txn_cnt;
728 1320 : trailer->is_bundle = !!(microblock_dst->flags & FD_TXN_P_FLAGS_BUNDLE);
729 :
730 1320 : ulong sig = fd_disco_poh_sig( ctx->leader_slot, POH_PKT_TYPE_MICROBLOCK, (ulong)i );
731 1320 : fd_stem_publish( stem, 0UL, sig, chunk, msg_sz+sizeof(fd_microblock_bank_trailer_t), 0UL, tsorig, tspub );
732 1320 : ctx->bank_expect[ i ] = stem->seqs[0]-1UL;
733 1320 : ctx->bank_ready_at[i] = now2 + (long)ctx->microblock_duration_ticks;
734 1320 : ctx->bank_out_chunk = fd_dcache_compact_next( ctx->bank_out_chunk, msg_sz+sizeof(fd_microblock_bank_trailer_t), ctx->bank_out_chunk0, ctx->bank_out_wmark );
735 1320 : ctx->slot_microblock_cnt += fd_ulong_if( trailer->is_bundle, schedule_cnt, 1UL );
736 1320 : ctx->pack_idx += fd_uint_if( trailer->is_bundle, (uint)schedule_cnt, 1U );
737 1320 : ctx->pack_txn_cnt += schedule_cnt;
738 :
739 1320 : ctx->bank_idle_bitset = fd_ulong_pop_lsb( ctx->bank_idle_bitset );
740 1320 : ctx->skip_cnt = (long)schedule_cnt * fd_long_if( ctx->use_consumed_cus, (long)bank_cnt/2L, 1L );
741 1320 : fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now2 );
742 :
743 1320 : memcpy( ctx->last_sched_metrics->all, (ulong const *)fd_metrics_tl, sizeof(ctx->last_sched_metrics->all) );
744 1320 : ctx->last_sched_metrics->time = now2;
745 :
746 : /* If we're using CU rebates, then we have one input link for each bank in
747 : addition to the two normal ones. We want to skip schedule attempts
748 : for (bank_cnt + 1) link polls after a successful schedule attempt.
749 : */
750 1320 : fd_long_store_if( ctx->use_consumed_cus, &(ctx->skip_cnt), (long)(ctx->bank_cnt + 1) );
751 1320 : }
752 1320 : }
753 :
754 1320 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_BANKS, any_ready );
755 1320 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, any_scheduled );
756 1320 : now = fd_tickcount();
757 1320 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, fd_pack_avail_txn_cnt( ctx->pack )>0 );
758 :
759 : #if FD_PACK_USE_EXTRA_STORAGE
760 : if( FD_UNLIKELY( !extra_txn_deq_empty( ctx->extra_txn_deq ) ) ) {
761 : /* Don't start pulling from the extra storage until the available
762 : transaction count drops below half. */
763 : ulong avail_space = (ulong)fd_long_max( 0L, (long)(ctx->max_pending_transactions>>1)-(long)fd_pack_avail_txn_cnt( ctx->pack ) );
764 : ulong qty_to_insert = fd_ulong_min( 10UL, fd_ulong_min( extra_txn_deq_cnt( ctx->extra_txn_deq ), avail_space ) );
765 : int any_successes = 0;
766 : for( ulong i=0UL; i<qty_to_insert; i++ ) any_successes |= (0<=insert_from_extra( ctx ));
767 : if( FD_LIKELY( any_successes ) ) ctx->last_successful_insert = now;
768 : }
769 : #endif
770 :
771 : /* Did we send the maximum allowed microblocks? Then end the slot. */
772 1320 : if( FD_UNLIKELY( ctx->slot_microblock_cnt==ctx->slot_max_microblocks )) {
773 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_LEADER, 0 );
774 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_BANKS, 0 );
775 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, 0 );
776 : /* The pack object also does this accounting and increases this
777 : metric, but we end the slot early so won't see it unless we also
778 : increment it here. */
779 0 : FD_MCNT_INC( PACK, MICROBLOCK_PER_BLOCK_LIMIT, 1UL );
780 0 : log_end_block_metrics( ctx, now, "microblock" );
781 :
782 0 : fd_done_packing_t * done_packing = fd_chunk_to_laddr( ctx->poh_out_mem, ctx->poh_out_chunk );
783 0 : done_packing->microblocks_in_slot = ctx->slot_microblock_cnt;
784 :
785 0 : fd_stem_publish( stem, 1UL, fd_disco_bank_sig( ctx->leader_slot, ctx->pack_idx ), ctx->poh_out_chunk, sizeof(fd_done_packing_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
786 0 : ctx->poh_out_chunk = fd_dcache_compact_next( ctx->poh_out_chunk, sizeof(fd_done_packing_t), ctx->poh_out_chunk0, ctx->poh_out_wmark );
787 0 : ctx->pack_idx++;
788 :
789 0 : ctx->drain_banks = 1;
790 0 : ctx->leader_slot = ULONG_MAX;
791 0 : ctx->slot_microblock_cnt = 0UL;
792 0 : fd_pack_end_block( ctx->pack );
793 0 : remove_ib( ctx );
794 :
795 0 : }
796 1320 : }
797 :
798 :
799 : /* At this point, we have started receiving frag seq with details in
800 : mline at time now. Speculatively process it here. */
801 :
802 : static inline void
803 : during_frag( fd_pack_ctx_t * ctx,
804 : ulong in_idx,
805 : ulong seq FD_PARAM_UNUSED,
806 : ulong sig,
807 : ulong chunk,
808 : ulong sz,
809 3291 : ulong ctl FD_PARAM_UNUSED ) {
810 :
811 3291 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
812 :
813 3291 : switch( ctx->in_kind[ in_idx ] ) {
814 0 : case IN_KIND_REPLAY: {
815 0 : if( FD_LIKELY( sig!=REPLAY_SIG_BECAME_LEADER ) ) return;
816 :
817 : /* There was a leader transition. Handle it. */
818 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz!=sizeof(fd_became_leader_t) ) )
819 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
820 :
821 0 : fd_memcpy( ctx->_became_leader, dcache_entry, sizeof(fd_became_leader_t) );
822 0 : return;
823 0 : }
824 39 : case IN_KIND_POH: {
825 : /* Not interested in stamped microblocks, only leader updates. */
826 39 : if( fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_BECAME_LEADER ) return;
827 :
828 : /* There was a leader transition. Handle it. */
829 39 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz!=sizeof(fd_became_leader_t) ) )
830 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
831 :
832 39 : fd_memcpy( ctx->_became_leader, dcache_entry, sizeof(fd_became_leader_t) );
833 39 : return;
834 39 : }
835 9 : case IN_KIND_BANK: {
836 9 : FD_TEST( ctx->use_consumed_cus );
837 : /* For a previous slot */
838 9 : if( FD_UNLIKELY( sig!=ctx->leader_slot ) ) return;
839 :
840 9 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz<FD_PACK_REBATE_MIN_SZ
841 9 : || sz>FD_PACK_REBATE_MAX_SZ ) )
842 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
843 :
844 9 : ctx->pending_rebate_sz = sz;
845 9 : fd_memcpy( ctx->rebate, dcache_entry, sz );
846 9 : return;
847 9 : }
848 3243 : case IN_KIND_RESOLV: {
849 3243 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>FD_TPU_RESOLVED_MTU ) )
850 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
851 :
852 3243 : fd_txn_m_t * txnm = (fd_txn_m_t *)dcache_entry;
853 3243 : ulong payload_sz = txnm->payload_sz;
854 3243 : ulong txn_t_sz = txnm->txn_t_sz;
855 3243 : uint source_ipv4 = txnm->source_ipv4;
856 3243 : uchar source_tpu = txnm->source_tpu;
857 3243 : FD_TEST( payload_sz<=FD_TPU_MTU );
858 3243 : FD_TEST( txn_t_sz <=FD_TXN_MAX_SZ );
859 3243 : fd_txn_t * txn = fd_txn_m_txn_t( txnm );
860 :
861 3243 : ulong addr_table_sz = 32UL*txn->addr_table_adtl_cnt;
862 3243 : FD_TEST( addr_table_sz<=32UL*FD_TXN_ACCT_ADDR_MAX );
863 :
864 3243 : if( FD_UNLIKELY( (ctx->leader_slot==ULONG_MAX) & (sig>ctx->highest_observed_slot) ) ) {
865 : /* Using the resolv tile's knowledge of the current slot is a bit
866 : of a hack, since we don't get any info if there are no
867 : transactions and we're not leader. We're actually in exactly
868 : the case where that's okay though. The point of calling
869 : expire_before long before we become leader is so that we don't
870 : drop new but low-fee-paying transactions when pack is clogged
871 : with expired but high-fee-paying transactions. That can only
872 : happen if we are getting transactions. */
873 0 : ctx->highest_observed_slot = sig;
874 0 : ulong exp_cnt = fd_pack_expire_before( ctx->pack, fd_ulong_max( ctx->highest_observed_slot, TRANSACTION_LIFETIME_SLOTS )-TRANSACTION_LIFETIME_SLOTS );
875 0 : FD_MCNT_INC( PACK, TRANSACTION_EXPIRED, exp_cnt );
876 0 : }
877 :
878 :
879 3243 : ulong bundle_id = txnm->block_engine.bundle_id;
880 3243 : if( FD_UNLIKELY( bundle_id ) ) {
881 63 : ctx->is_bundle = 1;
882 63 : if( FD_LIKELY( bundle_id!=ctx->current_bundle->id ) ) {
883 15 : if( FD_UNLIKELY( ctx->current_bundle->bundle ) ) {
884 6 : FD_MCNT_INC( PACK, TRANSACTION_DROPPED_PARTIAL_BUNDLE, ctx->current_bundle->txn_received );
885 6 : fd_pack_insert_bundle_cancel( ctx->pack, ctx->current_bundle->bundle, ctx->current_bundle->txn_cnt );
886 6 : }
887 15 : ctx->current_bundle->id = bundle_id;
888 15 : ctx->current_bundle->txn_cnt = txnm->block_engine.bundle_txn_cnt;
889 15 : ctx->current_bundle->min_blockhash_slot = ULONG_MAX;
890 15 : ctx->current_bundle->txn_received = 0UL;
891 :
892 15 : if( FD_UNLIKELY( ctx->current_bundle->txn_cnt==0UL ) ) {
893 0 : FD_MCNT_INC( PACK, TRANSACTION_DROPPED_PARTIAL_BUNDLE, 1UL );
894 0 : ctx->current_bundle->id = 0UL;
895 0 : return;
896 0 : }
897 15 : ctx->blk_engine_cfg->commission = txnm->block_engine.commission;
898 15 : memcpy( ctx->blk_engine_cfg->commission_pubkey->b, txnm->block_engine.commission_pubkey, 32UL );
899 :
900 15 : ctx->current_bundle->bundle = fd_pack_insert_bundle_init( ctx->pack, ctx->current_bundle->_txn, ctx->current_bundle->txn_cnt );
901 15 : }
902 63 : ctx->cur_spot = ctx->current_bundle->bundle[ ctx->current_bundle->txn_received ];
903 63 : ctx->current_bundle->min_blockhash_slot = fd_ulong_min( ctx->current_bundle->min_blockhash_slot, sig );
904 3180 : } else {
905 3180 : ctx->is_bundle = 0;
906 : #if FD_PACK_USE_EXTRA_STORAGE
907 : if( FD_LIKELY( ctx->leader_slot!=ULONG_MAX || fd_pack_avail_txn_cnt( ctx->pack )<ctx->max_pending_transactions ) ) {
908 : ctx->cur_spot = fd_pack_insert_txn_init( ctx->pack );
909 : ctx->insert_to_extra = 0;
910 : } else {
911 : if( FD_UNLIKELY( extra_txn_deq_full( ctx->extra_txn_deq ) ) ) {
912 : extra_txn_deq_remove_head( ctx->extra_txn_deq );
913 : FD_MCNT_INC( PACK, TRANSACTION_DROPPED_FROM_EXTRA, 1UL );
914 : }
915 : ctx->cur_spot = extra_txn_deq_peek_tail( extra_txn_deq_insert_tail( ctx->extra_txn_deq ) );
916 : /* We want to store the current time in cur_spot so that we can
917 : track its expiration better. We just stash it in the CU
918 : fields, since those aren't important right now. */
919 : ctx->cur_spot->txnp->blockhash_slot = sig;
920 : ctx->insert_to_extra = 1;
921 : FD_MCNT_INC( PACK, TRANSACTION_INSERTED_TO_EXTRA, 1UL );
922 : }
923 : #else
924 3180 : ctx->cur_spot = fd_pack_insert_txn_init( ctx->pack );
925 3180 : #endif
926 3180 : }
927 :
928 : /* We get transactions from the resolv tile.
929 : The transactions should have been parsed and verified. */
930 3243 : FD_MCNT_INC( PACK, NORMAL_TRANSACTION_RECEIVED, 1UL );
931 :
932 3243 : fd_memcpy( ctx->cur_spot->txnp->payload, fd_txn_m_payload( txnm ), payload_sz );
933 3243 : fd_memcpy( TXN(ctx->cur_spot->txnp), txn, txn_t_sz );
934 3243 : fd_memcpy( ctx->cur_spot->alt_accts, fd_txn_m_alut( txnm ), addr_table_sz );
935 3243 : ctx->cur_spot->txnp->scheduler_arrival_time_nanos = ctx->approx_wallclock_ns + (long)((double)(fd_tickcount() - ctx->approx_tickcount) / ctx->ticks_per_ns);
936 3243 : ctx->cur_spot->txnp->payload_sz = payload_sz;
937 3243 : ctx->cur_spot->txnp->source_ipv4 = source_ipv4;
938 3243 : ctx->cur_spot->txnp->source_tpu = source_tpu;
939 :
940 3243 : break;
941 3243 : }
942 0 : case IN_KIND_EXECUTED_TXN: {
943 0 : FD_TEST( sz==64UL );
944 0 : fd_memcpy( ctx->executed_txn_sig, dcache_entry, sz );
945 0 : break;
946 0 : }
947 3291 : }
948 3291 : }
949 :
950 :
951 : /* After the transaction has been fully received, and we know we were
952 : not overrun while reading it, insert it into pack. */
953 :
954 : static inline void
955 : after_frag( fd_pack_ctx_t * ctx,
956 : ulong in_idx,
957 : ulong seq,
958 : ulong sig,
959 : ulong sz,
960 : ulong tsorig,
961 : ulong tspub,
962 3279 : fd_stem_context_t * stem ) {
963 3279 : (void)seq;
964 3279 : (void)sz;
965 3279 : (void)tsorig;
966 3279 : (void)tspub;
967 3279 : (void)stem;
968 :
969 3279 : long now = fd_tickcount();
970 :
971 3279 : ulong leader_slot = ULONG_MAX;
972 3279 : switch( ctx->in_kind[ in_idx ] ) {
973 0 : case IN_KIND_REPLAY:
974 0 : if( FD_UNLIKELY( sig!=REPLAY_SIG_BECAME_LEADER ) ) return;
975 0 : leader_slot = ctx->_became_leader->slot;
976 0 : break;
977 39 : case IN_KIND_POH:
978 39 : if( fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_BECAME_LEADER ) return;
979 39 : leader_slot = fd_disco_poh_sig_slot( sig );
980 39 : break;
981 3240 : default:
982 3240 : break;
983 3279 : }
984 :
985 3279 : switch( ctx->in_kind[ in_idx ] ) {
986 0 : case IN_KIND_REPLAY:
987 39 : case IN_KIND_POH: {
988 39 : long now_ticks = fd_tickcount();
989 39 : long now_ns = fd_log_wallclock();
990 :
991 39 : if( FD_UNLIKELY( ctx->leader_slot!=ULONG_MAX ) ) {
992 0 : fd_done_packing_t * done_packing = fd_chunk_to_laddr( ctx->poh_out_mem, ctx->poh_out_chunk );
993 0 : done_packing->microblocks_in_slot = ctx->slot_microblock_cnt;
994 :
995 0 : fd_stem_publish( stem, 1UL, fd_disco_bank_sig( ctx->leader_slot, ctx->pack_idx ), ctx->poh_out_chunk, sizeof(fd_done_packing_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
996 0 : ctx->poh_out_chunk = fd_dcache_compact_next( ctx->poh_out_chunk, sizeof(fd_done_packing_t), ctx->poh_out_chunk0, ctx->poh_out_wmark );
997 0 : ctx->pack_idx++;
998 :
999 0 : FD_LOG_WARNING(( "switching to slot %lu while packing for slot %lu. Draining bank tiles.", leader_slot, ctx->leader_slot ));
1000 0 : log_end_block_metrics( ctx, now_ticks, "switch" );
1001 0 : ctx->drain_banks = 1;
1002 0 : ctx->leader_slot = ULONG_MAX;
1003 0 : ctx->slot_microblock_cnt = 0UL;
1004 0 : fd_pack_end_block( ctx->pack );
1005 0 : remove_ib( ctx );
1006 0 : }
1007 39 : ctx->leader_slot = leader_slot;
1008 :
1009 39 : ulong exp_cnt = fd_pack_expire_before( ctx->pack, fd_ulong_max( ctx->leader_slot, TRANSACTION_LIFETIME_SLOTS )-TRANSACTION_LIFETIME_SLOTS );
1010 39 : FD_MCNT_INC( PACK, TRANSACTION_EXPIRED, exp_cnt );
1011 :
1012 39 : ctx->leader_bank = ctx->_became_leader->bank;
1013 39 : ctx->leader_bank_idx = ctx->_became_leader->bank_idx;
1014 39 : ctx->slot_max_microblocks = ctx->_became_leader->max_microblocks_in_slot;
1015 : /* Reserve some space in the block for ticks */
1016 39 : ctx->slot_max_data = (ctx->larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK)
1017 39 : - 48UL*(ctx->_became_leader->ticks_per_slot+ctx->_became_leader->total_skipped_ticks);
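 : /* 48UL is the serialized size of one tick entry (an 8-byte hash
 :    count, a 32-byte hash, and an 8-byte transaction count), so this
 :    reserves block space for every tick, including skipped ones. */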
1018 :
1019 39 : ctx->limits.slot_max_cost = ctx->_became_leader->limits.slot_max_cost;
1020 39 : ctx->limits.slot_max_vote_cost = ctx->_became_leader->limits.slot_max_vote_cost;
1021 39 : ctx->limits.slot_max_write_cost_per_acct = ctx->_became_leader->limits.slot_max_write_cost_per_acct;
1022 :
1023 : /* ticks_per_ns is probably relatively stable over 400ms, but not
1024 : over several hours, so we need to compute the slot duration in
1025 : nanoseconds first and then convert to ticks. This doesn't need
1026 : to be super accurate, but we don't want it to vary wildly. */
1027 39 : long end_ticks = now_ticks + (long)((double)fd_long_max( ctx->_became_leader->slot_end_ns - now_ns, 1L )*ctx->ticks_per_ns);
1028 : /* We may still get overrun, but then we'll never use this and just
1029 : reinitialize it the next time when we actually become leader. */
1030 39 : fd_pack_pacing_init( ctx->pacer, now_ticks, end_ticks, (float)ctx->ticks_per_ns, ctx->limits.slot_max_cost );
1031 :
1032 39 : if( FD_UNLIKELY( ctx->crank->enabled ) ) {
1033 : /* If we get overrun, we'll just never use these values, but the
1034 : old values aren't really useful either. */
1035 39 : ctx->crank->epoch = ctx->_became_leader->epoch;
1036 39 : *(ctx->crank->prev_config) = *(ctx->_became_leader->bundle->config);
1037 39 : memcpy( ctx->crank->recent_blockhash, ctx->_became_leader->bundle->last_blockhash, 32UL );
1038 39 : memcpy( ctx->crank->tip_receiver_owner, ctx->_became_leader->bundle->tip_receiver_owner, 32UL );
1039 39 : }
1040 :
1041 39 : FD_LOG_INFO(( "pack_became_leader(slot=%lu,ends_at=%ld)", ctx->leader_slot, ctx->_became_leader->slot_end_ns ));
1042 :
1043 39 : update_metric_state( ctx, fd_tickcount(), FD_PACK_METRIC_STATE_LEADER, 1 );
1044 :
1045 39 : ctx->slot_end_ns = ctx->_became_leader->slot_end_ns;
1046 39 : fd_pack_limits_t limits[ 1 ];
1047 39 : limits->max_cost_per_block = ctx->limits.slot_max_cost;
1048 39 : limits->max_data_bytes_per_block = ctx->slot_max_data;
1049 39 : limits->max_microblocks_per_block = ctx->slot_max_microblocks;
1050 39 : limits->max_vote_cost_per_block = ctx->limits.slot_max_vote_cost;
1051 39 : limits->max_write_cost_per_acct = ctx->limits.slot_max_write_cost_per_acct;
1052 39 : limits->max_txn_per_microblock = ULONG_MAX; /* unused */
1053 39 : fd_pack_set_block_limits( ctx->pack, limits );
1054 39 : fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now );
1055 :
1056 39 : break;
1057 0 : }
1058 9 : case IN_KIND_BANK: {
1059 : /* For a previous slot */
1060 9 : if( FD_UNLIKELY( sig!=ctx->leader_slot ) ) return;
1061 :
1062 9 : fd_pack_rebate_cus( ctx->pack, ctx->rebate->rebate );
1063 9 : ctx->pending_rebate_sz = 0UL;
1064 9 : fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now );
1065 9 : break;
1066 9 : }
1067 3231 : case IN_KIND_RESOLV: {
1068 : /* Normal transaction case */
1069 : #if FD_PACK_USE_EXTRA_STORAGE
1070 : if( FD_LIKELY( !ctx->insert_to_extra ) ) {
1071 : #else
1072 3231 : if( 1 ) {
1073 3231 : #endif
1074 3231 : if( FD_UNLIKELY( ctx->is_bundle ) ) {
1075 57 : if( FD_UNLIKELY( ctx->current_bundle->txn_cnt==0UL ) ) return;
1076 57 : if( FD_UNLIKELY( ++(ctx->current_bundle->txn_received)==ctx->current_bundle->txn_cnt ) ) {
1077 9 : ulong deleted;
1078 9 : long insert_duration = -fd_tickcount();
1079 9 : int result = fd_pack_insert_bundle_fini( ctx->pack, ctx->current_bundle->bundle, ctx->current_bundle->txn_cnt, ctx->current_bundle->min_blockhash_slot, 0, ctx->blk_engine_cfg, &deleted );
1080 9 : insert_duration += fd_tickcount();
1081 9 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
1082 9 : ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ] += ctx->current_bundle->txn_received;
1083 9 : fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
1084 9 : ctx->current_bundle->bundle = NULL;
1085 9 : }
1086 3174 : } else {
1087 3174 : ulong blockhash_slot = sig;
1088 3174 : ulong deleted;
1089 3174 : long insert_duration = -fd_tickcount();
1090 3174 : int result = fd_pack_insert_txn_fini( ctx->pack, ctx->cur_spot, blockhash_slot, &deleted );
1091 3174 : insert_duration += fd_tickcount();
1092 3174 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
1093 3174 : ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ]++;
1094 3174 : fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
1095 3174 : if( FD_LIKELY( result>=0 ) ) ctx->last_successful_insert = now;
1096 3174 : }
1097 3231 : }
1098 :
1099 3231 : ctx->cur_spot = NULL;
1100 3231 : break;
1101 3231 : }
1102 0 : case IN_KIND_EXECUTED_TXN: {
1103 0 : ulong deleted = fd_pack_delete_transaction( ctx->pack, fd_type_pun( ctx->executed_txn_sig ) );
1104 0 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
1105 0 : break;
1106 3231 : }
1107 3279 : }
1108 :
1109 3279 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, fd_pack_avail_txn_cnt( ctx->pack )>0 );
1110 3279 : }
1111 :
1112 : static void
1113 : privileged_init( fd_topo_t * topo,
1114 0 : fd_topo_tile_t * tile ) {
1115 0 : if( FD_LIKELY( !tile->pack.bundle.enabled ) ) return;
1116 0 : if( FD_UNLIKELY( !tile->pack.bundle.vote_account_path[0] ) ) {
1117 0 : FD_LOG_WARNING(( "Disabling bundle crank because no vote account was specified" ));
1118 0 : return;
1119 0 : }
1120 :
1121 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
1122 :
1123 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1124 0 : fd_pack_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
1125 :
1126 0 : if( FD_UNLIKELY( !strcmp( tile->pack.bundle.identity_key_path, "" ) ) )
1127 0 : FD_LOG_ERR(( "identity_key_path not set" ));
1128 :
1129 0 : const uchar * identity_key = fd_keyload_load( tile->pack.bundle.identity_key_path, /* pubkey only: */ 1 );
1130 0 : fd_memcpy( ctx->crank->identity_pubkey->b, identity_key, 32UL );
1131 :
1132 0 : if( FD_UNLIKELY( !fd_base58_decode_32( tile->pack.bundle.vote_account_path, ctx->crank->vote_pubkey->b ) ) ) {
1133 0 : const uchar * vote_key = fd_keyload_load( tile->pack.bundle.vote_account_path, /* pubkey only: */ 1 );
1134 0 : fd_memcpy( ctx->crank->vote_pubkey->b, vote_key, 32UL );
1135 0 : }
1136 0 : }
1137 :
1138 : static void
1139 : unprivileged_init( fd_topo_t * topo,
1140 30 : fd_topo_tile_t * tile ) {
1141 30 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
1142 :
1143 30 : if( FD_UNLIKELY( tile->pack.max_pending_transactions >= USHORT_MAX-10UL ) ) FD_LOG_ERR(( "pack tile supports up to %lu pending transactions", USHORT_MAX-11UL ));
1144 :
1145 30 : fd_pack_limits_t limits_upper[1] = {{
1146 30 : .max_cost_per_block = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK_UPPER_BOUND,
1147 30 : .max_vote_cost_per_block = FD_PACK_MAX_VOTE_COST_PER_BLOCK_UPPER_BOUND,
1148 30 : .max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT_UPPER_BOUND,
1149 30 : .max_data_bytes_per_block = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
1150 30 : .max_txn_per_microblock = EFFECTIVE_TXN_PER_MICROBLOCK,
1151 30 : .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
1152 30 : }};
1153 :
1154 30 : ulong pack_footprint = fd_pack_footprint( tile->pack.max_pending_transactions, BUNDLE_META_SZ, tile->pack.bank_tile_count, limits_upper );
1155 :
1156 30 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1157 30 : fd_pack_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
1158 30 : fd_rng_t * rng = fd_rng_join( fd_rng_new( FD_SCRATCH_ALLOC_APPEND( l, fd_rng_align(), fd_rng_footprint() ), 0U, 0UL ) );
1159 30 : if( FD_UNLIKELY( !rng ) ) FD_LOG_ERR(( "fd_rng_new failed" ));
1160 :
1161 30 : fd_pack_limits_t limits_lower[1] = {{
1162 30 : .max_cost_per_block = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK_LOWER_BOUND,
1163 30 : .max_vote_cost_per_block = FD_PACK_MAX_VOTE_COST_PER_BLOCK_LOWER_BOUND,
1164 30 : .max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT_LOWER_BOUND,
1165 30 : .max_data_bytes_per_block = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
1166 30 : .max_txn_per_microblock = EFFECTIVE_TXN_PER_MICROBLOCK,
1167 30 : .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
1168 30 : }};
1169 :
1170 30 : ctx->pack = fd_pack_join( fd_pack_new( FD_SCRATCH_ALLOC_APPEND( l, fd_pack_align(), pack_footprint ),
1171 30 : tile->pack.max_pending_transactions, BUNDLE_META_SZ, tile->pack.bank_tile_count,
1172 30 : limits_lower, rng ) );
1173 30 : if( FD_UNLIKELY( !ctx->pack ) ) FD_LOG_ERR(( "fd_pack_new failed" ));
1174 :
1175 30 : if( FD_UNLIKELY( tile->in_cnt>32UL ) ) FD_LOG_ERR(( "Too many input links (%lu>32) to pack tile", tile->in_cnt ));
1176 :
1177 30 : FD_TEST( tile->in_cnt<sizeof( ctx->in_kind )/sizeof( ctx->in_kind[0] ) );
1178 270 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
1179 240 : fd_topo_link_t const * link = &topo->links[ tile->in_link_id[ i ] ];
1180 :
1181 240 : if( FD_LIKELY( !strcmp( link->name, "resolv_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
1182 210 : else if( FD_LIKELY( !strcmp( link->name, "dedup_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
1183 210 : else if( FD_LIKELY( !strcmp( link->name, "poh_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_POH;
1184 180 : else if( FD_LIKELY( !strcmp( link->name, "bank_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_BANK;
1185 60 : else if( FD_LIKELY( !strcmp( link->name, "sign_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_SIGN;
1186 30 : else if( FD_LIKELY( !strcmp( link->name, "replay_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_REPLAY;
1187 30 : else if( FD_LIKELY( !strcmp( link->name, "executed_txn" ) ) ) ctx->in_kind[ i ] = IN_KIND_EXECUTED_TXN;
1188 0 : else FD_LOG_ERR(( "pack tile has unexpected input link %lu %s", i, link->name ));
1189 240 : }
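The loop above classifies each input link purely by its name. For reference, the same mapping written as a table; this is a sketch only (the tile keeps the if/else chain), but the names and kind constants are exactly the ones it already uses:

    static const struct { char const * name; ulong kind; } in_kind_map[] = {
      { "resolv_pack",  IN_KIND_RESOLV       },
      { "dedup_pack",   IN_KIND_RESOLV       }, /* dedup output handled like resolv output */
      { "poh_pack",     IN_KIND_POH          },
      { "bank_pack",    IN_KIND_BANK         },
      { "sign_pack",    IN_KIND_SIGN         },
      { "replay_out",   IN_KIND_REPLAY       },
      { "executed_txn", IN_KIND_EXECUTED_TXN },
    };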
1190 :
1191 30 : ulong bank_cnt = 0UL;
1192 750 : for( ulong i=0UL; i<topo->tile_cnt; i++ ) {
1193 720 : fd_topo_tile_t const * consumer_tile = &topo->tiles[ i ];
1194 720 : if( FD_UNLIKELY( strcmp( consumer_tile->name, "bank" ) && strcmp( consumer_tile->name, "replay" ) ) ) continue;
1195 240 : for( ulong j=0UL; j<consumer_tile->in_cnt; j++ ) {
1196 120 : if( FD_UNLIKELY( consumer_tile->in_link_id[ j ]==tile->out_link_id[ 0 ] ) ) bank_cnt++;
1197 120 : }
1198 120 : }
1199 :
1200 : // if( FD_UNLIKELY( !bank_cnt ) ) FD_LOG_ERR(( "pack tile connects to no banking tiles" ));
1201 30 : if( FD_UNLIKELY( bank_cnt>FD_PACK_MAX_BANK_TILES ) ) FD_LOG_ERR(( "pack tile connects to too many banking tiles" ));
1202 : // if( FD_UNLIKELY( bank_cnt!=tile->pack.bank_tile_count ) ) FD_LOG_ERR(( "pack tile connects to %lu banking tiles, but tile->pack.bank_tile_count is %lu", bank_cnt, tile->pack.bank_tile_count ));
1203 :
1204 30 : FD_TEST( (tile->pack.schedule_strategy>=0) & (tile->pack.schedule_strategy<=FD_PACK_STRATEGY_BUNDLE) );
1205 :
1206 30 : ctx->crank->enabled = tile->pack.bundle.enabled;
1207 30 : if( FD_UNLIKELY( tile->pack.bundle.enabled ) ) {
1208 30 : if( FD_UNLIKELY( !fd_bundle_crank_gen_init( ctx->crank->gen, (fd_acct_addr_t const *)tile->pack.bundle.tip_distribution_program_addr,
1209 30 : (fd_acct_addr_t const *)tile->pack.bundle.tip_payment_program_addr,
1210 30 : (fd_acct_addr_t const *)ctx->crank->vote_pubkey->b,
1211 30 : (fd_acct_addr_t const *)tile->pack.bundle.tip_distribution_authority,
1212 30 : schedule_strategy_strings[ tile->pack.schedule_strategy ],
1213 30 : tile->pack.bundle.commission_bps ) ) ) {
1214 0 : FD_LOG_ERR(( "constructing bundle generator failed" ));
1215 0 : }
1216 :
1217 30 : ulong sign_in_idx = fd_topo_find_tile_in_link ( topo, tile, "sign_pack", tile->kind_id );
1218 30 : ulong sign_out_idx = fd_topo_find_tile_out_link( topo, tile, "pack_sign", tile->kind_id );
1219 30 : FD_TEST( (sign_in_idx!=ULONG_MAX) & (sign_out_idx!=ULONG_MAX) );
1220 30 : fd_topo_link_t * sign_in = &topo->links[ tile->in_link_id[ sign_in_idx ] ];
1221 30 : fd_topo_link_t * sign_out = &topo->links[ tile->out_link_id[ sign_out_idx ] ];
1222 30 : if( FD_UNLIKELY( !fd_keyguard_client_join( fd_keyguard_client_new( ctx->crank->keyguard_client,
1223 30 : sign_out->mcache,
1224 30 : sign_out->dcache,
1225 30 : sign_in->mcache,
1226 30 : sign_in->dcache,
1227 30 : sign_out->mtu ) ) ) ) {
1228 0 : FD_LOG_ERR(( "failed to construct keyguard" ));
1229 0 : }
1230 : /* Initialize enough of the prev config that it produces a
1231 : transaction */
1232 30 : ctx->crank->prev_config->discriminator = 0x82ccfa1ee0aa0c9bUL;
1233 30 : ctx->crank->prev_config->tip_receiver->b[1] = 1;
1234 30 : ctx->crank->prev_config->block_builder->b[2] = 1;
1235 :
1236 30 : memset( ctx->crank->tip_receiver_owner, '\0', 32UL );
1237 30 : memset( ctx->crank->recent_blockhash, '\0', 32UL );
1238 30 : memset( ctx->crank->last_sig, '\0', 64UL );
1239 30 : ctx->crank->ib_inserted = 0;
1240 30 : ctx->crank->epoch = 0UL;
1241 30 : ctx->crank->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->keyswitch_obj_id ) );
1242 30 : FD_TEST( ctx->crank->keyswitch );
1243 30 : } else {
1244 0 : memset( ctx->crank, '\0', sizeof(ctx->crank) );
1245 0 : }
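The keyguard client built above pairs the pack_sign request link with the sign_pack response link so that crank (bundle tip) transactions can be signed by the sign tile rather than with a locally held key. A rough sketch of the round trip it enables follows; the prototype of fd_keyguard_client_sign and the FD_KEYGUARD_SIGN_TYPE_ED25519 constant are assumptions here and should be checked against the keyguard headers, and txn_signing_region is a hypothetical buffer name:

    uchar sig[ 64 ];
    /* Assumed call shape: (client, out signature, message bytes, message size, sign type). */
    fd_keyguard_client_sign( ctx->crank->keyguard_client, sig,
                             txn_signing_region, txn_signing_region_sz,
                             FD_KEYGUARD_SIGN_TYPE_ED25519 );
    /* sig now holds the 64 byte ed25519 signature produced by the sign tile. */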
1246 :
1247 :
1248 : #if FD_PACK_USE_EXTRA_STORAGE
1249 : ctx->extra_txn_deq = extra_txn_deq_join( extra_txn_deq_new( FD_SCRATCH_ALLOC_APPEND( l, extra_txn_deq_align(),
1250 : extra_txn_deq_footprint() ) ) );
1251 : #endif
1252 :
1253 30 : ctx->cur_spot = NULL;
1254 30 : ctx->is_bundle = 0;
1255 30 : ctx->strategy = tile->pack.schedule_strategy;
1256 30 : ctx->max_pending_transactions = tile->pack.max_pending_transactions;
1257 30 : ctx->leader_slot = ULONG_MAX;
1258 30 : ctx->leader_bank = NULL;
1259 30 : ctx->leader_bank_idx = ULONG_MAX;
1260 30 : ctx->pack_idx = 0UL;
1261 30 : ctx->slot_microblock_cnt = 0UL;
1262 30 : ctx->pack_txn_cnt = 0UL;
1263 30 : ctx->slot_max_microblocks = 0UL;
1264 30 : ctx->slot_max_data = 0UL;
1265 30 : ctx->larger_shred_limits_per_block = tile->pack.larger_shred_limits_per_block;
1266 30 : ctx->drain_banks = 0;
1267 30 : ctx->approx_wallclock_ns = fd_log_wallclock();
1268 30 : ctx->approx_tickcount = fd_tickcount();
1269 30 : ctx->rng = rng;
1270 30 : ctx->ticks_per_ns = fd_tempo_tick_per_ns( NULL );
1271 30 : ctx->last_successful_insert = 0L;
1272 30 : ctx->highest_observed_slot = 0UL;
1273 30 : ctx->microblock_duration_ticks = (ulong)(fd_tempo_tick_per_ns( NULL )*(double)MICROBLOCK_DURATION_NS + 0.5);
1274 : #if FD_PACK_USE_EXTRA_STORAGE
1275 : ctx->insert_to_extra = 0;
1276 : #endif
1277 30 : ctx->use_consumed_cus = tile->pack.use_consumed_cus;
1278 30 : ctx->crank->enabled = tile->pack.bundle.enabled;
1279 :
1280 30 : ctx->wait_duration_ticks[ 0 ] = ULONG_MAX;
1281 930 : for( ulong i=1UL; i<MAX_TXN_PER_MICROBLOCK+1UL; i++ ) {
1282 900 : ctx->wait_duration_ticks[ i ]=(ulong)(fd_tempo_tick_per_ns( NULL )*(double)wait_duration[ i ] + 0.5);
1283 900 : }
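The loop above converts the nanosecond wait_duration table into tick units once at initialization, presumably so the hot path can compare raw tick counters without multiplying on every check. A worked example of the conversion with an illustrative tick rate:

    double ticks_per_ns = fd_tempo_tick_per_ns( NULL );       /* e.g. ~3.0 on a ~3 GHz invariant TSC */
    ulong  wait_ticks   = (ulong)(ticks_per_ns*1500.0 + 0.5); /* a 1500 ns entry becomes ~4500 ticks */
    /* Index 0 is set to ULONG_MAX just above the loop: with zero transactions
       pending there is nothing to schedule, so that timeout never expires. */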
1284 :
1285 30 : ctx->limits.slot_max_cost = limits_lower->max_cost_per_block;
1286 30 : ctx->limits.slot_max_vote_cost = limits_lower->max_vote_cost_per_block;
1287 30 : ctx->limits.slot_max_write_cost_per_acct = limits_lower->max_write_cost_per_acct;
1288 :
1289 30 : ctx->bank_cnt = tile->pack.bank_tile_count;
1290 30 : ctx->poll_cursor = 0;
1291 30 : ctx->skip_cnt = 0L;
1292 30 : ctx->bank_idle_bitset = fd_ulong_mask_lsb( (int)tile->pack.bank_tile_count );
1293 60 : for( ulong i=0UL; i<tile->pack.bank_tile_count; i++ ) {
1294 30 : ulong busy_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "bank_busy.%lu", i );
1295 30 : FD_TEST( busy_obj_id!=ULONG_MAX );
1296 30 : ctx->bank_current[ i ] = fd_fseq_join( fd_topo_obj_laddr( topo, busy_obj_id ) );
1297 30 : ctx->bank_expect[ i ] = ULONG_MAX;
1298 30 : if( FD_UNLIKELY( !ctx->bank_current[ i ] ) ) FD_LOG_ERR(( "banking tile %lu has no busy flag", i ));
1299 30 : ctx->bank_ready_at[ i ] = 0L;
1300 30 : FD_TEST( ULONG_MAX==fd_fseq_query( ctx->bank_current[ i ] ) );
1301 30 : }
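Each bank tile advertises its progress through a "busy" fseq; bank_expect[ i ] records the sequence number pack last handed to bank i and bank_idle_bitset tracks which banks have caught up. A hedged sketch of how these fields presumably interact later in this tile (the authoritative logic lives in the scheduling callbacks and may differ in detail):

    /* Bank i is considered idle again once its busy fseq has advanced to the
       value recorded when pack published its most recent microblock to it. */
    if( fd_fseq_query( ctx->bank_current[ i ] )==ctx->bank_expect[ i ] ) {
      ctx->bank_idle_bitset |= 1UL<<i;   /* bank i may be given another microblock */
    }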
1302 :
1303 270 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
1304 240 : fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
1305 240 : fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
1306 :
1307 240 : ctx->in[ i ].mem = link_wksp->wksp;
1308 240 : ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
1309 240 : ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
1310 240 : }
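chunk0 and wmark bound the compactly addressed region of each input dcache, and an incoming frag's chunk index must fall inside that range before it is translated to a local pointer. A minimal sketch of the receive-side check, assuming the usual fd_chunk_to_laddr helper, with in_idx and chunk standing in for the values the stem callbacks deliver:

    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
      FD_LOG_ERR(( "chunk %lu out of bounds [%lu,%lu]",
                   chunk, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
    void * payload = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );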
1311 :
1312 30 : ctx->bank_out_mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 0 ] ].dcache_obj_id ].wksp_id ].wksp;
1313 30 : ctx->bank_out_chunk0 = fd_dcache_compact_chunk0( ctx->bank_out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache );
1314 30 : ctx->bank_out_wmark = fd_dcache_compact_wmark ( ctx->bank_out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache, topo->links[ tile->out_link_id[ 0 ] ].mtu );
1315 30 : ctx->bank_out_chunk = ctx->bank_out_chunk0;
1316 :
1317 30 : ctx->poh_out_mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 1 ] ].dcache_obj_id ].wksp_id ].wksp;
1318 30 : ctx->poh_out_chunk0 = fd_dcache_compact_chunk0( ctx->poh_out_mem, topo->links[ tile->out_link_id[ 1 ] ].dcache );
1319 30 : ctx->poh_out_wmark = fd_dcache_compact_wmark ( ctx->poh_out_mem, topo->links[ tile->out_link_id[ 1 ] ].dcache, topo->links[ tile->out_link_id[ 1 ] ].mtu );
1320 30 : ctx->poh_out_chunk = ctx->poh_out_chunk0;
1321 :
1322 : /* Initialize metrics storage */
1323 30 : memset( ctx->insert_result, '\0', FD_PACK_INSERT_RETVAL_CNT * sizeof(ulong) );
1324 30 : fd_histf_join( fd_histf_new( ctx->schedule_duration, FD_MHIST_SECONDS_MIN( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS ),
1325 30 : FD_MHIST_SECONDS_MAX( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS ) ) );
1326 30 : fd_histf_join( fd_histf_new( ctx->no_sched_duration, FD_MHIST_SECONDS_MIN( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS ),
1327 30 : FD_MHIST_SECONDS_MAX( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS ) ) );
1328 30 : fd_histf_join( fd_histf_new( ctx->insert_duration, FD_MHIST_SECONDS_MIN( PACK, INSERT_TRANSACTION_DURATION_SECONDS ),
1329 30 : FD_MHIST_SECONDS_MAX( PACK, INSERT_TRANSACTION_DURATION_SECONDS ) ) );
1330 30 : fd_histf_join( fd_histf_new( ctx->complete_duration, FD_MHIST_SECONDS_MIN( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS ),
1331 30 : FD_MHIST_SECONDS_MAX( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS ) ) );
1332 30 : ctx->metric_state = 0;
1333 30 : ctx->metric_state_begin = fd_tickcount();
1334 30 : memset( ctx->metric_timing, '\0', 16*sizeof(long) );
1335 30 : memset( ctx->current_bundle, '\0', sizeof(ctx->current_bundle) );
1336 30 : memset( ctx->blk_engine_cfg, '\0', sizeof(ctx->blk_engine_cfg) );
1337 30 : memset( ctx->last_sched_metrics, '\0', sizeof(ctx->last_sched_metrics) );
1338 30 : memset( ctx->crank->metrics, '\0', sizeof(ctx->crank->metrics) );
1339 :
1340 30 : FD_LOG_INFO(( "packing microblocks of at most %lu transactions to %lu bank tiles using strategy %i", EFFECTIVE_TXN_PER_MICROBLOCK, tile->pack.bank_tile_count, ctx->strategy ));
1341 :
1342 30 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
1343 30 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
1344 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
1345 :
1346 30 : }
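unprivileged_init ends by checking that the bump allocations it performed fit inside the region that scratch_footprint( tile ) promised. The general shape of that contract, sketched with a hypothetical object foo whose align/footprint follow the usual conventions:

    FD_SCRATCH_ALLOC_INIT( l, scratch );
    void * foo_mem = FD_SCRATCH_ALLOC_APPEND( l, foo_align(), foo_footprint() );
    /* ... every APPEND here must be mirrored, with the same alignment and size,
       in scratch_footprint( tile ) ... */
    ulong top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
    FD_TEST( top<=(ulong)scratch+scratch_footprint( tile ) ); /* otherwise boot aborts, as above */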
1347 :
1348 : static ulong
1349 : populate_allowed_seccomp( fd_topo_t const * topo,
1350 : fd_topo_tile_t const * tile,
1351 : ulong out_cnt,
1352 0 : struct sock_filter * out ) {
1353 0 : (void)topo;
1354 0 : (void)tile;
1355 :
1356 0 : populate_sock_filter_policy_fd_pack_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
1357 0 : return sock_filter_policy_fd_pack_tile_instr_cnt;
1358 0 : }
1359 :
1360 : static ulong
1361 : populate_allowed_fds( fd_topo_t const * topo,
1362 : fd_topo_tile_t const * tile,
1363 : ulong out_fds_cnt,
1364 0 : int * out_fds ) {
1365 0 : (void)topo;
1366 0 : (void)tile;
1367 :
1368 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
1369 :
1370 0 : ulong out_cnt = 0UL;
1371 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
1372 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
1373 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
1374 0 : return out_cnt;
1375 0 : }
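populate_allowed_fds reports the only descriptors the sandboxed tile keeps open: stderr, plus the log file when one is configured. A minimal caller sketch (the sandbox setup that actually consumes this lives outside this file):

    int   fds[ 8 ];
    ulong fd_cnt = populate_allowed_fds( topo, tile, 8UL, fds );
    /* Typically fd_cnt==2 with fds=={ 2, logfile_fd }; if fd_log_private_logfile_fd()
       returns -1 (no log file), fd_cnt==1 and only stderr is allowed. */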
1376 :
1377 0 : #define STEM_BURST (1UL)
1378 :
1379 : /* We want lazy (measured in ns) to be small enough that the producer
1380 : and the consumer never have to wait for credits. For most tango
1381 : links, we use a default worst case speed coming from 100 Gbps
1382 : Ethernet. That's not very suitable for microblocks that go from
1383 : pack to bank. Instead we use a manual, very aggressive estimate of
1384 : 1000ns per microblock, and then reduce it further (in line with the
1385 : default lazy value computation) to ensure the random value chosen
1386 : based on this won't lead to credit return stalls. */
1387 0 : #define STEM_LAZY (128L*3000L)
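For concreteness, the constant works out as follows; reading the factors as "a 128 deep mcache at roughly 3 us per entry" is an interpretation, not something the source states:

    /* STEM_LAZY = 128L*3000L = 384,000 ns ~= 0.38 ms as the rough housekeeping /
       credit-return pacing target (the stem run loop randomizes around it), i.e.
       on the order of a thousand credit-return opportunities per 400 ms slot. */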
1388 :
1389 0 : #define STEM_CALLBACK_CONTEXT_TYPE fd_pack_ctx_t
1390 0 : #define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_pack_ctx_t)
1391 :
1392 0 : #define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
1393 0 : #define STEM_CALLBACK_BEFORE_CREDIT before_credit
1394 0 : #define STEM_CALLBACK_AFTER_CREDIT after_credit
1395 0 : #define STEM_CALLBACK_DURING_FRAG during_frag
1396 0 : #define STEM_CALLBACK_AFTER_FRAG after_frag
1397 0 : #define STEM_CALLBACK_METRICS_WRITE metrics_write
1398 :
1399 : #include "../stem/fd_stem.c"
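The STEM_* definitions above configure the generic stem run loop and the textual include instantiates it; in particular, this is where the stem_run referenced by the tile descriptor below comes from. The same pattern for a hypothetical foo tile would look roughly like this (which callbacks are mandatory is not shown here, and fd_foo_ctx_t is illustrative):

    #define STEM_BURST                  (1UL)
    #define STEM_CALLBACK_CONTEXT_TYPE  fd_foo_ctx_t
    #define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_foo_ctx_t)
    #define STEM_CALLBACK_DURING_FRAG   during_frag  /* inspect/copy the incoming payload */
    #define STEM_CALLBACK_AFTER_FRAG    after_frag   /* act on it once it is known stable */
    #include "../stem/fd_stem.c"                     /* emits stem_run for this tile */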
1400 :
1401 : fd_topo_run_tile_t fd_tile_pack = {
1402 : .name = "pack",
1403 : .populate_allowed_seccomp = populate_allowed_seccomp,
1404 : .populate_allowed_fds = populate_allowed_fds,
1405 : .scratch_align = scratch_align,
1406 : .scratch_footprint = scratch_footprint,
1407 : .privileged_init = privileged_init,
1408 : .unprivileged_init = unprivileged_init,
1409 : .run = stem_run,
1410 : };