Line data Source code
1 : #include "../tiles.h"
2 :
3 : #include "generated/fd_pack_tile_seccomp.h"
4 :
5 : #include "../../util/pod/fd_pod_format.h"
6 : #include "../keyguard/fd_keyload.h"
7 : #include "../keyguard/fd_keyswitch.h"
8 : #include "../keyguard/fd_keyguard.h"
9 : #include "../metrics/fd_metrics.h"
10 : #include "../pack/fd_pack.h"
11 : #include "../pack/fd_pack_pacing.h"
12 :
13 : #include <linux/unistd.h>
14 : #include <string.h>
15 :
16 : /* fd_pack is responsible for taking verified transactions, and
17 : arranging them into "microblocks" (groups) of transactions to
18 : be executed serially. It can try to do clever things so that
19 : multiple microblocks can execute in parallel, if they don't
20 : write to the same accounts. */
21 :
22 6504 : #define IN_KIND_RESOLV (0UL)
23 108 : #define IN_KIND_POH (1UL)
24 138 : #define IN_KIND_BANK (2UL)
25 30 : #define IN_KIND_SIGN (3UL)
26 0 : #define IN_KIND_REPLAY (4UL)
27 30 : #define IN_KIND_EXECUTED_TXN (5UL)
28 :
29 : /* Pace microblocks, but only slightly. This helps keep performance
30 : more stable. This limit is 2,000 microblocks/second/bank. At 31
31 : transactions/microblock, that's 62k txn/sec/bank. */
32 30 : #define MICROBLOCK_DURATION_NS (0L)
33 :
34 : /* There are 151 accepted blockhashes, but those don't include skips.
35 : This check is neither precise nor accurate, but just good enough.
36 : The bank tile does the final check. We give a little margin for a
37 : few percent skip rate. */
38 117 : #define TRANSACTION_LIFETIME_SLOTS 160UL
39 :
40 : /* Time is normally a long, but pack expects a ulong. Add -LONG_MIN to
41 : the time values so that LONG_MIN maps to 0, LONG_MAX maps to
42 : ULONG_MAX, and everything in between maps linearly with a slope of 1.
43 : Just subtracting LONG_MIN results in signed integer overflow, which
44 : is U.B. */
45 : #define TIME_OFFSET 0x8000000000000000UL
46 : FD_STATIC_ASSERT( (ulong)LONG_MIN+TIME_OFFSET==0UL, time_offset );
47 : FD_STATIC_ASSERT( (ulong)LONG_MAX+TIME_OFFSET==ULONG_MAX, time_offset );
48 :
49 : /* Optionally allow a larger limit for benchmarking */
50 0 : #define LARGER_MAX_COST_PER_BLOCK (18UL*FD_PACK_MAX_COST_PER_BLOCK_LOWER_BOUND)
51 :
52 : /* 1.6 M cost units, enough for 1 max size transaction */
53 : const ulong CUS_PER_MICROBLOCK = 1600000UL;
54 :
55 : #define SMALL_MICROBLOCKS 1
56 :
57 : #if SMALL_MICROBLOCKS
58 : const float VOTE_FRACTION = 1.0f; /* schedule all available votes first */
59 93 : #define EFFECTIVE_TXN_PER_MICROBLOCK 1UL
60 : #else
61 : const float VOTE_FRACTION = 0.75f; /* TODO: Is this the right value? */
62 : #define EFFECTIVE_TXN_PER_MICROBLOCK MAX_TXN_PER_MICROBLOCK
63 : #endif
64 :
65 : /* There's overhead associated with each microblock the bank tile tries
66 :    to execute, so the optimal strategy is not to produce a microblock
67 : with a single transaction as soon as we receive it. Basically, if we
68 : have less than 31 transactions, we want to wait a little to see if we
69 : receive additional transactions before we schedule a microblock. We
70 : can model the optimum amount of time to wait, but the equation is
71 : complicated enough that we want to compute it before compile time.
72 : wait_duration[i] for i in [0, 31] gives the time in nanoseconds pack
73 : should wait after receiving its most recent transaction before
74 : scheduling if it has i transactions available. Unsurprisingly,
75 : wait_duration[31] is 0. wait_duration[0] is ULONG_MAX, so we'll
76 : always wait if we have 0 transactions. */
77 : FD_IMPORT( wait_duration, "src/disco/pack/pack_delay.bin", ulong, 6, "" );
78 :
79 :
80 :
81 : #if FD_PACK_USE_EXTRA_STORAGE
82 : /* When we are done being leader for a slot and we are leader in the
83 : very next slot, it can still take some time to transition. This is
84 : because the bank has to be finalized, a hash calculated, and various
85 : other things done in the replay stage to create the new child bank.
86 :
87 : During that time, pack cannot send transactions to banks so it needs
88 : to be able to buffer. Typically, these so called "leader
89 : transitions" are short (<15 millis), so a low value here would
90 : suffice. However, in some cases when there is memory pressure on the
91 : NUMA node or when the operating system context switches relevant
92 : threads out, it can take significantly longer.
93 :
94 : To prevent drops in these cases and because we assume banks are fast
95 : enough to drain this buffer once we do become leader, we set this
96 : buffer size to be quite large. */
97 :
98 : #define DEQUE_NAME extra_txn_deq
99 : #define DEQUE_T fd_txn_e_t
100 : #define DEQUE_MAX (128UL*1024UL)
101 : #include "../../../../util/tmpl/fd_deque.c"
102 :
103 : #endif
104 :
105 : /* Sync with src/app/shared/fd_config.c */
106 1321 : #define FD_PACK_STRATEGY_PERF 0
107 0 : #define FD_PACK_STRATEGY_BALANCED 1
108 0 : #define FD_PACK_STRATEGY_BUNDLE 2
109 :
110 : static char const * const schedule_strategy_strings[3] = { "PRF", "BAL", "BUN" };
111 :
112 :
113 : typedef struct {
114 : fd_acct_addr_t commission_pubkey[1];
115 : ulong commission;
116 : } block_builder_info_t;
117 :
118 : typedef struct {
119 : fd_wksp_t * mem;
120 : ulong chunk0;
121 : ulong wmark;
122 : } fd_pack_in_ctx_t;
123 :
124 : typedef struct {
125 : fd_pack_t * pack;
126 : fd_txn_e_t * cur_spot;
127 : int is_bundle; /* is the current transaction a bundle */
128 :
129 : uchar executed_txn_sig[ 64UL ];
130 :
131 : /* One of the FD_PACK_STRATEGY_* values defined above */
132 : int strategy;
133 :
134 : /* The value passed to fd_pack_new, etc. */
135 : ulong max_pending_transactions;
136 :
137 : /* The leader slot we are currently packing for, or ULONG_MAX if we
138 : are not the leader. */
139 : ulong leader_slot;
140 : void const * leader_bank;
141 : ulong leader_bank_idx;
142 :
143 : fd_became_leader_t _became_leader[1];
144 :
145 : /* The number of microblocks we have packed for the current leader
146 : slot. Will always be <= slot_max_microblocks. We must track
147 : this so that when we are done we can tell the PoH tile how many
148 : microblocks to expect in the slot. */
149 : ulong slot_microblock_cnt;
150 :
151 : /* Counter which increments when we've finished packing for a slot */
152 : uint pack_idx;
153 :
154 : ulong pack_txn_cnt; /* total num transactions packed since startup */
155 :
156 : /* The maximum number of microblocks that can be packed in this slot.
157 : Provided by the PoH tile when we become leader.*/
158 : ulong slot_max_microblocks;
159 :
160 : /* Cap (in bytes) of the amount of transaction data we produce in each
161 : block to avoid hitting the shred limits. See where this is set for
162 : more explanation. */
163 : ulong slot_max_data;
164 : int larger_shred_limits_per_block;
165 :
166 : /* Consensus critical slot cost limits. */
167 : struct {
168 : ulong slot_max_cost;
169 : ulong slot_max_vote_cost;
170 : ulong slot_max_write_cost_per_acct;
171 : } limits;
172 :
173 : /* If drain_banks is non-zero, then the pack tile must wait until all
174 : banks are idle before scheduling any more microblocks. This is
175 : primarily helpful in irregular leader transitions, e.g. while being
176 : leader for slot N, we switch forks to a slot M (!=N+1) in which we
177 : are also leader. We don't want to execute microblocks for
178 : different slots concurrently. */
179 : int drain_banks;
180 :
181 : /* Updated during housekeeping and used only for checking if the
182 : leader slot has ended. Might be off by one housekeeping duration,
183 : but that should be small relative to a slot duration. */
184 : long approx_wallclock_ns;
185 :
186 : /* approx_tickcount is updated in during_housekeeping() with
187 : fd_tickcount() and will match approx_wallclock_ns. This is done
188 : because we need to include an accurate nanosecond timestamp in
189 : every fd_txn_p_t but don't want to have to call the expensive
190 : fd_log_wallclock() in in the critical path. We can use
191 : fd_tempo_tick_per_ns() to convert from ticks to nanoseconds over
192 : small periods of time. */
193 : long approx_tickcount;
194 :
195 : fd_rng_t * rng;
196 :
197 : /* The end wallclock time of the leader slot we are currently packing
198 : for, if we are currently packing for a slot.*/
199 : long slot_end_ns;
200 :
201 : /* pacer and ticks_per_ns are used for pacing CUs through the slot,
202 : i.e. deciding when to schedule a microblock given the number of CUs
203 : that have been consumed so far. pacer is an opaque pacing object,
204 : which is initialized when the pack tile is packing a slot.
205 : ticks_per_ns is the cached value from tempo. */
206 : fd_pack_pacing_t pacer[1];
207 : double ticks_per_ns;
208 :
209 : /* last_successful_insert stores the tickcount of the last
210 : successful transaction insert. */
211 : long last_successful_insert;
212 :
213 : /* highest_observed_slot stores the highest slot number we've seen
214 : from any transaction coming from the resolv tile. When this
215 : increases, we expire old transactions. */
216 : ulong highest_observed_slot;
217 :
218 : /* microblock_duration_ns, and wait_duration
219 : respectively scaled to be in ticks instead of nanoseconds */
220 : ulong microblock_duration_ticks;
221 : ulong wait_duration_ticks[ MAX_TXN_PER_MICROBLOCK+1UL ];
222 :
223 : #if FD_PACK_USE_EXTRA_STORAGE
224 : /* In addition to the available transactions that pack knows about, we
225 : also store a larger ring buffer for handling cases when pack is
226 : full. This is an fd_deque. */
227 : fd_txn_e_t * extra_txn_deq;
228 : int insert_to_extra; /* whether the last insert was into pack or the extra deq */
229 : #endif
230 :
231 : fd_pack_in_ctx_t in[ 32 ];
232 : int in_kind[ 32 ];
233 :
234 : ulong bank_cnt;
235 : ulong bank_idle_bitset; /* bit i is 1 if we've observed *bank_current[i]==bank_expect[i] */
236 : int poll_cursor; /* in [0, bank_cnt), the next bank to poll */
237 : int use_consumed_cus;
238 : long skip_cnt;
239 : ulong * bank_current[ FD_PACK_MAX_BANK_TILES ];
240 : ulong bank_expect[ FD_PACK_MAX_BANK_TILES ];
241 : /* bank_ready_at[x] means don't check bank x until tickcount is at
242 : least bank_ready_at[x]. */
243 : long bank_ready_at[ FD_PACK_MAX_BANK_TILES ];
244 :
245 : fd_wksp_t * bank_out_mem;
246 : ulong bank_out_chunk0;
247 : ulong bank_out_wmark;
248 : ulong bank_out_chunk;
249 :
250 : fd_wksp_t * poh_out_mem;
251 : ulong poh_out_chunk0;
252 : ulong poh_out_wmark;
253 : ulong poh_out_chunk;
254 :
255 : ulong insert_result[ FD_PACK_INSERT_RETVAL_CNT ];
256 : fd_histf_t schedule_duration[ 1 ];
257 : fd_histf_t no_sched_duration[ 1 ];
258 : fd_histf_t insert_duration [ 1 ];
259 : fd_histf_t complete_duration[ 1 ];
260 :
261 : struct {
262 : uint metric_state;
263 : long metric_state_begin;
264 : long metric_timing[ 16 ];
265 : };
266 :
267 : struct {
268 : long time;
269 : ulong all[ FD_METRICS_TOTAL_SZ ];
270 : } last_sched_metrics[1];
271 :
272 : struct {
273 : ulong id;
274 : ulong txn_cnt;
275 : ulong txn_received;
276 : ulong min_blockhash_slot;
277 : fd_txn_e_t * _txn[ FD_PACK_MAX_TXN_PER_BUNDLE ];
278 : fd_txn_e_t * const * bundle; /* points to _txn when non-NULL */
279 : } current_bundle[1];
280 :
281 : block_builder_info_t blk_engine_cfg[1];
282 :
283 : struct {
284 : int enabled;
285 : int ib_inserted; /* in this slot */
286 : fd_acct_addr_t vote_pubkey[1];
287 : fd_acct_addr_t identity_pubkey[1];
288 : fd_bundle_crank_gen_t gen[1];
289 : fd_acct_addr_t tip_receiver_owner[1];
290 : ulong epoch;
291 : fd_bundle_crank_tip_payment_config_t prev_config[1]; /* as of start of slot, then updated */
292 : uchar recent_blockhash[32];
293 : fd_ed25519_sig_t last_sig[1];
294 :
295 : fd_keyswitch_t * keyswitch;
296 : fd_keyguard_client_t keyguard_client[1];
297 :
298 : ulong metrics[4];
299 : } crank[1];
300 :
301 :
302 : /* Used between during_frag and after_frag */
303 : ulong pending_rebate_sz;
304 : union{ fd_pack_rebate_t rebate[1]; uchar footprint[USHORT_MAX]; } rebate[1];
305 : } fd_pack_ctx_t;
306 :
307 60 : #define BUNDLE_META_SZ 40UL
308 : FD_STATIC_ASSERT( sizeof(block_builder_info_t)==BUNDLE_META_SZ, blk_engine_cfg );
309 :
310 4660 : #define FD_PACK_METRIC_STATE_TRANSACTIONS 0
311 1333 : #define FD_PACK_METRIC_STATE_BANKS 1
312 51 : #define FD_PACK_METRIC_STATE_LEADER 2
313 1333 : #define FD_PACK_METRIC_STATE_MICROBLOCKS 3
314 :
315 : /* Updates one component of the metric state. If the state has changed,
316 : records the change. */
317 : static inline void
318 : update_metric_state( fd_pack_ctx_t * ctx,
319 : long effective_as_of,
320 : int type,
321 7377 : int status ) {
322 7377 : uint current_state = fd_uint_insert_bit( ctx->metric_state, type, status );
323 7377 : if( FD_UNLIKELY( current_state!=ctx->metric_state ) ) {
324 188 : ctx->metric_timing[ ctx->metric_state ] += effective_as_of - ctx->metric_state_begin;
325 188 : ctx->metric_state_begin = effective_as_of;
326 188 : ctx->metric_state = current_state;
327 188 : }
328 7377 : }
329 :
330 : static inline void
331 12 : remove_ib( fd_pack_ctx_t * ctx ) {
332 : /* It's likely the initializer bundle is long scheduled, but we want to
333 : try deleting it just in case. */
334 12 : if( FD_UNLIKELY( ctx->crank->enabled & ctx->crank->ib_inserted ) ) {
335 0 : ulong deleted = fd_pack_delete_transaction( ctx->pack, (fd_ed25519_sig_t const *)ctx->crank->last_sig );
336 0 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
337 0 : }
338 12 : ctx->crank->ib_inserted = 0;
339 12 : }
340 :
341 :
342 : FD_FN_CONST static inline ulong
343 69 : scratch_align( void ) {
344 69 : return 4096UL;
345 69 : }
346 :
347 : FD_FN_PURE static inline ulong
348 33 : scratch_footprint( fd_topo_tile_t const * tile ) {
349 33 : fd_pack_limits_t limits[1] = {{
350 33 : .max_cost_per_block = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK_UPPER_BOUND,
351 33 : .max_vote_cost_per_block = FD_PACK_MAX_VOTE_COST_PER_BLOCK_UPPER_BOUND,
352 33 : .max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT_UPPER_BOUND,
353 33 : .max_data_bytes_per_block = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
354 33 : .max_txn_per_microblock = EFFECTIVE_TXN_PER_MICROBLOCK,
355 33 : .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
356 33 : }};
357 :
358 33 : ulong l = FD_LAYOUT_INIT;
359 33 : l = FD_LAYOUT_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
360 33 : l = FD_LAYOUT_APPEND( l, fd_rng_align(), fd_rng_footprint() );
361 33 : l = FD_LAYOUT_APPEND( l, fd_pack_align(), fd_pack_footprint( tile->pack.max_pending_transactions,
362 33 : BUNDLE_META_SZ,
363 33 : tile->pack.bank_tile_count,
364 33 : limits ) );
365 : #if FD_PACK_USE_EXTRA_STORAGE
366 : l = FD_LAYOUT_APPEND( l, extra_txn_deq_align(), extra_txn_deq_footprint() );
367 : #endif
368 33 : return FD_LAYOUT_FINI( l, scratch_align() );
369 33 : }
370 :
371 : static inline void
372 : log_end_block_metrics( fd_pack_ctx_t * ctx,
373 : long now,
374 12 : char const * reason ) {
375 12 : #define DELTA( m ) (fd_metrics_tl[ MIDX(COUNTER, PACK, TRANSACTION_SCHEDULE_##m) ] - ctx->last_sched_metrics->all[ MIDX(COUNTER, PACK, TRANSACTION_SCHEDULE_##m) ])
376 12 : #define AVAIL( m ) (fd_metrics_tl[ MIDX(GAUGE, PACK, AVAILABLE_TRANSACTIONS_##m) ])
377 12 : FD_LOG_INFO(( "pack_end_block(slot=%lu,%s,%lx,ticks_since_last_schedule=%ld,reasons=%lu,%lu,%lu,%lu,%lu,%lu,%lu;remaining=%lu+%lu+%lu+%lu;smallest=%lu;cus=%lu->%lu)",
378 12 : ctx->leader_slot, reason, ctx->bank_idle_bitset, now-ctx->last_sched_metrics->time,
379 12 : DELTA( TAKEN ), DELTA( CU_LIMIT ), DELTA( FAST_PATH ), DELTA( BYTE_LIMIT ), DELTA( WRITE_COST ), DELTA( SLOW_PATH ), DELTA( DEFER_SKIP ),
380 12 : AVAIL(REGULAR), AVAIL(VOTES), AVAIL(BUNDLES), AVAIL(CONFLICTING),
381 12 : (fd_metrics_tl[ MIDX(GAUGE, PACK, SMALLEST_PENDING_TRANSACTION) ]),
382 12 : (ctx->last_sched_metrics->all[ MIDX(GAUGE, PACK, CUS_CONSUMED_IN_BLOCK) ]),
383 12 : (fd_metrics_tl [ MIDX(GAUGE, PACK, CUS_CONSUMED_IN_BLOCK) ])
384 12 : ));
385 12 : #undef AVAIL
386 12 : #undef DELTA
387 12 : }
388 :
389 : static inline void
390 0 : metrics_write( fd_pack_ctx_t * ctx ) {
391 0 : FD_MCNT_ENUM_COPY( PACK, TRANSACTION_INSERTED, ctx->insert_result );
392 0 : FD_MCNT_ENUM_COPY( PACK, METRIC_TIMING, ((ulong*)ctx->metric_timing) );
393 0 : FD_MCNT_ENUM_COPY( PACK, BUNDLE_CRANK_STATUS, ctx->crank->metrics );
394 0 : FD_MHIST_COPY( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS, ctx->schedule_duration );
395 0 : FD_MHIST_COPY( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS, ctx->no_sched_duration );
396 0 : FD_MHIST_COPY( PACK, INSERT_TRANSACTION_DURATION_SECONDS, ctx->insert_duration );
397 0 : FD_MHIST_COPY( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS, ctx->complete_duration );
398 :
399 0 : fd_pack_metrics_write( ctx->pack );
400 0 : }
401 :
402 : static inline void
403 6360 : during_housekeeping( fd_pack_ctx_t * ctx ) {
404 6360 : ctx->approx_wallclock_ns = fd_log_wallclock();
405 6360 : ctx->approx_tickcount = fd_tickcount();
406 :
407 6360 : if( FD_UNLIKELY( ctx->crank->enabled && fd_keyswitch_state_query( ctx->crank->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
408 0 : fd_memcpy( ctx->crank->identity_pubkey, ctx->crank->keyswitch->bytes, 32UL );
409 0 : fd_keyswitch_state( ctx->crank->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
410 0 : }
411 6360 : }
412 :
413 : static inline void
414 : before_credit( fd_pack_ctx_t * ctx,
415 : fd_stem_context_t * stem,
416 7308 : int * charge_busy ) {
417 7308 : (void)stem;
418 :
419 7308 : if( FD_UNLIKELY( (ctx->cur_spot!=NULL) & !ctx->is_bundle ) ) {
420 6 : *charge_busy = 1;
421 :
422 : /* If we were overrun while processing a frag from an in, then
423 : cur_spot is left dangling and not cleaned up, so clean it up here
424 : (by returning the slot to the pool of free slots). If the last
425 : transaction was a bundle, then we don't want to return it. When
426 : we try to process the first transaction in the next bundle, we'll
427 : see we never got the full bundle and cancel the whole last
428 : bundle, returning all the storage to the pool. */
429 : #if FD_PACK_USE_EXTRA_STORAGE
430 : if( FD_LIKELY( !ctx->insert_to_extra ) ) fd_pack_insert_txn_cancel( ctx->pack, ctx->cur_spot );
431 : else extra_txn_deq_remove_tail( ctx->extra_txn_deq );
432 : #else
433 6 : fd_pack_insert_txn_cancel( ctx->pack, ctx->cur_spot );
434 6 : #endif
435 6 : ctx->cur_spot = NULL;
436 6 : }
437 7308 : }
438 :
439 : #if FD_PACK_USE_EXTRA_STORAGE
440 : /* insert_from_extra: helper method to pop the transaction at the head
441 : off the extra txn deque and insert it into pack. Requires that
442 : ctx->extra_txn_deq is non-empty, but it's okay to call it if pack is
443 : full. Returns the result of fd_pack_insert_txn_fini. */
444 : static inline int
445 : insert_from_extra( fd_pack_ctx_t * ctx ) {
446 : fd_txn_e_t * spot = fd_pack_insert_txn_init( ctx->pack );
447 : fd_txn_e_t const * insert = extra_txn_deq_peek_head( ctx->extra_txn_deq );
448 : fd_txn_t const * insert_txn = TXN(insert->txnp);
449 : fd_memcpy( spot->txnp->payload, insert->txnp->payload, insert->txnp->payload_sz );
450 : fd_memcpy( TXN(spot->txnp), insert_txn, fd_txn_footprint( insert_txn->instr_cnt, insert_txn->addr_table_lookup_cnt ) );
451 : fd_memcpy( spot->alt_accts, insert->alt_accts, insert_txn->addr_table_adtl_cnt*sizeof(fd_acct_addr_t) );
452 : spot->txnp->payload_sz = insert->txnp->payload_sz;
453 : spot->txnp->source_tpu = insert->txnp->source_tpu;
454 : spot->txnp->source_ipv4 = insert->txnp->source_ipv4;
455 : spot->txnp->scheduler_arrival_time_nanos = insert->txnp->scheduler_arrival_time_nanos;
456 : extra_txn_deq_remove_head( ctx->extra_txn_deq );
457 :
458 : ulong blockhash_slot = insert->txnp->blockhash_slot;
459 :
460 : ulong deleted;
461 : long insert_duration = -fd_tickcount();
462 : int result = fd_pack_insert_txn_fini( ctx->pack, spot, blockhash_slot, &deleted );
463 : insert_duration += fd_tickcount();
464 :
465 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
466 : ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ]++;
467 : fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
468 : FD_MCNT_INC( PACK, TRANSACTION_INSERTED_FROM_EXTRA, 1UL );
469 : return result;
470 : }
471 : #endif
472 :
473 : static inline void
474 : after_credit( fd_pack_ctx_t * ctx,
475 : fd_stem_context_t * stem,
476 : int * opt_poll_in,
477 7308 : int * charge_busy ) {
478 7308 : (void)opt_poll_in;
479 :
480 7308 : if( FD_UNLIKELY( (ctx->skip_cnt--)>0L ) ) return; /* It would take ages for this to hit LONG_MIN */
481 :
482 4675 : long now = fd_tickcount();
483 :
484 4675 : int pacing_bank_cnt = (int)fd_pack_pacing_enabled_bank_cnt( ctx->pacer, now );
485 :
486 4675 : ulong bank_cnt = ctx->bank_cnt;
487 :
488 :
489 : /* If any banks are busy, check one of the busy ones see if it is
490 : still busy. */
491 4675 : if( FD_LIKELY( ctx->bank_idle_bitset!=fd_ulong_mask_lsb( (int)bank_cnt ) ) ) {
492 1306 : int poll_cursor = ctx->poll_cursor;
493 1306 : ulong busy_bitset = (~ctx->bank_idle_bitset) & fd_ulong_mask_lsb( (int)bank_cnt );
494 :
495 : /* Suppose bank_cnt is 4 and idle_bitset looks something like this
496 : (pretending it's a uchar):
497 : 0000 1001
498 : ^ busy cursor is 1
499 : Then busy_bitset is
500 : 0000 0110
501 : Rotate it right by 2 bits
502 : 1000 0001
503 : Find lsb returns 0, so busy cursor remains 2, and we poll bank 2.
504 :
505 : If instead idle_bitset were
506 : 0000 1110
507 : ^
508 : The rotated version would be
509 : 0100 0000
510 : Find lsb will return 6, so busy cursor would be set to 0, and
511 : we'd poll bank 0, which is the right one. */
512 1306 : poll_cursor++;
513 1306 : poll_cursor = (poll_cursor + fd_ulong_find_lsb( fd_ulong_rotate_right( busy_bitset, (poll_cursor&63) ) )) & 63;
514 :
515 1306 : if( FD_UNLIKELY(
516 : /* if microblock duration is 0, bypass the bank_ready_at check
517 : to avoid a potential cache miss. Can't use an ifdef here
518 : because FD_UNLIKELY is a macro, but the compiler should
519 : eliminate the check easily. */
520 1306 : ( (MICROBLOCK_DURATION_NS==0L) || (ctx->bank_ready_at[poll_cursor]<now) ) &&
521 1306 : (fd_fseq_query( ctx->bank_current[poll_cursor] )==ctx->bank_expect[poll_cursor]) ) ) {
522 1306 : *charge_busy = 1;
523 1306 : ctx->bank_idle_bitset |= 1UL<<poll_cursor;
524 :
525 1306 : long complete_duration = -fd_tickcount();
526 1306 : int completed = fd_pack_microblock_complete( ctx->pack, (ulong)poll_cursor );
527 1306 : complete_duration += fd_tickcount();
528 1306 : if( FD_LIKELY( completed ) ) fd_histf_sample( ctx->complete_duration, (ulong)complete_duration );
529 1306 : }
530 :
531 1306 : ctx->poll_cursor = poll_cursor;
532 1306 : }
533 :
534 :
535 : /* If we time out on our slot, then stop being leader. This can only
536 : happen in the first after_credit after a housekeeping. */
537 4675 : if( FD_UNLIKELY( ctx->approx_wallclock_ns>=ctx->slot_end_ns && ctx->leader_slot!=ULONG_MAX ) ) {
538 12 : *charge_busy = 1;
539 :
540 12 : fd_done_packing_t * done_packing = fd_chunk_to_laddr( ctx->poh_out_mem, ctx->poh_out_chunk );
541 12 : done_packing->microblocks_in_slot = ctx->slot_microblock_cnt;
542 :
543 12 : fd_stem_publish( stem, 1UL, fd_disco_bank_sig( ctx->leader_slot, ctx->pack_idx ), ctx->poh_out_chunk, sizeof(fd_done_packing_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
544 12 : ctx->poh_out_chunk = fd_dcache_compact_next( ctx->poh_out_chunk, sizeof(fd_done_packing_t), ctx->poh_out_chunk0, ctx->poh_out_wmark );
545 12 : ctx->pack_idx++;
546 :
547 12 : log_end_block_metrics( ctx, now, "time" );
548 12 : ctx->drain_banks = 1;
549 12 : ctx->leader_slot = ULONG_MAX;
550 12 : ctx->slot_microblock_cnt = 0UL;
551 12 : fd_pack_end_block( ctx->pack );
552 12 : remove_ib( ctx );
553 :
554 12 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_LEADER, 0 );
555 12 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_BANKS, 0 );
556 12 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, 0 );
557 12 : return;
558 12 : }
559 :
560 : /* Am I leader? If not, see about inserting at most one transaction
561 : from extra storage. It's important not to insert too many
562 : transactions here, or we won't end up servicing dedup_pack enough.
563 : If extra storage is empty or pack is full, do nothing. */
564 4663 : if( FD_UNLIKELY( ctx->leader_slot==ULONG_MAX ) ) {
565 : #if FD_PACK_USE_EXTRA_STORAGE
566 : if( FD_UNLIKELY( !extra_txn_deq_empty( ctx->extra_txn_deq ) &&
567 : fd_pack_avail_txn_cnt( ctx->pack )<ctx->max_pending_transactions ) ) {
568 : *charge_busy = 1;
569 :
570 : int result = insert_from_extra( ctx );
571 : if( FD_LIKELY( result>=0 ) ) ctx->last_successful_insert = now;
572 : }
573 : #endif
574 3282 : return;
575 3282 : }
576 :
577 : /* Am I in drain mode? If so, check if I can exit it */
578 1381 : if( FD_UNLIKELY( ctx->drain_banks ) ) {
579 12 : if( FD_LIKELY( ctx->bank_idle_bitset==fd_ulong_mask_lsb( (int)bank_cnt ) ) ) {
580 12 : ctx->drain_banks = 0;
581 :
582 : /* Pack notifies poh when banks are drained so that poh can
583 : relinquish pack's ownership over the slot bank (by decrementing
584 : its Arc). We do this by sending a ULONG_MAX sig over the
585 : pack_poh mcache.
586 :
587 : TODO: This is only needed for Frankendancer, not Firedancer,
588 :          which manages bank lifetime differently. */
589 12 : fd_stem_publish( stem, 1UL, ULONG_MAX, 0UL, 0UL, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
590 12 : } else {
591 0 : return;
592 0 : }
593 12 : }
594 :
595 : /* Have I sent the max allowed microblocks? Nothing to do. */
596 1381 : if( FD_UNLIKELY( ctx->slot_microblock_cnt>=ctx->slot_max_microblocks ) ) return;
597 :
598 : /* Do I have enough transactions and/or have I waited enough time? */
599 1381 : if( FD_UNLIKELY( (ulong)(now-ctx->last_successful_insert) <
600 1381 : ctx->wait_duration_ticks[ fd_ulong_min( fd_pack_avail_txn_cnt( ctx->pack ), MAX_TXN_PER_MICROBLOCK ) ] ) ) {
601 60 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, 0 );
602 60 : return;
603 60 : }
604 :
605 1321 : int any_ready = 0;
606 1321 : int any_scheduled = 0;
607 :
608 1321 : *charge_busy = 1;
609 :
610 1321 : if( FD_LIKELY( ctx->crank->enabled ) ) {
611 1321 : block_builder_info_t const * top_meta = fd_pack_peek_bundle_meta( ctx->pack );
612 1321 : if( FD_UNLIKELY( top_meta ) ) {
613 : /* Have bundles, in a reasonable state to crank. */
614 :
615 15 : fd_txn_e_t * _bundle[ 1UL ];
616 15 : fd_txn_e_t * const * bundle = fd_pack_insert_bundle_init( ctx->pack, _bundle, 1UL );
617 :
618 15 : ulong txn_sz = fd_bundle_crank_generate( ctx->crank->gen, ctx->crank->prev_config, top_meta->commission_pubkey,
619 15 : ctx->crank->identity_pubkey, ctx->crank->tip_receiver_owner, ctx->crank->epoch, top_meta->commission,
620 15 : bundle[0]->txnp->payload, TXN( bundle[0]->txnp ) );
621 :
622 15 : if( FD_LIKELY( txn_sz==0UL ) ) { /* Everything in good shape! */
623 6 : fd_pack_insert_bundle_cancel( ctx->pack, bundle, 1UL );
624 6 : fd_pack_set_initializer_bundles_ready( ctx->pack );
625 6 : ctx->crank->metrics[ 0 ]++; /* BUNDLE_CRANK_STATUS_NOT_NEEDED */
626 6 : }
627 9 : else if( FD_LIKELY( txn_sz<ULONG_MAX ) ) {
628 9 : bundle[0]->txnp->payload_sz = (ushort)txn_sz;
629 9 : bundle[0]->txnp->source_tpu = FD_TXN_M_TPU_SOURCE_BUNDLE;
630 9 : bundle[0]->txnp->source_ipv4 = 0; /* not applicable */
631 9 : bundle[0]->txnp->scheduler_arrival_time_nanos = ctx->approx_wallclock_ns + (long)((double)(fd_tickcount() - ctx->approx_tickcount) / ctx->ticks_per_ns);
632 9 : memcpy( bundle[0]->txnp->payload+TXN(bundle[0]->txnp)->recent_blockhash_off, ctx->crank->recent_blockhash, 32UL );
633 :
634 9 : fd_keyguard_client_sign( ctx->crank->keyguard_client, bundle[0]->txnp->payload+1UL,
635 9 : bundle[0]->txnp->payload+65UL, txn_sz-65UL, FD_KEYGUARD_SIGN_TYPE_ED25519 );
636 :
637 9 : memcpy( ctx->crank->last_sig, bundle[0]->txnp->payload+1UL, 64UL );
638 :
639 9 : ctx->crank->ib_inserted = 1;
640 9 : ulong deleted;
641 9 : int retval = fd_pack_insert_bundle_fini( ctx->pack, bundle, 1UL, ctx->leader_slot-1UL, 1, NULL, &deleted );
642 9 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
643 9 : ctx->insert_result[ retval + FD_PACK_INSERT_RETVAL_OFF ]++;
644 9 : if( FD_UNLIKELY( retval<0 ) ) {
645 0 : ctx->crank->metrics[ 3 ]++; /* BUNDLE_CRANK_STATUS_INSERTION_FAILED */
646 0 : FD_LOG_WARNING(( "inserting initializer bundle returned %i", retval ));
647 9 : } else {
648 : /* Update the cached copy of the on-chain state. This seems a
649 : little dangerous, since we're updating it as if the bundle
650 : succeeded without knowing if that's true, but here's why
651 : it's safe:
652 :
653 : From now until we get the rebate call for this initializer
654 : bundle (which lets us know if it succeeded or failed), pack
655 : will be in [Pending] state, which means peek_bundle_meta
656 : will return NULL, so we won't read this state.
657 :
658 : Then, if the initializer bundle failed, we'll go into
659 : [Failed] IB state until the end of the block, which will
660 : cause top_meta to remain NULL so we don't read these values
661 : again.
662 :
663 : Otherwise, the initializer bundle succeeded, which means
664 : that these are the right values to use. */
665 9 : fd_bundle_crank_apply( ctx->crank->gen, ctx->crank->prev_config, top_meta->commission_pubkey,
666 9 : ctx->crank->tip_receiver_owner, ctx->crank->epoch, top_meta->commission );
667 9 : ctx->crank->metrics[ 1 ]++; /* BUNDLE_CRANK_STATUS_INSERTED */
668 9 : }
669 9 : } else {
670 : /* Already logged a warning in this case */
671 0 : fd_pack_insert_bundle_cancel( ctx->pack, bundle, 1UL );
672 0 : ctx->crank->metrics[ 2 ]++; /* BUNDLE_CRANK_STATUS_CREATION_FAILED' */
673 0 : }
674 15 : }
675 1321 : }
676 :
677 : /* Try to schedule the next microblock. */
678 1321 : if( FD_LIKELY( ctx->bank_idle_bitset ) ) { /* Optimize for schedule */
679 1321 : any_ready = 1;
680 :
681 1321 : int i = fd_ulong_find_lsb( ctx->bank_idle_bitset );
682 :
683 1321 : int flags;
684 :
685 1321 : switch( ctx->strategy ) {
686 0 : default:
687 1321 : case FD_PACK_STRATEGY_PERF:
688 1321 : flags = FD_PACK_SCHEDULE_VOTE | FD_PACK_SCHEDULE_BUNDLE | FD_PACK_SCHEDULE_TXN;
689 1321 : break;
690 0 : case FD_PACK_STRATEGY_BALANCED:
691 : /* We want to exempt votes from pacing, so we always allow
692 : scheduling votes. It doesn't really make much sense to pace
693 : bundles, because they get scheduled in FIFO order. However,
694 : we keep pacing for normal transactions. For example, if
695 : pacing_bank_cnt is 0, then pack won't schedule normal
696 : transactions to any bank tile. */
697 0 : flags = FD_PACK_SCHEDULE_VOTE | fd_int_if( i==0, FD_PACK_SCHEDULE_BUNDLE, 0 )
698 0 : | fd_int_if( i<pacing_bank_cnt, FD_PACK_SCHEDULE_TXN, 0 );
699 0 : break;
700 0 : case FD_PACK_STRATEGY_BUNDLE:
701 0 : flags = FD_PACK_SCHEDULE_VOTE | FD_PACK_SCHEDULE_BUNDLE
702 0 : | fd_int_if( ctx->slot_end_ns - ctx->approx_wallclock_ns<50000000L, FD_PACK_SCHEDULE_TXN, 0 );
703 0 : break;
704 1321 : }
705 :
706 1321 : fd_txn_p_t * microblock_dst = fd_chunk_to_laddr( ctx->bank_out_mem, ctx->bank_out_chunk );
707 1321 : long schedule_duration = -fd_tickcount();
708 1321 : ulong schedule_cnt = fd_pack_schedule_next_microblock( ctx->pack, CUS_PER_MICROBLOCK, VOTE_FRACTION, (ulong)i, flags, microblock_dst );
709 1321 : schedule_duration += fd_tickcount();
710 1321 : fd_histf_sample( (schedule_cnt>0UL) ? ctx->schedule_duration : ctx->no_sched_duration, (ulong)schedule_duration );
711 :
712 1321 : if( FD_LIKELY( schedule_cnt ) ) {
713 1321 : any_scheduled = 1;
714 1321 : long now2 = fd_tickcount();
715 1321 : ulong tsorig = (ulong)fd_frag_meta_ts_comp( now ); /* A bound on when we observed bank was idle */
716 1321 : ulong tspub = (ulong)fd_frag_meta_ts_comp( now2 );
717 1321 : ulong chunk = ctx->bank_out_chunk;
718 1321 : ulong msg_sz = schedule_cnt*sizeof(fd_txn_p_t);
719 1321 : fd_microblock_bank_trailer_t * trailer = (fd_microblock_bank_trailer_t*)(microblock_dst+schedule_cnt);
720 1321 : trailer->bank = ctx->leader_bank;
721 1321 : trailer->bank_idx = ctx->leader_bank_idx;
722 1321 : trailer->microblock_idx = ctx->slot_microblock_cnt;
723 1321 : trailer->pack_idx = ctx->pack_idx;
724 1321 : trailer->pack_txn_idx = ctx->pack_txn_cnt;
725 1321 : trailer->is_bundle = !!(microblock_dst->flags & FD_TXN_P_FLAGS_BUNDLE);
726 :
727 1321 : ulong sig = fd_disco_poh_sig( ctx->leader_slot, POH_PKT_TYPE_MICROBLOCK, (ulong)i );
728 1321 : fd_stem_publish( stem, 0UL, sig, chunk, msg_sz+sizeof(fd_microblock_bank_trailer_t), 0UL, tsorig, tspub );
729 1321 : ctx->bank_expect[ i ] = stem->seqs[0]-1UL;
730 1321 : ctx->bank_ready_at[i] = now2 + (long)ctx->microblock_duration_ticks;
731 1321 : ctx->bank_out_chunk = fd_dcache_compact_next( ctx->bank_out_chunk, msg_sz+sizeof(fd_microblock_bank_trailer_t), ctx->bank_out_chunk0, ctx->bank_out_wmark );
732 1321 : ctx->slot_microblock_cnt += fd_ulong_if( trailer->is_bundle, schedule_cnt, 1UL );
733 1321 : ctx->pack_idx += fd_uint_if( trailer->is_bundle, (uint)schedule_cnt, 1U );
734 1321 : ctx->pack_txn_cnt += schedule_cnt;
735 :
736 1321 : ctx->bank_idle_bitset = fd_ulong_pop_lsb( ctx->bank_idle_bitset );
737 1321 : ctx->skip_cnt = (long)schedule_cnt * fd_long_if( ctx->use_consumed_cus, (long)bank_cnt/2L, 1L );
738 1321 : fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now2 );
739 :
740 1321 : memcpy( ctx->last_sched_metrics->all, (ulong const *)fd_metrics_tl, sizeof(ctx->last_sched_metrics->all) );
741 1321 : ctx->last_sched_metrics->time = now2;
742 :
743 : /* If we're using CU rebates, then we have one in for each bank in
744 : addition to the two normal ones. We want to skip schedule attempts
745 : for (bank_cnt + 1) link polls after a successful schedule attempt.
746 : */
747 1321 : fd_long_store_if( ctx->use_consumed_cus, &(ctx->skip_cnt), (long)(ctx->bank_cnt + 1) );
748 1321 : }
749 1321 : }
750 :
751 1321 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_BANKS, any_ready );
752 1321 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, any_scheduled );
753 1321 : now = fd_tickcount();
754 1321 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, fd_pack_avail_txn_cnt( ctx->pack )>0 );
755 :
756 : #if FD_PACK_USE_EXTRA_STORAGE
757 : if( FD_UNLIKELY( !extra_txn_deq_empty( ctx->extra_txn_deq ) ) ) {
758 : /* Don't start pulling from the extra storage until the available
759 : transaction count drops below half. */
760 : ulong avail_space = (ulong)fd_long_max( 0L, (long)(ctx->max_pending_transactions>>1)-(long)fd_pack_avail_txn_cnt( ctx->pack ) );
761 : ulong qty_to_insert = fd_ulong_min( 10UL, fd_ulong_min( extra_txn_deq_cnt( ctx->extra_txn_deq ), avail_space ) );
762 : int any_successes = 0;
763 : for( ulong i=0UL; i<qty_to_insert; i++ ) any_successes |= (0<=insert_from_extra( ctx ));
764 : if( FD_LIKELY( any_successes ) ) ctx->last_successful_insert = now;
765 : }
766 : #endif
767 :
768 : /* Did we send the maximum allowed microblocks? Then end the slot. */
769 1321 : if( FD_UNLIKELY( ctx->slot_microblock_cnt==ctx->slot_max_microblocks )) {
770 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_LEADER, 0 );
771 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_BANKS, 0 );
772 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, 0 );
773 : /* The pack object also does this accounting and increases this
774 : metric, but we end the slot early so won't see it unless we also
775 : increment it here. */
776 0 : FD_MCNT_INC( PACK, MICROBLOCK_PER_BLOCK_LIMIT, 1UL );
777 0 : log_end_block_metrics( ctx, now, "microblock" );
778 :
779 0 : fd_done_packing_t * done_packing = fd_chunk_to_laddr( ctx->poh_out_mem, ctx->poh_out_chunk );
780 0 : done_packing->microblocks_in_slot = ctx->slot_microblock_cnt;
781 :
782 0 : fd_stem_publish( stem, 1UL, fd_disco_bank_sig( ctx->leader_slot, ctx->pack_idx ), ctx->poh_out_chunk, sizeof(fd_done_packing_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
783 0 : ctx->poh_out_chunk = fd_dcache_compact_next( ctx->poh_out_chunk, sizeof(fd_done_packing_t), ctx->poh_out_chunk0, ctx->poh_out_wmark );
784 0 : ctx->pack_idx++;
785 :
786 0 : ctx->drain_banks = 1;
787 0 : ctx->leader_slot = ULONG_MAX;
788 0 : ctx->slot_microblock_cnt = 0UL;
789 0 : fd_pack_end_block( ctx->pack );
790 0 : remove_ib( ctx );
791 :
792 0 : }
793 1321 : }
794 :
795 :
796 : /* At this point, we have started receiving frag seq with details in
797 : mline at time now. Speculatively process it here. */
798 :
/* during_frag: speculatively stage an in-flight frag's payload into
   tile-local storage.  The producer may overrun us while we read, so
   this function only copies data out of the dcache (into
   ctx->_became_leader, ctx->rebate, ctx->cur_spot, or
   ctx->executed_txn_sig); the irreversible state transitions happen in
   after_frag once we know the read was clean.  in_idx selects the
   input link (and hence in_kind); sig/chunk/sz are frag metadata. */
static inline void
during_frag( fd_pack_ctx_t * ctx,
             ulong           in_idx,
             ulong           seq FD_PARAM_UNUSED,
             ulong           sig,
             ulong           chunk,
             ulong           sz,
             ulong           ctl FD_PARAM_UNUSED ) {

  uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );

  switch( ctx->in_kind[ in_idx ] ) {
  case IN_KIND_REPLAY:
  case IN_KIND_POH: {
    /* Not interested in stamped microblocks, only leader updates. */
    if( fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_BECAME_LEADER ) return;

    /* There was a leader transition.  Handle it. */
    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz!=sizeof(fd_became_leader_t) ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

    /* Stash the leader details; after_frag applies them if the read
       was not overrun. */
    fd_memcpy( ctx->_became_leader, dcache_entry, sizeof(fd_became_leader_t) );
    return;
  }
  case IN_KIND_BANK: {
    /* Rebate messages only flow when CU-rebate accounting is on. */
    FD_TEST( ctx->use_consumed_cus );
    /* For a previous slot */
    if( FD_UNLIKELY( sig!=ctx->leader_slot ) ) return;

    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz<FD_PACK_REBATE_MIN_SZ
          || sz>FD_PACK_REBATE_MAX_SZ ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

    /* Buffer the rebate; after_frag feeds it to fd_pack_rebate_cus. */
    ctx->pending_rebate_sz = sz;
    fd_memcpy( ctx->rebate, dcache_entry, sz );
    return;
  }
  case IN_KIND_RESOLV: {
    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>FD_TPU_RESOLVED_MTU ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

    /* Capture the variable-length header fields before validating so a
       concurrent overwrite can't bypass the bounds checks below. */
    fd_txn_m_t * txnm = (fd_txn_m_t *)dcache_entry;
    ulong payload_sz = txnm->payload_sz;
    ulong txn_t_sz   = txnm->txn_t_sz;
    uint  source_ipv4 = txnm->source_ipv4;
    uchar source_tpu  = txnm->source_tpu;
    FD_TEST( payload_sz<=FD_TPU_MTU );
    FD_TEST( txn_t_sz  <=FD_TXN_MAX_SZ );
    fd_txn_t * txn = fd_txn_m_txn_t( txnm );

    ulong addr_table_sz = 32UL*txn->addr_table_adtl_cnt;
    FD_TEST( addr_table_sz<=32UL*FD_TXN_ACCT_ADDR_MAX );

    if( FD_UNLIKELY( (ctx->leader_slot==ULONG_MAX) & (sig>ctx->highest_observed_slot) ) ) {
      /* Using the resolv tile's knowledge of the current slot is a bit
         of a hack, since we don't get any info if there are no
         transactions and we're not leader.  We're actually in exactly
         the case where that's okay though.  The point of calling
         expire_before long before we become leader is so that we don't
         drop new but low-fee-paying transactions when pack is clogged
         with expired but high-fee-paying transactions.  That can only
         happen if we are getting transactions. */
      ctx->highest_observed_slot = sig;
      ulong exp_cnt = fd_pack_expire_before( ctx->pack, fd_ulong_max( ctx->highest_observed_slot, TRANSACTION_LIFETIME_SLOTS )-TRANSACTION_LIFETIME_SLOTS );
      FD_MCNT_INC( PACK, TRANSACTION_EXPIRED, exp_cnt );
    }


    ulong bundle_id = txnm->block_engine.bundle_id;
    if( FD_UNLIKELY( bundle_id ) ) {
      /* Transaction belongs to a block-engine bundle.  A new bundle id
         means any half-received prior bundle must be abandoned. */
      ctx->is_bundle = 1;
      if( FD_LIKELY( bundle_id!=ctx->current_bundle->id ) ) {
        if( FD_UNLIKELY( ctx->current_bundle->bundle ) ) {
          FD_MCNT_INC( PACK, TRANSACTION_DROPPED_PARTIAL_BUNDLE, ctx->current_bundle->txn_received );
          fd_pack_insert_bundle_cancel( ctx->pack, ctx->current_bundle->bundle, ctx->current_bundle->txn_cnt );
        }
        ctx->current_bundle->id                 = bundle_id;
        ctx->current_bundle->txn_cnt            = txnm->block_engine.bundle_txn_cnt;
        ctx->current_bundle->min_blockhash_slot = ULONG_MAX;
        ctx->current_bundle->txn_received       = 0UL;

        if( FD_UNLIKELY( ctx->current_bundle->txn_cnt==0UL ) ) {
          /* Degenerate empty bundle: drop it and reset the id so the
             next frag with this id is treated as a fresh bundle. */
          FD_MCNT_INC( PACK, TRANSACTION_DROPPED_PARTIAL_BUNDLE, 1UL );
          ctx->current_bundle->id = 0UL;
          return;
        }
        ctx->blk_engine_cfg->commission = txnm->block_engine.commission;
        memcpy( ctx->blk_engine_cfg->commission_pubkey->b, txnm->block_engine.commission_pubkey, 32UL );

        ctx->current_bundle->bundle = fd_pack_insert_bundle_init( ctx->pack, ctx->current_bundle->_txn, ctx->current_bundle->txn_cnt );
      }
      /* Write this constituent txn into the next bundle slot; sig here
         carries the blockhash slot, tracked as the bundle minimum. */
      ctx->cur_spot = ctx->current_bundle->bundle[ ctx->current_bundle->txn_received ];
      ctx->current_bundle->min_blockhash_slot = fd_ulong_min( ctx->current_bundle->min_blockhash_slot, sig );
    } else {
      ctx->is_bundle = 0;
#if FD_PACK_USE_EXTRA_STORAGE
      if( FD_LIKELY( ctx->leader_slot!=ULONG_MAX || fd_pack_avail_txn_cnt( ctx->pack )<ctx->max_pending_transactions ) ) {
        ctx->cur_spot = fd_pack_insert_txn_init( ctx->pack );
        ctx->insert_to_extra = 0;
      } else {
        if( FD_UNLIKELY( extra_txn_deq_full( ctx->extra_txn_deq ) ) ) {
          extra_txn_deq_remove_head( ctx->extra_txn_deq );
          FD_MCNT_INC( PACK, TRANSACTION_DROPPED_FROM_EXTRA, 1UL );
        }
        ctx->cur_spot = extra_txn_deq_peek_tail( extra_txn_deq_insert_tail( ctx->extra_txn_deq ) );
        /* We want to store the current time in cur_spot so that we can
           track its expiration better.  We just stash it in the CU
           fields, since those aren't important right now. */
        ctx->cur_spot->txnp->blockhash_slot = sig;
        ctx->insert_to_extra = 1;
        FD_MCNT_INC( PACK, TRANSACTION_INSERTED_TO_EXTRA, 1UL );
      }
#else
      ctx->cur_spot = fd_pack_insert_txn_init( ctx->pack );
#endif
    }

    /* We get transactions from the resolv tile.
       The transactions should have been parsed and verified.
       (Note: this counter is also incremented for bundle constituents,
       since both paths reach here.) */
    FD_MCNT_INC( PACK, NORMAL_TRANSACTION_RECEIVED, 1UL );

    /* Copy payload, parsed txn, and address-lookup-table accounts into
       the staged slot; after_frag finalizes the insert. */
    fd_memcpy( ctx->cur_spot->txnp->payload, fd_txn_m_payload( txnm ), payload_sz );
    fd_memcpy( TXN(ctx->cur_spot->txnp), txn, txn_t_sz );
    fd_memcpy( ctx->cur_spot->alt_accts, fd_txn_m_alut( txnm ), addr_table_sz );
    /* Wallclock estimate derived from the tick counter to avoid a
       syscall per transaction. */
    ctx->cur_spot->txnp->scheduler_arrival_time_nanos = ctx->approx_wallclock_ns + (long)((double)(fd_tickcount() - ctx->approx_tickcount) / ctx->ticks_per_ns);
    ctx->cur_spot->txnp->payload_sz  = payload_sz;
    ctx->cur_spot->txnp->source_ipv4 = source_ipv4;
    ctx->cur_spot->txnp->source_tpu  = source_tpu;

    break;
  }
  case IN_KIND_EXECUTED_TXN: {
    /* 64-byte transaction signature used to delete the txn from pack. */
    FD_TEST( sz==64UL );
    fd_memcpy( ctx->executed_txn_sig, dcache_entry, sz );
    break;
  }
  }
}
937 :
938 :
939 : /* After the transaction has been fully received, and we know we were
940 : not overrun while reading it, insert it into pack. */
941 :
/* after_frag: commit the work staged by during_frag, now that we know
   the frag was fully received without being overrun.  Depending on the
   input kind this either (a) performs the leader transition (ending any
   slot we were still packing), (b) applies a CU rebate, (c) finalizes a
   transaction/bundle insert into pack, or (d) deletes an
   already-executed transaction. */
static inline void
after_frag( fd_pack_ctx_t *     ctx,
            ulong               in_idx,
            ulong               seq,
            ulong               sig,
            ulong               sz,
            ulong               tsorig,
            ulong               tspub,
            fd_stem_context_t * stem ) {
  (void)seq;
  (void)sz;
  (void)tsorig;
  (void)tspub;
  (void)stem;

  long now = fd_tickcount();

  switch( ctx->in_kind[ in_idx ] ) {
  case IN_KIND_REPLAY:
  case IN_KIND_POH: {
    if( fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_BECAME_LEADER ) return;

    long now_ticks = fd_tickcount();
    long now_ns    = fd_log_wallclock();

    if( FD_UNLIKELY( ctx->leader_slot!=ULONG_MAX ) ) {
      /* We're switching slots while still packing the previous one:
         publish done_packing for the old slot, then drain and reset. */
      fd_done_packing_t * done_packing = fd_chunk_to_laddr( ctx->poh_out_mem, ctx->poh_out_chunk );
      done_packing->microblocks_in_slot = ctx->slot_microblock_cnt;

      fd_stem_publish( stem, 1UL, fd_disco_bank_sig( ctx->leader_slot, ctx->pack_idx ), ctx->poh_out_chunk, sizeof(fd_done_packing_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
      ctx->poh_out_chunk = fd_dcache_compact_next( ctx->poh_out_chunk, sizeof(fd_done_packing_t), ctx->poh_out_chunk0, ctx->poh_out_wmark );
      ctx->pack_idx++;

      FD_LOG_WARNING(( "switching to slot %lu while packing for slot %lu. Draining bank tiles.", fd_disco_poh_sig_slot( sig ), ctx->leader_slot ));
      log_end_block_metrics( ctx, now_ticks, "switch" );
      ctx->drain_banks         = 1;
      ctx->leader_slot         = ULONG_MAX;
      ctx->slot_microblock_cnt = 0UL;
      fd_pack_end_block( ctx->pack );
      remove_ib( ctx );
    }
    ctx->leader_slot = fd_disco_poh_sig_slot( sig );

    /* Expire anything whose blockhash is too old to land in this slot. */
    ulong exp_cnt = fd_pack_expire_before( ctx->pack, fd_ulong_max( ctx->leader_slot, TRANSACTION_LIFETIME_SLOTS )-TRANSACTION_LIFETIME_SLOTS );
    FD_MCNT_INC( PACK, TRANSACTION_EXPIRED, exp_cnt );

    /* Apply the leader details staged by during_frag. */
    ctx->leader_bank          = ctx->_became_leader->bank;
    ctx->leader_bank_idx      = ctx->_became_leader->bank_idx;
    ctx->slot_max_microblocks = ctx->_became_leader->max_microblocks_in_slot;
    /* Reserve some space in the block for ticks */
    ctx->slot_max_data        = (ctx->larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK)
                                  - 48UL*(ctx->_became_leader->ticks_per_slot+ctx->_became_leader->total_skipped_ticks);

    ctx->limits.slot_max_cost                = ctx->_became_leader->limits.slot_max_cost;
    ctx->limits.slot_max_vote_cost           = ctx->_became_leader->limits.slot_max_vote_cost;
    ctx->limits.slot_max_write_cost_per_acct = ctx->_became_leader->limits.slot_max_write_cost_per_acct;

    /* ticks_per_ns is probably relatively stable over 400ms, but not
       over several hours, so we need to compute the slot duration in
       milliseconds first and then convert to ticks.  This doesn't need
       to be super accurate, but we don't want it to vary wildly. */
    long end_ticks = now_ticks + (long)((double)fd_long_max( ctx->_became_leader->slot_end_ns - now_ns, 1L )*ctx->ticks_per_ns);
    /* We may still get overrun, but then we'll never use this and just
       reinitialize it the next time when we actually become leader. */
    fd_pack_pacing_init( ctx->pacer, now_ticks, end_ticks, (float)ctx->ticks_per_ns, ctx->limits.slot_max_cost );

    if( FD_UNLIKELY( ctx->crank->enabled ) ) {
      /* If we get overrun, we'll just never use these values, but the
         old values aren't really useful either. */
      ctx->crank->epoch = ctx->_became_leader->epoch;
      *(ctx->crank->prev_config) = *(ctx->_became_leader->bundle->config);
      memcpy( ctx->crank->recent_blockhash,   ctx->_became_leader->bundle->last_blockhash,     32UL );
      memcpy( ctx->crank->tip_receiver_owner, ctx->_became_leader->bundle->tip_receiver_owner, 32UL );
    }

    FD_LOG_INFO(( "pack_became_leader(slot=%lu,ends_at=%ld)", ctx->leader_slot, ctx->_became_leader->slot_end_ns ));

    update_metric_state( ctx, fd_tickcount(), FD_PACK_METRIC_STATE_LEADER, 1 );

    /* Push the per-slot limits into the pack object and prime pacing. */
    ctx->slot_end_ns = ctx->_became_leader->slot_end_ns;
    fd_pack_limits_t limits[ 1 ];
    limits->max_cost_per_block        = ctx->limits.slot_max_cost;
    limits->max_data_bytes_per_block  = ctx->slot_max_data;
    limits->max_microblocks_per_block = ctx->slot_max_microblocks;
    limits->max_vote_cost_per_block   = ctx->limits.slot_max_vote_cost;
    limits->max_write_cost_per_acct   = ctx->limits.slot_max_write_cost_per_acct;
    limits->max_txn_per_microblock    = ULONG_MAX; /* unused */
    fd_pack_set_block_limits( ctx->pack, limits );
    fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now );

    break;
  }
  case IN_KIND_BANK: {
    /* For a previous slot */
    if( FD_UNLIKELY( sig!=ctx->leader_slot ) ) return;

    /* Apply the CU rebate buffered by during_frag. */
    fd_pack_rebate_cus( ctx->pack, ctx->rebate->rebate );
    ctx->pending_rebate_sz = 0UL;
    fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now );
    break;
  }
  case IN_KIND_RESOLV: {
    /* Normal transaction case */
#if FD_PACK_USE_EXTRA_STORAGE
    if( FD_LIKELY( !ctx->insert_to_extra ) ) {
#else
    if( 1 ) {
#endif
      if( FD_UNLIKELY( ctx->is_bundle ) ) {
        if( FD_UNLIKELY( ctx->current_bundle->txn_cnt==0UL ) ) return;
        /* Only finalize once the last constituent txn has arrived. */
        if( FD_UNLIKELY( ++(ctx->current_bundle->txn_received)==ctx->current_bundle->txn_cnt ) ) {
          ulong deleted;
          long insert_duration = -fd_tickcount();
          int result = fd_pack_insert_bundle_fini( ctx->pack, ctx->current_bundle->bundle, ctx->current_bundle->txn_cnt, ctx->current_bundle->min_blockhash_slot, 0, ctx->blk_engine_cfg, &deleted );
          insert_duration += fd_tickcount();
          FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
          ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ] += ctx->current_bundle->txn_received;
          fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
          ctx->current_bundle->bundle = NULL;
        }
      } else {
        /* Single-transaction insert; sig carries the blockhash slot. */
        ulong blockhash_slot = sig;
        ulong deleted;
        long insert_duration = -fd_tickcount();
        int result = fd_pack_insert_txn_fini( ctx->pack, ctx->cur_spot, blockhash_slot, &deleted );
        insert_duration += fd_tickcount();
        FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
        ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ]++;
        fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
        if( FD_LIKELY( result>=0 ) ) ctx->last_successful_insert = now;
      }
    }

    ctx->cur_spot = NULL;
    break;
  }
  case IN_KIND_EXECUTED_TXN: {
    /* Transaction landed on chain; drop it from pack by signature. */
    ulong deleted = fd_pack_delete_transaction( ctx->pack, fd_type_pun( ctx->executed_txn_sig ) );
    FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
    break;
  }
  }

  update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, fd_pack_avail_txn_cnt( ctx->pack )>0 );
}
1087 :
1088 : static void
1089 : privileged_init( fd_topo_t * topo,
1090 0 : fd_topo_tile_t * tile ) {
1091 0 : if( FD_LIKELY( !tile->pack.bundle.enabled ) ) return;
1092 0 : if( FD_UNLIKELY( !tile->pack.bundle.vote_account_path[0] ) ) {
1093 0 : FD_LOG_WARNING(( "Disabling bundle crank because no vote account was specified" ));
1094 0 : return;
1095 0 : }
1096 :
1097 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
1098 :
1099 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1100 0 : fd_pack_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
1101 :
1102 0 : if( FD_UNLIKELY( !strcmp( tile->pack.bundle.identity_key_path, "" ) ) )
1103 0 : FD_LOG_ERR(( "identity_key_path not set" ));
1104 :
1105 0 : const uchar * identity_key = fd_keyload_load( tile->pack.bundle.identity_key_path, /* pubkey only: */ 1 );
1106 0 : fd_memcpy( ctx->crank->identity_pubkey->b, identity_key, 32UL );
1107 :
1108 0 : if( FD_UNLIKELY( !fd_base58_decode_32( tile->pack.bundle.vote_account_path, ctx->crank->vote_pubkey->b ) ) ) {
1109 0 : const uchar * vote_key = fd_keyload_load( tile->pack.bundle.vote_account_path, /* pubkey only: */ 1 );
1110 0 : fd_memcpy( ctx->crank->vote_pubkey->b, vote_key, 32UL );
1111 0 : }
1112 0 : }
1113 :
1114 : static void
1115 : unprivileged_init( fd_topo_t * topo,
1116 30 : fd_topo_tile_t * tile ) {
1117 30 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
1118 :
1119 30 : if( FD_UNLIKELY( tile->pack.max_pending_transactions >= USHORT_MAX-10UL ) ) FD_LOG_ERR(( "pack tile supports up to %lu pending transactions", USHORT_MAX-11UL ));
1120 :
1121 30 : fd_pack_limits_t limits_upper[1] = {{
1122 30 : .max_cost_per_block = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK_UPPER_BOUND,
1123 30 : .max_vote_cost_per_block = FD_PACK_MAX_VOTE_COST_PER_BLOCK_UPPER_BOUND,
1124 30 : .max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT_UPPER_BOUND,
1125 30 : .max_data_bytes_per_block = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
1126 30 : .max_txn_per_microblock = EFFECTIVE_TXN_PER_MICROBLOCK,
1127 30 : .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
1128 30 : }};
1129 :
1130 30 : ulong pack_footprint = fd_pack_footprint( tile->pack.max_pending_transactions, BUNDLE_META_SZ, tile->pack.bank_tile_count, limits_upper );
1131 :
1132 30 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1133 30 : fd_pack_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
1134 30 : fd_rng_t * rng = fd_rng_join( fd_rng_new( FD_SCRATCH_ALLOC_APPEND( l, fd_rng_align(), fd_rng_footprint() ), 0U, 0UL ) );
1135 30 : if( FD_UNLIKELY( !rng ) ) FD_LOG_ERR(( "fd_rng_new failed" ));
1136 :
1137 30 : fd_pack_limits_t limits_lower[1] = {{
1138 30 : .max_cost_per_block = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK_LOWER_BOUND,
1139 30 : .max_vote_cost_per_block = FD_PACK_MAX_VOTE_COST_PER_BLOCK_LOWER_BOUND,
1140 30 : .max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT_LOWER_BOUND,
1141 30 : .max_data_bytes_per_block = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
1142 30 : .max_txn_per_microblock = EFFECTIVE_TXN_PER_MICROBLOCK,
1143 30 : .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
1144 30 : }};
1145 :
1146 30 : ctx->pack = fd_pack_join( fd_pack_new( FD_SCRATCH_ALLOC_APPEND( l, fd_pack_align(), pack_footprint ),
1147 30 : tile->pack.max_pending_transactions, BUNDLE_META_SZ, tile->pack.bank_tile_count,
1148 30 : limits_lower, rng ) );
1149 30 : if( FD_UNLIKELY( !ctx->pack ) ) FD_LOG_ERR(( "fd_pack_new failed" ));
1150 :
1151 30 : if( FD_UNLIKELY( tile->in_cnt>32UL ) ) FD_LOG_ERR(( "Too many input links (%lu>32) to pack tile", tile->in_cnt ));
1152 :
1153 30 : FD_TEST( tile->in_cnt<sizeof( ctx->in_kind )/sizeof( ctx->in_kind[0] ) );
1154 270 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
1155 240 : fd_topo_link_t const * link = &topo->links[ tile->in_link_id[ i ] ];
1156 :
1157 240 : if( FD_LIKELY( !strcmp( link->name, "resolv_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
1158 210 : else if( FD_LIKELY( !strcmp( link->name, "dedup_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
1159 210 : else if( FD_LIKELY( !strcmp( link->name, "poh_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_POH;
1160 180 : else if( FD_LIKELY( !strcmp( link->name, "bank_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_BANK;
1161 60 : else if( FD_LIKELY( !strcmp( link->name, "sign_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_SIGN;
1162 30 : else if( FD_LIKELY( !strcmp( link->name, "replay_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_REPLAY;
1163 30 : else if( FD_LIKELY( !strcmp( link->name, "executed_txn" ) ) ) ctx->in_kind[ i ] = IN_KIND_EXECUTED_TXN;
1164 0 : else FD_LOG_ERR(( "pack tile has unexpected input link %lu %s", i, link->name ));
1165 240 : }
1166 :
1167 30 : ulong bank_cnt = 0UL;
1168 750 : for( ulong i=0UL; i<topo->tile_cnt; i++ ) {
1169 720 : fd_topo_tile_t const * consumer_tile = &topo->tiles[ i ];
1170 720 : if( FD_UNLIKELY( strcmp( consumer_tile->name, "bank" ) && strcmp( consumer_tile->name, "replay" ) ) ) continue;
1171 240 : for( ulong j=0UL; j<consumer_tile->in_cnt; j++ ) {
1172 120 : if( FD_UNLIKELY( consumer_tile->in_link_id[ j ]==tile->out_link_id[ 0 ] ) ) bank_cnt++;
1173 120 : }
1174 120 : }
1175 :
1176 : // if( FD_UNLIKELY( !bank_cnt ) ) FD_LOG_ERR(( "pack tile connects to no banking tiles" ));
1177 30 : if( FD_UNLIKELY( bank_cnt>FD_PACK_MAX_BANK_TILES ) ) FD_LOG_ERR(( "pack tile connects to too many banking tiles" ));
1178 : // if( FD_UNLIKELY( bank_cnt!=tile->pack.bank_tile_count ) ) FD_LOG_ERR(( "pack tile connects to %lu banking tiles, but tile->pack.bank_tile_count is %lu", bank_cnt, tile->pack.bank_tile_count ));
1179 :
1180 30 : FD_TEST( (tile->pack.schedule_strategy>=0) & (tile->pack.schedule_strategy<=FD_PACK_STRATEGY_BUNDLE) );
1181 :
1182 30 : ctx->crank->enabled = tile->pack.bundle.enabled;
1183 30 : if( FD_UNLIKELY( tile->pack.bundle.enabled ) ) {
1184 30 : if( FD_UNLIKELY( !fd_bundle_crank_gen_init( ctx->crank->gen, (fd_acct_addr_t const *)tile->pack.bundle.tip_distribution_program_addr,
1185 30 : (fd_acct_addr_t const *)tile->pack.bundle.tip_payment_program_addr,
1186 30 : (fd_acct_addr_t const *)ctx->crank->vote_pubkey->b,
1187 30 : (fd_acct_addr_t const *)tile->pack.bundle.tip_distribution_authority,
1188 30 : schedule_strategy_strings[ tile->pack.schedule_strategy ],
1189 30 : tile->pack.bundle.commission_bps ) ) ) {
1190 0 : FD_LOG_ERR(( "constructing bundle generator failed" ));
1191 0 : }
1192 :
1193 30 : ulong sign_in_idx = fd_topo_find_tile_in_link ( topo, tile, "sign_pack", tile->kind_id );
1194 30 : ulong sign_out_idx = fd_topo_find_tile_out_link( topo, tile, "pack_sign", tile->kind_id );
1195 30 : FD_TEST( sign_in_idx!=ULONG_MAX );
1196 30 : fd_topo_link_t * sign_in = &topo->links[ tile->in_link_id[ sign_in_idx ] ];
1197 30 : fd_topo_link_t * sign_out = &topo->links[ tile->out_link_id[ sign_out_idx ] ];
1198 30 : if( FD_UNLIKELY( !fd_keyguard_client_join( fd_keyguard_client_new( ctx->crank->keyguard_client,
1199 30 : sign_out->mcache,
1200 30 : sign_out->dcache,
1201 30 : sign_in->mcache,
1202 30 : sign_in->dcache,
1203 30 : sign_out->mtu ) ) ) ) {
1204 0 : FD_LOG_ERR(( "failed to construct keyguard" ));
1205 0 : }
1206 : /* Initialize enough of the prev config that it produces a
1207 : transaction */
1208 30 : ctx->crank->prev_config->discriminator = 0x82ccfa1ee0aa0c9bUL;
1209 30 : ctx->crank->prev_config->tip_receiver->b[1] = 1;
1210 30 : ctx->crank->prev_config->block_builder->b[2] = 1;
1211 :
1212 30 : memset( ctx->crank->tip_receiver_owner, '\0', 32UL );
1213 30 : memset( ctx->crank->recent_blockhash, '\0', 32UL );
1214 30 : memset( ctx->crank->last_sig, '\0', 64UL );
1215 30 : ctx->crank->ib_inserted = 0;
1216 30 : ctx->crank->epoch = 0UL;
1217 30 : ctx->crank->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->keyswitch_obj_id ) );
1218 30 : FD_TEST( ctx->crank->keyswitch );
1219 30 : } else {
1220 0 : memset( ctx->crank, '\0', sizeof(ctx->crank) );
1221 0 : }
1222 :
1223 :
1224 : #if FD_PACK_USE_EXTRA_STORAGE
1225 : ctx->extra_txn_deq = extra_txn_deq_join( extra_txn_deq_new( FD_SCRATCH_ALLOC_APPEND( l, extra_txn_deq_align(),
1226 : extra_txn_deq_footprint() ) ) );
1227 : #endif
1228 :
1229 30 : ctx->cur_spot = NULL;
1230 30 : ctx->is_bundle = 0;
1231 30 : ctx->strategy = tile->pack.schedule_strategy;
1232 30 : ctx->max_pending_transactions = tile->pack.max_pending_transactions;
1233 30 : ctx->leader_slot = ULONG_MAX;
1234 30 : ctx->leader_bank = NULL;
1235 30 : ctx->leader_bank_idx = ULONG_MAX;
1236 30 : ctx->pack_idx = 0UL;
1237 30 : ctx->slot_microblock_cnt = 0UL;
1238 30 : ctx->pack_txn_cnt = 0UL;
1239 30 : ctx->slot_max_microblocks = 0UL;
1240 30 : ctx->slot_max_data = 0UL;
1241 30 : ctx->larger_shred_limits_per_block = tile->pack.larger_shred_limits_per_block;
1242 30 : ctx->drain_banks = 0;
1243 30 : ctx->approx_wallclock_ns = fd_log_wallclock();
1244 30 : ctx->approx_tickcount = fd_tickcount();
1245 30 : ctx->rng = rng;
1246 30 : ctx->ticks_per_ns = fd_tempo_tick_per_ns( NULL );
1247 30 : ctx->last_successful_insert = 0L;
1248 30 : ctx->highest_observed_slot = 0UL;
1249 30 : ctx->microblock_duration_ticks = (ulong)(fd_tempo_tick_per_ns( NULL )*(double)MICROBLOCK_DURATION_NS + 0.5);
1250 : #if FD_PACK_USE_EXTRA_STORAGE
1251 : ctx->insert_to_extra = 0;
1252 : #endif
1253 30 : ctx->use_consumed_cus = tile->pack.use_consumed_cus;
1254 30 : ctx->crank->enabled = tile->pack.bundle.enabled;
1255 :
1256 30 : ctx->wait_duration_ticks[ 0 ] = ULONG_MAX;
1257 930 : for( ulong i=1UL; i<MAX_TXN_PER_MICROBLOCK+1UL; i++ ) {
1258 900 : ctx->wait_duration_ticks[ i ]=(ulong)(fd_tempo_tick_per_ns( NULL )*(double)wait_duration[ i ] + 0.5);
1259 900 : }
1260 :
1261 30 : ctx->limits.slot_max_cost = limits_lower->max_cost_per_block;
1262 30 : ctx->limits.slot_max_vote_cost = limits_lower->max_vote_cost_per_block;
1263 30 : ctx->limits.slot_max_write_cost_per_acct = limits_lower->max_write_cost_per_acct;
1264 :
1265 30 : ctx->bank_cnt = tile->pack.bank_tile_count;
1266 30 : ctx->poll_cursor = 0;
1267 30 : ctx->skip_cnt = 0L;
1268 30 : ctx->bank_idle_bitset = fd_ulong_mask_lsb( (int)tile->pack.bank_tile_count );
1269 60 : for( ulong i=0UL; i<tile->pack.bank_tile_count; i++ ) {
1270 30 : ulong busy_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "bank_busy.%lu", i );
1271 30 : FD_TEST( busy_obj_id!=ULONG_MAX );
1272 30 : ctx->bank_current[ i ] = fd_fseq_join( fd_topo_obj_laddr( topo, busy_obj_id ) );
1273 30 : ctx->bank_expect[ i ] = ULONG_MAX;
1274 30 : if( FD_UNLIKELY( !ctx->bank_current[ i ] ) ) FD_LOG_ERR(( "banking tile %lu has no busy flag", i ));
1275 30 : ctx->bank_ready_at[ i ] = 0L;
1276 30 : FD_TEST( ULONG_MAX==fd_fseq_query( ctx->bank_current[ i ] ) );
1277 30 : }
1278 :
1279 270 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
1280 240 : fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
1281 240 : fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
1282 :
1283 240 : ctx->in[ i ].mem = link_wksp->wksp;
1284 240 : ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
1285 240 : ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
1286 240 : }
1287 :
1288 30 : ctx->bank_out_mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 0 ] ].dcache_obj_id ].wksp_id ].wksp;
1289 30 : ctx->bank_out_chunk0 = fd_dcache_compact_chunk0( ctx->bank_out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache );
1290 30 : ctx->bank_out_wmark = fd_dcache_compact_wmark ( ctx->bank_out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache, topo->links[ tile->out_link_id[ 0 ] ].mtu );
1291 30 : ctx->bank_out_chunk = ctx->bank_out_chunk0;
1292 :
1293 30 : ctx->poh_out_mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 1 ] ].dcache_obj_id ].wksp_id ].wksp;
1294 30 : ctx->poh_out_chunk0 = fd_dcache_compact_chunk0( ctx->poh_out_mem, topo->links[ tile->out_link_id[ 1 ] ].dcache );
1295 30 : ctx->poh_out_wmark = fd_dcache_compact_wmark ( ctx->poh_out_mem, topo->links[ tile->out_link_id[ 1 ] ].dcache, topo->links[ tile->out_link_id[ 1 ] ].mtu );
1296 30 : ctx->poh_out_chunk = ctx->poh_out_chunk0;
1297 :
1298 : /* Initialize metrics storage */
1299 30 : memset( ctx->insert_result, '\0', FD_PACK_INSERT_RETVAL_CNT * sizeof(ulong) );
1300 30 : fd_histf_join( fd_histf_new( ctx->schedule_duration, FD_MHIST_SECONDS_MIN( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS ),
1301 30 : FD_MHIST_SECONDS_MAX( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS ) ) );
1302 30 : fd_histf_join( fd_histf_new( ctx->no_sched_duration, FD_MHIST_SECONDS_MIN( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS ),
1303 30 : FD_MHIST_SECONDS_MAX( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS ) ) );
1304 30 : fd_histf_join( fd_histf_new( ctx->insert_duration, FD_MHIST_SECONDS_MIN( PACK, INSERT_TRANSACTION_DURATION_SECONDS ),
1305 30 : FD_MHIST_SECONDS_MAX( PACK, INSERT_TRANSACTION_DURATION_SECONDS ) ) );
1306 30 : fd_histf_join( fd_histf_new( ctx->complete_duration, FD_MHIST_SECONDS_MIN( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS ),
1307 30 : FD_MHIST_SECONDS_MAX( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS ) ) );
1308 30 : ctx->metric_state = 0;
1309 30 : ctx->metric_state_begin = fd_tickcount();
1310 30 : memset( ctx->metric_timing, '\0', 16*sizeof(long) );
1311 30 : memset( ctx->current_bundle, '\0', sizeof(ctx->current_bundle) );
1312 30 : memset( ctx->blk_engine_cfg, '\0', sizeof(ctx->blk_engine_cfg) );
1313 30 : memset( ctx->last_sched_metrics, '\0', sizeof(ctx->last_sched_metrics) );
1314 30 : memset( ctx->crank->metrics, '\0', sizeof(ctx->crank->metrics) );
1315 :
1316 30 : FD_LOG_INFO(( "packing microblocks of at most %lu transactions to %lu bank tiles using strategy %i", EFFECTIVE_TXN_PER_MICROBLOCK, tile->pack.bank_tile_count, ctx->strategy ));
1317 :
1318 30 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
1319 30 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
1320 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
1321 :
1322 30 : }
1323 :
1324 : static ulong
1325 : populate_allowed_seccomp( fd_topo_t const * topo,
1326 : fd_topo_tile_t const * tile,
1327 : ulong out_cnt,
1328 0 : struct sock_filter * out ) {
1329 0 : (void)topo;
1330 0 : (void)tile;
1331 :
1332 0 : populate_sock_filter_policy_fd_pack_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
1333 0 : return sock_filter_policy_fd_pack_tile_instr_cnt;
1334 0 : }
1335 :
1336 : static ulong
1337 : populate_allowed_fds( fd_topo_t const * topo,
1338 : fd_topo_tile_t const * tile,
1339 : ulong out_fds_cnt,
1340 0 : int * out_fds ) {
1341 0 : (void)topo;
1342 0 : (void)tile;
1343 :
1344 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
1345 :
1346 0 : ulong out_cnt = 0UL;
1347 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
1348 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
1349 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
1350 0 : return out_cnt;
1351 0 : }
1352 :
/* Configuration for the fd_stem.c run-loop template included below.
   NOTE(review): STEM_BURST presumably bounds how many frags may be
   published per iteration before credits are rechecked — confirm
   against fd_stem.c. */
#define STEM_BURST (1UL)

/* We want lazy (measured in ns) to be small enough that the producer
   and the consumer never have to wait for credits.  For most tango
   links, we use a default worst case speed coming from 100 Gbps
   Ethernet.  That's not very suitable for microblocks that go from
   pack to bank.  Instead we manually estimate the very aggressive
   1000ns per microblock, and then reduce it further (in line with the
   default lazy value computation) to ensure the random value chosen
   based on this won't lead to credit return stalls. */
#define STEM_LAZY  (128L*3000L)

/* The stem template threads a single fd_pack_ctx_t through every
   callback below. */
#define STEM_CALLBACK_CONTEXT_TYPE  fd_pack_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_pack_ctx_t)

/* Hook this tile's handlers into the generic stem run loop. */
#define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
#define STEM_CALLBACK_BEFORE_CREDIT       before_credit
#define STEM_CALLBACK_AFTER_CREDIT        after_credit
#define STEM_CALLBACK_DURING_FRAG         during_frag
#define STEM_CALLBACK_AFTER_FRAG          after_frag
#define STEM_CALLBACK_METRICS_WRITE       metrics_write

/* Textual inclusion: instantiates stem_run (and the run loop) with the
   macros configured above. */
#include "../stem/fd_stem.c"
1376 :
/* Exported tile descriptor for the topology runner: wires together the
   pack tile's name, sandbox policy hooks (seccomp allowlist and fd
   allowlist), scratch-memory layout queries, two-phase initialization,
   and the stem_run entry point instantiated by the fd_stem.c include
   above. */
fd_topo_run_tile_t fd_tile_pack = {
  .name                     = "pack",
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds     = populate_allowed_fds,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};
|