Line data Source code
1 : #include "../tiles.h"
2 :
3 : #include "generated/fd_pack_tile_seccomp.h"
4 :
5 : #include "../../util/pod/fd_pod_format.h"
6 : #include "../../discof/replay/fd_replay_tile.h" // layering violation
7 : #include "../fd_txn_m.h"
8 : #include "../keyguard/fd_keyload.h"
9 : #include "../keyguard/fd_keyswitch.h"
10 : #include "../keyguard/fd_keyguard.h"
11 : #include "../metrics/fd_metrics.h"
12 : #include "../pack/fd_pack.h"
13 : #include "../pack/fd_pack_cost.h"
14 : #include "../pack/fd_pack_pacing.h"
15 :
16 : #include <string.h>
17 :
18 : /* fd_pack is responsible for taking verified transactions, and
19 : arranging them into "microblocks" (groups) of transactions to
20 : be executed serially. It can try to do clever things so that
21 : multiple microblocks can execute in parallel, if they don't
22 : write to the same accounts. */
23 :
24 0 : #define IN_KIND_RESOLV (0UL)
25 0 : #define IN_KIND_POH (1UL)
26 0 : #define IN_KIND_EXECLE (2UL)
27 0 : #define IN_KIND_SIGN (3UL)
28 0 : #define IN_KIND_REPLAY (4UL)
29 0 : #define IN_KIND_EXECUTED_TXN (5UL)
30 :
/* Pace microblocks, but only slightly.  This helps keep performance
   more stable.  A value of N nanoseconds limits scheduling to at most
   1e9/N microblocks/second/execle (e.g. 500,000 ns would give 2,000
   microblocks/second/execle).  The current value of 0 disables this
   pacing entirely; see the MICROBLOCK_DURATION_NS==0 fast path in
   after_credit. */
35 0 : #define MICROBLOCK_DURATION_NS (0L)
36 :
37 : /* There are 151 accepted blockhashes, but those don't include skips.
38 : This check is neither precise nor accurate, but just good enough.
39 : The execle tile does the final check. We give a little margin for a
40 : few percent skip rate. */
41 0 : #define TRANSACTION_LIFETIME_SLOTS 160UL
42 :
43 : /* Time is normally a long, but pack expects a ulong. Add -LONG_MIN to
44 : the time values so that LONG_MIN maps to 0, LONG_MAX maps to
45 : ULONG_MAX, and everything in between maps linearly with a slope of 1.
46 : Just subtracting LONG_MIN results in signed integer overflow, which
47 : is U.B. */
48 : #define TIME_OFFSET 0x8000000000000000UL
49 : FD_STATIC_ASSERT( (ulong)LONG_MIN+TIME_OFFSET==0UL, time_offset );
50 : FD_STATIC_ASSERT( (ulong)LONG_MAX+TIME_OFFSET==ULONG_MAX, time_offset );
51 :
/* 1.6 M cost units, enough for 1 max size transaction */
const ulong CUS_PER_MICROBLOCK = 1600000UL;

/* When SMALL_MICROBLOCKS is set, pack emits single-transaction
   microblocks and schedules all available votes before anything else;
   otherwise it batches up to MAX_TXN_PER_MICROBLOCK transactions and
   reserves a fraction of capacity for votes. */
#define SMALL_MICROBLOCKS 1

#if SMALL_MICROBLOCKS
const float VOTE_FRACTION = 1.0f; /* schedule all available votes first */
#define EFFECTIVE_TXN_PER_MICROBLOCK 1UL
#else
const float VOTE_FRACTION = 0.75f; /* TODO: Is this the right value? */
#define EFFECTIVE_TXN_PER_MICROBLOCK MAX_TXN_PER_MICROBLOCK
#endif
64 :
65 : #if !SMALL_MICROBLOCKS
66 : /* There's overhead associated with each microblock the execle tile
67 : tries to execute it, so the optimal strategy is not to produce a
68 : microblock with a single transaction as soon as we receive it.
69 : Basically, if we have less than MAX_TXN_PER_MICROBLOCK transactions,
70 : we want to wait a little to see if we receive additional transactions
71 : before we schedule a microblock. We can model the optimum amount of
72 : time to wait, but the equation is complicated enough that we want to
73 : compute it before compile time. wait_duration[i] for i in
74 : [0, MAX_TXN_PER_MICROBLOCK] gives the time in nanoseconds pack should
75 : wait after receiving its most recent transaction before scheduling if
76 : it has i transactions available. Unsurprisingly,
77 : wait_duration[MAX_TXN_PER_MICROBLOCK] is 0. wait_duration[0] is
78 : ULONG_MAX, so we'll always wait if we have 0 transactions. */
79 : FD_IMPORT( wait_duration, "src/disco/pack/pack_delay.bin", ulong, 6, "" );
80 : #endif
81 :
82 :
83 :
84 : #if FD_PACK_USE_EXTRA_STORAGE
85 : /* When we are done being leader for a slot and we are leader in the
86 : very next slot, it can still take some time to transition. This is
87 : because the bank has to be finalized, a hash calculated, and various
88 : other things done in the replay stage to create the new child bank.
89 :
90 : During that time, pack cannot send transactions to execles so it
91 : needs to be able to buffer. Typically, these so called "leader
92 : transitions" are short (<15 millis), so a low value here would
93 : suffice. However, in some cases when there is memory pressure on the
94 : NUMA node or when the operating system context switches relevant
95 : threads out, it can take significantly longer.
96 :
97 : To prevent drops in these cases and because we assume execles are
98 : fast enough to drain this buffer once we do become leader, we set
99 : this buffer size to be quite large. */
100 :
101 : #define DEQUE_NAME extra_txn_deq
102 : #define DEQUE_T fd_txn_e_t
103 : #define DEQUE_MAX (128UL*1024UL)
104 : #include "../../../../util/tmpl/fd_deque.c"
105 :
106 : #endif
107 :
/* Sync with src/app/shared/fd_config.c */
#define FD_PACK_STRATEGY_PERF 0
#define FD_PACK_STRATEGY_BALANCED 1
#define FD_PACK_STRATEGY_BUNDLE 2

/* Short display names for the strategies, indexed by the
   FD_PACK_STRATEGY_* values above -- the order must match. */
static char const * const schedule_strategy_strings[3] = { "PRF", "BAL", "BUN" };
114 :
115 :
/* Metadata describing the block builder associated with a bundle;
   consumed when generating the bundle crank transaction (see
   fd_bundle_crank_generate usage in after_credit). */
typedef struct {
  fd_acct_addr_t commission_pubkey[1]; /* presumably the account credited with the commission -- confirm in bundle code */
  ulong          commission;
} block_builder_info_t;
120 :
/* Snapshot of the per-outcome transaction-schedule counters at a point
   in time; used to compute deltas between scheduling events (see the
   DELTA macros in log_end_block_metrics and get_done_packing). */
typedef struct {
  long  time; /* tickcount when the snapshot was taken (compared against fd_tickcount() values) */
  ulong sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_CNT ];
} fd_pack_sched_results_snap_t;
125 :
/* Per-input-link context: the workspace and dcache chunk bounds used
   to translate incoming frag chunks to local addresses. */
typedef struct {
  fd_wksp_t * mem;
  ulong       chunk0;
  ulong       wmark;
} fd_pack_in_ctx_t;
131 :
/* Context/state for the pack tile.  A single instance lives in the
   tile's scratch region; see scratch_footprint() below for sizing. */
typedef struct {
  fd_pack_t *  pack;      /* the pack object that buffers and schedules transactions */
  fd_txn_e_t * cur_spot;  /* in-progress insert slot; non-NULL only between insert init and fini/cancel (see before_credit) */
  int          is_bundle; /* is the current transaction a bundle */

  /* Signature and commit flag for an executed transaction.
     NOTE(review): set/consumed by frag handlers not visible in this
     chunk -- confirm the exact protocol there. */
  uchar executed_txn_sig[ 64UL ];
  uchar txn_committed;

  /* One of the FD_PACK_STRATEGY_* values defined above */
  int strategy;

  /* The value passed to fd_pack_new, etc. */
  ulong max_pending_transactions;

  /* The leader slot we are currently packing for, or ULONG_MAX if we
     are not the leader. */
  ulong        leader_slot;
  void const * leader_bank;
  ulong        leader_bank_idx;

  fd_became_leader_t _became_leader[1];

  /* The number of microblocks we have packed for the current leader
     slot.  Will always be <= slot_max_microblocks.  We must track
     this so that when we are done we can tell the PoH tile how many
     microblocks to expect in the slot. */
  ulong slot_microblock_cnt;

  /* Counter which increments when we've finished packing for a slot */
  uint pack_idx;

  ulong pack_txn_cnt; /* total num transactions packed since startup */

  /* The maximum number of microblocks that can be packed in this slot.
     Provided by the PoH tile when we become leader.*/
  ulong slot_max_microblocks;

  /* Cap (in bytes) of the amount of transaction data we produce in each
     block to avoid hitting the shred limits.  See where this is set for
     more explanation. */
  ulong slot_max_data;
  int   larger_shred_limits_per_block;

  /* Consensus critical slot cost limits. */
  struct {
    ulong slot_max_cost;
    ulong slot_max_vote_cost;
    ulong slot_max_write_cost_per_acct;
  } limits;

  /* If drain_execle is non-zero, then the pack tile must wait until all
     execle are idle before scheduling any more microblocks.  This is
     primarily helpful in irregular leader transitions, e.g. while being
     leader for slot N, we switch forks to a slot M (!=N+1) in which we
     are also leader.  We don't want to execute microblocks for
     different slots concurrently. */
  int drain_execle;

  /* Updated during housekeeping and used only for checking if the
     leader slot has ended.  Might be off by one housekeeping duration,
     but that should be small relative to a slot duration. */
  long approx_wallclock_ns;

  /* approx_tickcount is updated in during_housekeeping() with
     fd_tickcount() and will match approx_wallclock_ns.  This is done
     because we need to include an accurate nanosecond timestamp in
     every fd_txn_p_t but don't want to have to call the expensive
     fd_log_wallclock() in the critical path.  We can use
     fd_tempo_tick_per_ns() to convert from ticks to nanoseconds over
     small periods of time. */
  long approx_tickcount;

  fd_rng_t * rng;

  /* The end wallclock time of the leader slot we are currently packing
     for, if we are currently packing for a slot.*/
  long slot_end_ns;

  /* pacer and ticks_per_ns are used for pacing CUs through the slot,
     i.e. deciding when to schedule a microblock given the number of CUs
     that have been consumed so far.  pacer is an opaque pacing object,
     which is initialized when the pack tile is packing a slot.
     ticks_per_ns is the cached value from tempo. */
  fd_pack_pacing_t pacer[1];
  double           ticks_per_ns;

  /* last_successful_insert stores the tickcount of the last
     successful transaction insert. */
  long last_successful_insert;

  /* highest_observed_slot stores the highest slot number we've seen
     from any transaction coming from the resolv tile.  When this
     increases, we expire old transactions. */
  ulong highest_observed_slot;

  /* microblock_duration_ns, and wait_duration
     respectively scaled to be in ticks instead of nanoseconds */
  ulong microblock_duration_ticks;
#if !SMALL_MICROBLOCKS
  ulong wait_duration_ticks[ MAX_TXN_PER_MICROBLOCK+1UL ];
#endif

#if FD_PACK_USE_EXTRA_STORAGE
  /* In addition to the available transactions that pack knows about, we
     also store a larger ring buffer for handling cases when pack is
     full.  This is an fd_deque. */
  fd_txn_e_t * extra_txn_deq;
  int          insert_to_extra; /* whether the last insert was into pack or the extra deq */
#endif

  /* Per-input-link translation contexts and link kinds (one of the
     IN_KIND_* values defined above), indexed by in-link index. */
  fd_pack_in_ctx_t in[ 32 ];
  int              in_kind[ 32 ];

  ulong   execle_cnt;         /* number of execle tiles we schedule microblocks to */
  ulong   execle_idle_bitset; /* bit i is 1 if we've observed *execle_current[i]==execle_expect[i] */
  int     poll_cursor;        /* in [0, execle_cnt), the next execle to poll */
  int     use_consumed_cus;
  long    skip_cnt;
  ulong * execle_current[ FD_PACK_MAX_EXECLE_TILES ]; /* fseq each execle publishes progress to (read via fd_fseq_query) */
  ulong   execle_expect [ FD_PACK_MAX_EXECLE_TILES ]; /* fseq value that indicates execle i is idle */
  /* execle_ready_at[x] means don't check execle x until tickcount is at
     least execle_ready_at[x]. */
  long execle_ready_at[ FD_PACK_MAX_EXECLE_TILES ];

  /* Output dcache bounds for frags destined to the execle tiles... */
  fd_wksp_t * execle_out_mem;
  ulong       execle_out_chunk0;
  ulong       execle_out_wmark;
  ulong       execle_out_chunk;

  /* ...and for frags destined to the PoH tile. */
  fd_wksp_t * poh_out_mem;
  ulong       poh_out_chunk0;
  ulong       poh_out_wmark;
  ulong       poh_out_chunk;

  /* Locally-accumulated counters/histograms, mirrored to the shared
     metrics region by metrics_write() below. */
  ulong      insert_result[ FD_PACK_INSERT_RETVAL_CNT ];
  fd_histf_t schedule_duration[ 1 ];
  fd_histf_t no_sched_duration[ 1 ];
  fd_histf_t insert_duration [ 1 ];
  fd_histf_t complete_duration[ 1 ];

  /* Composite metric state (one bit per FD_PACK_METRIC_STATE_* value)
     plus time attributed to each of the 16 possible states; maintained
     by update_metric_state() below. */
  struct {
    uint metric_state;
    long metric_state_begin;
    long metric_timing[ 16 ];
  };

  /* last_sched_metrics is a snapshot of the schedule outcome counters
     during the last schedule which included at least one successful
     outcome. */
  fd_pack_sched_results_snap_t last_sched_metrics[ 1 ];

  /* start_block_sched_metrics is a snapshot of the schedule outcome
     counters at the last start-of-leader-block event.  (The original
     comment here said "last_sched_metrics" -- copy-paste error.) */
  fd_pack_sched_results_snap_t start_block_sched_metrics[ 1 ];

  /* State for the bundle currently being accumulated, one transaction
     per frag. */
  struct {
    ulong id;
    ulong txn_cnt;
    ulong txn_received;
    ulong min_blockhash_slot;
    fd_txn_e_t * _txn[ FD_PACK_MAX_TXN_PER_BUNDLE ];
    fd_txn_e_t * const * bundle; /* points to _txn when non-NULL */
  } current_bundle[1];

  block_builder_info_t blk_engine_cfg[1];

  /* State for generating and signing initializer-bundle "crank"
     transactions; only consulted when crank->enabled is set. */
  struct {
    int enabled;
    int ib_inserted; /* in this slot */
    fd_acct_addr_t vote_pubkey[1];
    fd_acct_addr_t identity_pubkey[1]; /* refreshed on keyswitch in during_housekeeping */
    fd_bundle_crank_gen_t gen[1];
    fd_acct_addr_t tip_receiver_owner[1];
    ulong epoch;
    fd_bundle_crank_tip_payment_config_t prev_config[1]; /* as of start of slot, then updated */
    uchar recent_blockhash[32];
    fd_ed25519_sig_t last_sig[1]; /* signature of the last initializer bundle we inserted (used by remove_ib) */

    fd_keyswitch_t * keyswitch;
    fd_keyguard_client_t keyguard_client[1];

    ulong metrics[4]; /* BUNDLE_CRANK_STATUS counters: not-needed, inserted, creation-failed, insertion-failed */
  } crank[1];


  /* Used between during_frag and after_frag */
  ulong pending_rebate_sz;
  union{ fd_pack_rebate_t rebate[1]; uchar footprint[USHORT_MAX]; } rebate[1];
} fd_pack_ctx_t;
321 :
322 0 : #define BUNDLE_META_SZ 40UL
323 : FD_STATIC_ASSERT( sizeof(block_builder_info_t)==BUNDLE_META_SZ, blk_engine_cfg );
324 :
325 0 : #define FD_PACK_METRIC_STATE_TRANSACTIONS 0
326 0 : #define FD_PACK_METRIC_STATE_EXECLES 1
327 0 : #define FD_PACK_METRIC_STATE_LEADER 2
328 0 : #define FD_PACK_METRIC_STATE_MICROBLOCKS 3
329 :
330 : /* Updates one component of the metric state. If the state has changed,
331 : records the change. */
332 : static inline void
333 : update_metric_state( fd_pack_ctx_t * ctx,
334 : long effective_as_of,
335 : int type,
336 0 : int status ) {
337 0 : uint current_state = fd_uint_insert_bit( ctx->metric_state, type, status );
338 0 : if( FD_UNLIKELY( current_state!=ctx->metric_state ) ) {
339 0 : ctx->metric_timing[ ctx->metric_state ] += effective_as_of - ctx->metric_state_begin;
340 0 : ctx->metric_state_begin = effective_as_of;
341 0 : ctx->metric_state = current_state;
342 0 : }
343 0 : }
344 :
345 : static inline void
346 0 : remove_ib( fd_pack_ctx_t * ctx ) {
347 : /* It's likely the initializer bundle is long scheduled, but we want to
348 : try deleting it just in case. */
349 0 : if( FD_UNLIKELY( ctx->crank->enabled & ctx->crank->ib_inserted ) ) {
350 0 : ulong deleted = fd_pack_delete_transaction( ctx->pack, (fd_ed25519_sig_t const *)ctx->crank->last_sig );
351 0 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
352 0 : }
353 0 : ctx->crank->ib_inserted = 0;
354 0 : }
355 :
356 :
/* Required alignment (in bytes) of this tile's scratch region. */
FD_FN_CONST static inline ulong
scratch_align( void ) {
  return 4096UL;
}
361 :
/* Total scratch footprint for this tile: the context struct, an rng,
   the pack object (sized with upper-bound limits, since the real
   per-slot limits aren't known until we become leader) and, when
   enabled, the extra transaction deque.  The append order here
   presumably matches the unscratch order in the tile's init code (not
   visible in this chunk) -- keep them in sync. */
FD_FN_PURE static inline ulong
scratch_footprint( fd_topo_tile_t const * tile ) {
  fd_pack_limits_t limits[1] = {{
    .max_cost_per_block        = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK_UPPER_BOUND,
    .max_vote_cost_per_block   = FD_PACK_MAX_VOTE_COST_PER_BLOCK_UPPER_BOUND,
    .max_write_cost_per_acct   = FD_PACK_MAX_WRITE_COST_PER_ACCT_UPPER_BOUND,
    .max_data_bytes_per_block  = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
    .max_txn_per_microblock    = EFFECTIVE_TXN_PER_MICROBLOCK,
    .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
  }};

  ulong l = FD_LAYOUT_INIT;
  l = FD_LAYOUT_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
  l = FD_LAYOUT_APPEND( l, fd_rng_align(), fd_rng_footprint() );
  l = FD_LAYOUT_APPEND( l, fd_pack_align(), fd_pack_footprint( tile->pack.max_pending_transactions,
                                                               BUNDLE_META_SZ,
                                                               tile->pack.execle_tile_count,
                                                               limits ) );
#if FD_PACK_USE_EXTRA_STORAGE
  l = FD_LAYOUT_APPEND( l, extra_txn_deq_align(), extra_txn_deq_footprint() );
#endif
  return FD_LAYOUT_FINI( l, scratch_align() );
}
385 :
/* Log one INFO line summarizing scheduler outcomes and CU usage when a
   leader block ends.  `reason` is a short string describing why the
   block ended (e.g. "time").  DELTA(m) is the number of times schedule
   outcome m occurred since the snapshot in ctx->last_sched_metrics;
   AVAIL(m) reads the current available-transactions gauge m from the
   thread-local metrics. */
static inline void
log_end_block_metrics( fd_pack_ctx_t * ctx,
                       long            now,
                       char const    * reason,
                       ulong           cus_consumed_in_block ) {
#define DELTA( m ) (fd_metrics_tl[ MIDX(COUNTER, PACK, TRANSACTION_SCHEDULE_##m) ] - ctx->last_sched_metrics->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_##m##_IDX ])
#define AVAIL( m ) (fd_metrics_tl[ MIDX(GAUGE, PACK, AVAILABLE_TRANSACTIONS_##m) ])
  FD_LOG_INFO(( "pack_end_block(slot=%lu,%s,%lx,ticks_since_last_schedule=%ld,reasons=%lu,%lu,%lu,%lu,%lu,%lu,%lu;remaining=%lu+%lu+%lu+%lu;smallest=%lu;cus=%lu->%lu)",
      ctx->leader_slot, reason, ctx->execle_idle_bitset, now-ctx->last_sched_metrics->time,
      DELTA( TAKEN ), DELTA( CU_LIMIT ), DELTA( FAST_PATH ), DELTA( BYTE_LIMIT ), DELTA( WRITE_COST ), DELTA( SLOW_PATH ), DELTA( DEFER_SKIP ),
      AVAIL(REGULAR), AVAIL(VOTES), AVAIL(BUNDLES), AVAIL(CONFLICTING),
      (fd_metrics_tl[ MIDX(GAUGE, PACK, SMALLEST_PENDING_TRANSACTION) ]),
      (cus_consumed_in_block),
      (fd_metrics_tl[ MIDX(GAUGE, PACK, CUS_CONSUMED_IN_BLOCK) ])
      ));
#undef AVAIL
#undef DELTA
}
404 :
/* Fill `done_packing` with the end-of-slot summary sent to the PoH
   tile: microblock count, the FD_PACK_END_SLOT_REASON_* that ended the
   slot, block limit usage, and per-outcome schedule-counter deltas
   measured from two snapshots: the start of the block and the last
   successful schedule.  Reads (does not reset) the thread-local
   metrics counters. */
static inline void
get_done_packing( fd_pack_ctx_t * ctx, fd_done_packing_t * done_packing, int reason ) {
  done_packing->microblocks_in_slot = ctx->slot_microblock_cnt;
  done_packing->end_slot_reason     = reason;
  fd_pack_get_block_limits( ctx->pack, done_packing->limits_usage, done_packing->limits );

  /* DELTA( mem, m ): outcome-m count since the snapshot stored in ctx->mem */
#define DELTA( mem, m ) (fd_metrics_tl[ MIDX(COUNTER, PACK, TRANSACTION_SCHEDULE_##m) ] - ctx->mem->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_##m##_IDX ])
  done_packing->block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_TAKEN_IDX      ] = DELTA( start_block_sched_metrics, TAKEN      );
  done_packing->block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_CU_LIMIT_IDX   ] = DELTA( start_block_sched_metrics, CU_LIMIT   );
  done_packing->block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_FAST_PATH_IDX  ] = DELTA( start_block_sched_metrics, FAST_PATH  );
  done_packing->block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_BYTE_LIMIT_IDX ] = DELTA( start_block_sched_metrics, BYTE_LIMIT );
  done_packing->block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_WRITE_COST_IDX ] = DELTA( start_block_sched_metrics, WRITE_COST );
  done_packing->block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_SLOW_PATH_IDX  ] = DELTA( start_block_sched_metrics, SLOW_PATH  );
  done_packing->block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_DEFER_SKIP_IDX ] = DELTA( start_block_sched_metrics, DEFER_SKIP );

  done_packing->end_block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_TAKEN_IDX      ] = DELTA( last_sched_metrics, TAKEN      );
  done_packing->end_block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_CU_LIMIT_IDX   ] = DELTA( last_sched_metrics, CU_LIMIT   );
  done_packing->end_block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_FAST_PATH_IDX  ] = DELTA( last_sched_metrics, FAST_PATH  );
  done_packing->end_block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_BYTE_LIMIT_IDX ] = DELTA( last_sched_metrics, BYTE_LIMIT );
  done_packing->end_block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_WRITE_COST_IDX ] = DELTA( last_sched_metrics, WRITE_COST );
  done_packing->end_block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_SLOW_PATH_IDX  ] = DELTA( last_sched_metrics, SLOW_PATH  );
  done_packing->end_block_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_DEFER_SKIP_IDX ] = DELTA( last_sched_metrics, DEFER_SKIP );
#undef DELTA

  fd_pack_get_pending_smallest( ctx->pack, done_packing->pending_smallest, done_packing->pending_votes_smallest );
}
431 :
/* Publish this tile's locally-accumulated counters and histograms to
   the shared metrics region, then let the pack object publish its
   own metrics. */
static inline void
metrics_write( fd_pack_ctx_t * ctx ) {
  FD_MCNT_ENUM_COPY( PACK, TRANSACTION_INSERTED, ctx->insert_result );
  FD_MCNT_ENUM_COPY( PACK, METRIC_TIMING, ((ulong*)ctx->metric_timing) );
  FD_MCNT_ENUM_COPY( PACK, BUNDLE_CRANK_STATUS, ctx->crank->metrics );
  FD_MHIST_COPY( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS, ctx->schedule_duration );
  FD_MHIST_COPY( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS, ctx->no_sched_duration );
  FD_MHIST_COPY( PACK, INSERT_TRANSACTION_DURATION_SECONDS, ctx->insert_duration );
  FD_MHIST_COPY( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS, ctx->complete_duration );

  fd_pack_metrics_write( ctx->pack );
}
444 :
445 : static inline void
446 0 : during_housekeeping( fd_pack_ctx_t * ctx ) {
447 0 : ctx->approx_wallclock_ns = fd_log_wallclock();
448 0 : ctx->approx_tickcount = fd_tickcount();
449 :
450 0 : if( FD_UNLIKELY( ctx->crank->enabled && fd_keyswitch_state_query( ctx->crank->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
451 0 : fd_memcpy( ctx->crank->identity_pubkey, ctx->crank->keyswitch->bytes, 32UL );
452 0 : fd_keyswitch_state( ctx->crank->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
453 0 : }
454 0 : }
455 :
/* Stem callback run before polling inputs.  If an overrun left a
   partially-inserted, non-bundle transaction behind, cancel it here
   and return its storage. */
static inline void
before_credit( fd_pack_ctx_t *     ctx,
               fd_stem_context_t * stem,
               int *               charge_busy ) {
  (void)stem;

  if( FD_UNLIKELY( (ctx->cur_spot!=NULL) & !ctx->is_bundle ) ) {
    *charge_busy = 1;

    /* If we were overrun while processing a frag from an in, then
       cur_spot is left dangling and not cleaned up, so clean it up here
       (by returning the slot to the pool of free slots).  If the last
       transaction was a bundle, then we don't want to return it.  When
       we try to process the first transaction in the next bundle, we'll
       see we never got the full bundle and cancel the whole last
       bundle, returning all the storage to the pool. */
#if FD_PACK_USE_EXTRA_STORAGE
    if( FD_LIKELY( !ctx->insert_to_extra ) ) fd_pack_insert_txn_cancel( ctx->pack, ctx->cur_spot );
    else extra_txn_deq_remove_tail( ctx->extra_txn_deq );
#else
    fd_pack_insert_txn_cancel( ctx->pack, ctx->cur_spot );
#endif
    ctx->cur_spot = NULL;
  }
}
481 :
#if FD_PACK_USE_EXTRA_STORAGE
/* insert_from_extra: helper method to pop the transaction at the head
   off the extra txn deque and insert it into pack.  Requires that
   ctx->extra_txn_deq is non-empty, but it's okay to call it if pack is
   full.  Returns the result of fd_pack_insert_txn_fini. */
static inline int
insert_from_extra( fd_pack_ctx_t * ctx ) {
  /* Deep-copy the head element into a fresh pack insert slot: payload,
     parsed txn, and the resolved address-lookup-table accounts. */
  fd_txn_e_t       * spot       = fd_pack_insert_txn_init( ctx->pack );
  fd_txn_e_t const * insert     = extra_txn_deq_peek_head( ctx->extra_txn_deq );
  fd_txn_t const   * insert_txn = TXN(insert->txnp);
  fd_memcpy( spot->txnp->payload, insert->txnp->payload, insert->txnp->payload_sz );
  fd_memcpy( TXN(spot->txnp), insert_txn, fd_txn_footprint( insert_txn->instr_cnt, insert_txn->addr_table_lookup_cnt ) );
  fd_memcpy( spot->alt_accts, insert->alt_accts, insert_txn->addr_table_adtl_cnt*sizeof(fd_acct_addr_t) );
  spot->txnp->payload_sz                   = insert->txnp->payload_sz;
  spot->txnp->source_tpu                   = insert->txnp->source_tpu;
  spot->txnp->source_ipv4                  = insert->txnp->source_ipv4;
  spot->txnp->scheduler_arrival_time_nanos = insert->txnp->scheduler_arrival_time_nanos;
  extra_txn_deq_remove_head( ctx->extra_txn_deq );

  /* NOTE(review): `insert` is dereferenced after remove_head above;
     this is safe only if the deque's remove does not clobber the
     element's storage -- confirm against the fd_deque template. */
  ulong blockhash_slot = insert->txnp->blockhash_slot;

  ulong deleted;
  long insert_duration = -fd_tickcount(); /* time the insert for the histogram below */
  int result = fd_pack_insert_txn_fini( ctx->pack, spot, blockhash_slot, &deleted );
  insert_duration += fd_tickcount();

  FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
  ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ]++;
  fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
  FD_MCNT_INC( PACK, TRANSACTION_INSERTED_FROM_EXTRA, 1UL );
  return result;
}
#endif
515 :
516 : static inline void
517 : after_credit( fd_pack_ctx_t * ctx,
518 : fd_stem_context_t * stem,
519 : int * opt_poll_in,
520 0 : int * charge_busy ) {
521 0 : (void)opt_poll_in;
522 :
523 0 : if( FD_UNLIKELY( (ctx->skip_cnt--)>0L ) ) return; /* It would take ages for this to hit LONG_MIN */
524 :
525 0 : long now = fd_tickcount();
526 :
527 0 : int pacing_execle_cnt = (int)fd_pack_pacing_enabled_bank_cnt( ctx->pacer, now );
528 :
529 0 : ulong execle_cnt = ctx->execle_cnt;
530 :
531 :
532 : /* If any execle are busy, check one of the busy ones see if it is
533 : still busy. */
534 0 : if( FD_LIKELY( ctx->execle_idle_bitset!=fd_ulong_mask_lsb( (int)execle_cnt ) ) ) {
535 0 : int poll_cursor = ctx->poll_cursor;
536 0 : ulong busy_bitset = (~ctx->execle_idle_bitset) & fd_ulong_mask_lsb( (int)execle_cnt );
537 :
538 : /* Suppose execle_cnt is 4 and idle_bitset looks something like this
539 : (pretending it's a uchar):
540 : 0000 1001
541 : ^ busy cursor is 1
542 : Then busy_bitset is
543 : 0000 0110
544 : Rotate it right by 2 bits
545 : 1000 0001
546 : Find lsb returns 0, so busy cursor remains 2, and we poll
547 : execle 2.
548 :
549 : If instead idle_bitset were
550 : 0000 1110
551 : ^
552 : The rotated version would be
553 : 0100 0000
554 : Find lsb will return 6, so busy cursor would be set to 0, and
555 : we'd poll execle 0, which is the right one. */
556 0 : poll_cursor++;
557 0 : poll_cursor = (poll_cursor + fd_ulong_find_lsb( fd_ulong_rotate_right( busy_bitset, (poll_cursor&63) ) )) & 63;
558 :
559 0 : if( FD_UNLIKELY(
560 : /* if microblock duration is 0, bypass the execle_ready_at check
561 : to avoid a potential cache miss. Can't use an ifdef here
562 : because FD_UNLIKELY is a macro, but the compiler should
563 : eliminate the check easily. */
564 0 : ( (MICROBLOCK_DURATION_NS==0L) || (ctx->execle_ready_at[poll_cursor]<now) ) &&
565 0 : (fd_fseq_query( ctx->execle_current[poll_cursor] )==ctx->execle_expect[poll_cursor]) ) ) {
566 0 : *charge_busy = 1;
567 0 : ctx->execle_idle_bitset |= 1UL<<poll_cursor;
568 :
569 0 : long complete_duration = -fd_tickcount();
570 0 : int completed = fd_pack_microblock_complete( ctx->pack, (ulong)poll_cursor );
571 0 : complete_duration += fd_tickcount();
572 0 : if( FD_LIKELY( completed ) ) fd_histf_sample( ctx->complete_duration, (ulong)complete_duration );
573 0 : }
574 :
575 0 : ctx->poll_cursor = poll_cursor;
576 0 : }
577 :
578 :
579 : /* If we time out on our slot, then stop being leader. This can only
580 : happen in the first after_credit after a housekeeping. */
581 0 : if( FD_UNLIKELY( ctx->approx_wallclock_ns>=ctx->slot_end_ns && ctx->leader_slot!=ULONG_MAX ) ) {
582 0 : *charge_busy = 1;
583 :
584 0 : fd_done_packing_t * done_packing = fd_chunk_to_laddr( ctx->poh_out_mem, ctx->poh_out_chunk );
585 0 : get_done_packing( ctx, done_packing, FD_PACK_END_SLOT_REASON_TIME ); /* needs to be called before fd_pack_end_block */
586 0 : fd_pack_end_block( ctx->pack );
587 0 : fd_pack_get_top_writers( ctx->pack, done_packing->limits_usage->top_writers ); /* needs to be called after fd_pack_end_block */
588 :
589 0 : fd_stem_publish( stem, 1UL, fd_disco_execle_sig( ctx->leader_slot, ctx->pack_idx ), ctx->poh_out_chunk, sizeof(fd_done_packing_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
590 0 : ctx->poh_out_chunk = fd_dcache_compact_next( ctx->poh_out_chunk, sizeof(fd_done_packing_t), ctx->poh_out_chunk0, ctx->poh_out_wmark );
591 0 : ctx->pack_idx++;
592 :
593 0 : log_end_block_metrics( ctx, now, "time", done_packing->limits_usage->block_cost );
594 0 : ctx->drain_execle = 1;
595 0 : ctx->leader_slot = ULONG_MAX;
596 0 : ctx->slot_microblock_cnt = 0UL;
597 0 : remove_ib( ctx );
598 :
599 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_LEADER, 0 );
600 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_EXECLES, 0 );
601 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, 0 );
602 0 : return;
603 0 : }
604 :
605 : /* Am I leader? If not, see about inserting at most one transaction
606 : from extra storage. It's important not to insert too many
607 : transactions here, or we won't end up servicing dedup_pack enough.
608 : If extra storage is empty or pack is full, do nothing. */
609 0 : if( FD_UNLIKELY( ctx->leader_slot==ULONG_MAX ) ) {
610 : #if FD_PACK_USE_EXTRA_STORAGE
611 : if( FD_UNLIKELY( !extra_txn_deq_empty( ctx->extra_txn_deq ) &&
612 : fd_pack_avail_txn_cnt( ctx->pack )<ctx->max_pending_transactions ) ) {
613 : *charge_busy = 1;
614 :
615 : int result = insert_from_extra( ctx );
616 : if( FD_LIKELY( result>=0 ) ) ctx->last_successful_insert = now;
617 : }
618 : #endif
619 0 : return;
620 0 : }
621 :
622 : /* Am I in drain mode? If so, check if I can exit it */
623 0 : if( FD_UNLIKELY( ctx->drain_execle ) ) {
624 0 : if( FD_LIKELY( ctx->execle_idle_bitset==fd_ulong_mask_lsb( (int)execle_cnt ) ) ) {
625 0 : ctx->drain_execle = 0;
626 :
627 : /* Pack notifies poh when execle are drained so that poh can
628 : relinquish pack's ownership over the slot execle (by decrementing
629 : its Arc). We do this by sending a ULONG_MAX sig over the
630 : pack_poh mcache.
631 :
632 : TODO: This is only needed for Frankendancer, not Firedancer,
633 : which manages bank lifetime different. */
634 0 : fd_stem_publish( stem, 1UL, ULONG_MAX, 0UL, 0UL, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
635 0 : } else {
636 0 : return;
637 0 : }
638 0 : }
639 :
640 : /* Have I sent the max allowed microblocks? Nothing to do. */
641 0 : if( FD_UNLIKELY( ctx->slot_microblock_cnt>=ctx->slot_max_microblocks ) ) return;
642 :
643 : /* Do I have enough transactions and/or have I waited enough time? */
644 : #if !SMALL_MICROBLOCKS
645 : if( FD_UNLIKELY( (ulong)(now-ctx->last_successful_insert) <
646 : ctx->wait_duration_ticks[ fd_ulong_min( fd_pack_avail_txn_cnt( ctx->pack ), MAX_TXN_PER_MICROBLOCK ) ] ) ) {
647 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, 0 );
648 : return;
649 : }
650 : #endif
651 :
652 0 : int any_ready = 0;
653 0 : int any_scheduled = 0;
654 :
655 0 : *charge_busy = 1;
656 :
657 0 : if( FD_LIKELY( ctx->crank->enabled ) ) {
658 0 : block_builder_info_t const * top_meta = fd_pack_peek_bundle_meta( ctx->pack );
659 0 : if( FD_UNLIKELY( top_meta ) ) {
660 : /* Have bundles, in a reasonable state to crank. */
661 :
662 0 : fd_txn_e_t * _bundle[ 1UL ];
663 0 : fd_txn_e_t * const * bundle = fd_pack_insert_bundle_init( ctx->pack, _bundle, 1UL );
664 :
665 0 : ulong txn_sz = fd_bundle_crank_generate( ctx->crank->gen, ctx->crank->prev_config, top_meta->commission_pubkey,
666 0 : ctx->crank->identity_pubkey, ctx->crank->tip_receiver_owner, ctx->crank->epoch, top_meta->commission,
667 0 : bundle[0]->txnp->payload, TXN( bundle[0]->txnp ) );
668 :
669 0 : if( FD_LIKELY( txn_sz==0UL ) ) { /* Everything in good shape! */
670 0 : fd_pack_insert_bundle_cancel( ctx->pack, bundle, 1UL );
671 0 : fd_pack_set_initializer_bundles_ready( ctx->pack );
672 0 : ctx->crank->metrics[ 0 ]++; /* BUNDLE_CRANK_STATUS_NOT_NEEDED */
673 0 : }
674 0 : else if( FD_LIKELY( txn_sz<ULONG_MAX ) ) {
675 0 : bundle[0]->txnp->payload_sz = (ushort)txn_sz;
676 0 : bundle[0]->txnp->source_tpu = FD_TXN_M_TPU_SOURCE_BUNDLE;
677 0 : bundle[0]->txnp->source_ipv4 = 0; /* not applicable */
678 0 : bundle[0]->txnp->scheduler_arrival_time_nanos = ctx->approx_wallclock_ns + (long)((double)(fd_tickcount() - ctx->approx_tickcount) / ctx->ticks_per_ns);
679 0 : memcpy( bundle[0]->txnp->payload+TXN(bundle[0]->txnp)->recent_blockhash_off, ctx->crank->recent_blockhash, 32UL );
680 :
681 0 : fd_keyguard_client_sign( ctx->crank->keyguard_client, bundle[0]->txnp->payload+1UL,
682 0 : bundle[0]->txnp->payload+65UL, txn_sz-65UL, FD_KEYGUARD_SIGN_TYPE_ED25519 );
683 :
684 0 : memcpy( ctx->crank->last_sig, bundle[0]->txnp->payload+1UL, 64UL );
685 :
686 0 : ctx->crank->ib_inserted = 1;
687 0 : ulong deleted;
688 0 : int retval = fd_pack_insert_bundle_fini( ctx->pack, bundle, 1UL, ctx->leader_slot-1UL, 1, NULL, &deleted );
689 0 : FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
690 0 : ctx->insert_result[ retval + FD_PACK_INSERT_RETVAL_OFF ]++;
691 0 : if( FD_UNLIKELY( retval<0 ) ) {
692 0 : ctx->crank->metrics[ 3 ]++; /* BUNDLE_CRANK_STATUS_INSERTION_FAILED */
693 0 : FD_LOG_WARNING(( "inserting initializer bundle returned %i", retval ));
694 0 : } else {
695 : /* Update the cached copy of the on-chain state. This seems a
696 : little dangerous, since we're updating it as if the bundle
697 : succeeded without knowing if that's true, but here's why
698 : it's safe:
699 :
700 : From now until we get the rebate call for this initializer
701 : bundle (which lets us know if it succeeded or failed), pack
702 : will be in [Pending] state, which means peek_bundle_meta
703 : will return NULL, so we won't read this state.
704 :
705 : Then, if the initializer bundle failed, we'll go into
706 : [Failed] IB state until the end of the block, which will
707 : cause top_meta to remain NULL so we don't read these values
708 : again.
709 :
710 : Otherwise, the initializer bundle succeeded, which means
711 : that these are the right values to use. */
712 0 : fd_bundle_crank_apply( ctx->crank->gen, ctx->crank->prev_config, top_meta->commission_pubkey,
713 0 : ctx->crank->tip_receiver_owner, ctx->crank->epoch, top_meta->commission );
714 0 : ctx->crank->metrics[ 1 ]++; /* BUNDLE_CRANK_STATUS_INSERTED */
715 0 : }
716 0 : } else {
717 : /* Already logged a warning in this case */
718 0 : fd_pack_insert_bundle_cancel( ctx->pack, bundle, 1UL );
719 0 : ctx->crank->metrics[ 2 ]++; /* BUNDLE_CRANK_STATUS_CREATION_FAILED' */
720 0 : }
721 0 : }
722 0 : }
723 :
724 : /* Try to schedule the next microblock. */
725 0 : if( FD_LIKELY( ctx->execle_idle_bitset ) ) { /* Optimize for schedule */
726 0 : any_ready = 1;
727 :
728 0 : int i = fd_ulong_find_lsb( ctx->execle_idle_bitset );
729 :
730 0 : int flags;
731 :
732 0 : switch( ctx->strategy ) {
733 0 : default:
734 0 : case FD_PACK_STRATEGY_PERF:
735 0 : flags = FD_PACK_SCHEDULE_VOTE | FD_PACK_SCHEDULE_BUNDLE | FD_PACK_SCHEDULE_TXN;
736 0 : break;
737 0 : case FD_PACK_STRATEGY_BALANCED:
738 : /* We want to exempt votes from pacing, so we always allow
739 : scheduling votes. It doesn't really make much sense to pace
740 : bundles, because they get scheduled in FIFO order. However,
741 : we keep pacing for normal transactions. For example, if
742 : pacing_execle_cnt is 0, then pack won't schedule normal
743 : transactions to any execle tile. */
744 0 : flags = FD_PACK_SCHEDULE_VOTE | fd_int_if( i==0, FD_PACK_SCHEDULE_BUNDLE, 0 )
745 0 : | fd_int_if( i<pacing_execle_cnt, FD_PACK_SCHEDULE_TXN, 0 );
746 0 : break;
747 0 : case FD_PACK_STRATEGY_BUNDLE:
748 0 : flags = FD_PACK_SCHEDULE_VOTE | FD_PACK_SCHEDULE_BUNDLE
749 0 : | fd_int_if( ctx->slot_end_ns - ctx->approx_wallclock_ns<50000000L, FD_PACK_SCHEDULE_TXN, 0 );
750 0 : break;
751 0 : }
752 :
753 0 : fd_txn_e_t * microblock_dst = fd_chunk_to_laddr( ctx->execle_out_mem, ctx->execle_out_chunk );
754 0 : long schedule_duration = -fd_tickcount();
755 0 : ulong schedule_cnt = fd_pack_schedule_next_microblock( ctx->pack, CUS_PER_MICROBLOCK, VOTE_FRACTION, (ulong)i, flags, microblock_dst );
756 0 : schedule_duration += fd_tickcount();
757 0 : fd_histf_sample( (schedule_cnt>0UL) ? ctx->schedule_duration : ctx->no_sched_duration, (ulong)schedule_duration );
758 :
759 0 : if( FD_LIKELY( schedule_cnt ) ) {
760 0 : any_scheduled = 1;
761 0 : long now2 = fd_tickcount();
762 0 : ulong tsorig = (ulong)fd_frag_meta_ts_comp( now ); /* A bound on when we observed execle was idle */
763 0 : ulong tspub = (ulong)fd_frag_meta_ts_comp( now2 );
764 0 : ulong chunk = ctx->execle_out_chunk;
765 0 : ulong msg_sz = schedule_cnt*sizeof(fd_txn_e_t);
766 0 : fd_microblock_execle_trailer_t * trailer = (fd_microblock_execle_trailer_t*)(microblock_dst+schedule_cnt);
767 0 : trailer->bank = ctx->leader_bank;
768 0 : trailer->bank_idx = ctx->leader_bank_idx;
769 0 : trailer->microblock_idx = ctx->slot_microblock_cnt;
770 0 : trailer->pack_idx = ctx->pack_idx;
771 0 : trailer->pack_txn_idx = ctx->pack_txn_cnt;
772 0 : trailer->is_bundle = !!(microblock_dst->txnp->flags & FD_TXN_P_FLAGS_BUNDLE);
773 :
774 0 : ulong sig = fd_disco_poh_sig( ctx->leader_slot, POH_PKT_TYPE_MICROBLOCK, (ulong)i );
775 0 : fd_stem_publish( stem, 0UL, sig, chunk, msg_sz+sizeof(fd_microblock_execle_trailer_t), 0UL, tsorig, tspub );
776 0 : ctx->execle_expect[ i ] = stem->seqs[0]-1UL;
777 0 : ctx->execle_ready_at[i] = now2 + (long)ctx->microblock_duration_ticks;
778 0 : ctx->execle_out_chunk = fd_dcache_compact_next( ctx->execle_out_chunk, msg_sz+sizeof(fd_microblock_execle_trailer_t), ctx->execle_out_chunk0, ctx->execle_out_wmark );
779 0 : ctx->slot_microblock_cnt += fd_ulong_if( trailer->is_bundle, schedule_cnt, 1UL );
780 0 : ctx->pack_idx += fd_uint_if( trailer->is_bundle, (uint)schedule_cnt, 1U );
781 0 : ctx->pack_txn_cnt += schedule_cnt;
782 :
783 0 : ctx->execle_idle_bitset = fd_ulong_pop_lsb( ctx->execle_idle_bitset );
784 0 : ctx->skip_cnt = (long)schedule_cnt * fd_long_if( ctx->use_consumed_cus, (long)execle_cnt/2L, 1L );
785 0 : fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now2 );
786 :
787 0 : ctx->last_sched_metrics->time = now2;
788 0 : fd_pack_get_sched_metrics( ctx->pack, ctx->last_sched_metrics->sched_results );
789 :
790 : /* If we're using CU rebates, then we have one in for each execle
791 : in addition to the two normal ones. We want to skip schedule
792 : attempts for (execle_cnt + 1) link polls after a successful
793 : schedule attempt. */
794 0 : fd_long_store_if( ctx->use_consumed_cus, &(ctx->skip_cnt), (long)(ctx->execle_cnt + 1) );
795 0 : }
796 0 : }
797 :
798 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_EXECLES, any_ready );
799 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, any_scheduled );
800 0 : now = fd_tickcount();
801 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, fd_pack_avail_txn_cnt( ctx->pack )>0 );
802 :
803 : #if FD_PACK_USE_EXTRA_STORAGE
804 : if( FD_UNLIKELY( !extra_txn_deq_empty( ctx->extra_txn_deq ) ) ) {
805 : /* Don't start pulling from the extra storage until the available
806 : transaction count drops below half. */
807 : ulong avail_space = (ulong)fd_long_max( 0L, (long)(ctx->max_pending_transactions>>1)-(long)fd_pack_avail_txn_cnt( ctx->pack ) );
808 : ulong qty_to_insert = fd_ulong_min( 10UL, fd_ulong_min( extra_txn_deq_cnt( ctx->extra_txn_deq ), avail_space ) );
809 : int any_successes = 0;
810 : for( ulong i=0UL; i<qty_to_insert; i++ ) any_successes |= (0<=insert_from_extra( ctx ));
811 : if( FD_LIKELY( any_successes ) ) ctx->last_successful_insert = now;
812 : }
813 : #endif
814 :
815 : /* Did we send the maximum allowed microblocks? Then end the slot. */
816 0 : if( FD_UNLIKELY( ctx->slot_microblock_cnt==ctx->slot_max_microblocks )) {
817 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_LEADER, 0 );
818 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_EXECLES, 0 );
819 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, 0 );
820 : /* The pack object also does this accounting and increases this
821 : metric, but we end the slot early so won't see it unless we also
822 : increment it here. */
823 0 : FD_MCNT_INC( PACK, MICROBLOCK_PER_BLOCK_LIMIT, 1UL );
824 :
825 0 : fd_done_packing_t * done_packing = fd_chunk_to_laddr( ctx->poh_out_mem, ctx->poh_out_chunk );
826 0 : get_done_packing( ctx, done_packing, FD_PACK_END_SLOT_REASON_MICROBLOCK );
827 0 : fd_pack_end_block( ctx->pack );
828 0 : fd_pack_get_top_writers( ctx->pack, done_packing->limits_usage->top_writers );
829 :
830 0 : fd_stem_publish( stem, 1UL, fd_disco_execle_sig( ctx->leader_slot, ctx->pack_idx ), ctx->poh_out_chunk, sizeof(fd_done_packing_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
831 0 : ctx->poh_out_chunk = fd_dcache_compact_next( ctx->poh_out_chunk, sizeof(fd_done_packing_t), ctx->poh_out_chunk0, ctx->poh_out_wmark );
832 0 : ctx->pack_idx++;
833 :
834 0 : log_end_block_metrics( ctx, now, "microblock", done_packing->limits_usage->block_cost );
835 0 : ctx->drain_execle = 1;
836 0 : ctx->leader_slot = ULONG_MAX;
837 0 : ctx->slot_microblock_cnt = 0UL;
838 0 : remove_ib( ctx );
839 0 : }
840 0 : }
841 :
842 :
/* At this point, we have started receiving frag seq with details in
   mline at time now.  Speculatively process it here: copy whatever we
   need out of the dcache into tile-local staging storage so the work
   can be abandoned if we get overrun before after_frag runs.
   Commitment into pack happens in after_frag, with the exception of
   the fd_pack_insert_bundle_init/cancel calls in the RESOLV case
   below.  NOTE(review): those calls mutate pack during speculative
   processing; presumably that is tolerated by the fd_pack bundle
   insertion contract -- confirm against fd_pack.h.

   Also note there is deliberately no case for IN_KIND_SIGN here;
   frags on the sign link appear to be consumed via the keyguard
   client instead -- NOTE(review): confirm. */

static inline void
during_frag( fd_pack_ctx_t * ctx,
             ulong           in_idx,
             ulong           seq FD_PARAM_UNUSED,
             ulong           sig,
             ulong           chunk,
             ulong           sz,
             ulong           ctl FD_PARAM_UNUSED ) {

  uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );

  switch( ctx->in_kind[ in_idx ] ) {
    case IN_KIND_REPLAY: {
      /* Two message types of interest on the replay link: a
         transaction was executed (stash its signature so after_frag
         can delete our copy from pack), or we became leader (stash
         the full fd_became_leader_t message). */
      if( FD_LIKELY( sig==REPLAY_SIG_TXN_EXECUTED ) ) {
        fd_replay_txn_executed_t const * txn_executed = fd_type_pun_const( dcache_entry );
        ctx->txn_committed = !!txn_executed->is_committable;
        if( FD_UNLIKELY( !txn_executed->is_committable ) ) return;
        memcpy( ctx->executed_txn_sig, fd_txn_get_signatures( TXN(txn_executed->txn), txn_executed->txn->payload ), FD_TXN_SIGNATURE_SZ );
        return;
      }
      if( FD_LIKELY( sig!=REPLAY_SIG_BECAME_LEADER ) ) return;

      /* There was a leader transition. Handle it. */
      if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz!=sizeof(fd_became_leader_t) ) )
        FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

      fd_memcpy( ctx->_became_leader, dcache_entry, sizeof(fd_became_leader_t) );
      return;
    }
    case IN_KIND_POH: {
      /* Not interested in stamped microblocks, only leader updates. */
      if( fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_BECAME_LEADER ) return;

      /* There was a leader transition. Handle it. */
      if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz!=sizeof(fd_became_leader_t) ) )
        FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

      fd_memcpy( ctx->_became_leader, dcache_entry, sizeof(fd_became_leader_t) );
      return;
    }
    case IN_KIND_EXECLE: {
      /* CU rebate message from an execle tile; only expected when
         rebates are in use. */
      FD_TEST( ctx->use_consumed_cus );
      /* For a previous slot */
      if( FD_UNLIKELY( sig!=ctx->leader_slot ) ) return;

      if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz<FD_PACK_REBATE_MIN_SZ
            || sz>FD_PACK_REBATE_MAX_SZ ) )
        FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

      /* Stage the rebate; after_frag applies it to pack. */
      ctx->pending_rebate_sz = sz;
      fd_memcpy( ctx->rebate, dcache_entry, sz );
      return;
    }
    case IN_KIND_RESOLV: {
      if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>FD_TPU_RESOLVED_MTU ) )
        FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

      /* Cast drops const; everything below only reads through txnm. */
      fd_txn_m_t * txnm = (fd_txn_m_t *)dcache_entry;
      ulong payload_sz = txnm->payload_sz;
      ulong txn_t_sz = txnm->txn_t_sz;
      uint source_ipv4 = txnm->source_ipv4;
      uchar source_tpu = txnm->source_tpu;
      FD_TEST( payload_sz<=FD_TPU_MTU );
      FD_TEST( txn_t_sz <=FD_TXN_MAX_SZ );
      fd_txn_t * txn = fd_txn_m_txn_t( txnm );

      /* Resolved address-lookup-table accounts trail the parsed txn;
         32 bytes per additional address. */
      ulong addr_table_sz = 32UL*txn->addr_table_adtl_cnt;
      FD_TEST( addr_table_sz<=32UL*FD_TXN_ACCT_ADDR_MAX );

      if( FD_UNLIKELY( (ctx->leader_slot==ULONG_MAX) & (sig>ctx->highest_observed_slot) ) ) {
        /* Using the resolv tile's knowledge of the current slot is a bit
           of a hack, since we don't get any info if there are no
           transactions and we're not leader.  We're actually in exactly
           the case where that's okay though.  The point of calling
           expire_before long before we become leader is so that we don't
           drop new but low-fee-paying transactions when pack is clogged
           with expired but high-fee-paying transactions.  That can only
           happen if we are getting transactions. */
        ctx->highest_observed_slot = sig;
        ulong exp_cnt = fd_pack_expire_before( ctx->pack, fd_ulong_max( ctx->highest_observed_slot, TRANSACTION_LIFETIME_SLOTS )-TRANSACTION_LIFETIME_SLOTS );
        FD_MCNT_INC( PACK, TRANSACTION_EXPIRED, exp_cnt );
      }


      ulong bundle_id = txnm->block_engine.bundle_id;
      if( FD_UNLIKELY( bundle_id ) ) {
        /* This transaction is part of a bundle.  sig carries the
           blockhash slot for bundle members. */
        ctx->is_bundle = 1;
        if( FD_LIKELY( bundle_id!=ctx->current_bundle->id ) ) {
          /* First transaction of a new bundle.  If the previous bundle
             never finished arriving, abandon its partial contents. */
          if( FD_UNLIKELY( ctx->current_bundle->bundle ) ) {
            FD_MCNT_INC( PACK, TRANSACTION_DROPPED_PARTIAL_BUNDLE, ctx->current_bundle->txn_received );
            fd_pack_insert_bundle_cancel( ctx->pack, ctx->current_bundle->bundle, ctx->current_bundle->txn_cnt );
          }
          ctx->current_bundle->id = bundle_id;
          ctx->current_bundle->txn_cnt = txnm->block_engine.bundle_txn_cnt;
          ctx->current_bundle->min_blockhash_slot = ULONG_MAX;
          ctx->current_bundle->txn_received = 0UL;

          /* An empty bundle carries no transactions to insert; drop it. */
          if( FD_UNLIKELY( ctx->current_bundle->txn_cnt==0UL ) ) {
            FD_MCNT_INC( PACK, TRANSACTION_DROPPED_PARTIAL_BUNDLE, 1UL );
            ctx->current_bundle->id = 0UL;
            return;
          }
          ctx->blk_engine_cfg->commission = txnm->block_engine.commission;
          memcpy( ctx->blk_engine_cfg->commission_pubkey->b, txnm->block_engine.commission_pubkey, 32UL );

          /* Reserve pack-side storage for the whole bundle up front. */
          ctx->current_bundle->bundle = fd_pack_insert_bundle_init( ctx->pack, ctx->current_bundle->_txn, ctx->current_bundle->txn_cnt );
        }
        ctx->cur_spot = ctx->current_bundle->bundle[ ctx->current_bundle->txn_received ];
        ctx->current_bundle->min_blockhash_slot = fd_ulong_min( ctx->current_bundle->min_blockhash_slot, sig );
      } else {
        ctx->is_bundle = 0;
#if FD_PACK_USE_EXTRA_STORAGE
        if( FD_LIKELY( ctx->leader_slot!=ULONG_MAX || fd_pack_avail_txn_cnt( ctx->pack )<ctx->max_pending_transactions ) ) {
          ctx->cur_spot = fd_pack_insert_txn_init( ctx->pack );
          ctx->insert_to_extra = 0;
        } else {
          /* Pack is more than half full and we're not leader: spill to
             the extra deque, evicting its oldest entry if full. */
          if( FD_UNLIKELY( extra_txn_deq_full( ctx->extra_txn_deq ) ) ) {
            extra_txn_deq_remove_head( ctx->extra_txn_deq );
            FD_MCNT_INC( PACK, TRANSACTION_DROPPED_FROM_EXTRA, 1UL );
          }
          ctx->cur_spot = extra_txn_deq_peek_tail( extra_txn_deq_insert_tail( ctx->extra_txn_deq ) );
          /* We want to store the current time in cur_spot so that we can
             track its expiration better.  We just stash it in the CU
             fields, since those aren't important right now. */
          ctx->cur_spot->txnp->blockhash_slot = sig;
          ctx->insert_to_extra = 1;
          FD_MCNT_INC( PACK, TRANSACTION_INSERTED_TO_EXTRA, 1UL );
        }
#else
        ctx->cur_spot = fd_pack_insert_txn_init( ctx->pack );
#endif
      }

      /* We get transactions from the resolv tile.
         The transactions should have been parsed and verified. */
      FD_MCNT_INC( PACK, NORMAL_TRANSACTION_RECEIVED, 1UL );

      /* Copy payload, parsed txn representation, and resolved ALUT
         accounts into the reserved spot, and stamp the arrival time
         (wallclock derived from the tick counter). */
      fd_memcpy( ctx->cur_spot->txnp->payload, fd_txn_m_payload( txnm ), payload_sz );
      fd_memcpy( TXN(ctx->cur_spot->txnp), txn, txn_t_sz );
      fd_memcpy( ctx->cur_spot->alt_accts, fd_txn_m_alut( txnm ), addr_table_sz );
      ctx->cur_spot->txnp->scheduler_arrival_time_nanos = ctx->approx_wallclock_ns + (long)((double)(fd_tickcount() - ctx->approx_tickcount) / ctx->ticks_per_ns);
      ctx->cur_spot->txnp->payload_sz = payload_sz;
      ctx->cur_spot->txnp->source_ipv4 = source_ipv4;
      ctx->cur_spot->txnp->source_tpu = source_tpu;

      break;
    }
    case IN_KIND_EXECUTED_TXN: {
      /* A 64-byte transaction signature executed elsewhere; stash it
         so after_frag can delete our copy from pack. */
      FD_TEST( sz==64UL );
      fd_memcpy( ctx->executed_txn_sig, dcache_entry, sz );
      break;
    }
  }
}
1000 :
1001 :
/* After the transaction has been fully received, and we know we were
   not overrun while reading it, insert it into pack.  This also
   commits the other state staged by during_frag: leader transitions
   (REPLAY/POH), CU rebates (EXECLE), and deletion of transactions
   that already executed elsewhere (EXECUTED_TXN). */

static inline void
after_frag( fd_pack_ctx_t *     ctx,
            ulong               in_idx,
            ulong               seq,
            ulong               sig,
            ulong               sz,
            ulong               tsorig,
            ulong               tspub,
            fd_stem_context_t * stem ) {
  (void)seq;
  (void)sz;
  (void)tsorig;
  (void)tspub;
  (void)stem;

  long now = fd_tickcount();

  /* First switch: figure out the new leader slot if this frag is a
     leader transition (returning early otherwise for REPLAY/POH), and
     handle executed-transaction deletion on the replay link. */
  ulong leader_slot = ULONG_MAX;
  switch( ctx->in_kind[ in_idx ] ) {
    case IN_KIND_REPLAY:
      if( FD_LIKELY( sig==REPLAY_SIG_TXN_EXECUTED && ctx->txn_committed ) ) {
        /* The transaction landed; drop our copy so we don't schedule
           it again. */
        ulong deleted = fd_pack_delete_transaction( ctx->pack, fd_type_pun( ctx->executed_txn_sig ) );
        FD_MCNT_INC( PACK, TRANSACTION_ALREADY_EXECUTED, deleted );
      }
      if( FD_UNLIKELY( sig!=REPLAY_SIG_BECAME_LEADER ) ) return;
      leader_slot = ctx->_became_leader->slot;

      /* Snapshot scheduler metrics at the start of our leader block. */
      ctx->start_block_sched_metrics->time = now;
      fd_pack_get_sched_metrics( ctx->pack, ctx->start_block_sched_metrics->sched_results );
      break;
    case IN_KIND_POH:
      if( fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_BECAME_LEADER ) return;
      leader_slot = fd_disco_poh_sig_slot( sig );
      break;
    default:
      break;
  }

  switch( ctx->in_kind[ in_idx ] ) {
    case IN_KIND_REPLAY:
    case IN_KIND_POH: {
      /* Leader transition.  For both link kinds, during_frag copied
         the fd_became_leader_t message into ctx->_became_leader, which
         we consume below. */
      long now_ticks = fd_tickcount();
      long now_ns = fd_log_wallclock();

      /* If we were still packing for a previous slot, end that block
         abruptly and publish done_packing downstream before starting
         the new slot. */
      if( FD_UNLIKELY( ctx->leader_slot!=ULONG_MAX ) ) {
        fd_done_packing_t * done_packing = fd_chunk_to_laddr( ctx->poh_out_mem, ctx->poh_out_chunk );
        get_done_packing( ctx, done_packing, FD_PACK_END_SLOT_REASON_LEADER_SWITCH );
        fd_pack_end_block( ctx->pack );
        fd_pack_get_top_writers( ctx->pack, done_packing->limits_usage->top_writers );

        fd_stem_publish( stem, 1UL, fd_disco_execle_sig( ctx->leader_slot, ctx->pack_idx ), ctx->poh_out_chunk, sizeof(fd_done_packing_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
        ctx->poh_out_chunk = fd_dcache_compact_next( ctx->poh_out_chunk, sizeof(fd_done_packing_t), ctx->poh_out_chunk0, ctx->poh_out_wmark );
        ctx->pack_idx++;

        FD_LOG_WARNING(( "switching to slot %lu while packing for slot %lu. Draining execle tiles.", leader_slot, ctx->leader_slot ));
        log_end_block_metrics( ctx, now_ticks, "switch", done_packing->limits_usage->block_cost );
        ctx->drain_execle = 1;
        ctx->leader_slot = ULONG_MAX;
        ctx->slot_microblock_cnt = 0UL;
        remove_ib( ctx );
      }
      ctx->leader_slot = leader_slot;

      /* Drop transactions whose recent blockhash is too old to land. */
      ulong exp_cnt = fd_pack_expire_before( ctx->pack, fd_ulong_max( ctx->leader_slot, TRANSACTION_LIFETIME_SLOTS )-TRANSACTION_LIFETIME_SLOTS );
      FD_MCNT_INC( PACK, TRANSACTION_EXPIRED, exp_cnt );

      ctx->leader_bank = ctx->_became_leader->bank;
      ctx->leader_bank_idx = ctx->_became_leader->bank_idx;
      ctx->slot_max_microblocks = ctx->_became_leader->max_microblocks_in_slot;
      /* Reserve some space in the block for ticks */
      ctx->slot_max_data = (ctx->larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK)
                            - 48UL*(ctx->_became_leader->ticks_per_slot+ctx->_became_leader->total_skipped_ticks);

      ctx->limits.slot_max_cost = ctx->_became_leader->limits.slot_max_cost;
      ctx->limits.slot_max_vote_cost = ctx->_became_leader->limits.slot_max_vote_cost;
      ctx->limits.slot_max_write_cost_per_acct = ctx->_became_leader->limits.slot_max_write_cost_per_acct;

      /* ticks_per_ns is probably relatively stable over 400ms, but not
         over several hours, so we need to compute the slot duration in
         nanoseconds first and then convert to ticks.  This doesn't need
         to be super accurate, but we don't want it to vary wildly. */
      long end_ticks = now_ticks + (long)((double)fd_long_max( ctx->_became_leader->slot_end_ns - now_ns, 1L )*ctx->ticks_per_ns);
      /* We may still get overrun, but then we'll never use this and just
         reinitialize it the next time when we actually become leader. */
      fd_pack_pacing_init( ctx->pacer, now_ticks, end_ticks, (float)ctx->ticks_per_ns, ctx->limits.slot_max_cost );

      if( FD_UNLIKELY( ctx->crank->enabled ) ) {
        /* If we get overrun, we'll just never use these values, but the
           old values aren't really useful either. */
        ctx->crank->epoch = ctx->_became_leader->epoch;
        *(ctx->crank->prev_config) = *(ctx->_became_leader->bundle->config);
        memcpy( ctx->crank->recent_blockhash, ctx->_became_leader->bundle->last_blockhash, 32UL );
        memcpy( ctx->crank->tip_receiver_owner, ctx->_became_leader->bundle->tip_receiver_owner, 32UL );
      }

      FD_LOG_INFO(( "pack_became_leader(slot=%lu,ends_at=%ld)", ctx->leader_slot, ctx->_became_leader->slot_end_ns ));

      update_metric_state( ctx, fd_tickcount(), FD_PACK_METRIC_STATE_LEADER, 1 );

      /* Install the per-slot limits pack should enforce this block. */
      ctx->slot_end_ns = ctx->_became_leader->slot_end_ns;
      fd_pack_limits_t limits[ 1 ];
      limits->max_cost_per_block = ctx->limits.slot_max_cost;
      limits->max_data_bytes_per_block = ctx->slot_max_data;
      limits->max_microblocks_per_block = ctx->slot_max_microblocks;
      limits->max_vote_cost_per_block = ctx->limits.slot_max_vote_cost;
      limits->max_write_cost_per_acct = ctx->limits.slot_max_write_cost_per_acct;
      limits->max_txn_per_microblock = ULONG_MAX; /* unused */
      fd_pack_set_block_limits( ctx->pack, limits );
      fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now );

      break;
    }
    case IN_KIND_EXECLE: {
      /* For a previous slot */
      if( FD_UNLIKELY( sig!=ctx->leader_slot ) ) return;

      /* Apply the rebate staged by during_frag and refresh pacing. */
      fd_pack_rebate_cus( ctx->pack, ctx->rebate->rebate );
      ctx->pending_rebate_sz = 0UL;
      fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now );
      break;
    }
    case IN_KIND_RESOLV: {
      /* Normal transaction case */
#if FD_PACK_USE_EXTRA_STORAGE
      if( FD_LIKELY( !ctx->insert_to_extra ) ) {
#else
      if( 1 ) {
#endif
        if( FD_UNLIKELY( ctx->is_bundle ) ) {
          if( FD_UNLIKELY( ctx->current_bundle->txn_cnt==0UL ) ) return;
          /* Only finalize once every transaction of the bundle has
             arrived. */
          if( FD_UNLIKELY( ++(ctx->current_bundle->txn_received)==ctx->current_bundle->txn_cnt ) ) {
            ulong deleted;
            long insert_duration = -fd_tickcount();
            int result = fd_pack_insert_bundle_fini( ctx->pack, ctx->current_bundle->bundle, ctx->current_bundle->txn_cnt, ctx->current_bundle->min_blockhash_slot, 0, ctx->blk_engine_cfg, &deleted );
            insert_duration += fd_tickcount();
            FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
            ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ] += ctx->current_bundle->txn_received;
            fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
            ctx->current_bundle->bundle = NULL;
          }
        } else {
          /* For non-bundle transactions, sig is the blockhash slot. */
          ulong blockhash_slot = sig;
          ulong deleted;
          long insert_duration = -fd_tickcount();
          int result = fd_pack_insert_txn_fini( ctx->pack, ctx->cur_spot, blockhash_slot, &deleted );
          insert_duration += fd_tickcount();
          FD_MCNT_INC( PACK, TRANSACTION_DELETED, deleted );
          ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ]++;
          fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
          if( FD_LIKELY( result>=0 ) ) ctx->last_successful_insert = now;
        }
      }

      ctx->cur_spot = NULL;
      break;
    }
    case IN_KIND_EXECUTED_TXN: {
      /* Delete the transaction whose signature during_frag staged. */
      ulong deleted = fd_pack_delete_transaction( ctx->pack, fd_type_pun( ctx->executed_txn_sig ) );
      FD_MCNT_INC( PACK, TRANSACTION_ALREADY_EXECUTED, deleted );
      break;
    }
  }

  update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, fd_pack_avail_txn_cnt( ctx->pack )>0 );
}
1170 :
1171 : static void
1172 : privileged_init( fd_topo_t * topo,
1173 0 : fd_topo_tile_t * tile ) {
1174 0 : if( FD_LIKELY( !tile->pack.bundle.enabled ) ) return;
1175 0 : if( FD_UNLIKELY( !tile->pack.bundle.vote_account_path[0] ) ) {
1176 0 : FD_LOG_WARNING(( "Disabling bundle crank because no vote account was specified" ));
1177 0 : return;
1178 0 : }
1179 :
1180 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
1181 :
1182 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1183 0 : fd_pack_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
1184 :
1185 0 : if( FD_UNLIKELY( !strcmp( tile->pack.bundle.identity_key_path, "" ) ) )
1186 0 : FD_LOG_ERR(( "identity_key_path not set" ));
1187 :
1188 0 : const uchar * identity_key = fd_keyload_load( tile->pack.bundle.identity_key_path, /* pubkey only: */ 1 );
1189 0 : fd_memcpy( ctx->crank->identity_pubkey->b, identity_key, 32UL );
1190 :
1191 0 : if( FD_UNLIKELY( !fd_base58_decode_32( tile->pack.bundle.vote_account_path, ctx->crank->vote_pubkey->b ) ) ) {
1192 0 : const uchar * vote_key = fd_keyload_load( tile->pack.bundle.vote_account_path, /* pubkey only: */ 1 );
1193 0 : fd_memcpy( ctx->crank->vote_pubkey->b, vote_key, 32UL );
1194 0 : }
1195 0 : }
1196 :
1197 : static void
1198 : unprivileged_init( fd_topo_t * topo,
1199 0 : fd_topo_tile_t * tile ) {
1200 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
1201 :
1202 0 : if( FD_UNLIKELY( tile->pack.max_pending_transactions >= USHORT_MAX-10UL ) ) FD_LOG_ERR(( "pack tile supports up to %lu pending transactions", USHORT_MAX-11UL ));
1203 :
1204 0 : fd_pack_limits_t limits_upper[1] = {{
1205 0 : .max_cost_per_block = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK_UPPER_BOUND,
1206 0 : .max_vote_cost_per_block = FD_PACK_MAX_VOTE_COST_PER_BLOCK_UPPER_BOUND,
1207 0 : .max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT_UPPER_BOUND,
1208 0 : .max_data_bytes_per_block = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
1209 0 : .max_txn_per_microblock = EFFECTIVE_TXN_PER_MICROBLOCK,
1210 0 : .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
1211 0 : }};
1212 :
1213 0 : ulong pack_footprint = fd_pack_footprint( tile->pack.max_pending_transactions, BUNDLE_META_SZ, tile->pack.execle_tile_count, limits_upper );
1214 :
1215 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1216 0 : fd_pack_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
1217 0 : fd_rng_t * rng = fd_rng_join( fd_rng_new( FD_SCRATCH_ALLOC_APPEND( l, fd_rng_align(), fd_rng_footprint() ), 0U, 0UL ) );
1218 0 : if( FD_UNLIKELY( !rng ) ) FD_LOG_ERR(( "fd_rng_new failed" ));
1219 :
1220 0 : fd_pack_limits_t limits_lower[1] = {{
1221 0 : .max_cost_per_block = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK_LOWER_BOUND,
1222 0 : .max_vote_cost_per_block = FD_PACK_MAX_VOTE_COST_PER_BLOCK_LOWER_BOUND,
1223 0 : .max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT_LOWER_BOUND,
1224 0 : .max_data_bytes_per_block = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
1225 0 : .max_txn_per_microblock = EFFECTIVE_TXN_PER_MICROBLOCK,
1226 0 : .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
1227 0 : }};
1228 :
1229 0 : ctx->pack = fd_pack_join( fd_pack_new( FD_SCRATCH_ALLOC_APPEND( l, fd_pack_align(), pack_footprint ),
1230 0 : tile->pack.max_pending_transactions, BUNDLE_META_SZ, tile->pack.execle_tile_count,
1231 0 : limits_lower, rng ) );
1232 0 : if( FD_UNLIKELY( !ctx->pack ) ) FD_LOG_ERR(( "fd_pack_new failed" ));
1233 :
1234 0 : if( FD_UNLIKELY( tile->in_cnt>32UL ) ) FD_LOG_ERR(( "Too many input links (%lu>32) to pack tile", tile->in_cnt ));
1235 :
1236 0 : FD_TEST( tile->in_cnt<sizeof( ctx->in_kind )/sizeof( ctx->in_kind[0] ) );
1237 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
1238 0 : fd_topo_link_t const * link = &topo->links[ tile->in_link_id[ i ] ];
1239 :
1240 0 : if( FD_LIKELY( !strcmp( link->name, "resolv_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
1241 0 : else if( FD_LIKELY( !strcmp( link->name, "resolh_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
1242 0 : else if( FD_LIKELY( !strcmp( link->name, "poh_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_POH;
1243 0 : else if( FD_LIKELY( !strcmp( link->name, "pohh_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_POH;
1244 0 : else if( FD_LIKELY( !strcmp( link->name, "bank_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_EXECLE;
1245 0 : else if( FD_LIKELY( !strcmp( link->name, "execle_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_EXECLE;
1246 0 : else if( FD_LIKELY( !strcmp( link->name, "sign_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_SIGN;
1247 0 : else if( FD_LIKELY( !strcmp( link->name, "replay_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_REPLAY;
1248 0 : else if( FD_LIKELY( !strcmp( link->name, "executed_txn" ) ) ) ctx->in_kind[ i ] = IN_KIND_EXECUTED_TXN;
1249 0 : else FD_LOG_ERR(( "pack tile has unexpected input link %lu %s", i, link->name ));
1250 0 : }
1251 :
1252 0 : ulong execle_cnt = 0UL;
1253 0 : for( ulong i=0UL; i<topo->tile_cnt; i++ ) {
1254 0 : fd_topo_tile_t const * consumer_tile = &topo->tiles[ i ];
1255 0 : if( FD_UNLIKELY( strcmp( consumer_tile->name, "execle" ) && strcmp( consumer_tile->name, "replay" ) ) ) continue;
1256 0 : for( ulong j=0UL; j<consumer_tile->in_cnt; j++ ) {
1257 0 : if( FD_UNLIKELY( consumer_tile->in_link_id[ j ]==tile->out_link_id[ 0 ] ) ) execle_cnt++;
1258 0 : }
1259 0 : }
1260 :
1261 : // if( FD_UNLIKELY( !execle_cnt ) ) FD_LOG_ERR(( "pack tile connects to no execle tiles" ));
1262 0 : if( FD_UNLIKELY( execle_cnt>FD_PACK_MAX_EXECLE_TILES ) ) FD_LOG_ERR(( "pack tile connects to too many execle tiles" ));
1263 : // if( FD_UNLIKELY( execle_cnt!=tile->pack.execle_tile_count ) ) FD_LOG_ERR(( "pack tile connects to %lu execle tiles, but tile->pack.execle_tile_count is %lu", execle_cnt, tile->pack.execle_tile_count ));
1264 :
1265 0 : FD_TEST( (tile->pack.schedule_strategy>=0) & (tile->pack.schedule_strategy<=FD_PACK_STRATEGY_BUNDLE) );
1266 :
1267 0 : ctx->crank->enabled = tile->pack.bundle.enabled;
1268 0 : if( FD_UNLIKELY( tile->pack.bundle.enabled ) ) {
1269 0 : if( FD_UNLIKELY( !fd_bundle_crank_gen_init( ctx->crank->gen, (fd_acct_addr_t const *)tile->pack.bundle.tip_distribution_program_addr,
1270 0 : (fd_acct_addr_t const *)tile->pack.bundle.tip_payment_program_addr,
1271 0 : (fd_acct_addr_t const *)ctx->crank->vote_pubkey->b,
1272 0 : (fd_acct_addr_t const *)tile->pack.bundle.tip_distribution_authority,
1273 0 : schedule_strategy_strings[ tile->pack.schedule_strategy ],
1274 0 : tile->pack.bundle.commission_bps ) ) ) {
1275 0 : FD_LOG_ERR(( "constructing bundle generator failed" ));
1276 0 : }
1277 :
1278 0 : ulong sign_in_idx = fd_topo_find_tile_in_link ( topo, tile, "sign_pack", tile->kind_id );
1279 0 : ulong sign_out_idx = fd_topo_find_tile_out_link( topo, tile, "pack_sign", tile->kind_id );
1280 0 : FD_TEST( sign_in_idx!=ULONG_MAX );
1281 0 : fd_topo_link_t * sign_in = &topo->links[ tile->in_link_id[ sign_in_idx ] ];
1282 0 : fd_topo_link_t * sign_out = &topo->links[ tile->out_link_id[ sign_out_idx ] ];
1283 0 : if( FD_UNLIKELY( !fd_keyguard_client_join( fd_keyguard_client_new( ctx->crank->keyguard_client,
1284 0 : sign_out->mcache,
1285 0 : sign_out->dcache,
1286 0 : sign_in->mcache,
1287 0 : sign_in->dcache,
1288 0 : sign_out->mtu ) ) ) ) {
1289 0 : FD_LOG_ERR(( "failed to construct keyguard" ));
1290 0 : }
1291 : /* Initialize enough of the prev config that it produces a
1292 : transaction */
1293 0 : ctx->crank->prev_config->discriminator = 0x82ccfa1ee0aa0c9bUL;
1294 0 : ctx->crank->prev_config->tip_receiver->b[1] = 1;
1295 0 : ctx->crank->prev_config->block_builder->b[2] = 1;
1296 :
1297 0 : memset( ctx->crank->tip_receiver_owner, '\0', 32UL );
1298 0 : memset( ctx->crank->recent_blockhash, '\0', 32UL );
1299 0 : memset( ctx->crank->last_sig, '\0', 64UL );
1300 0 : ctx->crank->ib_inserted = 0;
1301 0 : ctx->crank->epoch = 0UL;
1302 0 : ctx->crank->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->id_keyswitch_obj_id ) );
1303 0 : FD_TEST( ctx->crank->keyswitch );
1304 0 : } else {
1305 0 : memset( ctx->crank, '\0', sizeof(ctx->crank) );
1306 0 : }
1307 :
1308 :
1309 : #if FD_PACK_USE_EXTRA_STORAGE
1310 : ctx->extra_txn_deq = extra_txn_deq_join( extra_txn_deq_new( FD_SCRATCH_ALLOC_APPEND( l, extra_txn_deq_align(),
1311 : extra_txn_deq_footprint() ) ) );
1312 : #endif
1313 :
1314 0 : ctx->cur_spot = NULL;
1315 0 : ctx->is_bundle = 0;
1316 0 : ctx->strategy = tile->pack.schedule_strategy;
1317 0 : ctx->max_pending_transactions = tile->pack.max_pending_transactions;
1318 0 : ctx->leader_slot = ULONG_MAX;
1319 0 : ctx->leader_bank = NULL;
1320 0 : ctx->leader_bank_idx = ULONG_MAX;
1321 0 : ctx->pack_idx = 0UL;
1322 0 : ctx->slot_microblock_cnt = 0UL;
1323 0 : ctx->pack_txn_cnt = 0UL;
1324 0 : ctx->slot_max_microblocks = 0UL;
1325 0 : ctx->slot_max_data = 0UL;
1326 0 : ctx->larger_shred_limits_per_block = tile->pack.larger_shred_limits_per_block;
1327 0 : ctx->drain_execle = 0;
1328 0 : ctx->approx_wallclock_ns = fd_log_wallclock();
1329 0 : ctx->approx_tickcount = fd_tickcount();
1330 0 : ctx->rng = rng;
1331 0 : ctx->ticks_per_ns = fd_tempo_tick_per_ns( NULL );
1332 0 : ctx->last_successful_insert = 0L;
1333 0 : ctx->highest_observed_slot = 0UL;
1334 0 : ctx->microblock_duration_ticks = (ulong)(fd_tempo_tick_per_ns( NULL )*(double)MICROBLOCK_DURATION_NS + 0.5);
1335 : #if FD_PACK_USE_EXTRA_STORAGE
1336 : ctx->insert_to_extra = 0;
1337 : #endif
1338 0 : ctx->use_consumed_cus = tile->pack.use_consumed_cus;
1339 0 : ctx->crank->enabled = tile->pack.bundle.enabled;
1340 :
1341 : #if !SMALL_MICROBLOCKS
1342 : ctx->wait_duration_ticks[ 0 ] = ULONG_MAX;
1343 : for( ulong i=1UL; i<MAX_TXN_PER_MICROBLOCK+1UL; i++ ) {
1344 : ctx->wait_duration_ticks[ i ]=(ulong)(fd_tempo_tick_per_ns( NULL )*(double)wait_duration[ i ] + 0.5);
1345 : }
1346 : #endif
1347 :
1348 0 : ctx->limits.slot_max_cost = limits_lower->max_cost_per_block;
1349 0 : ctx->limits.slot_max_vote_cost = limits_lower->max_vote_cost_per_block;
1350 0 : ctx->limits.slot_max_write_cost_per_acct = limits_lower->max_write_cost_per_acct;
1351 :
1352 0 : ctx->execle_cnt = tile->pack.execle_tile_count;
1353 0 : ctx->poll_cursor = 0;
1354 0 : ctx->skip_cnt = 0L;
1355 0 : ctx->execle_idle_bitset = fd_ulong_mask_lsb( (int)tile->pack.execle_tile_count );
1356 0 : for( ulong i=0UL; i<tile->pack.execle_tile_count; i++ ) {
1357 0 : ulong busy_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "execle_busy.%lu", i );
1358 0 : FD_TEST( busy_obj_id!=ULONG_MAX );
1359 0 : ctx->execle_current[ i ] = fd_fseq_join( fd_topo_obj_laddr( topo, busy_obj_id ) );
1360 0 : ctx->execle_expect[ i ] = ULONG_MAX;
1361 0 : if( FD_UNLIKELY( !ctx->execle_current[ i ] ) ) FD_LOG_ERR(( "execle tile %lu has no busy flag", i ));
1362 0 : ctx->execle_ready_at[ i ] = 0L;
1363 0 : FD_TEST( ULONG_MAX==fd_fseq_query( ctx->execle_current[ i ] ) );
1364 0 : }
1365 :
1366 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
1367 0 : fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
1368 0 : fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
1369 :
1370 0 : ctx->in[ i ].mem = link_wksp->wksp;
1371 0 : ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
1372 0 : ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
1373 0 : }
1374 :
1375 0 : ctx->execle_out_mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 0 ] ].dcache_obj_id ].wksp_id ].wksp;
1376 0 : ctx->execle_out_chunk0 = fd_dcache_compact_chunk0( ctx->execle_out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache );
1377 0 : ctx->execle_out_wmark = fd_dcache_compact_wmark ( ctx->execle_out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache, topo->links[ tile->out_link_id[ 0 ] ].mtu );
1378 0 : ctx->execle_out_chunk = ctx->execle_out_chunk0;
1379 :
1380 0 : ctx->poh_out_mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 1 ] ].dcache_obj_id ].wksp_id ].wksp;
1381 0 : ctx->poh_out_chunk0 = fd_dcache_compact_chunk0( ctx->poh_out_mem, topo->links[ tile->out_link_id[ 1 ] ].dcache );
1382 0 : ctx->poh_out_wmark = fd_dcache_compact_wmark ( ctx->poh_out_mem, topo->links[ tile->out_link_id[ 1 ] ].dcache, topo->links[ tile->out_link_id[ 1 ] ].mtu );
1383 0 : ctx->poh_out_chunk = ctx->poh_out_chunk0;
1384 :
1385 : /* Initialize metrics storage */
1386 0 : memset( ctx->insert_result, '\0', FD_PACK_INSERT_RETVAL_CNT * sizeof(ulong) );
1387 0 : fd_histf_join( fd_histf_new( ctx->schedule_duration, FD_MHIST_SECONDS_MIN( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS ),
1388 0 : FD_MHIST_SECONDS_MAX( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS ) ) );
1389 0 : fd_histf_join( fd_histf_new( ctx->no_sched_duration, FD_MHIST_SECONDS_MIN( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS ),
1390 0 : FD_MHIST_SECONDS_MAX( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS ) ) );
1391 0 : fd_histf_join( fd_histf_new( ctx->insert_duration, FD_MHIST_SECONDS_MIN( PACK, INSERT_TRANSACTION_DURATION_SECONDS ),
1392 0 : FD_MHIST_SECONDS_MAX( PACK, INSERT_TRANSACTION_DURATION_SECONDS ) ) );
1393 0 : fd_histf_join( fd_histf_new( ctx->complete_duration, FD_MHIST_SECONDS_MIN( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS ),
1394 0 : FD_MHIST_SECONDS_MAX( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS ) ) );
1395 0 : ctx->metric_state = 0;
1396 0 : ctx->metric_state_begin = fd_tickcount();
1397 0 : memset( ctx->metric_timing, '\0', 16*sizeof(long) );
1398 0 : memset( ctx->current_bundle, '\0', sizeof(ctx->current_bundle) );
1399 0 : memset( ctx->blk_engine_cfg, '\0', sizeof(ctx->blk_engine_cfg) );
1400 0 : memset( ctx->last_sched_metrics, '\0', sizeof(ctx->last_sched_metrics) );
1401 0 : memset( ctx->start_block_sched_metrics, '\0', sizeof(ctx->start_block_sched_metrics) );
1402 0 : memset( ctx->crank->metrics, '\0', sizeof(ctx->crank->metrics) );
1403 :
1404 0 : FD_LOG_INFO(( "packing microblocks of at most %lu transactions to %lu execle tiles using strategy %i", EFFECTIVE_TXN_PER_MICROBLOCK, tile->pack.execle_tile_count, ctx->strategy ));
1405 :
1406 0 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
1407 0 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
1408 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
1409 :
1410 0 : }
1411 :
1412 : static ulong
1413 : populate_allowed_seccomp( fd_topo_t const * topo,
1414 : fd_topo_tile_t const * tile,
1415 : ulong out_cnt,
1416 0 : struct sock_filter * out ) {
1417 0 : (void)topo;
1418 0 : (void)tile;
1419 :
1420 0 : populate_sock_filter_policy_fd_pack_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
1421 0 : return sock_filter_policy_fd_pack_tile_instr_cnt;
1422 0 : }
1423 :
1424 : static ulong
1425 : populate_allowed_fds( fd_topo_t const * topo,
1426 : fd_topo_tile_t const * tile,
1427 : ulong out_fds_cnt,
1428 0 : int * out_fds ) {
1429 0 : (void)topo;
1430 0 : (void)tile;
1431 :
1432 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
1433 :
1434 0 : ulong out_cnt = 0UL;
1435 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
1436 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
1437 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
1438 0 : return out_cnt;
1439 0 : }
1440 :
/* STEM_BURST: credits reserved per stem iteration.  NOTE(review):
   presumably pack publishes at most one frag per iteration of the stem
   loop -- confirm against fd_stem.c. */
#define STEM_BURST (1UL)

/* We want lazy (measured in ns) to be small enough that the producer
   and the consumer never have to wait for credits.  For most tango
   links, we use a default worst case speed coming from 100 Gbps
   Ethernet.  That's not very suitable for microblocks that go from
   pack to bank.  Instead we manually estimate the very aggressive
   1000ns per microblock, and then reduce it further (in line with the
   default lazy value computation) to ensure the random value chosen
   based on this won't lead to credit return stalls. */
#define STEM_LAZY (128L*3000L)

/* Context type handed to every stem callback below. */
#define STEM_CALLBACK_CONTEXT_TYPE  fd_pack_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_pack_ctx_t)

/* Wire the pack tile's callbacks (defined earlier in this file) into
   the stem run-loop template included below. */
#define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
#define STEM_CALLBACK_BEFORE_CREDIT       before_credit
#define STEM_CALLBACK_AFTER_CREDIT        after_credit
#define STEM_CALLBACK_DURING_FRAG         during_frag
#define STEM_CALLBACK_AFTER_FRAG          after_frag
#define STEM_CALLBACK_METRICS_WRITE       metrics_write

#include "../stem/fd_stem.c"
1464 :
/* Topology descriptor for the pack tile.  Registers the sandbox
   allow-lists (seccomp filter and open fds), the scratch memory sizing
   helpers, the two-phase initialization entry points, and the stem
   run loop instantiated by the fd_stem.c template. */
fd_topo_run_tile_t fd_tile_pack = {
  .name                     = "pack",
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds     = populate_allowed_fds,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};
|