Line data Source code
1 : #include "../tiles.h"
2 :
3 : #include "generated/fd_pack_tile_seccomp.h"
4 :
5 : #include "../topo/fd_pod_format.h"
6 : #include "../keyguard/fd_keyload.h"
7 : #include "../keyguard/fd_keyswitch.h"
8 : #include "../keyguard/fd_keyguard.h"
9 : #include "../shred/fd_shredder.h"
10 : #include "../metrics/fd_metrics.h"
11 : #include "../pack/fd_pack.h"
12 : #include "../pack/fd_pack_pacing.h"
13 : #include "../../ballet/base64/fd_base64.h"
14 :
15 : #include <linux/unistd.h>
16 :
17 : /* fd_pack is responsible for taking verified transactions, and
18 : arranging them into "microblocks" (groups) of transactions to
19 : be executed serially. It can try to do clever things so that
20 : multiple microblocks can execute in parallel, if they don't
21 : write to the same accounts. */
22 :
23 0 : #define IN_KIND_RESOLV (0UL)
24 0 : #define IN_KIND_POH (1UL)
25 0 : #define IN_KIND_BANK (2UL)
26 0 : #define IN_KIND_SIGN (3UL)
27 :
28 : #define MAX_SLOTS_PER_EPOCH 432000UL
29 :
/* Pace microblocks to help keep performance more stable.  A nonzero
   duration bounds the microblock rate per bank (e.g. 500,000 ns would be
   2,000 microblocks/second/bank, i.e. 62k txn/sec/bank at 31
   transactions/microblock).  Currently 0, which disables this pacing. */
33 0 : #define MICROBLOCK_DURATION_NS (0L)
34 :
35 : /* There are 151 accepted blockhashes, but those don't include skips.
36 : This check is neither precise nor accurate, but just good enough.
37 : The bank tile does the final check. We give a little margin for a
38 : few percent skip rate. */
39 0 : #define TRANSACTION_LIFETIME_SLOTS 160UL
40 :
41 : /* Time is normally a long, but pack expects a ulong. Add -LONG_MIN to
42 : the time values so that LONG_MIN maps to 0, LONG_MAX maps to
43 : ULONG_MAX, and everything in between maps linearly with a slope of 1.
44 : Just subtracting LONG_MIN results in signed integer overflow, which
45 : is U.B. */
46 : #define TIME_OFFSET 0x8000000000000000UL
47 : FD_STATIC_ASSERT( (ulong)LONG_MIN+TIME_OFFSET==0UL, time_offset );
48 : FD_STATIC_ASSERT( (ulong)LONG_MAX+TIME_OFFSET==ULONG_MAX, time_offset );
49 :
50 :
51 : /* Optionally allow a larger limit for benchmarking */
52 0 : #define LARGER_MAX_COST_PER_BLOCK (18UL*48000000UL)
53 :
54 : /* 1.5 M cost units, enough for 1 max size transaction */
55 : const ulong CUS_PER_MICROBLOCK = 1500000UL;
56 :
57 : #define SMALL_MICROBLOCKS 1
58 :
59 : #if SMALL_MICROBLOCKS
60 : const float VOTE_FRACTION = 1.0f; /* schedule all available votes first */
61 3 : #define EFFECTIVE_TXN_PER_MICROBLOCK 1UL
62 : #else
63 : const float VOTE_FRACTION = 0.75f; /* TODO: Is this the right value? */
64 : #define EFFECTIVE_TXN_PER_MICROBLOCK MAX_TXN_PER_MICROBLOCK
65 : #endif
66 :
/* There's overhead associated with each microblock that the bank tile
   tries to execute, so the optimal strategy is not to produce a microblock
69 : with a single transaction as soon as we receive it. Basically, if we
70 : have less than 31 transactions, we want to wait a little to see if we
71 : receive additional transactions before we schedule a microblock. We
72 : can model the optimum amount of time to wait, but the equation is
73 : complicated enough that we want to compute it before compile time.
74 : wait_duration[i] for i in [0, 31] gives the time in nanoseconds pack
75 : should wait after receiving its most recent transaction before
76 : scheduling if it has i transactions available. Unsurprisingly,
77 : wait_duration[31] is 0. wait_duration[0] is ULONG_MAX, so we'll
78 : always wait if we have 0 transactions. */
79 : FD_IMPORT( wait_duration, "src/disco/pack/pack_delay.bin", ulong, 6, "" );
80 :
81 :
82 :
83 : #if FD_PACK_USE_EXTRA_STORAGE
84 : /* When we are done being leader for a slot and we are leader in the
85 : very next slot, it can still take some time to transition. This is
86 : because the bank has to be finalized, a hash calculated, and various
87 : other things done in the replay stage to create the new child bank.
88 :
89 : During that time, pack cannot send transactions to banks so it needs
90 : to be able to buffer. Typically, these so called "leader
91 : transitions" are short (<15 millis), so a low value here would
92 : suffice. However, in some cases when there is memory pressure on the
93 : NUMA node or when the operating system context switches relevant
94 : threads out, it can take significantly longer.
95 :
96 : To prevent drops in these cases and because we assume banks are fast
97 : enough to drain this buffer once we do become leader, we set this
98 : buffer size to be quite large. */
99 :
100 : #define DEQUE_NAME extra_txn_deq
101 : #define DEQUE_T fd_txn_e_t
102 : #define DEQUE_MAX (128UL*1024UL)
103 : #include "../../../../util/tmpl/fd_deque.c"
104 :
105 : #endif
106 :
/* Block builder configuration used as bundle metadata (must stay
   exactly BUNDLE_META_SZ bytes; see the static assert below): the
   account that collects the block builder commission and the
   commission itself.  NOTE(review): commission units (bps vs percent)
   are not visible here -- confirm against the bundle crank code. */
typedef struct {
  fd_acct_addr_t commission_pubkey[1];
  ulong          commission;
} block_builder_info_t;
111 :
/* Per-input-link state: the workspace backing the link's data region
   and the [chunk0, wmark] range used to bounds-check incoming chunk
   indices in during_frag. */
typedef struct {
  fd_wksp_t * mem;
  ulong       chunk0;
  ulong       wmark;
} fd_pack_in_ctx_t;
117 :
/* Main run-time context for the pack tile.  Lives in the tile's
   scratch region (see scratch_footprint). */
typedef struct {
  fd_pack_t *  pack;      /* the underlying pack object */
  fd_txn_e_t * cur_spot;  /* in-progress insertion slot, or NULL when no insert is speculative */
  int          is_bundle; /* is the current transaction a bundle */

  /* The value passed to fd_pack_new, etc. */
  ulong max_pending_transactions;

  /* The leader slot we are currently packing for, or ULONG_MAX if we
     are not the leader. */
  ulong        leader_slot;
  void const * leader_bank;

  /* The number of microblocks we have packed for the current leader
     slot.  Will always be <= slot_max_microblocks.  We must track
     this so that when we are done we can tell the PoH tile how many
     microblocks to expect in the slot. */
  ulong slot_microblock_cnt;

  /* The maximum number of microblocks that can be packed in this slot.
     Provided by the PoH tile when we become leader. */
  ulong slot_max_microblocks;

  /* Cap (in bytes) of the amount of transaction data we produce in each
     block to avoid hitting the shred limits.  See where this is set for
     more explanation. */
  ulong slot_max_data;
  int   larger_shred_limits_per_block;

  /* Cost limit (in cost units) for each block.  Typically
     FD_PACK_MAX_COST_PER_BLOCK or LARGER_MAX_COST_PER_BLOCK. */
  ulong slot_max_cost;

  /* If drain_banks is non-zero, then the pack tile must wait until all
     banks are idle before scheduling any more microblocks.  This is
     primarily helpful in irregular leader transitions, e.g. while being
     leader for slot N, we switch forks to a slot M (!=N+1) in which we
     are also leader.  We don't want to execute microblocks for
     different slots concurrently. */
  int drain_banks;

  /* Updated during housekeeping and used only for checking if the
     leader slot has ended.  Might be off by one housekeeping duration,
     but that should be small relative to a slot duration. */
  long approx_wallclock_ns;

  fd_rng_t * rng;

  /* The end wallclock time of the leader slot we are currently packing
     for, if we are currently packing for a slot.

     _slot_end_ns is used as a temporary between during_frag and
     after_frag in case the tile gets overrun. */
  long _slot_end_ns;
  long slot_end_ns;

  /* pacer and ticks_per_ns are used for pacing CUs through the slot,
     i.e. deciding when to schedule a microblock given the number of CUs
     that have been consumed so far.  pacer is an opaque pacing object,
     which is initialized when the pack tile is packing a slot.
     ticks_per_ns is the cached value from tempo. */
  fd_pack_pacing_t pacer[1];
  double           ticks_per_ns;

  /* last_successful_insert stores the tickcount of the last
     successful transaction insert. */
  long last_successful_insert;

  /* highest_observed_slot stores the highest slot number we've seen
     from any transaction coming from the resolv tile.  When this
     increases, we expire old transactions. */
  ulong highest_observed_slot;

  /* microblock_duration_ns, and wait_duration
     respectively scaled to be in ticks instead of nanoseconds */
  ulong microblock_duration_ticks;
  ulong wait_duration_ticks[ MAX_TXN_PER_MICROBLOCK+1UL ];

#if FD_PACK_USE_EXTRA_STORAGE
  /* In addition to the available transactions that pack knows about, we
     also store a larger ring buffer for handling cases when pack is
     full.  This is an fd_deque. */
  fd_txn_e_t * extra_txn_deq;
  int          insert_to_extra; /* whether the last insert was into pack or the extra deq */
#endif

  /* Per-link input state and the kind (IN_KIND_*) of each input. */
  fd_pack_in_ctx_t in[ 32 ];
  int              in_kind[ 32 ];

  ulong bank_cnt;
  ulong bank_idle_bitset; /* bit i is 1 if we've observed *bank_current[i]==bank_expect[i] */
  int   poll_cursor;      /* in [0, bank_cnt), the next bank to poll */
  int   use_consumed_cus;
  long  skip_cnt;         /* after_credit returns immediately while this is positive */
  ulong * bank_current[ FD_PACK_MAX_BANK_TILES ];
  ulong   bank_expect[ FD_PACK_MAX_BANK_TILES ];
  /* bank_ready_at[x] means don't check bank x until tickcount is at
     least bank_ready_at[x]. */
  long bank_ready_at[ FD_PACK_MAX_BANK_TILES ];

  /* Output dcache region for publishing microblocks / notifications. */
  fd_wksp_t * out_mem;
  ulong       out_chunk0;
  ulong       out_wmark;
  ulong       out_chunk;

  /* Metrics accumulators: insert result histogram (indexed by
     retval+FD_PACK_INSERT_RETVAL_OFF) and timing histograms. */
  ulong      insert_result[ FD_PACK_INSERT_RETVAL_CNT ];
  fd_histf_t schedule_duration[ 1 ];
  fd_histf_t no_sched_duration[ 1 ];
  fd_histf_t insert_duration [ 1 ];
  fd_histf_t complete_duration[ 1 ];

  /* metric_state packs the four FD_PACK_METRIC_STATE_* bits; timing of
     each of the 16 possible combinations is accumulated in
     metric_timing (see update_metric_state). */
  struct {
    uint metric_state;
    long metric_state_begin;
    long metric_timing[ 16 ];
  };

  /* Snapshot of the thread-local metrics at the last schedule, used by
     log_end_block_metrics to report per-block deltas. */
  struct {
    long  time;
    ulong all[ FD_METRICS_TOTAL_SZ ];
  } last_sched_metrics[1];

  /* State for the bundle currently being received across frags. */
  struct {
    ulong id;
    ulong txn_cnt;
    ulong txn_received;
    ulong min_blockhash_slot;
    fd_txn_e_t * _txn[ FD_PACK_MAX_TXN_PER_BUNDLE ];
    fd_txn_e_t * const * bundle; /* points to _txn when non-NULL */
  } current_bundle[1];

  block_builder_info_t blk_engine_cfg[1];

  /* State for cranking the tip payment program at the start of a
     leader slot (initializer bundles). */
  struct {
    int enabled;
    int ib_inserted; /* in this slot */
    fd_acct_addr_t vote_pubkey[1];
    fd_acct_addr_t identity_pubkey[1];
    fd_bundle_crank_gen_t gen[1];
    fd_acct_addr_t tip_receiver_owner[1];
    ulong epoch;
    fd_bundle_crank_tip_payment_config_t prev_config[1]; /* as of start of slot, then updated */
    uchar recent_blockhash[32];
    fd_ed25519_sig_t last_sig[1];

    fd_keyswitch_t *     keyswitch;
    fd_keyguard_client_t keyguard_client[1];

    ulong metrics[4]; /* indexed by crank outcome; copied out in metrics_write */
  } crank[1];


  /* Used between during_frag and after_frag */
  ulong pending_rebate_sz;
  union{ fd_pack_rebate_t rebate[1]; uchar footprint[USHORT_MAX]; } rebate[1];
} fd_pack_ctx_t;
274 :
275 0 : #define BUNDLE_META_SZ 40UL
276 : FD_STATIC_ASSERT( sizeof(block_builder_info_t)==BUNDLE_META_SZ, blk_engine_cfg );
277 :
278 0 : #define FD_PACK_METRIC_STATE_TRANSACTIONS 0
279 0 : #define FD_PACK_METRIC_STATE_BANKS 1
280 0 : #define FD_PACK_METRIC_STATE_LEADER 2
281 0 : #define FD_PACK_METRIC_STATE_MICROBLOCKS 3
282 :
283 : /* Updates one component of the metric state. If the state has changed,
284 : records the change. */
285 : static inline void
286 : update_metric_state( fd_pack_ctx_t * ctx,
287 : long effective_as_of,
288 : int type,
289 0 : int status ) {
290 0 : uint current_state = fd_uint_insert_bit( ctx->metric_state, type, status );
291 0 : if( FD_UNLIKELY( current_state!=ctx->metric_state ) ) {
292 0 : ctx->metric_timing[ ctx->metric_state ] += effective_as_of - ctx->metric_state_begin;
293 0 : ctx->metric_state_begin = effective_as_of;
294 0 : ctx->metric_state = current_state;
295 0 : }
296 0 : }
297 :
298 : static inline void
299 0 : remove_ib( fd_pack_ctx_t * ctx ) {
300 : /* It's likely the initializer bundle is long scheduled, but we want to
301 : try deleting it just in case. */
302 0 : if( FD_UNLIKELY( ctx->crank->enabled & ctx->crank->ib_inserted ) )
303 0 : fd_pack_delete_transaction( ctx->pack, (fd_ed25519_sig_t const *)ctx->crank->last_sig );
304 0 : ctx->crank->ib_inserted = 0;
305 0 : }
306 :
307 :
308 : FD_FN_CONST static inline ulong
309 3 : scratch_align( void ) {
310 3 : return 4096UL;
311 3 : }
312 :
313 : FD_FN_PURE static inline ulong
314 3 : scratch_footprint( fd_topo_tile_t const * tile ) {
315 3 : fd_pack_limits_t limits[1] = {{
316 3 : .max_cost_per_block = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK,
317 3 : .max_vote_cost_per_block = FD_PACK_MAX_VOTE_COST_PER_BLOCK,
318 3 : .max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT,
319 3 : .max_data_bytes_per_block = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
320 3 : .max_txn_per_microblock = EFFECTIVE_TXN_PER_MICROBLOCK,
321 3 : .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
322 3 : }};
323 :
324 3 : ulong l = FD_LAYOUT_INIT;
325 3 : l = FD_LAYOUT_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
326 3 : l = FD_LAYOUT_APPEND( l, fd_rng_align(), fd_rng_footprint() );
327 3 : l = FD_LAYOUT_APPEND( l, fd_pack_align(), fd_pack_footprint( tile->pack.max_pending_transactions,
328 3 : BUNDLE_META_SZ,
329 3 : tile->pack.bank_tile_count,
330 3 : limits ) );
331 : #if FD_PACK_USE_EXTRA_STORAGE
332 : l = FD_LAYOUT_APPEND( l, extra_txn_deq_align(), extra_txn_deq_footprint() );
333 : #endif
334 3 : return FD_LAYOUT_FINI( l, scratch_align() );
335 3 : }
336 :
/* Logs a one-line INFO summary when a leader block ends: scheduling
   outcome deltas since the last schedule, counts of transactions still
   available by category, the smallest pending transaction, and the CUs
   consumed over the block.  reason is a short static string ("time",
   "microblock", "switch") naming why the block ended.  now is a
   tickcount (same clock as last_sched_metrics->time). */
static inline void
log_end_block_metrics( fd_pack_ctx_t * ctx,
                       long            now,
                       char const *    reason ) {
  /* DELTA: change in a schedule counter since the snapshot taken at the
     last successful schedule; AVAIL: current gauge value. */
#define DELTA( m ) (fd_metrics_tl[ MIDX(COUNTER, PACK, TRANSACTION_SCHEDULE_##m) ] - ctx->last_sched_metrics->all[ MIDX(COUNTER, PACK, TRANSACTION_SCHEDULE_##m) ])
#define AVAIL( m ) (fd_metrics_tl[ MIDX(GAUGE, PACK, AVAILABLE_TRANSACTIONS_##m) ])
  FD_LOG_INFO(( "pack_end_block(slot=%lu,%s,%lx,ticks_since_last_schedule=%ld,reasons=%lu,%lu,%lu,%lu,%lu,%lu,%lu;remaining=%lu+%lu+%lu+%lu;smallest=%lu;cus=%lu->%lu)",
      ctx->leader_slot, reason, ctx->bank_idle_bitset, now-ctx->last_sched_metrics->time,
      DELTA( TAKEN ), DELTA( CU_LIMIT ), DELTA( FAST_PATH ), DELTA( BYTE_LIMIT ), DELTA( WRITE_COST ), DELTA( SLOW_PATH ), DELTA( DEFER_SKIP ),
      AVAIL(REGULAR), AVAIL(VOTES), AVAIL(BUNDLES), AVAIL(CONFLICTING),
      (fd_metrics_tl[ MIDX(GAUGE, PACK, SMALLEST_PENDING_TRANSACTION) ]),
      (ctx->last_sched_metrics->all[ MIDX(GAUGE, PACK, CUS_CONSUMED_IN_BLOCK) ]),
      (fd_metrics_tl [ MIDX(GAUGE, PACK, CUS_CONSUMED_IN_BLOCK) ])
      ));
#undef AVAIL
#undef DELTA
}
354 :
/* Copies this tile's accumulated counters and histograms into the
   shared metrics region, then lets the pack object report its own. */
static inline void
metrics_write( fd_pack_ctx_t * ctx ) {
  FD_MCNT_ENUM_COPY( PACK, TRANSACTION_INSERTED,          ctx->insert_result );
  FD_MCNT_ENUM_COPY( PACK, METRIC_TIMING,        ((ulong*)ctx->metric_timing) );
  FD_MCNT_ENUM_COPY( PACK, BUNDLE_CRANK_STATUS,           ctx->crank->metrics );
  FD_MHIST_COPY( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS, ctx->schedule_duration );
  FD_MHIST_COPY( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS, ctx->no_sched_duration );
  FD_MHIST_COPY( PACK, INSERT_TRANSACTION_DURATION_SECONDS,  ctx->insert_duration );
  FD_MHIST_COPY( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS, ctx->complete_duration );

  fd_pack_metrics_write( ctx->pack );
}
367 :
368 : static inline void
369 0 : during_housekeeping( fd_pack_ctx_t * ctx ) {
370 0 : ctx->approx_wallclock_ns = fd_log_wallclock();
371 :
372 0 : if( FD_UNLIKELY( ctx->crank->enabled && fd_keyswitch_state_query( ctx->crank->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
373 0 : fd_memcpy( ctx->crank->identity_pubkey, ctx->crank->keyswitch->bytes, 32UL );
374 0 : fd_keyswitch_state( ctx->crank->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
375 0 : }
376 0 : }
377 :
/* Runs before stem polls the input links.  Its only job is to recover
   storage leaked by an overrun: if a non-bundle insert was in flight
   when during_frag was overrun, cur_spot still points at a reserved
   slot that must be returned. */
static inline void
before_credit( fd_pack_ctx_t *     ctx,
               fd_stem_context_t * stem,
               int *               charge_busy ) {
  (void)stem;

  if( FD_UNLIKELY( (ctx->cur_spot!=NULL) & !ctx->is_bundle ) ) {
    *charge_busy = 1;

    /* If we were overrun while processing a frag from an in, then
       cur_spot is left dangling and not cleaned up, so clean it up here
       (by returning the slot to the pool of free slots). If the last
       transaction was a bundle, then we don't want to return it. When
       we try to process the first transaction in the next bundle, we'll
       see we never got the full bundle and cancel the whole last
       bundle, returning all the storage to the pool. */
#if FD_PACK_USE_EXTRA_STORAGE
    if( FD_LIKELY( !ctx->insert_to_extra ) ) fd_pack_insert_txn_cancel( ctx->pack, ctx->cur_spot );
    else                                     extra_txn_deq_remove_tail( ctx->extra_txn_deq );
#else
    fd_pack_insert_txn_cancel( ctx->pack, ctx->cur_spot );
#endif
    ctx->cur_spot = NULL;
  }
}
403 :
404 : #if FD_PACK_USE_EXTRA_STORAGE
405 : /* insert_from_extra: helper method to pop the transaction at the head
406 : off the extra txn deque and insert it into pack. Requires that
407 : ctx->extra_txn_deq is non-empty, but it's okay to call it if pack is
408 : full. Returns the result of fd_pack_insert_txn_fini. */
/* Pops the transaction at the head of the extra txn deque and inserts
   it into pack: copies payload, parsed txn, and address-lookup-table
   accounts into a freshly reserved pack slot, then finalizes the
   insert.  Requires ctx->extra_txn_deq non-empty; okay to call when
   pack is full (fd_pack_insert_txn_fini reports that via its return).
   Updates insert metrics.  Returns the result of
   fd_pack_insert_txn_fini (negative on failure). */
static inline int
insert_from_extra( fd_pack_ctx_t * ctx ) {
  fd_txn_e_t       * spot       = fd_pack_insert_txn_init( ctx->pack );
  fd_txn_e_t const * insert     = extra_txn_deq_peek_head( ctx->extra_txn_deq );
  fd_txn_t   const * insert_txn = TXN(insert->txnp);
  fd_memcpy( spot->txnp->payload, insert->txnp->payload, insert->txnp->payload_sz );
  fd_memcpy( TXN(spot->txnp), insert_txn, fd_txn_footprint( insert_txn->instr_cnt, insert_txn->addr_table_lookup_cnt ) );
  fd_memcpy( spot->alt_accts, insert->alt_accts, insert_txn->addr_table_adtl_cnt*sizeof(fd_acct_addr_t) );
  spot->txnp->payload_sz = insert->txnp->payload_sz;
  extra_txn_deq_remove_head( ctx->extra_txn_deq );

  /* Capture before fini; insert points into the deque slot we freed. */
  ulong blockhash_slot = insert->txnp->blockhash_slot;

  long insert_duration = -fd_tickcount();
  int result = fd_pack_insert_txn_fini( ctx->pack, spot, blockhash_slot );
  insert_duration      += fd_tickcount();
  ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ]++;
  fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
  FD_MCNT_INC( PACK, TRANSACTION_INSERTED_FROM_EXTRA, 1UL );
  return result;
}
431 :
/* Main polling work for the pack tile, run whenever stem has credits.
   In order: (1) poll one busy bank for completion; (2) end the slot on
   timeout; (3) if not leader, optionally drain extra storage; (4) wait
   out bank draining / microblock caps / the insert-delay heuristic;
   (5) possibly insert an initializer (crank) bundle; (6) schedule and
   publish the next microblock to an idle bank; (7) refresh metric
   state and, if the microblock cap was just hit, end the slot. */
static inline void
after_credit( fd_pack_ctx_t *     ctx,
              fd_stem_context_t * stem,
              int *               opt_poll_in,
              int *               charge_busy ) {
  (void)opt_poll_in;

  if( FD_UNLIKELY( (ctx->skip_cnt--)>0L ) ) return; /* It would take ages for this to hit LONG_MIN */

  long now = fd_tickcount();

  int pacing_bank_cnt = (int)fd_pack_pacing_enabled_bank_cnt( ctx->pacer, now );
  if( FD_UNLIKELY( !pacing_bank_cnt ) ) return;

  ulong bank_cnt = ctx->bank_cnt;

  /* If we're using CU rebates, then we have one in for each bank in
     addition to the two normal ones.  That means that after_credit will
     be called about (bank_cnt/2) times more frequently per transaction
     we receive. */
  fd_long_store_if( ctx->use_consumed_cus, &(ctx->skip_cnt), (long)(bank_cnt/2UL) );

  /* If any banks are busy, check one of the busy ones see if it is
     still busy. */
  if( FD_LIKELY( ctx->bank_idle_bitset!=fd_ulong_mask_lsb( (int)bank_cnt ) ) ) {
    int   poll_cursor = ctx->poll_cursor;
    ulong busy_bitset = (~ctx->bank_idle_bitset) & fd_ulong_mask_lsb( (int)bank_cnt );

    /* Suppose bank_cnt is 4 and idle_bitset looks something like this
       (pretending it's a uchar):
             0000 1001
                    ^ busy cursor is 1
       Then busy_bitset is
             0000 0110
       Rotate it right by 2 bits
             1000 0001
       Find lsb returns 0, so busy cursor remains 2, and we poll bank 2.

       If instead idle_bitset were
             0000 1110
                    ^
       The rotated version would be
             0100 0000
       Find lsb will return 6, so busy cursor would be set to 0, and
       we'd poll bank 0, which is the right one. */
    poll_cursor++;
    poll_cursor = (poll_cursor + fd_ulong_find_lsb( fd_ulong_rotate_right( busy_bitset, (poll_cursor&63) ) )) & 63;

    if( FD_UNLIKELY(
        /* if microblock duration is 0, bypass the bank_ready_at check
           to avoid a potential cache miss.  Can't use an ifdef here
           because FD_UNLIKELY is a macro, but the compiler should
           eliminate the check easily. */
        ( (MICROBLOCK_DURATION_NS==0L) || (ctx->bank_ready_at[poll_cursor]<now) ) &&
        (fd_fseq_query( ctx->bank_current[poll_cursor] )==ctx->bank_expect[poll_cursor]) ) ) {
      *charge_busy = 1;
      ctx->bank_idle_bitset |= 1UL<<poll_cursor;

      /* The bank caught up to the expected sequence number: release its
         account locks in pack and time how long that takes. */
      long complete_duration = -fd_tickcount();
      int completed = fd_pack_microblock_complete( ctx->pack, (ulong)poll_cursor );
      complete_duration      += fd_tickcount();
      if( FD_LIKELY( completed ) ) fd_histf_sample( ctx->complete_duration, (ulong)complete_duration );
    }

    ctx->poll_cursor = poll_cursor;
  }


  /* If we time out on our slot, then stop being leader.  This can only
     happen in the first after_credit after a housekeeping. */
  if( FD_UNLIKELY( ctx->approx_wallclock_ns>=ctx->slot_end_ns && ctx->leader_slot!=ULONG_MAX ) ) {
    *charge_busy = 1;

    if( FD_UNLIKELY( ctx->slot_microblock_cnt<ctx->slot_max_microblocks )) {
      /* As an optimization, The PoH tile will automatically end a slot
         if it receives the maximum allowed microblocks, since it knows
         there is nothing left to receive.  In that case, we don't need
         to send a DONE_PACKING notification, since they are already on
         the next slot.  If we did send one it would just get dropped. */
      fd_done_packing_t * done_packing = fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
      done_packing->microblocks_in_slot = ctx->slot_microblock_cnt;

      fd_stem_publish( stem, 0UL, fd_disco_poh_sig( ctx->leader_slot, POH_PKT_TYPE_DONE_PACKING, ULONG_MAX ), ctx->out_chunk, sizeof(fd_done_packing_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
      ctx->out_chunk = fd_dcache_compact_next( ctx->out_chunk, sizeof(fd_done_packing_t), ctx->out_chunk0, ctx->out_wmark );
    }

    log_end_block_metrics( ctx, now, "time" );
    ctx->drain_banks         = 1;
    ctx->leader_slot         = ULONG_MAX;
    ctx->slot_microblock_cnt = 0UL;
    fd_pack_end_block( ctx->pack );
    remove_ib( ctx );

    update_metric_state( ctx, now, FD_PACK_METRIC_STATE_LEADER,      0 );
    update_metric_state( ctx, now, FD_PACK_METRIC_STATE_BANKS,       0 );
    update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, 0 );
    return;
  }

  /* Am I leader?  If not, see about inserting at most one transaction
     from extra storage.  It's important not to insert too many
     transactions here, or we won't end up servicing dedup_pack enough.
     If extra storage is empty or pack is full, do nothing. */
  if( FD_UNLIKELY( ctx->leader_slot==ULONG_MAX ) ) {
#if FD_PACK_USE_EXTRA_STORAGE
    if( FD_UNLIKELY( !extra_txn_deq_empty( ctx->extra_txn_deq ) &&
                     fd_pack_avail_txn_cnt( ctx->pack )<ctx->max_pending_transactions ) ) {
      *charge_busy = 1;

      int result = insert_from_extra( ctx );
      if( FD_LIKELY( result>=0 ) ) ctx->last_successful_insert = now;
    }
#endif
    return;
  }

  /* Am I in drain mode?  If so, check if I can exit it */
  if( FD_UNLIKELY( ctx->drain_banks ) ) {
    if( FD_LIKELY( ctx->bank_idle_bitset==fd_ulong_mask_lsb( (int)bank_cnt ) ) ) ctx->drain_banks = 0;
    else return;
  }

  /* Have I sent the max allowed microblocks?  Nothing to do. */
  if( FD_UNLIKELY( ctx->slot_microblock_cnt>=ctx->slot_max_microblocks ) ) return;

  /* Do I have enough transactions and/or have I waited enough time? */
  if( FD_UNLIKELY( (ulong)(now-ctx->last_successful_insert) <
      ctx->wait_duration_ticks[ fd_ulong_min( fd_pack_avail_txn_cnt( ctx->pack ), MAX_TXN_PER_MICROBLOCK ) ] ) ) {
    update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, 0 );
    return;
  }

  int any_ready     = 0;
  int any_scheduled = 0;

  *charge_busy = 1;

  if( FD_LIKELY( ctx->crank->enabled ) ) {
    block_builder_info_t const * top_meta = fd_pack_peek_bundle_meta( ctx->pack );
    if( FD_UNLIKELY( top_meta ) ) {
      /* Have bundles, in a reasonable state to crank. */

      fd_txn_e_t * _bundle[ 1UL ];
      fd_txn_e_t * const * bundle = fd_pack_insert_bundle_init( ctx->pack, _bundle, 1UL );

      /* txn_sz==0 means on-chain state already matches (nothing to
         crank); ULONG_MAX means generation failed. */
      ulong txn_sz = fd_bundle_crank_generate( ctx->crank->gen, ctx->crank->prev_config, top_meta->commission_pubkey,
          ctx->crank->identity_pubkey, ctx->crank->tip_receiver_owner, ctx->crank->epoch, top_meta->commission,
          bundle[0]->txnp->payload, TXN( bundle[0]->txnp ) );

      if( FD_LIKELY( txn_sz==0UL ) ) { /* Everything in good shape! */
        fd_pack_insert_bundle_cancel( ctx->pack, bundle, 1UL );
        fd_pack_set_initializer_bundles_ready( ctx->pack );
        ctx->crank->metrics[ 0 ]++;
      }
      else if( FD_LIKELY( txn_sz<ULONG_MAX ) ) {
        bundle[0]->txnp->payload_sz = (ushort)txn_sz;
        memcpy( bundle[0]->txnp->payload+TXN(bundle[0]->txnp)->recent_blockhash_off, ctx->crank->recent_blockhash, 32UL );

        /* Sign the message portion (after the 1-byte sig count and
           64-byte signature) with the identity key via keyguard. */
        fd_keyguard_client_sign( ctx->crank->keyguard_client, bundle[0]->txnp->payload+1UL,
            bundle[0]->txnp->payload+65UL, txn_sz-65UL, FD_KEYGUARD_SIGN_TYPE_ED25519 );

        memcpy( ctx->crank->last_sig, bundle[0]->txnp->payload+1UL, 64UL );

        ctx->crank->ib_inserted = 1;
        int retval = fd_pack_insert_bundle_fini( ctx->pack, bundle, 1UL, ctx->leader_slot-1UL, 1, NULL );
        ctx->insert_result[ retval + FD_PACK_INSERT_RETVAL_OFF ]++;
        if( FD_UNLIKELY( retval<0 ) ) {
          ctx->crank->metrics[ 3 ]++;
          FD_LOG_WARNING(( "inserting initializer bundle returned %i", retval ));
        } else {
          /* Update the cached copy of the on-chain state.  This seems a
             little dangerous, since we're updating it as if the bundle
             succeeded without knowing if that's true, but here's why
             it's safe:

             From now until we get the rebate call for this initializer
             bundle (which lets us know if it succeeded or failed), pack
             will be in [Pending] state, which means peek_bundle_meta
             will return NULL, so we won't read this state.

             Then, if the initializer bundle failed, we'll go into
             [Failed] IB state until the end of the block, which will
             cause top_meta to remain NULL so we don't read these values
             again.

             Otherwise, the initializer bundle succeeded, which means
             that these are the right values to use. */
          fd_bundle_crank_apply( ctx->crank->gen, ctx->crank->prev_config, top_meta->commission_pubkey,
              ctx->crank->tip_receiver_owner, ctx->crank->epoch, top_meta->commission );
          ctx->crank->metrics[ 1 ]++;
        }
      } else {
        /* Already logged a warning in this case */
        fd_pack_insert_bundle_cancel( ctx->pack, bundle, 1UL );
        ctx->crank->metrics[ 2 ]++;
      }
    }
  }

  /* Try to schedule the next microblock.  Do we have any idle bank
     tiles in the first `pacing_bank_cnt`? */
  if( FD_LIKELY( ctx->bank_idle_bitset & fd_ulong_mask_lsb( pacing_bank_cnt ) ) ) { /* Optimize for schedule */
    any_ready = 1;

    int i = fd_ulong_find_lsb( ctx->bank_idle_bitset );

    fd_txn_p_t * microblock_dst = fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
    long schedule_duration = -fd_tickcount();
    ulong schedule_cnt = fd_pack_schedule_next_microblock( ctx->pack, CUS_PER_MICROBLOCK, VOTE_FRACTION, (ulong)i, microblock_dst );
    schedule_duration      += fd_tickcount();
    fd_histf_sample( (schedule_cnt>0UL) ? ctx->schedule_duration : ctx->no_sched_duration, (ulong)schedule_duration );

    if( FD_LIKELY( schedule_cnt ) ) {
      any_scheduled = 1;
      long  now2   = fd_tickcount();
      ulong tsorig = (ulong)fd_frag_meta_ts_comp( now  ); /* A bound on when we observed bank was idle */
      ulong tspub  = (ulong)fd_frag_meta_ts_comp( now2 );
      ulong chunk  = ctx->out_chunk;
      ulong msg_sz = schedule_cnt*sizeof(fd_txn_p_t);
      /* Trailer follows the scheduled transactions in the same frag. */
      fd_microblock_bank_trailer_t * trailer = (fd_microblock_bank_trailer_t*)(microblock_dst+schedule_cnt);
      trailer->bank = ctx->leader_bank;
      trailer->microblock_idx = ctx->slot_microblock_cnt;
      trailer->is_bundle = !!(microblock_dst->flags & FD_TXN_P_FLAGS_BUNDLE);

      ulong sig = fd_disco_poh_sig( ctx->leader_slot, POH_PKT_TYPE_MICROBLOCK, (ulong)i );
      fd_stem_publish( stem, 0UL, sig, chunk, msg_sz+sizeof(fd_microblock_bank_trailer_t), 0UL, tsorig, tspub );
      /* Remember the seq number the bank must reach before it's idle
         again, and don't re-poll it before the pacing deadline. */
      ctx->bank_expect[ i ] = stem->seqs[0]-1UL;
      ctx->bank_ready_at[i] = now2 + (long)ctx->microblock_duration_ticks;
      ctx->out_chunk = fd_dcache_compact_next( ctx->out_chunk, msg_sz+sizeof(fd_microblock_bank_trailer_t), ctx->out_chunk0, ctx->out_wmark );
      ctx->slot_microblock_cnt += fd_ulong_if( trailer->is_bundle, schedule_cnt, 1UL );

      ctx->bank_idle_bitset = fd_ulong_pop_lsb( ctx->bank_idle_bitset );
      ctx->skip_cnt         = (long)schedule_cnt * fd_long_if( ctx->use_consumed_cus, (long)bank_cnt/2L, 1L );
      fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now2 );

      /* Snapshot metrics so log_end_block_metrics can report deltas. */
      memcpy( ctx->last_sched_metrics->all, (ulong const *)fd_metrics_tl, sizeof(ctx->last_sched_metrics->all) );
      ctx->last_sched_metrics->time = now2;
    }
  }

  update_metric_state( ctx, now, FD_PACK_METRIC_STATE_BANKS,        any_ready     );
  update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS,  any_scheduled );
  now = fd_tickcount();
  update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, fd_pack_avail_txn_cnt( ctx->pack )>0 );

#if FD_PACK_USE_EXTRA_STORAGE
  if( FD_UNLIKELY( !extra_txn_deq_empty( ctx->extra_txn_deq ) ) ) {
    /* Don't start pulling from the extra storage until the available
       transaction count drops below half. */
    ulong avail_space   = (ulong)fd_long_max( 0L, (long)(ctx->max_pending_transactions>>1)-(long)fd_pack_avail_txn_cnt( ctx->pack ) );
    ulong qty_to_insert = fd_ulong_min( 10UL, fd_ulong_min( extra_txn_deq_cnt( ctx->extra_txn_deq ), avail_space ) );
    int any_successes = 0;
    for( ulong i=0UL; i<qty_to_insert; i++ ) any_successes |= (0<=insert_from_extra( ctx ));
    if( FD_LIKELY( any_successes ) ) ctx->last_successful_insert = now;
  }
#endif

  /* Did we send the maximum allowed microblocks?  Then end the slot. */
  if( FD_UNLIKELY( ctx->slot_microblock_cnt==ctx->slot_max_microblocks )) {
    update_metric_state( ctx, now, FD_PACK_METRIC_STATE_LEADER,      0 );
    update_metric_state( ctx, now, FD_PACK_METRIC_STATE_BANKS,       0 );
    update_metric_state( ctx, now, FD_PACK_METRIC_STATE_MICROBLOCKS, 0 );
    /* The pack object also does this accounting and increases this
       metric, but we end the slot early so won't see it unless we also
       increment it here. */
    FD_MCNT_INC( PACK, MICROBLOCK_PER_BLOCK_LIMIT, 1UL );
    log_end_block_metrics( ctx, now, "microblock" );
    ctx->drain_banks         = 1;
    ctx->leader_slot         = ULONG_MAX;
    ctx->slot_microblock_cnt = 0UL;
    fd_pack_end_block( ctx->pack );
    remove_ib( ctx );

  }
}
707 :
708 :
709 : /* At this point, we have started receiving frag seq, whose payload
710 : the producer may still overwrite. Speculatively process it here:
    : validate and copy into tile-private storage, and defer any
    : irreversible state changes to after_frag. */
711 :
712 : static inline void
713 : during_frag( fd_pack_ctx_t * ctx,
714 : ulong in_idx,
715 : ulong seq FD_PARAM_UNUSED,
716 : ulong sig,
717 : ulong chunk,
718 : ulong sz,
719 0 : ulong ctl FD_PARAM_UNUSED ) {
720 :
721 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
722 :
723 0 : switch( ctx->in_kind[ in_idx ] ) {
724 0 : case IN_KIND_POH: {
725 : /* Not interested in stamped microblocks, only leader updates. */
726 0 : if( fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_BECAME_LEADER ) return;
727 :
728 : /* There was a leader transition. Handle it. */
729 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz!=sizeof(fd_became_leader_t) ) )
730 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
731 :
732 0 : long now_ticks = fd_tickcount();
733 0 : long now_ns = fd_log_wallclock();
734 :
735 0 : if( FD_UNLIKELY( ctx->leader_slot!=ULONG_MAX ) ) {
736 0 : FD_LOG_WARNING(( "switching to slot %lu while packing for slot %lu. Draining bank tiles.", fd_disco_poh_sig_slot( sig ), ctx->leader_slot ));
737 0 : log_end_block_metrics( ctx, now_ticks, "switch" );
738 0 : ctx->drain_banks = 1;
739 0 : ctx->leader_slot = ULONG_MAX;
740 0 : ctx->slot_microblock_cnt = 0UL;
741 0 : fd_pack_end_block( ctx->pack );
742 0 : remove_ib( ctx );
743 0 : }
744 0 : ctx->leader_slot = fd_disco_poh_sig_slot( sig );
745 :
746 0 : ulong exp_cnt = fd_pack_expire_before( ctx->pack, fd_ulong_max( ctx->leader_slot, TRANSACTION_LIFETIME_SLOTS )-TRANSACTION_LIFETIME_SLOTS );
747 0 : FD_MCNT_INC( PACK, TRANSACTION_EXPIRED, exp_cnt );
748 :
749 0 : fd_became_leader_t * became_leader = (fd_became_leader_t *)dcache_entry;
750 0 : ctx->leader_bank = became_leader->bank;
751 0 : ctx->slot_max_microblocks = became_leader->max_microblocks_in_slot;
752 : /* Reserve some space in the block for ticks */
753 0 : ctx->slot_max_data = (ctx->larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK)
754 0 : - 48UL*(became_leader->ticks_per_slot+became_leader->total_skipped_ticks);
755 : /* ticks_per_ns is probably relatively stable over 400ms, but not
756 : over several hours, so we need to compute the slot duration in
757 : milliseconds first and then convert to ticks. This doesn't need
758 : to be super accurate, but we don't want it to vary wildly. */
759 0 : long end_ticks = now_ticks + (long)((double)fd_long_max( became_leader->slot_end_ns - now_ns, 1L )*ctx->ticks_per_ns);
760 : /* We may still get overrun, but then we'll never use this and just
761 : reinitialize it the next time when we actually become leader. */
762 0 : fd_pack_pacing_init( ctx->pacer, now_ticks, end_ticks, (float)ctx->ticks_per_ns, ctx->slot_max_cost );
763 :
764 0 : if( FD_UNLIKELY( ctx->crank->enabled ) ) {
765 : /* If we get overrun, we'll just never use these values, but the
766 : old values aren't really useful either. */
767 0 : ctx->crank->epoch = became_leader->epoch;
768 0 : *(ctx->crank->prev_config) = *(became_leader->bundle->config);
769 0 : memcpy( ctx->crank->recent_blockhash, became_leader->bundle->last_blockhash, 32UL );
770 0 : memcpy( ctx->crank->tip_receiver_owner, became_leader->bundle->tip_receiver_owner, 32UL );
771 0 : }
772 :
773 0 : FD_LOG_INFO(( "pack_became_leader(slot=%lu,ends_at=%ld)", ctx->leader_slot, became_leader->slot_end_ns ));
774 :
775 : /* The dcache might get overrun, so set slot_end_ns to 0, so if it does
776 : the slot will get skipped. Then update it in the `after_frag` case
777 : below to the correct value. */
778 0 : ctx->slot_end_ns = 0L;
779 0 : ctx->_slot_end_ns = became_leader->slot_end_ns;
780 :
781 0 : update_metric_state( ctx, fd_tickcount(), FD_PACK_METRIC_STATE_LEADER, 1 );
782 0 : return;
783 0 : }
784 0 : case IN_KIND_BANK: {
785 0 : FD_TEST( ctx->use_consumed_cus );
786 : /* For a previous slot */
787 0 : if( FD_UNLIKELY( sig!=ctx->leader_slot ) ) return;
788 :
789 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz<FD_PACK_REBATE_MIN_SZ
790 0 : || sz>FD_PACK_REBATE_MAX_SZ ) )
791 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
792 :
793 0 : ctx->pending_rebate_sz = sz;
794 0 : fd_memcpy( ctx->rebate, dcache_entry, sz );
795 0 : return;
796 0 : }
797 0 : case IN_KIND_RESOLV: {
798 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>FD_TPU_RESOLVED_MTU ) )
799 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
800 :
801 0 : fd_txn_m_t * txnm = (fd_txn_m_t *)dcache_entry;
802 0 : ulong payload_sz = txnm->payload_sz;
803 0 : ulong txn_t_sz = txnm->txn_t_sz;
804 0 : FD_TEST( payload_sz<=FD_TPU_MTU );
805 0 : FD_TEST( txn_t_sz <=FD_TXN_MAX_SZ );
806 0 : fd_txn_t * txn = fd_txn_m_txn_t( txnm );
807 :
808 0 : ulong addr_table_sz = 32UL*txn->addr_table_adtl_cnt;
809 0 : FD_TEST( addr_table_sz<=32UL*FD_TXN_ACCT_ADDR_MAX );
810 :
811 0 : if( FD_UNLIKELY( (ctx->leader_slot==ULONG_MAX) & (sig>ctx->highest_observed_slot) ) ) {
812 : /* Using the resolv tile's knowledge of the current slot is a bit
813 : of a hack, since we don't get any info if there are no
814 : transactions and we're not leader. We're actually in exactly
815 : the case where that's okay though. The point of calling
816 : expire_before long before we become leader is so that we don't
817 : drop new but low-fee-paying transactions when pack is clogged
818 : with expired but high-fee-paying transactions. That can only
819 : happen if we are getting transactions. */
820 0 : ctx->highest_observed_slot = sig;
821 0 : ulong exp_cnt = fd_pack_expire_before( ctx->pack, fd_ulong_max( ctx->highest_observed_slot, TRANSACTION_LIFETIME_SLOTS )-TRANSACTION_LIFETIME_SLOTS );
822 0 : FD_MCNT_INC( PACK, TRANSACTION_EXPIRED, exp_cnt );
823 0 : }
824 :
825 :
826 0 : ulong bundle_id = txnm->block_engine.bundle_id;
827 0 : if( FD_UNLIKELY( bundle_id ) ) {
828 0 : ctx->is_bundle = 1;
829 0 : if( FD_LIKELY( bundle_id!=ctx->current_bundle->id ) ) {
830 0 : if( FD_UNLIKELY( ctx->current_bundle->bundle ) ) {
831 0 : FD_MCNT_INC( PACK, TRANSACTION_DROPPED_PARTIAL_BUNDLE, ctx->current_bundle->txn_received );
832 0 : fd_pack_insert_bundle_cancel( ctx->pack, ctx->current_bundle->bundle, ctx->current_bundle->txn_cnt );
833 0 : }
834 0 : ctx->current_bundle->id = bundle_id;
835 0 : ctx->current_bundle->txn_cnt = txnm->block_engine.bundle_txn_cnt;
836 0 : ctx->current_bundle->min_blockhash_slot = ULONG_MAX;
837 0 : ctx->current_bundle->txn_received = 0UL;
838 :
839 0 : if( FD_UNLIKELY( ctx->current_bundle->txn_cnt==0UL ) ) {
840 0 : FD_MCNT_INC( PACK, TRANSACTION_DROPPED_PARTIAL_BUNDLE, 1UL );
841 0 : ctx->current_bundle->id = 0UL;
842 0 : return;
843 0 : }
844 0 : ctx->blk_engine_cfg->commission = txnm->block_engine.commission;
845 0 : memcpy( ctx->blk_engine_cfg->commission_pubkey->b, txnm->block_engine.commission_pubkey, 32UL );
846 :
847 0 : ctx->current_bundle->bundle = fd_pack_insert_bundle_init( ctx->pack, ctx->current_bundle->_txn, ctx->current_bundle->txn_cnt );
848 0 : }
849 0 : ctx->cur_spot = ctx->current_bundle->bundle[ ctx->current_bundle->txn_received ];
850 0 : ctx->current_bundle->min_blockhash_slot = fd_ulong_min( ctx->current_bundle->min_blockhash_slot, sig );
851 0 : } else {
852 0 : ctx->is_bundle = 0;
853 : #if FD_PACK_USE_EXTRA_STORAGE
854 : if( FD_LIKELY( ctx->leader_slot!=ULONG_MAX || fd_pack_avail_txn_cnt( ctx->pack )<ctx->max_pending_transactions ) ) {
855 : ctx->cur_spot = fd_pack_insert_txn_init( ctx->pack );
856 : ctx->insert_to_extra = 0;
857 : } else {
858 : if( FD_UNLIKELY( extra_txn_deq_full( ctx->extra_txn_deq ) ) ) {
859 : extra_txn_deq_remove_head( ctx->extra_txn_deq );
860 : FD_MCNT_INC( PACK, TRANSACTION_DROPPED_FROM_EXTRA, 1UL );
861 : }
862 : ctx->cur_spot = extra_txn_deq_peek_tail( extra_txn_deq_insert_tail( ctx->extra_txn_deq ) );
863 : /* We want to store the current time in cur_spot so that we can
864 : track its expiration better. We just stash it in the CU
865 : fields, since those aren't important right now. */
866 : ctx->cur_spot->txnp->blockhash_slot = sig;
867 : ctx->insert_to_extra = 1;
868 : FD_MCNT_INC( PACK, TRANSACTION_INSERTED_TO_EXTRA, 1UL );
869 : }
870 : #else
871 0 : ctx->cur_spot = fd_pack_insert_txn_init( ctx->pack );
872 0 : #endif
873 0 : }
874 :
875 : /* We get transactions from the resolv tile.
876 : The transactions should have been parsed and verified. */
877 0 : FD_MCNT_INC( PACK, NORMAL_TRANSACTION_RECEIVED, 1UL );
878 :
879 :
880 0 : fd_memcpy( ctx->cur_spot->txnp->payload, fd_txn_m_payload( txnm ), payload_sz );
881 0 : fd_memcpy( TXN(ctx->cur_spot->txnp), txn, txn_t_sz );
882 0 : fd_memcpy( ctx->cur_spot->alt_accts, fd_txn_m_alut( txnm ), addr_table_sz );
883 0 : ctx->cur_spot->txnp->payload_sz = payload_sz;
884 :
885 0 : break;
886 0 : }
887 0 : }
888 0 : }
889 :
890 :
891 : /* After the transaction has been fully received, and we know we were
892 : not overrun while reading it, insert it into pack. */
893 :
894 : static inline void
895 : after_frag( fd_pack_ctx_t * ctx,
896 : ulong in_idx,
897 : ulong seq,
898 : ulong sig,
899 : ulong sz,
900 : ulong tsorig,
901 : ulong tspub,
902 0 : fd_stem_context_t * stem ) {
903 0 : (void)seq;
904 0 : (void)sz;
905 0 : (void)tsorig;
906 0 : (void)tspub;
907 0 : (void)stem;
908 :
909 0 : long now = fd_tickcount();
910 :
911 0 : switch( ctx->in_kind[ in_idx ] ) {
912 0 : case IN_KIND_POH: {
913 0 : if( fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_BECAME_LEADER ) return;
914 :
    : /* The frag is now known good: publish the values stashed by
    :    during_frag (the real slot end time and the block limits). */
915 0 : ctx->slot_end_ns = ctx->_slot_end_ns;
916 0 : fd_pack_set_block_limits( ctx->pack, ctx->slot_max_microblocks, ctx->slot_max_data );
917 0 : fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now );
918 :
919 0 : break;
920 0 : }
921 0 : case IN_KIND_BANK: {
922 : /* For a previous slot */
923 0 : if( FD_UNLIKELY( sig!=ctx->leader_slot ) ) return;
924 :
    : /* Apply the rebate copied into ctx->rebate by during_frag, then
    :    refresh the pacer with the updated block cost. */
925 0 : fd_pack_rebate_cus( ctx->pack, ctx->rebate->rebate );
926 0 : ctx->pending_rebate_sz = 0UL;
927 0 : fd_pack_pacing_update_consumed_cus( ctx->pacer, fd_pack_current_block_cost( ctx->pack ), now );
928 0 : break;
929 0 : }
930 0 : case IN_KIND_RESOLV: {
931 : /* Normal transaction case */
    : /* Commit the transaction copied in during_frag. For bundles, only
    :    the final member triggers insert_bundle_fini; a partial bundle
    :    stays pending until its remaining members arrive. */
932 : #if FD_PACK_USE_EXTRA_STORAGE
933 : if( FD_LIKELY( !ctx->insert_to_extra ) ) {
934 : #else
935 0 : if( 1 ) {
936 0 : #endif
937 0 : if( FD_UNLIKELY( ctx->is_bundle ) ) {
938 0 : if( FD_UNLIKELY( ctx->current_bundle->txn_cnt==0UL ) ) return;
939 0 : if( FD_UNLIKELY( ++(ctx->current_bundle->txn_received)==ctx->current_bundle->txn_cnt ) ) {
940 0 : long insert_duration = -fd_tickcount();
941 0 : int result = fd_pack_insert_bundle_fini( ctx->pack, ctx->current_bundle->bundle, ctx->current_bundle->txn_cnt, ctx->current_bundle->min_blockhash_slot, 0, ctx->blk_engine_cfg );
942 0 : insert_duration += fd_tickcount();
    : /* FD_PACK_INSERT_RETVAL_OFF shifts possibly-negative insert
    :    return codes into valid array indices. */
943 0 : ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ] += ctx->current_bundle->txn_received;
944 0 : fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
945 0 : ctx->current_bundle->bundle = NULL;
946 0 : }
947 0 : } else {
948 0 : ulong blockhash_slot = sig;
949 0 : long insert_duration = -fd_tickcount();
950 0 : int result = fd_pack_insert_txn_fini( ctx->pack, ctx->cur_spot, blockhash_slot );
951 0 : insert_duration += fd_tickcount();
952 0 : ctx->insert_result[ result + FD_PACK_INSERT_RETVAL_OFF ]++;
953 0 : fd_histf_sample( ctx->insert_duration, (ulong)insert_duration );
954 0 : if( FD_LIKELY( result>=0 ) ) ctx->last_successful_insert = now;
955 0 : }
956 0 : }
957 :
958 0 : ctx->cur_spot = NULL;
959 0 : break;
960 0 : }
961 0 : }
962 :
963 0 : update_metric_state( ctx, now, FD_PACK_METRIC_STATE_TRANSACTIONS, fd_pack_avail_txn_cnt( ctx->pack )>0 );
964 0 : }
965 :
966 : static void
967 : privileged_init( fd_topo_t * topo,
968 0 : fd_topo_tile_t * tile ) {
    : /* Key material is only needed for bundle cranking; nothing to do
    :    otherwise. NOTE(review): presumably this hook runs before the
    :    tile is sandboxed (it reads key files from disk) — confirm. */
969 0 : if( FD_LIKELY( !tile->pack.bundle.enabled ) ) return;
970 :
971 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
972 :
973 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
974 0 : fd_pack_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
975 :
976 0 : if( FD_UNLIKELY( !strcmp( tile->pack.bundle.identity_key_path, "" ) ) )
977 0 : FD_LOG_ERR(( "identity_key_path not set" ));
978 :
    : /* Only the 32-byte public key is loaded (pubkey-only flag); the
    :    private key never enters this tile. */
979 0 : const uchar * identity_key = fd_keyload_load( tile->pack.bundle.identity_key_path, /* pubkey only: */ 1 );
980 0 : fd_memcpy( ctx->crank->identity_pubkey->b, identity_key, 32UL );
981 :
    : /* vote_account_path may be either a base58-encoded pubkey or a
    :    path to a key file: try decoding it directly first, and fall
    :    back to loading the pubkey from disk. */
982 0 : if( FD_UNLIKELY( !fd_base58_decode_32( tile->pack.bundle.vote_account_path, ctx->crank->vote_pubkey->b ) ) ) {
983 0 : const uchar * vote_key = fd_keyload_load( tile->pack.bundle.vote_account_path, /* pubkey only: */ 1 );
984 0 : fd_memcpy( ctx->crank->vote_pubkey->b, vote_key, 32UL );
985 0 : }
986 0 : }
987 :
988 : static void
989 : unprivileged_init( fd_topo_t * topo,
990 0 : fd_topo_tile_t * tile ) {
991 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
992 :
    : /* Cost limits handed to fd_pack. The microblock-per-block limit is
    :    unknown until we actually become leader, so it starts effectively
    :    unbounded here and is tightened later via
    :    fd_pack_set_block_limits. */
993 0 : fd_pack_limits_t limits[1] = {{
994 0 : .max_cost_per_block = tile->pack.larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : FD_PACK_MAX_COST_PER_BLOCK,
995 0 : .max_vote_cost_per_block = FD_PACK_MAX_VOTE_COST_PER_BLOCK,
996 0 : .max_write_cost_per_acct = FD_PACK_MAX_WRITE_COST_PER_ACCT,
997 0 : .max_data_bytes_per_block = tile->pack.larger_shred_limits_per_block ? LARGER_MAX_DATA_PER_BLOCK : FD_PACK_MAX_DATA_PER_BLOCK,
998 0 : .max_txn_per_microblock = EFFECTIVE_TXN_PER_MICROBLOCK,
999 0 : .max_microblocks_per_block = (ulong)UINT_MAX, /* Limit not known yet */
1000 0 : }};
1001 :
1002 0 : if( FD_UNLIKELY( tile->pack.max_pending_transactions >= USHORT_MAX-10UL ) ) FD_LOG_ERR(( "pack tile supports up to %lu pending transactions", USHORT_MAX-11UL ));
1003 :
    : /* Carve the pack object, rng, and (optionally) extra txn storage
    :    out of this tile's scratch region. */
1004 0 : ulong pack_footprint = fd_pack_footprint( tile->pack.max_pending_transactions, BUNDLE_META_SZ, tile->pack.bank_tile_count, limits );
1005 :
1006 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1007 0 : fd_pack_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_pack_ctx_t ), sizeof( fd_pack_ctx_t ) );
1008 0 : fd_rng_t * rng = fd_rng_join( fd_rng_new( FD_SCRATCH_ALLOC_APPEND( l, fd_rng_align(), fd_rng_footprint() ), 0U, 0UL ) );
1009 0 : if( FD_UNLIKELY( !rng ) ) FD_LOG_ERR(( "fd_rng_new failed" ));
1010 :
1011 0 : ctx->pack = fd_pack_join( fd_pack_new( FD_SCRATCH_ALLOC_APPEND( l, fd_pack_align(), pack_footprint ),
1012 0 : tile->pack.max_pending_transactions, BUNDLE_META_SZ, tile->pack.bank_tile_count,
1013 0 : limits, rng ) );
1014 0 : if( FD_UNLIKELY( !ctx->pack ) ) FD_LOG_ERR(( "fd_pack_new failed" ));
1015 :
1016 0 : if( FD_UNLIKELY( tile->in_cnt>32UL ) ) FD_LOG_ERR(( "Too many input links (%lu>32) to pack tile", tile->in_cnt ));
1017 :
    : /* Classify each input link by name so the frag handlers can
    :    dispatch on in_kind; an unrecognized link is a topology bug. */
1018 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
1019 0 : fd_topo_link_t const * link = &topo->links[ tile->in_link_id[ i ] ];
1020 :
1021 0 : if( FD_LIKELY( !strcmp( link->name, "resolv_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
1022 0 : else if( FD_LIKELY( !strcmp( link->name, "dedup_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
1023 0 : else if( FD_LIKELY( !strcmp( link->name, "poh_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_POH;
1024 0 : else if( FD_LIKELY( !strcmp( link->name, "bank_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_BANK;
1025 0 : else if( FD_LIKELY( !strcmp( link->name, "sign_pack" ) ) ) ctx->in_kind[ i ] = IN_KIND_SIGN;
1026 0 : else FD_LOG_ERR(( "pack tile has unexpected input link %lu %s", i, link->name ));
1027 0 : }
1028 :
    : /* Count bank/replay tiles that consume our first output link; this
    :    must agree exactly with the configured bank_tile_count. */
1029 0 : ulong bank_cnt = 0UL;
1030 0 : for( ulong i=0UL; i<topo->tile_cnt; i++ ) {
1031 0 : fd_topo_tile_t const * consumer_tile = &topo->tiles[ i ];
1032 0 : if( FD_UNLIKELY( strcmp( consumer_tile->name, "bank" ) && strcmp( consumer_tile->name, "replay" ) ) ) continue;
1033 0 : for( ulong j=0UL; j<consumer_tile->in_cnt; j++ ) {
1034 0 : if( FD_UNLIKELY( consumer_tile->in_link_id[ j ]==tile->out_link_id[ 0 ] ) ) bank_cnt++;
1035 0 : }
1036 0 : }
1037 :
1038 0 : if( FD_UNLIKELY( !bank_cnt ) ) FD_LOG_ERR(( "pack tile connects to no banking tiles" ));
1039 0 : if( FD_UNLIKELY( bank_cnt>FD_PACK_MAX_BANK_TILES ) ) FD_LOG_ERR(( "pack tile connects to too many banking tiles" ));
1040 0 : if( FD_UNLIKELY( bank_cnt!=tile->pack.bank_tile_count ) ) FD_LOG_ERR(( "pack tile connects to %lu banking tiles, but tile->pack.bank_tile_count is %lu", bank_cnt, tile->pack.bank_tile_count ));
1041 :
1042 :
    : /* Bundle-crank state: generator, keyguard signing channel, and a
    :    minimally-initialized previous config (pubkeys set in
    :    privileged_init). */
1043 0 : ctx->crank->enabled = tile->pack.bundle.enabled;
1044 0 : if( FD_UNLIKELY( tile->pack.bundle.enabled ) ) {
1045 0 : if( FD_UNLIKELY( !fd_bundle_crank_gen_init( ctx->crank->gen, (fd_acct_addr_t const *)tile->pack.bundle.tip_distribution_program_addr,
1046 0 : (fd_acct_addr_t const *)tile->pack.bundle.tip_payment_program_addr,
1047 0 : (fd_acct_addr_t const *)ctx->crank->vote_pubkey->b,
1048 0 : (fd_acct_addr_t const *)tile->pack.bundle.tip_distribution_authority, tile->pack.bundle.commission_bps ) ) ) {
1049 0 : FD_LOG_ERR(( "constructing bundle generator failed" ));
1050 0 : }
1051 :
1052 0 : ulong sign_in_idx = fd_topo_find_tile_in_link ( topo, tile, "sign_pack", tile->kind_id );
1053 0 : ulong sign_out_idx = fd_topo_find_tile_out_link( topo, tile, "pack_sign", tile->kind_id );
1054 0 : FD_TEST( sign_in_idx!=ULONG_MAX );
1055 0 : fd_topo_link_t * sign_in = &topo->links[ tile->in_link_id[ sign_in_idx ] ];
1056 0 : fd_topo_link_t * sign_out = &topo->links[ tile->out_link_id[ sign_out_idx ] ];
1057 0 : if( FD_UNLIKELY( !fd_keyguard_client_join( fd_keyguard_client_new( ctx->crank->keyguard_client,
1058 0 : sign_out->mcache,
1059 0 : sign_out->dcache,
1060 0 : sign_in->mcache,
1061 0 : sign_in->dcache ) ) ) ) {
1062 0 : FD_LOG_ERR(( "failed to construct keyguard" ));
1063 0 : }
1064 : /* Initialize enough of the prev config that it produces a
1065 : transaction */
1066 0 : ctx->crank->prev_config->discriminator = 0x82ccfa1ee0aa0c9bUL;
1067 0 : ctx->crank->prev_config->tip_receiver->b[1] = 1;
1068 0 : ctx->crank->prev_config->block_builder->b[2] = 1;
1069 :
1070 0 : memset( ctx->crank->tip_receiver_owner, '\0', 32UL );
1071 0 : memset( ctx->crank->recent_blockhash, '\0', 32UL );
1072 0 : memset( ctx->crank->last_sig, '\0', 64UL );
1073 0 : ctx->crank->ib_inserted = 0;
1074 0 : ctx->crank->epoch = 0UL;
1075 0 : ctx->crank->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->keyswitch_obj_id ) );
1076 0 : FD_TEST( ctx->crank->keyswitch );
1077 0 : } else {
    : /* NOTE(review): sizeof(ctx->crank) is the full struct size only if
    :    crank is declared as an array member (foo_t crank[1] idiom); if
    :    it were a pointer this would zero just pointer-size bytes —
    :    confirm against the ctx declaration. Same applies to the memsets
    :    of current_bundle/blk_engine_cfg/last_sched_metrics below. */
1078 0 : memset( ctx->crank, '\0', sizeof(ctx->crank) );
1079 0 : }
1080 :
1081 :
1082 : #if FD_PACK_USE_EXTRA_STORAGE
1083 : ctx->extra_txn_deq = extra_txn_deq_join( extra_txn_deq_new( FD_SCRATCH_ALLOC_APPEND( l, extra_txn_deq_align(),
1084 : extra_txn_deq_footprint() ) ) );
1085 : #endif
1086 :
    : /* Default per-slot state: not leader (leader_slot==ULONG_MAX),
    :    nothing scheduled, no banks draining. */
1087 0 : ctx->cur_spot = NULL;
1088 0 : ctx->is_bundle = 0;
1089 0 : ctx->max_pending_transactions = tile->pack.max_pending_transactions;
1090 0 : ctx->leader_slot = ULONG_MAX;
1091 0 : ctx->leader_bank = NULL;
1092 0 : ctx->slot_microblock_cnt = 0UL;
1093 0 : ctx->slot_max_microblocks = 0UL;
1094 0 : ctx->slot_max_data = 0UL;
1095 0 : ctx->larger_shred_limits_per_block = tile->pack.larger_shred_limits_per_block;
1096 0 : ctx->slot_max_cost = limits->max_cost_per_block;
1097 0 : ctx->drain_banks = 0;
1098 0 : ctx->approx_wallclock_ns = fd_log_wallclock();
1099 0 : ctx->rng = rng;
1100 0 : ctx->ticks_per_ns = fd_tempo_tick_per_ns( NULL );
1101 0 : ctx->last_successful_insert = 0L;
1102 0 : ctx->highest_observed_slot = 0UL;
1103 0 : ctx->microblock_duration_ticks = (ulong)(fd_tempo_tick_per_ns( NULL )*(double)MICROBLOCK_DURATION_NS + 0.5);
1104 : #if FD_PACK_USE_EXTRA_STORAGE
1105 : ctx->insert_to_extra = 0;
1106 : #endif
1107 0 : ctx->use_consumed_cus = tile->pack.use_consumed_cus;
    : /* NOTE(review): crank->enabled was already assigned identically
    :    above (before the bundle branch); this second assignment is
    :    redundant but harmless. */
1108 0 : ctx->crank->enabled = tile->pack.bundle.enabled;
1109 :
    : /* Convert per-txn-count wait durations from ns to ticks; index 0
    :    (no transactions available) waits forever. */
1110 0 : ctx->wait_duration_ticks[ 0 ] = ULONG_MAX;
1111 0 : for( ulong i=1UL; i<MAX_TXN_PER_MICROBLOCK+1UL; i++ ) {
1112 0 : ctx->wait_duration_ticks[ i ]=(ulong)(fd_tempo_tick_per_ns( NULL )*(double)wait_duration[ i ] + 0.5);
1113 0 : }
1114 :
1115 :
    : /* Join each bank tile's busy fseq; all banks must start idle
    :    (fseq==ULONG_MAX), and the idle bitset starts with all banks
    :    available. */
1116 0 : ctx->bank_cnt = tile->pack.bank_tile_count;
1117 0 : ctx->poll_cursor = 0;
1118 0 : ctx->skip_cnt = 0L;
1119 0 : ctx->bank_idle_bitset = fd_ulong_mask_lsb( (int)tile->pack.bank_tile_count );
1120 0 : for( ulong i=0UL; i<tile->pack.bank_tile_count; i++ ) {
1121 0 : ulong busy_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "bank_busy.%lu", i );
1122 0 : FD_TEST( busy_obj_id!=ULONG_MAX );
1123 0 : ctx->bank_current[ i ] = fd_fseq_join( fd_topo_obj_laddr( topo, busy_obj_id ) );
1124 0 : ctx->bank_expect[ i ] = ULONG_MAX;
1125 0 : if( FD_UNLIKELY( !ctx->bank_current[ i ] ) ) FD_LOG_ERR(( "banking tile %lu has no busy flag", i ));
1126 0 : ctx->bank_ready_at[ i ] = 0L;
1127 0 : FD_TEST( ULONG_MAX==fd_fseq_query( ctx->bank_current[ i ] ) );
1128 0 : }
1129 :
    : /* Cache dcache bounds for each input link and the single output
    :    link, for the chunk range checks in during_frag. */
1130 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
1131 0 : fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
1132 0 : fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
1133 :
1134 0 : ctx->in[ i ].mem = link_wksp->wksp;
1135 0 : ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
1136 0 : ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
1137 0 : }
1138 :
1139 0 : ctx->out_mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 0 ] ].dcache_obj_id ].wksp_id ].wksp;
1140 0 : ctx->out_chunk0 = fd_dcache_compact_chunk0( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache );
1141 0 : ctx->out_wmark = fd_dcache_compact_wmark ( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache, topo->links[ tile->out_link_id[ 0 ] ].mtu );
1142 0 : ctx->out_chunk = ctx->out_chunk0;
1143 :
1144 : /* Initialize metrics storage */
1145 0 : memset( ctx->insert_result, '\0', FD_PACK_INSERT_RETVAL_CNT * sizeof(ulong) );
1146 0 : fd_histf_join( fd_histf_new( ctx->schedule_duration, FD_MHIST_SECONDS_MIN( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS ),
1147 0 : FD_MHIST_SECONDS_MAX( PACK, SCHEDULE_MICROBLOCK_DURATION_SECONDS ) ) );
1148 0 : fd_histf_join( fd_histf_new( ctx->no_sched_duration, FD_MHIST_SECONDS_MIN( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS ),
1149 0 : FD_MHIST_SECONDS_MAX( PACK, NO_SCHED_MICROBLOCK_DURATION_SECONDS ) ) );
1150 0 : fd_histf_join( fd_histf_new( ctx->insert_duration, FD_MHIST_SECONDS_MIN( PACK, INSERT_TRANSACTION_DURATION_SECONDS ),
1151 0 : FD_MHIST_SECONDS_MAX( PACK, INSERT_TRANSACTION_DURATION_SECONDS ) ) );
1152 0 : fd_histf_join( fd_histf_new( ctx->complete_duration, FD_MHIST_SECONDS_MIN( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS ),
1153 0 : FD_MHIST_SECONDS_MAX( PACK, COMPLETE_MICROBLOCK_DURATION_SECONDS ) ) );
1154 0 : ctx->metric_state = 0;
1155 0 : ctx->metric_state_begin = fd_tickcount();
1156 0 : memset( ctx->metric_timing, '\0', 16*sizeof(long) );
1157 0 : memset( ctx->current_bundle, '\0', sizeof(ctx->current_bundle) );
1158 0 : memset( ctx->blk_engine_cfg, '\0', sizeof(ctx->blk_engine_cfg) );
1159 0 : memset( ctx->last_sched_metrics, '\0', sizeof(ctx->last_sched_metrics) );
1160 :
1161 0 : FD_LOG_INFO(( "packing microblocks of at most %lu transactions to %lu bank tiles", EFFECTIVE_TXN_PER_MICROBLOCK, tile->pack.bank_tile_count ));
1162 :
    : /* Final sanity check: the scratch allocations above must fit within
    :    the footprint scratch_footprint() promised the topology. */
1163 0 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
1164 0 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
1165 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
1166 :
1167 0 : }
1168 :
1169 : static ulong
1170 : populate_allowed_seccomp( fd_topo_t const * topo,
1171 : fd_topo_tile_t const * tile,
1172 : ulong out_cnt,
1173 0 : struct sock_filter * out ) {
1174 0 : (void)topo;
1175 0 : (void)tile;
1176 :
    : /* Emit the generated seccomp-BPF policy for the pack tile into out;
    :    the only runtime-dependent value baked into the filter is the
    :    logfile fd. Returns the number of filter instructions written. */
1177 0 : populate_sock_filter_policy_fd_pack_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
1178 0 : return sock_filter_policy_fd_pack_tile_instr_cnt;
1179 0 : }
1180 :
1181 : static ulong
1182 : populate_allowed_fds( fd_topo_t const * topo,
1183 : fd_topo_tile_t const * tile,
1184 : ulong out_fds_cnt,
1185 0 : int * out_fds ) {
1186 0 : (void)topo;
1187 0 : (void)tile;
1188 :
    : /* Report the fds the pack tile is allowed to keep open: stderr,
    :    plus the logfile fd when logging to a file. Caller must provide
    :    room for both; returns the number of fds written. */
1189 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
1190 :
1191 0 : ulong out_cnt = 0UL;
1192 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
1193 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
1194 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
1195 0 : return out_cnt;
1196 0 : }
1197 :
1198 0 : #define STEM_BURST (1UL)
1199 :
1200 : /* We want lazy (measured in ns) to be small enough that the producer
1201 : and the consumer never have to wait for credits. For most tango
1202 : links, we use a default worst case speed coming from 100 Gbps
1203 : Ethernet. That's not very suitable for microblocks that go from
1204 : pack to bank. Instead we manually estimate the very aggressive
1205 : 1000ns per microblock, and then reduce it further (in line with the
1206 : default lazy value computation) to ensure the random value chosen
1207 : based on this won't lead to credit return stalls. */
1208 0 : #define STEM_LAZY (128L*3000L)
1209 :
1210 0 : #define STEM_CALLBACK_CONTEXT_TYPE fd_pack_ctx_t
1211 0 : #define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_pack_ctx_t)
1212 :
1213 0 : #define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
1214 0 : #define STEM_CALLBACK_BEFORE_CREDIT before_credit
1215 0 : #define STEM_CALLBACK_AFTER_CREDIT after_credit
1216 0 : #define STEM_CALLBACK_DURING_FRAG during_frag
1217 0 : #define STEM_CALLBACK_AFTER_FRAG after_frag
1218 0 : #define STEM_CALLBACK_METRICS_WRITE metrics_write
1219 :
1220 : #include "../stem/fd_stem.c"
1221 :
1222 : fd_topo_run_tile_t fd_tile_pack = {
    : /* Tile descriptor registered with the topology runtime: wires up
    :    the pack tile's sandbox policies (seccomp + allowed fds),
    :    scratch sizing, the two-phase init hooks, and the stem_run main
    :    loop instantiated by the fd_stem.c include above. */
1223 : .name = "pack",
1224 : .populate_allowed_seccomp = populate_allowed_seccomp,
1225 : .populate_allowed_fds = populate_allowed_fds,
1226 : .scratch_align = scratch_align,
1227 : .scratch_footprint = scratch_footprint,
1228 : .privileged_init = privileged_init,
1229 : .unprivileged_init = unprivileged_init,
1230 : .run = stem_run,
1231 : };
|