Line data Source code
1 : #define FD_UNALIGNED_ACCESS_STYLE 0
2 : #include "fd_pack.h"
3 : #include "fd_pack_cost.h"
4 : #include "fd_pack_bitset.h"
5 : #include "fd_pack_unwritable.h"
6 : #include "fd_chkdup.h"
7 : #include "fd_pack_tip_prog_blacklist.h"
8 : #include <math.h> /* for sqrt */
9 : #include <stddef.h> /* for offsetof */
10 : #include "../metrics/fd_metrics.h"
11 :
12 : #define FD_PACK_USE_NON_TEMPORAL_MEMCPY 1
13 :
14 : /* Declare a bunch of helper structs used for pack-internal data
15 : structures. */
16 : typedef struct {
17 : fd_ed25519_sig_t sig;
18 : } wrapped_sig_t;
19 :
20 : /* fd_pack_ord_txn_t: An fd_txn_p_t with information required to order
21 : it by priority. */
22 : struct fd_pack_private_ord_txn {
23 : /* It's important that there be no padding here (asserted below)
24 : because the code casts back and forth from pointers to this element
25 : to pointers to the whole struct. */
26 : union {
27 : fd_txn_p_t txn[1]; /* txn is an alias for txn_e->txnp */
28 : fd_txn_e_t txn_e[1];
29 : fd_txn_e_t _txn_e; /* Non-array type needed for map_chain */
30 : struct{ uchar _sig_cnt; wrapped_sig_t sig; };
31 : };
32 :
33 : /* Since this struct can be in one of several trees, it's helpful to
34 : store which tree. This should be one of the FD_ORD_TXN_ROOT_*
35 : values. */
36 : int root;
37 :
38 : /* The sig2txn map_chain fields */
39 : ushort sigmap_next;
40 : ushort sigmap_prev;
41 :
42 : /* Each transaction is inserted with an expiration "time." This code
43 : doesn't care about the units (blocks, rdtsc tick, ns, etc.), and
44 : doesn't require transactions to be inserted in expiration date
45 : order. */
46 : ulong expires_at;
47 : /* expq_idx: When this object is part of one of the treaps, it's
48 : also in the expiration priority queue. This field (which is
49 : manipulated behind the scenes by the fd_prq code) stores where so
50 : that if we delete this transaction, we can also delete it from the
51 : expiration priority queue. */
52 : ulong expq_idx;
53 :
54 : /* The noncemap map_chain fields */
55 : ushort noncemap_next;
56 : ushort noncemap_prev;
57 :
58 : /* We want rewards*compute_est to fit in a ulong so that r1/c1 < r2/c2 can be
59 : computed as r1*c2 < r2*c1, with the product fitting in a ulong.
60 : compute_est has a small natural limit of mid-20 bits. rewards doesn't have
61 : a natural limit, so there is some argument to be made for raising the
62 : limit for rewards to 40ish bits. The struct has better packing with
63 : uint/uint though. */
64 : uint __attribute__((aligned(64))) /* We want the treap fields and the bitsets
65 : to be on the same double cache line pair */
66 : rewards; /* in Lamports */
67 : uint compute_est; /* in compute units */
68 :
69 : /* The treap fields */
70 : ushort left;
71 : ushort right;
72 : ushort parent;
73 : ushort prio;
74 : ushort prev;
75 : ushort next;
76 :
77 : /* skip: if we skip this transaction more than FD_PACK_SKIP_CNT times
78 : for reasons that won't go away until the end of the block, then we
79 : want to skip it very quickly. If skip is in [1, FD_PACK_SKIP_CNT],
80 : then that means we have to skip it `skip` more times before taking
81 : any action. If skip>FD_PACK_SKIP_CNT, then it is a compressed slot
82 : number during which it should be skipped, and we'll skip it until
83 : the compressed slot reaches a new value. skip is never 0. */
84 : ushort skip;
85 :
86 : FD_PACK_BITSET_DECLARE( rw_bitset ); /* all accts this txn references */
87 : FD_PACK_BITSET_DECLARE( w_bitset ); /* accts this txn write-locks */
88 :
89 : };
90 : typedef struct fd_pack_private_ord_txn fd_pack_ord_txn_t;
91 :
92 : /* What we want is that the payload starts at byte 0 of
93 : fd_pack_ord_txn_t so that the trick with the signature map works
94 : properly. GCC and Clang seem to disagree on the rules of offsetof.
95 : */
96 : FD_STATIC_ASSERT( offsetof( fd_pack_ord_txn_t, txn )==0UL, fd_pack_ord_txn_t );
97 : FD_STATIC_ASSERT( offsetof( fd_pack_ord_txn_t, sig )==1UL, fd_pack_ord_txn_t );
98 : #if FD_USING_CLANG
99 : FD_STATIC_ASSERT( offsetof( fd_txn_p_t, payload )==0UL, fd_pack_ord_txn_t );
100 : #else
101 : FD_STATIC_ASSERT( offsetof( fd_pack_ord_txn_t, txn->payload )==0UL, fd_pack_ord_txn_t );
102 : FD_STATIC_ASSERT( offsetof( fd_pack_ord_txn_t, txn_e->txnp )==0UL, fd_pack_ord_txn_t );
103 : #endif
104 :
105 : /* FD_ORD_TXN_ROOT is essentially a small union packed into an int. The low
106 : byte is the "tag". The higher 3 bytes depend on the low byte. */
107 4452279 : #define FD_ORD_TXN_ROOT_TAG_MASK 0xFF
108 19658097 : #define FD_ORD_TXN_ROOT_FREE 0
109 18006464 : #define FD_ORD_TXN_ROOT_PENDING 1
110 13276434 : #define FD_ORD_TXN_ROOT_PENDING_VOTE 2
111 1065 : #define FD_ORD_TXN_ROOT_PENDING_BUNDLE 3
112 328987 : #define FD_ORD_TXN_ROOT_PENALTY( idx ) (4 | (idx)<<8)
113 :
114 : /* if root & TAG_MASK == PENALTY, then PENALTY_ACCT_IDX(root) gives the index,
115 : within the transaction's list of account addresses, of the account whose penalty treap the
116 : transaction is in. */
117 : #define FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( root ) (((root) & 0xFF00)>>8)
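/* For example, restating the encoding above with concrete numbers:
   FD_ORD_TXN_ROOT_PENALTY( 3 ) evaluates to 4 | (3<<8) == 0x0304, so
   (root & FD_ORD_TXN_ROOT_TAG_MASK)==4 tags it as a penalty treap entry
   and FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( root )==3 recovers the account
   index. */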
118 :
119 28433904 : #define FD_PACK_IN_USE_WRITABLE (0x8000000000000000UL)
120 15386217 : #define FD_PACK_IN_USE_BIT_CLEARED (0x4000000000000000UL)
121 :
122 : /* Each non-empty microblock we schedule also has an overhead of 48
123 : bytes that counts towards shred limits. That comes from the 32 byte
124 : hash, the hash count (8 bytes) and the transaction count (8 bytes).
125 : We don't have to pay this overhead if the microblock is empty, since
126 : those microblocks get dropped. */
127 1505856 : #define MICROBLOCK_DATA_OVERHEAD 48UL
128 :
129 : /* Keep track of accounts that are written to in each block so that we
130 : can reset the writer costs to 0. If the number of accounts that are
131 : written to is above or equal to this, we'll just clear the whole
132 : writer cost map instead of only removing the elements we increased. */
133 1368 : #define DEFAULT_WRITTEN_LIST_MAX 16384UL
134 :
135 : FD_STATIC_ASSERT( sizeof(fd_acct_addr_t)==sizeof(fd_pubkey_t), "" );
136 :
137 : /* fd_pack_expq_t: An element of an fd_prq to sort the transactions by
138 : timeout. This structure has several invariants for entries
139 : corresponding to pending transactions:
140 : expires_at == txn->expires_at
141 : txn->expq_idx is the index of this structure
142 : Notice that prq is an array-based heap, which means the indexes of
143 : elements change. The PRQ_TMP_ST macro is hijacked to keep that
144 : invariant up to date.
145 :
146 : Note: this could be easier if fd_heap supported deleting from the
147 : middle, but that's not possible with the current design of fd_heap,
148 : which omits a parent pointer for improved performance. */
149 : struct fd_pack_expq {
150 : ulong expires_at;
151 : fd_pack_ord_txn_t * txn;
152 : };
153 : typedef struct fd_pack_expq fd_pack_expq_t;
154 :
155 :
156 : /* fd_pack_bitset_acct_mapping_t: An element of an fd_map_dynamic that
157 : maps an account address to the number of transactions that are
158 : referencing it and the bit that is reserved to indicate it in the
159 : bitset, if any. */
160 : struct fd_pack_bitset_acct_mapping {
161 : fd_acct_addr_t key; /* account address */
162 : ulong ref_cnt;
163 :
164 : /* first_instance and first_instance_was_write are only valid when
165 : bit==FD_PACK_BITSET_FIRST_INSTANCE, which is set when ref_cnt
166 : transitions from 0 to 1. These just exist to implement the
167 : optimization that accounts referenced a single time aren't
168 : allocated a bit, but this seems to be an important optimization. */
169 : fd_pack_ord_txn_t * first_instance;
170 : int first_instance_was_write;
171 :
172 : /* bit is in [0, FD_PACK_BITSET_MAX) U
173 : { FD_PACK_BITSET_FIRST_INSTANCE, FD_PACK_BITSET_SLOWPATH }. */
174 : ushort bit;
175 : };
176 : typedef struct fd_pack_bitset_acct_mapping fd_pack_bitset_acct_mapping_t;
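/* A rough sketch of how the first_instance optimization above plays out
   (hedged; the actual insert/delete logic lives later in this file): when
   ref_cnt goes 0 -> 1, no bit is allocated; bit is set to
   FD_PACK_BITSET_FIRST_INSTANCE and first_instance /
   first_instance_was_write remember the single referencing transaction.
   Presumably only when a second reference arrives does the account get a
   real bit (drawn from the bitset_avail stack declared below), and the
   stored pointer lets that bit be applied to the first transaction's
   rw_bitset / w_bitset as well. */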
177 :
178 :
179 :
180 : /* pack maintains a small state machine related to initializer bundles.
181 : See the header file for more details about it, but it's
182 : also summarized here:
183 : * NOT_INITIALIZED: The starting state for each block
184 : * PENDING: an initializer bundle has been scheduled, but pack has
185 : not observed its result yet, so we don't know if it was successful
186 : or not.
187 : * FAILED: the most recently scheduled initializer bundle failed
188 : for reasons other than already being executed. Most commonly, this
189 : could be because of a bug in the code that generated the
190 : initializer bundle, a lack of fee payer balance, or an expired
191 : blockhash.
192 : * READY: the most recently scheduled initialization bundle succeeded
193 : and normal bundles can be scheduled in this slot. */
194 2646 : #define FD_PACK_IB_STATE_NOT_INITIALIZED 0
195 0 : #define FD_PACK_IB_STATE_PENDING 1
196 0 : #define FD_PACK_IB_STATE_FAILED 2
197 3 : #define FD_PACK_IB_STATE_READY 3
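/* A rough transition sketch implied by the descriptions above (the header
   file remains the authoritative reference):
     start of each block                 -> NOT_INITIALIZED
     an initializer bundle is scheduled  -> PENDING
     PENDING, result observed as success -> READY
     PENDING, result observed as failure -> FAILED (excluding the
                                            already-executed case)
   Normal bundles can be scheduled in the slot once the state is READY. */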
198 :
199 :
200 : /* Returns 1 if x.rewards/x.compute < y.rewards/y.compute. Not robust. */
201 84572152 : #define COMPARE_WORSE(x,y) ( ((ulong)((x)->rewards)*(ulong)((y)->compute_est)) < ((ulong)((y)->rewards)*(ulong)((x)->compute_est)) )
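/* Worked example of the cross-multiplication above: with
   x = { rewards=10000, compute_est=300000 } and
   y = { rewards= 6000, compute_est=150000 },
   x pays 10000/300000 ~= 0.033 lamports/CU and y pays 6000/150000 = 0.04
   lamports/CU, so COMPARE_WORSE(x,y) is 1; indeed
   10000UL*150000UL (1.5e9) < 6000UL*300000UL (1.8e9).  Since rewards and
   compute_est are both uints, each product fits comfortably in a ulong,
   so no division or floating point is needed. */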
202 :
203 : /* Declare all the data structures */
204 :
205 :
206 : /* Define the big max-"heap" that we pull transactions off to schedule.
207 : The priority is given by reward/compute. We may want to add in some
208 : additional terms at a later point. In order to cheaply remove nodes,
209 : we actually use a treap. */
210 : #define POOL_NAME trp_pool
211 1584 : #define POOL_T fd_pack_ord_txn_t
212 : #define POOL_IDX_T ushort
213 29596146 : #define POOL_NEXT parent
214 : #include "../../util/tmpl/fd_pool.c"
215 :
216 : #define TREAP_T fd_pack_ord_txn_t
217 : #define TREAP_NAME treap
218 : #define TREAP_QUERY_T void * /* We don't use query ... */
219 : #define TREAP_CMP(a,b) (__extension__({ (void)(a); (void)(b); -1; })) /* which means we don't need to give a real
220 : implementation to cmp either */
221 180627231 : #define TREAP_IDX_T ushort
222 : #define TREAP_OPTIMIZE_ITERATION 1
223 84572152 : #define TREAP_LT COMPARE_WORSE
224 : #include "../../util/tmpl/fd_treap.c"
225 :
226 :
227 : #define MAP_NAME sig2txn
228 : #define MAP_OPTIMIZE_RANDOM_ACCESS_REMOVAL 1
229 : #define MAP_MULTI 1
230 13581621 : #define MAP_ELE_T fd_pack_ord_txn_t
231 36577778 : #define MAP_PREV sigmap_prev
232 35013276 : #define MAP_NEXT sigmap_next
233 13584988 : #define MAP_IDX_T ushort
234 : #define MAP_KEY_T wrapped_sig_t
235 26652889 : #define MAP_KEY sig
236 1088 : #define MAP_KEY_EQ(k0,k1) (!memcmp( (k0),(k1), FD_TXN_SIGNATURE_SZ) )
237 26653963 : #define MAP_KEY_HASH(key,seed) fd_hash( (seed), (key), 64UL )
238 : #include "../../util/tmpl/fd_map_chain.c"
239 :
240 :
241 : /* noncemap: A map from (nonce account, nonce authority, recent
242 : blockhash) to a durable nonce transaction containing it. We only
243 : want to allow one transaction in the pool at a time with a given
244 : (nonce account, recent blockhash) tuple value. The question is: can
245 : adding this limitation cause us to throw out potentially valuable
246 : transaction? The answer is yes, but only very rarely, and the
247 : savings are worth it. Suppose we have durable nonce transactions t1
248 : and t2 that advance the same nonce account and have the same value
249 : for the recent blockhash.
250 :
251 : - If t1 lands on chain, then it will advance the nonce account, and
252 : t2 will certainly not land on chain.
253 : - If t1 fails with AlreadyExecuted, that means the nonce account was
254 : advanced when t1 landed in a previous block, so t2 will certainly not
255 : land on chain.
256 : - If t1 fails with BlockhashNotFound, then the nonce account was
257 : advanced in some previous transaction, so again, t2 will certainly
258 : not land on chain.
259 : - If t1 does not land on chain because of an issue with the fee
260 : payer, it's possible that t2 could land on chain if it used a
261 : different fee payer, but historical data shows this is unlikely.
262 : - If t1 does not land on chain because it is part of a bundle that
263 : fails for an unrelated reason, it's possible that t2 could land on
264 : chain, but again, historical data says this is rare.
265 :
266 : We need to include the nonce authority in the hash to prevent one
267 : user from being able to DoS another user. */
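/* For reference, the shape being keyed on here is the system program's
   nonce-advance instruction, as validated in fd_pack_validate_durable_nonce
   further down: instruction 0 invokes the system program (the all-zeros
   address), carries at least 4 bytes of instruction data beginning with
   the little-endian value 4, and lists at least 3 accounts in the order
   (nonce account, recent blockhashes sysvar, nonce authority), with the
   authority required to be a signer.  noncemap_extract below reads the
   nonce account index from payload[ instr[0].acct_off+0 ] and the
   authority index from payload[ instr[0].acct_off+2 ]. */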
268 :
269 : typedef struct {
270 : uchar const * recent_blockhash;
271 : fd_acct_addr_t const * nonce_acct;
272 : fd_acct_addr_t const * nonce_auth;
273 : } noncemap_extract_t;
274 :
275 : /* k must be a valid durable nonce transaction. No error checking is
276 : done. */
277 : static inline void
278 : noncemap_extract( fd_txn_e_t const * k,
279 3378 : noncemap_extract_t * out ) {
280 3378 : fd_txn_t const * txn = TXN(k->txnp);
281 3378 : out->recent_blockhash = fd_txn_get_recent_blockhash( txn, k->txnp->payload );
282 :
283 3378 : ulong nonce_idx = k->txnp->payload[ txn->instr[ 0 ].acct_off+0 ];
284 3378 : ulong autho_idx = k->txnp->payload[ txn->instr[ 0 ].acct_off+2 ];
285 :
286 3378 : ulong imm_cnt = fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
287 3378 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, k->txnp->payload );
288 3378 : fd_acct_addr_t const * alt_adj = k->alt_accts - imm_cnt;
289 3378 : out->nonce_acct = fd_ptr_if( nonce_idx<imm_cnt, accts, alt_adj )+nonce_idx;
290 : /* The nonce authority must be a signer, so it must be an immediate
291 : account. */
292 3378 : out->nonce_auth = accts+autho_idx;
293 3378 : }
294 :
295 : static inline int
296 : noncemap_key_eq_internal( fd_txn_e_t const * k0,
297 171 : fd_txn_e_t const * k1 ) {
298 171 : noncemap_extract_t e0[1], e1[1];
299 171 : noncemap_extract( k0, e0 );
300 171 : noncemap_extract( k1, e1 );
301 :
302 171 : if( FD_UNLIKELY( memcmp( e0->recent_blockhash, e1->recent_blockhash, 32UL ) ) ) return 0;
303 63 : if( FD_UNLIKELY( memcmp( e0->nonce_acct, e1->nonce_acct, 32UL ) ) ) return 0;
304 63 : if( FD_UNLIKELY( memcmp( e0->nonce_auth, e1->nonce_auth, 32UL ) ) ) return 0;
305 63 : return 1;
306 63 : }
307 :
308 : static inline ulong
309 : noncemap_key_hash_internal( ulong seed,
310 3036 : fd_txn_e_t const * k ) {
311 : /* TODO: This takes >100 cycles! */
312 3036 : noncemap_extract_t e[1];
313 3036 : noncemap_extract( k, e );
314 3036 : return fd_hash( seed, e->recent_blockhash, 32UL ) ^
315 3036 : fd_hash( seed+ 864394383UL, e->nonce_acct, 32UL ) ^
316 3036 : fd_hash( seed+3818662446UL, e->nonce_auth, 32UL );
317 3036 : }
318 :
319 : #define MAP_NAME noncemap
320 : #define MAP_OPTIMIZE_RANDOM_ACCESS_REMOVAL 1
321 : #define MAP_MULTI 0
322 375 : #define MAP_ELE_T fd_pack_ord_txn_t
323 558 : #define MAP_PREV noncemap_prev
324 1073 : #define MAP_NEXT noncemap_next
325 4191 : #define MAP_IDX_T ushort
326 : #define MAP_KEY_T fd_txn_e_t
327 756 : #define MAP_KEY _txn_e
328 171 : #define MAP_KEY_EQ(k0,k1) noncemap_key_eq_internal( (k0), (k1) )
329 3036 : #define MAP_KEY_HASH(key,seed) noncemap_key_hash_internal( (seed), (key) )
330 : #include "../../util/tmpl/fd_map_chain.c"
331 :
332 :
333 : static const fd_acct_addr_t null_addr = { 0 };
334 :
335 : #define MAP_NAME acct_uses
336 94457147 : #define MAP_T fd_pack_addr_use_t
337 111453182 : #define MAP_KEY_T fd_acct_addr_t
338 331770398 : #define MAP_KEY_NULL null_addr
339 : #if FD_HAS_AVX
340 111453182 : # define MAP_KEY_INVAL(k) _mm256_testz_si256( wb_ldu( (k).b ), wb_ldu( (k).b ) )
341 : #else
342 : # define MAP_KEY_INVAL(k) MAP_KEY_EQUAL(k, null_addr)
343 : #endif
344 77439912 : #define MAP_KEY_EQUAL(k0,k1) (!memcmp((k0).b,(k1).b, FD_TXN_ACCT_ADDR_SZ))
345 : #define MAP_KEY_EQUAL_IS_SLOW 1
346 : #define MAP_MEMOIZE 0
347 94463413 : #define MAP_KEY_HASH(key,s) ((uint)fd_ulong_hash( fd_ulong_load_8( (key).b ) ))
348 : #include "../../util/tmpl/fd_map_dynamic.c"
349 :
350 :
351 : #define MAP_NAME bitset_map
352 52410203 : #define MAP_T fd_pack_bitset_acct_mapping_t
353 65749335 : #define MAP_KEY_T fd_acct_addr_t
354 873983672 : #define MAP_KEY_NULL null_addr
355 : #if FD_HAS_AVX
356 1655030103 : # define MAP_KEY_INVAL(k) _mm256_testz_si256( wb_ldu( (k).b ), wb_ldu( (k).b ) )
357 : #else
358 : # define MAP_KEY_INVAL(k) MAP_KEY_EQUAL(k, null_addr)
359 : #endif
360 39107958 : #define MAP_KEY_EQUAL(k0,k1) (!memcmp((k0).b,(k1).b, FD_TXN_ACCT_ADDR_SZ))
361 : #define MAP_KEY_EQUAL_IS_SLOW 1
362 : #define MAP_MEMOIZE 0
363 52437313 : #define MAP_KEY_HASH(key,s) ((uint)fd_ulong_hash( fd_ulong_load_8( (key).b ) ))
364 : #include "../../util/tmpl/fd_map_dynamic.c"
365 :
366 :
367 : /* Since transactions can also expire, we also maintain a parallel
368 : priority queue. This means elements are simultaneously part of the
369 : treap (ordered by priority) and the expiration queue (ordered by
370 : expiration). It's tempting to use the priority field of the treap
371 : for this purpose, but that can result in degenerate treaps in some
372 : cases. */
373 : #define PRQ_NAME expq
374 32826046 : #define PRQ_T fd_pack_expq_t
375 27147834 : #define PRQ_TIMEOUT_T ulong
376 27147834 : #define PRQ_TIMEOUT expires_at
377 15889862 : #define PRQ_TMP_ST(p,t) do { \
378 15889862 : (p)[0] = (t); \
379 15889862 : t.txn->expq_idx = (ulong)((p)-heap); \
380 15889862 : } while( 0 )
381 : #include "../../util/tmpl/fd_prq.c"
382 :
383 : /* With realistic traffic patterns, we often see many, many transactions
384 : competing for the same writable account. Since only one of these can
385 : execute at a time, we sometimes waste lots of scheduling time going
386 : through them one at a time. To combat that, when a transaction
387 : writes to an account with more than PENALTY_TREAP_THRESHOLD
388 : references (readers or writers), instead of inserting it into the
389 : main treap, we insert it into a penalty treap for that specific hot
390 : account address. These transactions are not immediately available
391 : for scheduling. Then, when a transaction that writes to the hot
392 : address completes, we move the most lucrative transaction from the
393 : penalty treap to the main treap, making it available for scheduling.
394 : This policy may slightly violate the price-time priority scheduling
395 : approach pack normally uses: if the most lucrative transaction
396 : competing for hot state arrives after PENALTY_TREAP_THRESHOLD has
397 : been hit, it may be scheduled second instead of first. However, if
398 : the account is in use at the time the new transaction arrives, it
399 : will be scheduled next, as desired. This minor difference seems
400 : reasonable to reduce complexity.
401 :
402 : fd_pack_penalty_treap is one account-specific penalty treap. All the
403 : transactions in the penalty_treap treap write to key.
404 :
405 : penalty_map is the fd_map_dynamic that maps accounts to their
406 : respective penalty treaps. */
407 : struct fd_pack_penalty_treap {
408 : fd_acct_addr_t key;
409 : treap_t penalty_treap[1];
410 : };
411 : typedef struct fd_pack_penalty_treap fd_pack_penalty_treap_t;
412 :
413 : #define MAP_NAME penalty_map
414 4235857 : #define MAP_T fd_pack_penalty_treap_t
415 4237701 : #define MAP_KEY_T fd_acct_addr_t
416 13450965 : #define MAP_KEY_NULL null_addr
417 : #if FD_HAS_AVX
418 29070213 : # define MAP_KEY_INVAL(k) _mm256_testz_si256( wb_ldu( (k).b ), wb_ldu( (k).b ) )
419 : #else
420 : # define MAP_KEY_INVAL(k) MAP_KEY_EQUAL(k, null_addr)
421 : #endif
422 4231899 : #define MAP_KEY_EQUAL(k0,k1) (!memcmp((k0).b,(k1).b, FD_TXN_ACCT_ADDR_SZ))
423 : #define MAP_KEY_EQUAL_IS_SLOW 1
424 : #define MAP_MEMOIZE 0
425 4234798 : #define MAP_KEY_HASH(key,s) ((uint)fd_ulong_hash( fd_ulong_load_8( (key).b ) ))
426 : #include "../../util/tmpl/fd_map_dynamic.c"
427 :
428 : /* PENALTY_TREAP_THRESHOLD: How many references to an account do we
429 : allow before subsequent transactions that write to the account go to
430 : the penalty treap. */
431 29528580 : #define PENALTY_TREAP_THRESHOLD 64UL
432 :
433 :
434 : /* FD_PACK_SKIP_CNT: How many times we'll skip a transaction (for
435 : reasons other than account conflicts) before we won't consider it
436 : until the next slot. For performance reasons, this doesn't reset at
437 : the end of a slot, so e.g. we might skip twice in slot 1, then three
438 : times in slot 2, which would be enough to prevent considering it
439 : until slot 3. The main reason this is not 1 is that some skips that
440 : seem permanent until the end of the slot can actually go away based
441 : on rebates. */
442 13586154 : #define FD_PACK_SKIP_CNT 50UL
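/* Illustrative restatement of the two skip regimes (see the `skip` field
   in fd_pack_private_ord_txn above): with FD_PACK_SKIP_CNT==50, a
   transaction skipped for transient reasons counts down from 50 toward 1
   before any further action is taken.  If the reason for the skip cannot
   go away until the end of the block, skip is instead set to the current
   compressed_slot_number (always > 50), so the transaction is passed over
   cheaply until compressed_slot_number advances at the start of the next
   slot. */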
443 :
444 : /* Finally, we can now declare the main pack data structure */
445 : struct fd_pack_private {
446 : ulong pack_depth;
447 : ulong bundle_meta_sz; /* if 0, bundles are disabled */
448 : ulong bank_tile_cnt;
449 :
450 : fd_pack_limits_t lim[1];
451 :
452 : ulong pending_txn_cnt; /* Summed across all treaps */
453 : ulong microblock_cnt; /* How many microblocks have we
454 : generated in this block? */
455 : ulong data_bytes_consumed; /* How much data is in this block so
456 : far? */
457 :
458 : /* counters / gauge for schedule outcome enums */
459 : ulong sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_CNT ];
460 :
461 : fd_rng_t * rng;
462 :
463 : ulong cumulative_block_cost;
464 : ulong cumulative_vote_cost;
465 :
466 : /* expire_before: Any transactions with expires_at strictly less than
467 : the current expire_before are removed from the available pending
468 : transactions. Here, "expire" is used as a verb: cause all
469 : transactions before this time to expire. */
470 : ulong expire_before;
471 :
472 : /* outstanding_microblock_mask: a bitmask indicating which banking
473 : tiles have outstanding microblocks, i.e. fd_pack has generated a
474 : microblock for that banking tile and the banking tile has not yet
475 : notified fd_pack that it has completed it. */
476 : ulong outstanding_microblock_mask;
477 :
478 : /* The actual footprint for the pool and maps is allocated
479 : in the same order in which they are declared immediately following
480 : the struct. I.e. these pointers point to memory not far after the
481 : struct. The trees are just pointers into the pool so don't take up
482 : more space. */
483 :
484 : fd_pack_ord_txn_t * pool;
485 :
486 : /* Treaps (sorted by priority) of pending transactions. We store the
487 : pending simple votes and transactions that come from bundles
488 : separately. */
489 : treap_t pending[1];
490 : treap_t pending_votes[1];
491 : treap_t pending_bundles[1];
492 :
493 : /* penalty_treaps: an fd_map_dynamic mapping hotly contended account
494 : addresses to treaps of transactions that write to them. We try not
495 : to allow more than roughly PENALTY_TREAP_THRESHOLD transactions in
496 : the main treap that write to each account, though this is not
497 : exact. */
498 : fd_pack_penalty_treap_t * penalty_treaps;
499 :
500 : /* initializer_bundle_state: The current state of the initialization
501 : bundle state machine. One of the FD_PACK_IB_STATE_* values. See
502 : the long comment in the header and the comments attached to the
503 : respective values for a discussion of what each state means and the
504 : transitions between them. */
505 : int initializer_bundle_state;
506 :
507 : /* relative_bundle_idx: the number of bundles that have been inserted
508 : since the last time pending_bundles was empty. See the long
509 : comment about encoding this index in the rewards field of each
510 : transaction in the bundle, and why it is important that this reset
511 : to 0 as frequently as possible. */
512 : ulong relative_bundle_idx;
513 :
514 : /* pending{_votes}_smallest: keep a conservative estimate of the
515 : smallest transaction (by cost units and by bytes) in each heap.
516 : Both CUs and bytes should be set to ULONG_MAX if the treap is
517 : empty. */
518 : fd_pack_smallest_t pending_smallest[1];
519 : fd_pack_smallest_t pending_votes_smallest[1];
520 :
521 : /* expiration_q: At the same time that a transaction is in exactly one
522 : of the above treaps, it is also in the expiration queue, sorted by
523 : its expiration time. This enables deleting all transactions that
524 : have expired, regardless of which treap they are in. */
525 : fd_pack_expq_t * expiration_q;
526 :
527 : /* acct_in_use: Map from account address to bitmask indicating which
528 : bank tiles are using the account and whether that use is read or
529 : write (msb). */
530 : fd_pack_addr_use_t * acct_in_use;
531 :
532 : /* bitset_{w, rw}_in_use stores a subset of the information in
533 : acct_in_use using the compressed set format explained at the top of
534 : this file. rw_in_use stores accounts in use for read or write
535 : while w_in_use stores only those in use for write. */
536 : FD_PACK_BITSET_DECLARE( bitset_rw_in_use );
537 : FD_PACK_BITSET_DECLARE( bitset_w_in_use );
538 :
539 : /* writer_costs: Map from account addresses to the sum of costs of
540 : transactions that write to the account. Used for enforcing limits
541 : on the max write cost per account per block. */
542 : fd_pack_addr_use_t * writer_costs;
543 :
544 : /* top_writers: A simple max heap of the top 5 writers in the slot,
545 : used by downstream consumers for monitoring purposes. */
546 : fd_pack_addr_use_t top_writers[ FD_PACK_TOP_WRITERS_CNT ];
547 :
548 : /* At the end of every slot, we have to clear out writer_costs. The
549 : map is large, but typically very sparsely populated. As an
550 : optimization, we keep track of the elements of the map that we've
551 : actually used, up to a maximum. If we use more than the maximum,
552 : we revert to the old way of just clearing the whole map.
553 :
554 : written_list indexed [0, written_list_cnt).
555 : written_list_cnt in [0, written_list_max).
556 :
557 : written_list_cnt==written_list_max-1 means that the list may be
558 : incomplete and should be ignored. */
559 : fd_pack_addr_use_t * * written_list;
560 : ulong written_list_cnt;
561 : ulong written_list_max;
562 :
563 : /* Noncemap is a map_chain that maps from tuples (nonce account,
564 : recent blockhash value, nonce authority) to a transaction. This
565 : map stores exactly the transactions in pool that have the nonce
566 : flag set. */
567 : noncemap_t * noncemap;
568 :
569 : sig2txn_t * signature_map; /* Stores pointers into pool for deleting by signature */
570 :
571 : /* bundle_temp_map: A fd_map_dynamic (although it could be an fd_map)
572 : used during fd_pack_try_schedule_bundle to store information about
573 : what accounts are used by transactions in the bundle. It's empty
574 : (in a map sense) outside of calls to try_schedule_bundle, and each
575 : call to try_schedule_bundle clears it after use. If bundles are
576 : disabled, this is a valid fd_map_dynamic, but it's as small as
577 : convenient and remains empty. */
578 : fd_pack_addr_use_t * bundle_temp_map;
579 :
580 :
581 : /* use_by_bank: An array of size (max_txn_per_microblock *
582 : FD_TXN_ACCT_ADDR_MAX) for each banking tile. Only the MSB of
583 : in_use_by is relevant. Addressed use_by_bank[i][j] where i is in
584 : [0, bank_tile_cnt) and j is in [0, use_by_bank_cnt[i]). Used
585 : mostly for clearing the proper bits of acct_in_use when a
586 : microblock finishes.
587 :
588 : use_by_bank_txn: indexed [i][j], where i is in [0, bank_tile_cnt)
589 : and j is in [0, max_txn_per_microblock). Transaction j in the
590 : microblock currently scheduled to bank i uses account addresses in
591 : use_by_bank[i][k] where k is in [0, use_by_bank_txn[i][j]). For
592 : example, if use_by_bank_txn[i][0] = 2 and use_by_bank_txn[i][1] = 3, then
593 : all the accounts that the first transaction in the outstanding
594 : microblock for bank i uses are contained in the set
595 : { use_by_bank[i][0], use_by_bank[i][1] },
596 : and all the accounts in the second transaction in the microblock
597 : are in the set
598 : { use_by_bank[i][0], use_by_bank[i][1], use_by_bank[i][2] }.
599 : Each transaction writes to at least one account (the fee payer)
600 : that no other transaction scheduled to the bank uses, which means
601 : that use_by_bank_txn[i][j] - use_by_bank_txn[i][j-1] >= 1 (with 0
602 : for use_by_bank_txn[i][-1]). This means we can stop iterating when
603 : use_by_bank_txn[i][j] == use_by_bank_cnt[i]. */
604 : fd_pack_addr_use_t * use_by_bank [ FD_PACK_MAX_BANK_TILES ];
605 : ulong use_by_bank_cnt[ FD_PACK_MAX_BANK_TILES ];
606 : ulong * use_by_bank_txn[ FD_PACK_MAX_BANK_TILES ];
607 :
608 : fd_histf_t txn_per_microblock [ 1 ];
609 : fd_histf_t vote_per_microblock[ 1 ];
610 :
611 : fd_histf_t scheduled_cus_per_block[ 1 ];
612 : fd_histf_t rebated_cus_per_block [ 1 ];
613 : fd_histf_t net_cus_per_block [ 1 ];
614 : fd_histf_t pct_cus_per_block [ 1 ];
615 : ulong cumulative_rebated_cus;
616 :
617 :
618 : /* compressed_slot_number: a number in (FD_PACK_SKIP_CNT, USHORT_MAX]
619 : that advances each time we start packing for a new slot. */
620 : ushort compressed_slot_number;
621 :
622 : /* bitset_avail: a stack of which bits are not currently reserved and
623 : can be used to represent an account address.
624 : Indexed [0, bitset_avail_cnt]. Element 0 is fixed at
625 : FD_PACK_BITSET_SLOWPATH. */
626 : ushort bitset_avail[ 1UL+FD_PACK_BITSET_MAX ];
627 : ulong bitset_avail_cnt;
628 :
629 : /* acct_to_bitset: an fd_map_dynamic that maps acct addresses to the
630 : reference count, which bit, etc. */
631 : fd_pack_bitset_acct_mapping_t * acct_to_bitset;
632 :
633 : /* chkdup: scratch memory chkdup needs for its internal processing */
634 : fd_chkdup_t chkdup[ 1 ];
635 :
636 : /* bundle_meta: an array, parallel to the pool, with each element
637 : having size bundle_meta_sz. I.e. if pool[i] has an associated
638 : bundle meta, it's located at bundle_meta[j] for j in
639 : [i*bundle_meta_sz, (i+1)*bundle_meta_sz). */
640 : void * bundle_meta;
641 : };
642 :
643 : typedef struct fd_pack_private fd_pack_t;
644 :
645 : FD_STATIC_ASSERT( offsetof(fd_pack_t, pending_txn_cnt)==FD_PACK_PENDING_TXN_CNT_OFF, txn_cnt_off );
646 :
647 : /* Forward-declare some helper functions */
648 : static ulong delete_transaction( fd_pack_t * pack, fd_pack_ord_txn_t * txn, int delete_full_bundle, int move_from_penalty_treap );
649 : static inline void insert_bundle_impl( fd_pack_t * pack, ulong bundle_idx, ulong txn_cnt, fd_pack_ord_txn_t * * bundle, ulong expires_at );
650 :
651 : FD_FN_PURE ulong
652 : fd_pack_footprint( ulong pack_depth,
653 : ulong bundle_meta_sz,
654 : ulong bank_tile_cnt,
655 312 : fd_pack_limits_t const * limits ) {
656 312 : if( FD_UNLIKELY( (bank_tile_cnt==0) | (bank_tile_cnt>FD_PACK_MAX_BANK_TILES) ) ) return 0UL;
657 312 : if( FD_UNLIKELY( pack_depth<4UL ) ) return 0UL;
658 :
659 312 : int enable_bundles = !!bundle_meta_sz;
660 312 : ulong l;
661 312 : ulong extra_depth = fd_ulong_if( enable_bundles, 1UL+2UL*FD_PACK_MAX_TXN_PER_BUNDLE, 1UL ); /* space for use between init and fini */
662 312 : ulong max_acct_in_treap = pack_depth * FD_TXN_ACCT_ADDR_MAX;
663 312 : ulong max_txn_per_mblk = fd_ulong_max( limits->max_txn_per_microblock,
664 312 : fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE, 0UL ) );
665 312 : ulong max_acct_in_flight = bank_tile_cnt * (FD_TXN_ACCT_ADDR_MAX * max_txn_per_mblk + 1UL);
666 312 : ulong max_txn_in_flight = bank_tile_cnt * max_txn_per_mblk;
667 :
668 312 : ulong max_w_per_block = fd_ulong_min( limits->max_cost_per_block / FD_PACK_COST_PER_WRITABLE_ACCT,
669 312 : max_txn_per_mblk * limits->max_microblocks_per_block * FD_TXN_ACCT_ADDR_MAX );
670 312 : ulong written_list_max = fd_ulong_min( max_w_per_block>>1, DEFAULT_WRITTEN_LIST_MAX );
671 312 : ulong bundle_temp_accts = fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE*FD_TXN_ACCT_ADDR_MAX, 1UL );
672 312 : ulong sig_chain_cnt = sig2txn_chain_cnt_est( pack_depth );
673 312 : ulong nonce_chain_cnt = noncemap_chain_cnt_est( pack_depth );
674 :
675 : /* log base 2, but with a 2* so that the hash table stays sparse */
676 312 : int lg_uses_tbl_sz = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_flight ) );
677 312 : int lg_max_writers = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_w_per_block ) );
678 312 : int lg_acct_in_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap ) );
679 312 : int lg_penalty_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap/PENALTY_TREAP_THRESHOLD ) );
680 312 : int lg_bundle_temp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*bundle_temp_accts ) );
681 :
682 312 : l = FD_LAYOUT_INIT;
683 312 : l = FD_LAYOUT_APPEND( l, FD_PACK_ALIGN, sizeof(fd_pack_t) );
684 312 : l = FD_LAYOUT_APPEND( l, trp_pool_align (), trp_pool_footprint ( pack_depth+extra_depth ) ); /* pool */
685 312 : l = FD_LAYOUT_APPEND( l, penalty_map_align(), penalty_map_footprint( lg_penalty_trp ) ); /* penalty_treaps */
686 312 : l = FD_LAYOUT_APPEND( l, expq_align (), expq_footprint ( pack_depth ) ); /* expiration prq */
687 312 : l = FD_LAYOUT_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_uses_tbl_sz ) ); /* acct_in_use */
688 312 : l = FD_LAYOUT_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_max_writers ) ); /* writer_costs */
689 312 : l = FD_LAYOUT_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t*)*written_list_max ); /* written_list */
690 312 : l = FD_LAYOUT_APPEND( l, noncemap_align (), noncemap_footprint ( nonce_chain_cnt ) ); /* noncemap */
691 312 : l = FD_LAYOUT_APPEND( l, sig2txn_align (), sig2txn_footprint ( sig_chain_cnt ) ); /* signature_map */
692 312 : l = FD_LAYOUT_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_bundle_temp ) ); /* bundle_temp_map*/
693 312 : l = FD_LAYOUT_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t)*max_acct_in_flight ); /* use_by_bank */
694 312 : l = FD_LAYOUT_APPEND( l, 32UL, sizeof(ulong)*max_txn_in_flight ); /* use_by_bank_txn*/
695 312 : l = FD_LAYOUT_APPEND( l, bitset_map_align(), bitset_map_footprint( lg_acct_in_trp ) ); /* acct_to_bitset */
696 312 : l = FD_LAYOUT_APPEND( l, 64UL, (pack_depth+extra_depth)*bundle_meta_sz ); /* bundle_meta */
697 312 : return FD_LAYOUT_FINI( l, FD_PACK_ALIGN );
698 312 : }
699 :
700 : void *
701 : fd_pack_new( void * mem,
702 : ulong pack_depth,
703 : ulong bundle_meta_sz,
704 : ulong bank_tile_cnt,
705 : fd_pack_limits_t const * limits,
706 528 : fd_rng_t * rng ) {
707 :
708 528 : int enable_bundles = !!bundle_meta_sz;
709 528 : ulong extra_depth = fd_ulong_if( enable_bundles, 1UL+2UL*FD_PACK_MAX_TXN_PER_BUNDLE, 1UL );
710 528 : ulong max_acct_in_treap = pack_depth * FD_TXN_ACCT_ADDR_MAX;
711 528 : ulong max_txn_per_mblk = fd_ulong_max( limits->max_txn_per_microblock,
712 528 : fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE, 0UL ) );
713 528 : ulong max_acct_in_flight = bank_tile_cnt * (FD_TXN_ACCT_ADDR_MAX * max_txn_per_mblk + 1UL);
714 528 : ulong max_txn_in_flight = bank_tile_cnt * max_txn_per_mblk;
715 :
716 528 : ulong max_w_per_block = fd_ulong_min( limits->max_cost_per_block / FD_PACK_COST_PER_WRITABLE_ACCT,
717 528 : max_txn_per_mblk * limits->max_microblocks_per_block * FD_TXN_ACCT_ADDR_MAX );
718 528 : ulong written_list_max = fd_ulong_min( max_w_per_block>>1, DEFAULT_WRITTEN_LIST_MAX );
719 528 : ulong bundle_temp_accts = fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE*FD_TXN_ACCT_ADDR_MAX, 1UL );
720 528 : ulong sig_chain_cnt = sig2txn_chain_cnt_est( pack_depth );
721 528 : ulong nonce_chain_cnt = noncemap_chain_cnt_est( pack_depth );
722 :
723 : /* log base 2, but with a 2* so that the hash table stays sparse */
724 528 : int lg_uses_tbl_sz = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_flight ) );
725 528 : int lg_max_writers = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_w_per_block ) );
726 528 : int lg_acct_in_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap ) );
727 528 : int lg_penalty_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap/PENALTY_TREAP_THRESHOLD ) );
728 528 : int lg_bundle_temp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*bundle_temp_accts ) );
729 :
730 528 : FD_SCRATCH_ALLOC_INIT( l, mem );
731 528 : fd_pack_t * pack = FD_SCRATCH_ALLOC_APPEND( l, FD_PACK_ALIGN, sizeof(fd_pack_t) );
732 : /* The pool has one extra element that is used between insert_init and
733 : cancel/fini. */
734 528 : void * _pool = FD_SCRATCH_ALLOC_APPEND( l, trp_pool_align(), trp_pool_footprint ( pack_depth+extra_depth ) );
735 528 : void * _penalty_map = FD_SCRATCH_ALLOC_APPEND( l, penalty_map_align(), penalty_map_footprint( lg_penalty_trp ) );
736 528 : void * _expq = FD_SCRATCH_ALLOC_APPEND( l, expq_align(), expq_footprint ( pack_depth ) );
737 528 : void * _uses = FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_uses_tbl_sz ) );
738 528 : void * _writer_cost = FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_max_writers ) );
739 528 : void * _written_lst = FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t*)*written_list_max );
740 528 : void * _noncemap = FD_SCRATCH_ALLOC_APPEND( l, noncemap_align(), noncemap_footprint ( nonce_chain_cnt ) );
741 528 : void * _sig_map = FD_SCRATCH_ALLOC_APPEND( l, sig2txn_align(), sig2txn_footprint ( sig_chain_cnt ) );
742 528 : void * _bundle_temp = FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_bundle_temp ) );
743 528 : void * _use_by_bank = FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t)*max_acct_in_flight );
744 528 : void * _use_by_txn = FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(ulong)*max_txn_in_flight );
745 528 : void * _acct_bitset = FD_SCRATCH_ALLOC_APPEND( l, bitset_map_align(), bitset_map_footprint( lg_acct_in_trp ) );
746 528 : void * bundle_meta = FD_SCRATCH_ALLOC_APPEND( l, 64UL, (pack_depth+extra_depth)*bundle_meta_sz );
747 :
748 0 : pack->pack_depth = pack_depth;
749 528 : pack->bundle_meta_sz = bundle_meta_sz;
750 528 : pack->bank_tile_cnt = bank_tile_cnt;
751 528 : pack->lim[0] = *limits;
752 528 : pack->pending_txn_cnt = 0UL;
753 528 : pack->microblock_cnt = 0UL;
754 528 : pack->data_bytes_consumed = 0UL;
755 528 : memset( pack->sched_results, 0, sizeof(pack->sched_results) );
756 528 : pack->rng = rng;
757 528 : pack->cumulative_block_cost = 0UL;
758 528 : pack->cumulative_vote_cost = 0UL;
759 528 : pack->expire_before = 0UL;
760 528 : pack->outstanding_microblock_mask = 0UL;
761 528 : pack->cumulative_rebated_cus = 0UL;
762 :
763 :
764 528 : trp_pool_new( _pool, pack_depth+extra_depth );
765 :
766 528 : fd_pack_ord_txn_t * pool = trp_pool_join( _pool );
767 528 : treap_seed( pool, pack_depth+extra_depth, fd_rng_ulong( rng ) );
768 2186148 : for( ulong i=0UL; i<pack_depth+extra_depth; i++ ) pool[i].root = FD_ORD_TXN_ROOT_FREE;
769 :
770 528 : (void)trp_pool_leave( pool );
771 :
772 528 : penalty_map_new( _penalty_map, lg_penalty_trp, 0UL );
773 :
774 : /* These treaps can have at most pack_depth elements at any moment,
775 : but they come from a pool of size pack_depth+extra_depth. */
776 528 : treap_new( (void*)pack->pending, pack_depth+extra_depth );
777 528 : treap_new( (void*)pack->pending_votes, pack_depth+extra_depth );
778 528 : treap_new( (void*)pack->pending_bundles, pack_depth+extra_depth );
779 :
780 528 : pack->pending_smallest->cus = ULONG_MAX;
781 528 : pack->pending_smallest->bytes = ULONG_MAX;
782 528 : pack->pending_votes_smallest->cus = ULONG_MAX;
783 528 : pack->pending_votes_smallest->bytes = ULONG_MAX;
784 :
785 528 : expq_new( _expq, pack_depth );
786 :
787 528 : FD_PACK_BITSET_CLEAR( pack->bitset_rw_in_use );
788 528 : FD_PACK_BITSET_CLEAR( pack->bitset_w_in_use );
789 :
790 528 : acct_uses_new( _uses, lg_uses_tbl_sz, 0UL );
791 528 : acct_uses_new( _writer_cost, lg_max_writers, 0UL );
792 528 : acct_uses_new( _bundle_temp, lg_bundle_temp, 0UL );
793 :
794 528 : pack->written_list = _written_lst;
795 528 : pack->written_list_cnt = 0UL;
796 528 : pack->written_list_max = written_list_max;
797 :
798 528 : noncemap_new( _noncemap, nonce_chain_cnt, fd_rng_ulong( rng ) );
799 :
800 528 : sig2txn_new( _sig_map, sig_chain_cnt, fd_rng_ulong( rng ) );
801 :
802 528 : fd_pack_addr_use_t * use_by_bank = (fd_pack_addr_use_t *)_use_by_bank;
803 528 : ulong * use_by_bank_txn = (ulong *)_use_by_txn;
804 6783 : for( ulong i=0UL; i<bank_tile_cnt; i++ ) {
805 6255 : pack->use_by_bank [i] = use_by_bank + i*(FD_TXN_ACCT_ADDR_MAX*max_txn_per_mblk+1UL);
806 6255 : pack->use_by_bank_cnt[i] = 0UL;
807 6255 : pack->use_by_bank_txn[i] = use_by_bank_txn + i*max_txn_per_mblk;
808 6255 : pack->use_by_bank_txn[i][0] = 0UL;
809 6255 : }
810 27009 : for( ulong i=bank_tile_cnt; i<FD_PACK_MAX_BANK_TILES; i++ ) {
811 26481 : pack->use_by_bank [i] = NULL;
812 26481 : pack->use_by_bank_cnt[i] = 0UL;
813 26481 : pack->use_by_bank_txn[i] = NULL;
814 26481 : }
815 :
816 528 : fd_histf_new( pack->txn_per_microblock, FD_MHIST_MIN( PACK, TOTAL_TRANSACTIONS_PER_MICROBLOCK_COUNT ),
817 528 : FD_MHIST_MAX( PACK, TOTAL_TRANSACTIONS_PER_MICROBLOCK_COUNT ) );
818 528 : fd_histf_new( pack->vote_per_microblock, FD_MHIST_MIN( PACK, VOTES_PER_MICROBLOCK_COUNT ),
819 528 : FD_MHIST_MAX( PACK, VOTES_PER_MICROBLOCK_COUNT ) );
820 :
821 528 : fd_histf_new( pack->scheduled_cus_per_block, FD_MHIST_MIN( PACK, CUS_SCHEDULED ),
822 528 : FD_MHIST_MAX( PACK, CUS_SCHEDULED ) );
823 528 : fd_histf_new( pack->rebated_cus_per_block, FD_MHIST_MIN( PACK, CUS_REBATED ),
824 528 : FD_MHIST_MAX( PACK, CUS_REBATED ) );
825 528 : fd_histf_new( pack->net_cus_per_block, FD_MHIST_MIN( PACK, CUS_NET ),
826 528 : FD_MHIST_MAX( PACK, CUS_NET ) );
827 528 : fd_histf_new( pack->pct_cus_per_block, FD_MHIST_MIN( PACK, CUS_PCT ),
828 528 : FD_MHIST_MAX( PACK, CUS_PCT ) );
829 :
830 528 : pack->compressed_slot_number = (ushort)(FD_PACK_SKIP_CNT+1);
831 :
832 528 : pack->bitset_avail[ 0 ] = FD_PACK_BITSET_SLOWPATH;
833 180752 : for( ulong i=0UL; i<FD_PACK_BITSET_MAX; i++ ) pack->bitset_avail[ i+1UL ] = (ushort)i;
834 528 : pack->bitset_avail_cnt = FD_PACK_BITSET_MAX;
835 :
836 528 : bitset_map_new( _acct_bitset, lg_acct_in_trp, 0UL );
837 :
838 528 : fd_chkdup_new( pack->chkdup, rng );
839 :
840 528 : pack->bundle_meta = bundle_meta;
841 :
842 528 : return mem;
843 528 : }
844 :
845 : fd_pack_t *
846 528 : fd_pack_join( void * mem ) {
847 528 : FD_SCRATCH_ALLOC_INIT( l, mem );
848 528 : fd_pack_t * pack = FD_SCRATCH_ALLOC_APPEND( l, FD_PACK_ALIGN, sizeof(fd_pack_t) );
849 :
850 0 : int enable_bundles = !!pack->bundle_meta_sz;
851 528 : ulong pack_depth = pack->pack_depth;
852 528 : ulong extra_depth = fd_ulong_if( enable_bundles, 1UL+2UL*FD_PACK_MAX_TXN_PER_BUNDLE, 1UL );
853 528 : ulong bank_tile_cnt = pack->bank_tile_cnt;
854 528 : ulong max_txn_per_microblock = fd_ulong_max( pack->lim->max_txn_per_microblock,
855 528 : fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE, 0UL ) );
856 :
857 528 : ulong max_acct_in_treap = pack_depth * FD_TXN_ACCT_ADDR_MAX;
858 528 : ulong max_acct_in_flight = bank_tile_cnt * (FD_TXN_ACCT_ADDR_MAX * max_txn_per_microblock + 1UL);
859 528 : ulong max_txn_in_flight = bank_tile_cnt * max_txn_per_microblock;
860 528 : ulong max_w_per_block = fd_ulong_min( pack->lim->max_cost_per_block / FD_PACK_COST_PER_WRITABLE_ACCT,
861 528 : max_txn_per_microblock * pack->lim->max_microblocks_per_block * FD_TXN_ACCT_ADDR_MAX );
862 528 : ulong written_list_max = fd_ulong_min( max_w_per_block>>1, DEFAULT_WRITTEN_LIST_MAX );
863 528 : ulong bundle_temp_accts = fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE*FD_TXN_ACCT_ADDR_MAX, 1UL );
864 528 : ulong sig_chain_cnt = sig2txn_chain_cnt_est( pack_depth );
865 528 : ulong nonce_chain_cnt = noncemap_chain_cnt_est( pack_depth );
866 :
867 528 : int lg_uses_tbl_sz = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_flight ) );
868 528 : int lg_max_writers = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_w_per_block ) );
869 528 : int lg_acct_in_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap ) );
870 528 : int lg_penalty_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap/PENALTY_TREAP_THRESHOLD ) );
871 528 : int lg_bundle_temp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*bundle_temp_accts ) );
872 :
873 :
874 528 : pack->pool = trp_pool_join( FD_SCRATCH_ALLOC_APPEND( l, trp_pool_align(), trp_pool_footprint ( pack_depth+extra_depth ) ) );
875 528 : pack->penalty_treaps= penalty_map_join(FD_SCRATCH_ALLOC_APPEND( l, penalty_map_align(),penalty_map_footprint( lg_penalty_trp ) ) );
876 528 : pack->expiration_q = expq_join ( FD_SCRATCH_ALLOC_APPEND( l, expq_align(), expq_footprint ( pack_depth ) ) );
877 528 : pack->acct_in_use = acct_uses_join( FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint ( lg_uses_tbl_sz ) ) );
878 528 : pack->writer_costs = acct_uses_join( FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint ( lg_max_writers ) ) );
879 528 : /* */ FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t*)*written_list_max );
880 528 : pack->noncemap = noncemap_join( FD_SCRATCH_ALLOC_APPEND( l, noncemap_align(), noncemap_footprint ( nonce_chain_cnt ) ) );
881 528 : pack->signature_map = sig2txn_join( FD_SCRATCH_ALLOC_APPEND( l, sig2txn_align(), sig2txn_footprint ( sig_chain_cnt ) ) );
882 528 : pack->bundle_temp_map=acct_uses_join( FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint ( lg_bundle_temp ) ) );
883 528 : /* */ FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t)*max_acct_in_flight );
884 528 : /* */ FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(ulong)*max_txn_in_flight );
885 528 : pack->acct_to_bitset= bitset_map_join( FD_SCRATCH_ALLOC_APPEND( l, bitset_map_align(), bitset_map_footprint( lg_acct_in_trp ) ) );
886 528 : /* */ FD_SCRATCH_ALLOC_APPEND( l, 64UL, (pack_depth+extra_depth)*pack->bundle_meta_sz );
887 :
888 528 : FD_MGAUGE_SET( PACK, PENDING_TRANSACTIONS_HEAP_SIZE, pack->pack_depth );
889 528 : memset( pack->top_writers, 0, sizeof(pack->top_writers) );
890 :
891 528 : return pack;
892 528 : }
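/* Usage sketch for footprint/new/join (illustrative only; the depth, bank
   tile count, limit values, and the source of mem and rng are assumptions
   for the example, and limit fields not referenced in this file are
   elided):

     fd_pack_limits_t limits[1] = {{ 0 }};
     limits->max_cost_per_block        = ...;   (plus the other limit fields, elided here)
     limits->max_txn_per_microblock    = ...;
     limits->max_microblocks_per_block = ...;
     ulong depth   = 4096UL;
     ulong meta_sz = 0UL;                       (0 disables bundles)
     ulong banks   = 4UL;
     ulong fp      = fd_pack_footprint( depth, meta_sz, banks, limits );
     (mem: an FD_PACK_ALIGN-aligned region of at least fp bytes;
      rng: a joined fd_rng_t)
     fd_pack_t * pack = fd_pack_join( fd_pack_new( mem, depth, meta_sz, banks, limits, rng ) );
*/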
893 :
894 :
895 : /* Returns 0 on failure, 1 on success for a vote, 2 on success for a
896 : non-vote. */
897 : static int
898 : fd_pack_estimate_rewards_and_compute( fd_txn_e_t * txne,
899 13583097 : fd_pack_ord_txn_t * out ) {
900 13583097 : fd_txn_t * txn = TXN(txne->txnp);
901 13583097 : ulong sig_rewards = FD_PACK_FEE_PER_SIGNATURE * txn->signature_cnt; /* Easily in [5000, 635000] */
902 :
903 13583097 : ulong requested_execution_cus;
904 13583097 : ulong priority_rewards;
905 13583097 : ulong precompile_sigs;
906 13583097 : ulong requested_loaded_accounts_data_cost;
907 13583097 : ulong cost_estimate = fd_pack_compute_cost( txn, txne->txnp->payload, &txne->txnp->flags, &requested_execution_cus, &priority_rewards, &precompile_sigs, &requested_loaded_accounts_data_cost );
908 :
909 13583097 : if( FD_UNLIKELY( !cost_estimate ) ) return 0;
910 :
911 : /* precompile_sigs <= 16320, so after the addition,
912 : sig_rewards < 83,000,000 */
913 13583094 : sig_rewards += FD_PACK_FEE_PER_SIGNATURE * precompile_sigs;
914 13583094 : sig_rewards = sig_rewards * FD_PACK_TXN_FEE_BURN_PCT / 100UL;
915 :
916 : /* No fancy CU estimation in this version of pack
917 : for( ulong i=0UL; i<(ulong)txn->instr_cnt; i++ ) {
918 : uchar prog_id_idx = txn->instr[ i ].program_id;
919 : fd_acct_addr_t const * acct_addr = fd_txn_get_acct_addrs( txn, txnp->payload ) + (ulong)prog_id_idx;
920 : }
921 : */
922 13583094 : out->rewards = (priority_rewards < (UINT_MAX - sig_rewards)) ? (uint)(sig_rewards + priority_rewards) : UINT_MAX;
923 13583094 : out->compute_est = (uint)cost_estimate;
924 13583094 : out->txn->pack_cu.requested_exec_plus_acct_data_cus = (uint)(requested_execution_cus + requested_loaded_accounts_data_cost);
925 13583094 : out->txn->pack_cu.non_execution_cus = (uint)(cost_estimate - requested_execution_cus - requested_loaded_accounts_data_cost);
926 :
927 13583094 : return fd_int_if( txne->txnp->flags & FD_TXN_P_FLAGS_IS_SIMPLE_VOTE, 1, 2 );
928 13583097 : }
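/* Worked example of the arithmetic above (illustrative; assumes
   FD_PACK_FEE_PER_SIGNATURE==5000, consistent with the [5000, 635000]
   range noted above, and FD_PACK_TXN_FEE_BURN_PCT==50): a non-vote
   transaction with 1 signature, no precompile signatures,
   priority_rewards==10000 and cost_estimate==300000 gets
   sig_rewards = 5000, then 5000*50/100 = 2500 after the burn scaling, so
   out->rewards = 2500+10000 = 12500, out->compute_est = 300000, and the
   function returns 2. */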
929 :
930 : /* Returns 0 on failure, 1 if not a durable nonce transaction, and 2 if
931 : it is. FIXME: These return codes are set to harmonize with
932 : estimate_rewards_and_compute but -1/0/1 makes a lot more sense to me.
933 : */
934 : static int
935 13583094 : fd_pack_validate_durable_nonce( fd_txn_e_t * txne ) {
936 13583094 : fd_txn_t const * txn = TXN(txne->txnp);
937 :
938 : /* First instruction invokes system program with 4 bytes of
939 : instruction data with the little-endian value 4. It also has 3
940 : accounts: the nonce account, recent blockhashes sysvar, and the
941 : nonce authority. It seems like technically the nonce authority may
942 : not need to be passed in, but we disallow that. We also allow
943 : trailing data and trailing accounts. We want to organize the
944 : checks somewhat to minimize cache misses. */
945 13583094 : if( FD_UNLIKELY( txn->instr_cnt==0 ) ) return 1;
946 1417974 : if( FD_UNLIKELY( txn->instr[ 0 ].data_sz<4UL ) ) return 1;
947 1417974 : if( FD_UNLIKELY( txn->instr[ 0 ].acct_cnt<3UL ) ) return 1; /* It seems like technically 2 is allowed, but never used */
948 39300 : if( FD_LIKELY ( fd_uint_load_4( txne->txnp->payload + txn->instr[ 0 ].data_off )!=4U ) ) return 1;
949 : /* The program has to be a static account */
950 1155 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, txne->txnp->payload );
951 1155 : if( FD_UNLIKELY( !fd_memeq( accts[ txn->instr[ 0 ].program_id ].b, null_addr.b, 32UL ) ) ) return 1;
952 1155 : if( FD_UNLIKELY( !fd_txn_is_signer( txn, txne->txnp->payload[ txn->instr[ 0 ].acct_off+2 ] ) ) ) return 0;
953 : /* We could check recent blockhash, but it's not necessary */
954 1152 : return 2;
955 1155 : }
956 :
957 : /* Can the fee payer afford to pay a transaction with the specified
958 : price? Returns 1 if so, 0 otherwise. This is just a stub that
959 : always returns 1 for now, and the real check is deferred to the bank
960 : tile. In general, this function can't be totally accurate, because
961 : the transactions immediately prior to this one can affect the balance
962 : of this fee payer, but a simple check here may be helpful for
963 : reducing spam. */
964 : static int
965 : fd_pack_can_fee_payer_afford( fd_acct_addr_t const * acct_addr,
966 13583088 : ulong price /* in lamports */) {
967 13583088 : (void)acct_addr;
968 13583088 : (void)price;
969 13583088 : return 1;
970 13583088 : }
971 :
972 :
973 :
974 :
975 :
976 13703916 : fd_txn_e_t * fd_pack_insert_txn_init( fd_pack_t * pack ) { return trp_pool_ele_acquire( pack->pool )->txn_e; }
977 122400 : void fd_pack_insert_txn_cancel( fd_pack_t * pack, fd_txn_e_t * txn ) { trp_pool_ele_release( pack->pool, (fd_pack_ord_txn_t*)txn ); }
978 :
979 24 : #define REJECT( reason ) do { \
980 24 : trp_pool_ele_release( pack->pool, ord ); \
981 24 : return FD_PACK_INSERT_REJECT_ ## reason; \
982 24 : } while( 0 )
983 :
984 : /* These require txn, accts, and alt_adj to be defined as per usual */
985 328987 : #define ACCT_IDX_TO_PTR( idx ) (__extension__( { \
986 328987 : ulong __idx = (idx); \
987 328987 : fd_ptr_if( __idx<fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM ), accts, alt_adj )+__idx; \
988 328987 : }))
989 71650749 : #define ACCT_ITER_TO_PTR( iter ) (__extension__( { \
990 71650749 : ulong __idx = fd_txn_acct_iter_idx( iter ); \
991 71650749 : fd_ptr_if( __idx<fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM ), accts, alt_adj )+__idx; \
992 71650749 : }))
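/* Note on the alt_adj convention these macros rely on (the same trick used
   by noncemap_extract above): alt_adj is the ALT account array shifted
   back by the immediate account count, so one global index works for both
   arrays.  E.g. with 3 immediate accounts, idx 1 resolves to accts+1,
   while idx 4 resolves to alt_adj+4 == alt_accts+1, i.e. the second
   address loaded from an address lookup table. */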
993 :
994 :
995 : /* Tries to find the worst transaction in any treap in pack. If that
996 : transaction's score is worse than or equal to threshold_score, it
997 : initiates a delete and returns the number of deleted transactions
998 : (potentially more than 1 for a bundle). If it's higher than
999 : threshold_score, it returns 0. To force this function to delete the
1000 : worst transaction if there are any eligible ones, pass FLT_MAX as
1001 : threshold_score. */
1002 : static inline ulong
1003 : delete_worst( fd_pack_t * pack,
1004 : float threshold_score,
1005 494601 : int is_vote ) {
1006 : /* If the tree is full, we want to see if this is better than the
1007 : worst element in the pool before inserting. If the new transaction
1008 : is better than that one, we'll delete it and insert the new
1009 : transaction. Otherwise, we'll throw away this transaction.
1010 :
1011 : We want to bias the definition of "worst" here to provide better
1012 : quality of service. For example, if the pool is filled with
1013 : transactions that all write to the same account or are all votes,
1014 : we want to bias towards treating one of those transactions as the
1015 : worst, even if they pay slightly higher fees per computer unit,
1016 : since we know we won't actually be able to schedule them all.
1017 :
1018 : This is a tricky task, however. All our notions of priority and
1019 : better/worse are based on static information about the transaction,
1020 : and there's not an easy way to take into account global
1021 : information, for example, how many other transactions contend with
1022 : this one. One idea is to build a heap (not a treap, since we only
1023 : need pop-min, insert, and delete) with one element for each element
1024 : in the pool, with a "delete me" score that's related but not
1025 : identical to the normal score. This would allow building in some
1026 : global information. The downside is that the global information
1027 : that gets integrated is static. E.g. if you bias a transaction's
1028 : "delete me" score to make it more likely to be deleted because
1029 : there are many conflicting transactions in the pool, the score
1030 : stays biased, even if the global conditions change (unless you come
1031 : up with some complicated re-scoring scheme). This can work, since
1032 : when the pool is full, the global bias factors are unlikely to
1033 : change significantly at the relevant timescales.
1034 :
1035 : However, rather than this, we implement a simpler probabilistic
1036 : scheme. We'll sample M transactions, find the worst transaction in
1037 : each of the M treaps, compute a "delete me" score for those <= M
1038 : transactions, and delete the worst. If one penalty treap is
1039 : starting to get big, then it becomes very likely that the random
1040 : sample will find it and choose to delete a transaction from it.
1041 :
1042 : The exact formula for the "delete me" score should be the matter of
1043 : some more intense quantitative research. For now, we'll just use
1044 : this:
1045 :
1046 : Treap with N transactions Scale Factor
1047 : Pending 1.0 unless inserting a vote and votes < 25%
1048 : Pending votes 1.0 until 75% of depth, then 0
1049 : Penalty treap 1.0 at <= 100 transactions, then sqrt(100/N)
1050 : Pending bundles inf (since the rewards value is fudged)
1051 :
1052 : We'll also use M=8. */
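/* E.g. for the penalty treap row of the table above: a penalty treap
   holding 400 transactions gets multiplier sqrtf( 100.0f/400.0f )==0.5f,
   so its worst transaction is scored at half its actual rewards-per-CU,
   making it more likely to be the one we evict than an equally priced
   transaction sitting in the main pending treap. */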
1053 :
1054 494601 : float worst_score = FLT_MAX;
1055 494601 : fd_pack_ord_txn_t * worst = NULL;
1056 4451409 : for( ulong i=0UL; i<8UL; i++ ) {
1057 3956808 : uint pool_max = (uint)trp_pool_max( pack->pool );
1058 3956808 : ulong sample_i = fd_rng_uint_roll( pack->rng, pool_max );
1059 :
1060 3956808 : fd_pack_ord_txn_t * sample = &pack->pool[ sample_i ];
1061 : /* Presumably if we're calling this, the pool is almost entirely
1062 : full, so the probability of choosing a free one is small. If
1063 : it does happen, find the first one that isn't free. */
1064 3959093 : while( FD_UNLIKELY( sample->root==FD_ORD_TXN_ROOT_FREE ) ) sample = &pack->pool[ (++sample_i)%pool_max ];
1065 :
1066 3956808 : int root_idx = sample->root;
1067 3956808 : float multiplier = 0.0f; /* The smaller this is, the more biased we'll be to deleting it */
1068 3956808 : treap_t * treap;
1069 3956808 : switch( root_idx & FD_ORD_TXN_ROOT_TAG_MASK ) {
1070 0 : default:
1071 0 : case FD_ORD_TXN_ROOT_FREE: {
1072 0 : FD_LOG_CRIT(( "Double free detected" ));
1073 0 : return ULONG_MAX; /* Can't be hit */
1074 0 : }
1075 3935441 : case FD_ORD_TXN_ROOT_PENDING: {
1076 3935441 : treap = pack->pending;
1077 3935441 : ulong vote_cnt = treap_ele_cnt( pack->pending_votes );
1078 3935441 : if( FD_LIKELY( !is_vote || (vote_cnt>=pack->pack_depth/4UL ) ) ) multiplier = 1.0f;
1079 3935441 : break;
1080 0 : }
1081 0 : case FD_ORD_TXN_ROOT_PENDING_VOTE: {
1082 0 : treap = pack->pending_votes;
1083 0 : ulong vote_cnt = treap_ele_cnt( pack->pending_votes );
1084 0 : if( FD_LIKELY( is_vote || (vote_cnt<=3UL*pack->pack_depth/4UL ) ) ) multiplier = 1.0f;
1085 0 : break;
1086 0 : }
1087 0 : case FD_ORD_TXN_ROOT_PENDING_BUNDLE: {
1088 : /* We don't have a way to tell how much these actually pay in
1089 : rewards, so we just assume they are very high. */
1090 0 : treap = pack->pending_bundles;
1091 : /* We cap rewards at UINT_MAX lamports for estimation, and min
1092 : CUs is about 1000, which means rewards/compute < 5e6.
1093 : FLT_MAX is around 3e38. That means, 1e20*rewards/compute is
1094 : much less than FLT_MAX, so we won't have any issues with
1095 : overflow. On the other hand, if rewards==1 lamport and
1096 : compute is 2 million CUs, 1e20*1/2e6 is still higher than any
1097 : normal transaction. */
1098 0 : multiplier = 1e20f;
1099 0 : break;
1100 0 : }
1101 21367 : case FD_ORD_TXN_ROOT_PENALTY( 0 ): {
1102 21367 : fd_txn_t * txn = TXN( sample->txn );
1103 21367 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, sample->txn->payload );
1104 21367 : fd_acct_addr_t const * alt_adj = sample->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1105 21367 : fd_acct_addr_t penalty_acct = *ACCT_IDX_TO_PTR( FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( root_idx ) );
1106 21367 : fd_pack_penalty_treap_t * q = penalty_map_query( pack->penalty_treaps, penalty_acct, NULL );
1107 21367 : FD_TEST( q );
1108 21367 : ulong cnt = treap_ele_cnt( q->penalty_treap );
1109 21367 : treap = q->penalty_treap;
1110 :
1111 21367 : multiplier = sqrtf( 100.0f / (float)fd_ulong_max( 100UL, cnt ) );
1112 21367 : break;
1113 21367 : }
1114 3956808 : }
1115 : /* Get the worst from the sampled treap */
1116 3956808 : treap_fwd_iter_t _cur=treap_fwd_iter_init( treap, pack->pool );
1117 3956808 : FD_TEST( !treap_fwd_iter_done( _cur ) ); /* It can't be empty because we just sampled an element from it. */
1118 3956808 : sample = treap_fwd_iter_ele( _cur, pack->pool );
1119 :
1120 3956808 : float score = multiplier * (float)sample->rewards / (float)sample->compute_est;
1121 3956808 : worst = fd_ptr_if( score<worst_score, sample, worst );
1122 3956808 : worst_score = fd_float_if( worst_score<score, worst_score, score );
1123 3956808 : }
1124 :
1125 494601 : if( FD_UNLIKELY( !worst ) ) return 0;
1126 494601 : if( FD_UNLIKELY( threshold_score<worst_score ) ) return 0;
1127 :
1128 494601 : return delete_transaction( pack, worst, 1, 1 );
1129 494601 : }
1130 :
1131 : static inline int
1132 : validate_transaction( fd_pack_t * pack,
1133 : fd_pack_ord_txn_t const * ord,
1134 : fd_txn_t const * txn,
1135 : fd_acct_addr_t const * accts,
1136 : fd_acct_addr_t const * alt_adj,
1137 13583088 : int check_bundle_blacklist ) {
1138 13583088 : int writes_to_sysvar = 0;
1139 13583088 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
1140 28351344 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1141 14768256 : writes_to_sysvar |= fd_pack_unwritable_contains( ACCT_ITER_TO_PTR( iter ) );
1142 14768256 : }
1143 :
1144 13583088 : int bundle_blacklist = 0;
1145 13583088 : if( FD_UNLIKELY( check_bundle_blacklist ) ) {
1146 67758 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_ALL );
1147 447423 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1148 379665 : bundle_blacklist |= (3==fd_pack_tip_prog_check_blacklist( ACCT_ITER_TO_PTR( iter ) ));
1149 379665 : }
1150 67758 : }
1151 :
1152 13583088 : fd_acct_addr_t const * alt = ord->txn_e->alt_accts;
1153 13583088 : fd_chkdup_t * chkdup = pack->chkdup;
1154 13583088 : ulong imm_cnt = fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1155 13583088 : ulong alt_cnt = fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_ALT );
1156 :
1157 : /* Throw out transactions ... */
1158 : /* ... that are unfunded */
1159 13583088 : if( FD_UNLIKELY( !fd_pack_can_fee_payer_afford( accts, ord->rewards ) ) ) return FD_PACK_INSERT_REJECT_UNAFFORDABLE;
1160 : /* ... that are so big they'll never run */
1161 13583088 : if( FD_UNLIKELY( ord->compute_est >= pack->lim->max_cost_per_block ) ) return FD_PACK_INSERT_REJECT_TOO_LARGE;
1162 : /* ... that load too many accounts (ignoring 9LZdXeKGeBV6hRLdxS1rHbHoEUsKqesCC2ZAPTPKJAbK) */
1163 13583088 : if( FD_UNLIKELY( fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_ALL )>64UL ) ) return FD_PACK_INSERT_REJECT_ACCOUNT_CNT;
1164 : /* ... that duplicate an account address */
1165 13583085 : if( FD_UNLIKELY( fd_chkdup_check( chkdup, accts, imm_cnt, alt, alt_cnt ) ) ) return FD_PACK_INSERT_REJECT_DUPLICATE_ACCT;
1166 : /* ... that try to write to a sysvar */
1167 13583082 : if( FD_UNLIKELY( writes_to_sysvar ) ) return FD_PACK_INSERT_REJECT_WRITES_SYSVAR;
1168 : /* ... that use an account that violates bundle rules */
1169 13582989 : if( FD_UNLIKELY( bundle_blacklist & 1 ) ) return FD_PACK_INSERT_REJECT_BUNDLE_BLACKLIST;
1170 :
1171 13582989 : return 0;
1172 13582989 : }
1173 :
1174 :
1175 :
1176 : /* returns cumulative penalty "points", i.e. the sum of the populated
1177 : section of penalties (which also tells the caller how much of the
1178 :    array is populated). */
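 :
 : /* For example (grounded in the code below, threshold value
 :    hypothetical): if PENALTY_TREAP_THRESHOLD were 64 and a transaction
 :    write-locks an account whose current ref_cnt is 71, that account
 :    contributes penalty 71-64 = 7 and its account index is recorded in
 :    penalty_idx. */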
1179 : static inline ulong
1180 : populate_bitsets( fd_pack_t * pack,
1181 : fd_pack_ord_txn_t * ord,
1182 : ushort penalties [ static FD_TXN_ACCT_ADDR_MAX ],
1183 13581939 : uchar penalty_idx[ static FD_TXN_ACCT_ADDR_MAX ] ) {
1184 13581939 : FD_PACK_BITSET_CLEAR( ord->rw_bitset );
1185 13581939 : FD_PACK_BITSET_CLEAR( ord->w_bitset );
1186 :
1187 13581939 : fd_txn_t * txn = TXN(ord->txn);
1188 13581939 : uchar * payload = ord->txn->payload;
1189 :
1190 13581939 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, payload );
1191 : /* alt_adj is the pointer to the ALT expansion, adjusted so that if
1192 : account address n is the first that comes from the ALT, it can be
1193 :      accessed with alt_adj[n]. */
1194 13581939 : fd_acct_addr_t const * alt_adj = ord->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1195 :
1196 13581939 : ulong cumulative_penalty = 0UL;
1197 13581939 : ulong penalty_i = 0UL;
1198 :
1199 13581939 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
1200 28345545 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1201 14763606 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
1202 14763606 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( pack->acct_to_bitset, acct, NULL );
1203 14763606 : if( FD_UNLIKELY( q==NULL ) ) {
1204 13276913 : q = bitset_map_insert( pack->acct_to_bitset, acct );
1205 13276913 : q->ref_cnt = 0UL;
1206 13276913 : q->first_instance = ord;
1207 13276913 : q->first_instance_was_write = 1;
1208 13276913 : q->bit = FD_PACK_BITSET_FIRST_INSTANCE;
1209 13276913 : } else if( FD_UNLIKELY( q->bit == FD_PACK_BITSET_FIRST_INSTANCE ) ) {
1210 7321 : q->bit = pack->bitset_avail[ pack->bitset_avail_cnt ];
1211 7321 : pack->bitset_avail_cnt = fd_ulong_if( !!pack->bitset_avail_cnt, pack->bitset_avail_cnt-1UL, 0UL );
1212 :
1213 7321 : FD_PACK_BITSET_SETN( q->first_instance->rw_bitset, q->bit );
1214 7321 : if( q->first_instance_was_write ) FD_PACK_BITSET_SETN( q->first_instance->w_bitset, q->bit );
1215 7321 : }
1216 14763606 : ulong penalty = fd_ulong_max( q->ref_cnt, PENALTY_TREAP_THRESHOLD )-PENALTY_TREAP_THRESHOLD;
1217 14763606 : if( FD_UNLIKELY( penalty ) ) {
1218 1212867 : penalties [ penalty_i ] = (ushort)penalty;
1219 1212867 : penalty_idx[ penalty_i ] = (uchar )fd_txn_acct_iter_idx( iter );
1220 1212867 : penalty_i++;
1221 1212867 : cumulative_penalty += penalty;
1222 1212867 : }
1223 :
1224 14763606 : q->ref_cnt++;
1225 14763606 : FD_PACK_BITSET_SETN( ord->rw_bitset, q->bit );
1226 14763606 : FD_PACK_BITSET_SETN( ord->w_bitset , q->bit );
1227 14763606 : }
1228 :
1229 13581939 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_READONLY );
1230 18157251 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1231 :
1232 4575312 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
1233 4575312 : if( FD_UNLIKELY( fd_pack_unwritable_contains( &acct ) ) ) continue;
1234 :
1235 3081981 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( pack->acct_to_bitset, acct, NULL );
1236 3081981 : if( FD_UNLIKELY( q==NULL ) ) {
1237 29739 : q = bitset_map_insert( pack->acct_to_bitset, acct );
1238 29739 : q->ref_cnt = 0UL;
1239 29739 : q->first_instance = ord;
1240 29739 : q->first_instance_was_write = 0;
1241 29739 : q->bit = FD_PACK_BITSET_FIRST_INSTANCE;
1242 3052242 : } else if( FD_UNLIKELY( q->bit == FD_PACK_BITSET_FIRST_INSTANCE ) ) {
1243 11031 : q->bit = pack->bitset_avail[ pack->bitset_avail_cnt ];
1244 11031 : pack->bitset_avail_cnt = fd_ulong_if( !!pack->bitset_avail_cnt, pack->bitset_avail_cnt-1UL, 0UL );
1245 :
1246 11031 : FD_PACK_BITSET_SETN( q->first_instance->rw_bitset, q->bit );
1247 11031 : if( q->first_instance_was_write ) FD_PACK_BITSET_SETN( q->first_instance->w_bitset, q->bit );
1248 11031 : }
1249 :
1250 3081981 : q->ref_cnt++;
1251 3081981 : FD_PACK_BITSET_SETN( ord->rw_bitset, q->bit );
1252 3081981 : }
1253 13581939 : return cumulative_penalty;
1254 13581939 : }
1255 :
1256 : int
1257 : fd_pack_insert_txn_fini( fd_pack_t * pack,
1258 : fd_txn_e_t * txne,
1259 : ulong expires_at,
1260 13581516 : ulong * delete_cnt ) {
1261 13581516 : *delete_cnt = 0UL;
1262 :
1263 13581516 : fd_pack_ord_txn_t * ord = (fd_pack_ord_txn_t *)txne;
1264 :
1265 13581516 : fd_txn_t * txn = TXN(txne->txnp);
1266 13581516 : uchar * payload = txne->txnp->payload;
1267 :
1268 13581516 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, payload );
1269 : /* alt_adj is the pointer to the ALT expansion, adjusted so that if
1270 : account address n is the first that comes from the ALT, it can be
1271 :      accessed with alt_adj[n]. */
1272 13581516 : fd_acct_addr_t const * alt_adj = ord->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1273 :
1274 13581516 : ord->expires_at = expires_at;
1275 :
1276 13581516 : int est_result = fd_pack_estimate_rewards_and_compute( txne, ord );
1277 13581516 : if( FD_UNLIKELY( !est_result ) ) REJECT( ESTIMATION_FAIL );
1278 13581513 : int is_vote = est_result==1;
1279 :
1280 13581513 : int nonce_result = fd_pack_validate_durable_nonce( txne );
1281 13581513 : if( FD_UNLIKELY( !nonce_result ) ) REJECT( INVALID_NONCE );
1282 13581510 : int is_durable_nonce = nonce_result==2;
1283 13581510 : ord->txn->flags &= ~FD_TXN_P_FLAGS_DURABLE_NONCE;
1284 13581510 : ord->txn->flags |= fd_uint_if( is_durable_nonce, FD_TXN_P_FLAGS_DURABLE_NONCE, 0U );
1285 :
1286 13581510 : int validation_result = validate_transaction( pack, ord, txn, accts, alt_adj, !!pack->bundle_meta_sz );
1287 13581510 : if( FD_UNLIKELY( validation_result ) ) {
1288 99 : trp_pool_ele_release( pack->pool, ord );
1289 99 : return validation_result;
1290 99 : }
1291 :
1292 : /* Reject any transactions that have already expired */
1293 13581411 : if( FD_UNLIKELY( expires_at<pack->expire_before ) ) REJECT( EXPIRED );
1294 :
1295 13581399 : int replaces = 0;
1296 : /* If it's a durable nonce and we already have one, delete one or the
1297 : other. */
1298 13581399 : if( FD_UNLIKELY( is_durable_nonce ) ) {
1299 120 : fd_pack_ord_txn_t * same_nonce = noncemap_ele_query( pack->noncemap, txne, NULL, pack->pool );
1300 120 : if( FD_LIKELY( same_nonce ) ) { /* Seems like most nonce transactions are effectively duplicates */
1301 9 : if( FD_LIKELY( same_nonce->root == FD_ORD_TXN_ROOT_PENDING_BUNDLE || COMPARE_WORSE( ord, same_nonce ) ) ) REJECT( NONCE_PRIORITY );
1302 3 : ulong _delete_cnt = delete_transaction( pack, same_nonce, 0, 0 ); /* Not a bundle, so delete_full_bundle is 0 */
1303 3 : *delete_cnt += _delete_cnt;
1304 3 : replaces = 1;
1305 3 : }
1306 120 : }
1307 :
1308 13581393 : if( FD_UNLIKELY( pack->pending_txn_cnt == pack->pack_depth ) ) {
1309 494592 : float threshold_score = (float)ord->rewards/(float)ord->compute_est;
1310 494592 : ulong _delete_cnt = delete_worst( pack, threshold_score, is_vote );
1311 494592 : *delete_cnt += _delete_cnt;
1312 494592 : if( FD_UNLIKELY( !_delete_cnt ) ) REJECT( PRIORITY );
1313 494592 : replaces = 1;
1314 494592 : }
1315 :
1316 13581393 : ord->txn->flags &= ~(FD_TXN_P_FLAGS_BUNDLE | FD_TXN_P_FLAGS_INITIALIZER_BUNDLE);
1317 13581393 : ord->skip = FD_PACK_SKIP_CNT;
1318 :
1319 : /* At this point, we know we have space to insert the transaction and
1320 : we've committed to insert it. */
1321 :
1322 : /* Since the pool uses ushorts, the size of the pool is < USHORT_MAX.
1323 : Each transaction can reference an account at most once, which means
1324 : that the total number of references for an account is < USHORT_MAX.
1325 : If these were ulongs, the array would be 512B, which is kind of a
1326 : lot to zero out.*/
1327 13581393 : ushort penalties[ FD_TXN_ACCT_ADDR_MAX ] = {0};
1328 13581393 : uchar penalty_idx[ FD_TXN_ACCT_ADDR_MAX ];
1329 13581393 : ulong cumulative_penalty = populate_bitsets( pack, ord, penalties, penalty_idx );
1330 :
1331 13581393 : treap_t * insert_into = pack->pending;
1332 :
1333 13581393 : if( FD_UNLIKELY( cumulative_penalty && !is_vote ) ) { /* Optimize for high parallelism case */
1334 : /* Compute a weighted random choice */
1335 304959 : ulong roll = (ulong)fd_rng_uint_roll( pack->rng, (uint)cumulative_penalty ); /* cumulative_penalty < USHORT_MAX*64 < UINT_MAX */
1336 304959 : ulong i = 0UL;
1337 : /* Find the right one. This can be done in O(log N), but I imagine
1338 :        N is normally so small that it doesn't matter. */
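 :     /* For example (hypothetical values): with penalties {3,1,2} and
 :        roll=4, the loop below subtracts 3 (roll=1, i=1), subtracts 1
 :        (roll=0, i=2), then stops since 0<2, selecting index 2; each index
 :        is chosen with probability penalties[i]/cumulative_penalty. */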
1339 758568 : while( roll>=penalties[i] ) roll -= (ulong)penalties[i++];
1340 :
1341 304959 : fd_acct_addr_t penalty_acct = *ACCT_IDX_TO_PTR( penalty_idx[i] );
1342 304959 : fd_pack_penalty_treap_t * q = penalty_map_query( pack->penalty_treaps, penalty_acct, NULL );
1343 304959 : if( FD_UNLIKELY( q==NULL ) ) {
1344 2901 : q = penalty_map_insert( pack->penalty_treaps, penalty_acct );
1345 2901 : treap_new( q->penalty_treap, pack->pack_depth );
1346 2901 : }
1347 304959 : insert_into = q->penalty_treap;
1348 304959 : ord->root = FD_ORD_TXN_ROOT_PENALTY( penalty_idx[i] );
1349 13276434 : } else {
1350 13276434 : ord->root = fd_int_if( is_vote, FD_ORD_TXN_ROOT_PENDING_VOTE, FD_ORD_TXN_ROOT_PENDING );
1351 :
1352 13276434 : fd_pack_smallest_t * smallest = fd_ptr_if( is_vote, &pack->pending_votes_smallest[0], pack->pending_smallest );
1353 13276434 : smallest->cus = fd_ulong_min( smallest->cus, ord->compute_est );
1354 13276434 : smallest->bytes = fd_ulong_min( smallest->bytes, txne->txnp->payload_sz );
1355 13276434 : }
1356 :
1357 13581393 : pack->pending_txn_cnt++;
1358 :
1359 13581393 : sig2txn_ele_insert( pack->signature_map, ord, pack->pool );
1360 :
1361 13581393 : if( FD_UNLIKELY( is_durable_nonce ) ) noncemap_ele_insert( pack->noncemap, ord, pack->pool );
1362 :
1363 13581393 : fd_pack_expq_t temp[ 1 ] = {{ .expires_at = expires_at, .txn = ord }};
1364 13581393 : expq_insert( pack->expiration_q, temp );
1365 :
1366 13581393 : if( FD_LIKELY( is_vote ) ) insert_into = pack->pending_votes;
1367 :
1368 13581393 : treap_ele_insert( insert_into, ord, pack->pool );
1369 13581393 : return (is_vote) | (replaces<<1) | (is_durable_nonce<<2);
1370 13581393 : }
1371 : #undef REJECT
1372 :
1373 : fd_txn_e_t * const *
1374 : fd_pack_insert_bundle_init( fd_pack_t * pack,
1375 : fd_txn_e_t * * bundle,
1376 378 : ulong txn_cnt ) {
1377 378 : FD_TEST( txn_cnt<=FD_PACK_MAX_TXN_PER_BUNDLE );
1378 378 : FD_TEST( trp_pool_free( pack->pool )>=txn_cnt );
1379 1962 : for( ulong i=0UL; i<txn_cnt; i++ ) bundle[ i ] = trp_pool_ele_acquire( pack->pool )->txn_e;
1380 378 : return bundle;
1381 378 : }
1382 :
1383 : void
1384 : fd_pack_insert_bundle_cancel( fd_pack_t * pack,
1385 : fd_txn_e_t * const * bundle,
1386 246 : ulong txn_cnt ) {
1387 : /* There's no real reason these have to be released in reverse, but it
1388 : seems fitting to release them in the opposite order they were
1389 : acquired. */
1390 1284 : for( ulong i=0UL; i<txn_cnt; i++ ) trp_pool_ele_release( pack->pool, (fd_pack_ord_txn_t*)bundle[ txn_cnt-1UL-i ] );
1391 246 : }
1392 :
1393 : /* Explained below */
1394 : #define BUNDLE_L_PRIME 37896771UL
1395 : #define BUNDLE_N 312671UL
1396 147 : #define RC_TO_REL_BUNDLE_IDX( r, c ) (BUNDLE_N - ((ulong)(r) * 1UL<<32)/((ulong)(c) * BUNDLE_L_PRIME))
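 :
 : /* Sketch (not a proof) of why the macro recovers the bundle index; see
 :    the long comment in fd_pack_insert_bundle_fini for the derivation.
 :    insert_bundle_impl assigns rewards so that every transaction in
 :    relative bundle k satisfies
 :      L'*(N-k)/2^32 <= r/c < L'*(N+1-k)/2^32,
 :    so floor( (r<<32)/(c*L') ) = N-k and the macro returns k. */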
1397 :
1398 : int
1399 : fd_pack_insert_bundle_fini( fd_pack_t * pack,
1400 : fd_txn_e_t * const * bundle,
1401 : ulong txn_cnt,
1402 : ulong expires_at,
1403 : int initializer_bundle,
1404 : void const * bundle_meta,
1405 378 : ulong * delete_cnt ) {
1406 :
1407 378 : int err = 0;
1408 378 : *delete_cnt = 0UL;
1409 :
1410 378 : ulong pending_b_txn_cnt = treap_ele_cnt( pack->pending_bundles );
1411 : /* We want to prevent bundles from consuming the whole treap, but in
1412 :      general, we assume bundles are lucrative.  We'll cap bundle
1413 :      transactions at half of the pack depth.  We assume that the
1414 : bundles are coming in a pre-prioritized order, so it doesn't make
1415 : sense to drop an earlier bundle for this one. That means that
1416 : really, the best thing to do is drop this one. */
1417 378 : if( FD_UNLIKELY( (!initializer_bundle)&(pending_b_txn_cnt+txn_cnt>pack->pack_depth/2UL) ) ) err = FD_PACK_INSERT_REJECT_PRIORITY;
1418 :
1419 378 : if( FD_UNLIKELY( expires_at<pack->expire_before ) ) err = FD_PACK_INSERT_REJECT_EXPIRED;
1420 :
1421 :
1422 378 : int replaces = 0;
1423 378 : ulong nonce_txn_cnt = 0UL;
1424 :
1425 : /* Collect nonce hashes to detect duplicate nonces.
1426 : Use a constant-time duplicate-detection algorithm -- Vacant entries
1427 : have the MSB set, occupied entries are the noncemap hash, with the
1428 : MSB set to 0. */
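 :   /* For example: vacant slot i holds ULONG_MAX-i, so vacant slots are
 :      pairwise distinct and, having the MSB set, can never equal a real
 :      63-bit hash.  The all-pairs comparison below therefore only flags
 :      genuine duplicates (or rare 63-bit hash collisions). */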
1429 378 : ulong nonce_hash63[ FD_PACK_MAX_TXN_PER_BUNDLE ];
1430 2268 : for( ulong i=0UL; i<FD_PACK_MAX_TXN_PER_BUNDLE; i++ ) {
1431 1890 : nonce_hash63[ i ] = ULONG_MAX-i;
1432 1890 : }
1433 :
1434 1956 : for( ulong i=0UL; (i<txn_cnt) && !err; i++ ) {
1435 1581 : fd_pack_ord_txn_t * ord = (fd_pack_ord_txn_t *)bundle[ i ];
1436 :
1437 1581 : fd_txn_t const * txn = TXN(bundle[ i ]->txnp);
1438 1581 : uchar const * payload = bundle[ i ]->txnp->payload;
1439 :
1440 1581 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, payload );
1441 1581 : fd_acct_addr_t const * alt_adj = ord->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1442 :
1443 1581 : int est_result = fd_pack_estimate_rewards_and_compute( bundle[ i ], ord );
1444 1581 : if( FD_UNLIKELY( !est_result ) ) { err = FD_PACK_INSERT_REJECT_ESTIMATION_FAIL; break; }
1445 1581 : int nonce_result = fd_pack_validate_durable_nonce( ord->txn_e );
1446 1581 : if( FD_UNLIKELY( !nonce_result ) ) { err = FD_PACK_INSERT_REJECT_INVALID_NONCE; break; }
1447 1581 : int is_durable_nonce = nonce_result==2;
1448 1581 : nonce_txn_cnt += !!is_durable_nonce;
1449 :
1450 1581 : bundle[ i ]->txnp->flags |= FD_TXN_P_FLAGS_BUNDLE;
1451 1581 : bundle[ i ]->txnp->flags &= ~(FD_TXN_P_FLAGS_INITIALIZER_BUNDLE | FD_TXN_P_FLAGS_DURABLE_NONCE);
1452 1581 : bundle[ i ]->txnp->flags |= fd_uint_if( initializer_bundle, FD_TXN_P_FLAGS_INITIALIZER_BUNDLE, 0U );
1453 1581 : bundle[ i ]->txnp->flags |= fd_uint_if( is_durable_nonce, FD_TXN_P_FLAGS_DURABLE_NONCE, 0U );
1454 1581 : ord->skip = FD_PACK_SKIP_CNT;
1455 1581 : ord->expires_at = expires_at;
1456 :
1457 1581 : if( FD_UNLIKELY( is_durable_nonce ) ) {
1458 1032 : nonce_hash63[ i ] = noncemap_key_hash( ord->txn_e, pack->noncemap->seed ) & 0x7FFFFFFFFFFFFFFFUL;
1459 1032 : fd_pack_ord_txn_t * same_nonce = noncemap_ele_query( pack->noncemap, ord->txn_e, NULL, pack->pool );
1460 1032 : if( FD_LIKELY( same_nonce ) ) {
1461 : /* bundles take priority over non-bundles, and earlier bundles
1462 : take priority over later bundles. */
1463 6 : if( FD_UNLIKELY( same_nonce->txn->flags & FD_TXN_P_FLAGS_BUNDLE ) ) {
1464 3 : err = FD_PACK_INSERT_REJECT_NONCE_PRIORITY;
1465 3 : break;
1466 3 : } else {
1467 3 : ulong _delete_cnt = delete_transaction( pack, same_nonce, 0, 0 );
1468 3 : *delete_cnt += _delete_cnt;
1469 3 : replaces = 1;
1470 3 : }
1471 6 : }
1472 1032 : }
1473 :
1474 1578 : int validation_result = validate_transaction( pack, ord, txn, accts, alt_adj, !initializer_bundle );
1475 1578 : if( FD_UNLIKELY( validation_result ) ) { err = validation_result; break; }
1476 1578 : }
1477 :
1478 378 : if( FD_UNLIKELY( err ) ) {
1479 3 : fd_pack_insert_bundle_cancel( pack, bundle, txn_cnt );
1480 3 : return err;
1481 3 : }
1482 :
1483 375 : if( FD_UNLIKELY( initializer_bundle && pending_b_txn_cnt>0UL ) ) {
1484 0 : treap_rev_iter_t _cur=treap_rev_iter_init( pack->pending_bundles, pack->pool );
1485 0 : FD_TEST( !treap_rev_iter_done( _cur ) );
1486 0 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pack->pool );
1487 0 : int is_ib = !!(cur->txn->flags & FD_TXN_P_FLAGS_INITIALIZER_BUNDLE);
1488 :
1489 : /* Delete the previous IB if there is one */
1490 0 : if( FD_UNLIKELY( is_ib && 0UL==RC_TO_REL_BUNDLE_IDX( cur->rewards, cur->compute_est ) ) ) {
1491 0 : ulong _delete_cnt = delete_transaction( pack, cur, 1, 0 );
1492 0 : *delete_cnt += _delete_cnt;
1493 0 : }
1494 0 : }
1495 :
1496 384 : while( FD_UNLIKELY( pack->pending_txn_cnt+txn_cnt > pack->pack_depth ) ) {
1497 9 : ulong _delete_cnt = delete_worst( pack, FLT_MAX, 0 );
1498 9 : *delete_cnt += _delete_cnt;
1499 9 : if( FD_UNLIKELY( !_delete_cnt ) ) {
1500 0 : fd_pack_insert_bundle_cancel( pack, bundle, txn_cnt );
1501 0 : return FD_PACK_INSERT_REJECT_PRIORITY;
1502 0 : }
1503 9 : replaces = 1;
1504 9 : }
1505 :
1506 375 : if( FD_UNLIKELY( !pending_b_txn_cnt ) ) {
1507 375 : pack->relative_bundle_idx = 1UL;
1508 375 : }
1509 :
1510 375 : if( FD_LIKELY( bundle_meta ) ) {
1511 0 : memcpy( (uchar *)pack->bundle_meta + (ulong)((fd_pack_ord_txn_t *)bundle[0]-pack->pool)*pack->bundle_meta_sz, bundle_meta, pack->bundle_meta_sz );
1512 0 : }
1513 :
1514 375 : if( FD_UNLIKELY( nonce_txn_cnt>1UL ) ) {
1515 :     /* Do an ILP-friendly duplicate detect, naive O(n^2) algo.  With max
1516 :        5 txns per bundle, this requires 10 comparisons, ~25 cycles. */
1517 375 : uint conflict_detected = 0u;
1518 1875 : for( ulong i=0UL; i<FD_PACK_MAX_TXN_PER_BUNDLE-1; i++ ) {
1519 5250 : for( ulong j=i+1; j<FD_PACK_MAX_TXN_PER_BUNDLE; j++ ) {
1520 3750 : ulong const ele_i = nonce_hash63[ i ];
1521 3750 : ulong const ele_j = nonce_hash63[ j ];
1522 3750 : conflict_detected |= (ele_i==ele_j);
1523 3750 : }
1524 1500 : }
1525 375 : if( FD_UNLIKELY( conflict_detected ) ) {
1526 243 : fd_pack_insert_bundle_cancel( pack, bundle, txn_cnt );
1527 243 : return FD_PACK_INSERT_REJECT_NONCE_CONFLICT;
1528 243 : }
1529 375 : }
1530 :
1531 : /* We put bundles in a treap just like all the other transactions, but
1532 : we actually want to sort them in a very specific order; the order
1533 : within the bundle is determined at bundle creation time, and the
1534 : order among the bundles is FIFO. However, it's going to be a pain
1535 : to use a different sorting function for this treap, since it's
1536 : fixed as part of the treap creation for performance. Don't fear
1537 : though; we can pull a cool math trick out of the bag to shoehorn
1538 : the order we'd like into the sort function we need, and to get even
1539 : more.
1540 :
1541 : Recall that the sort function is r_i/c_i, smallest to largest,
1542 : where r_i is the rewards and c_i is the cost units. r_i and c_i
1543 : are both uints, and the comparison is done by cross-multiplication
1544 : as ulongs. We actually use the c_i value for testing if
1545 : transactions fit, etc. so let's assume that's fixed, and we know
1546 : it's in the range [1020, 1,556,782].
1547 :
1548 : This means, if c_0, c_1, ... c_4 are the CU costs of the
1549 : transactions in the first bundle, we require r_0/c_0 > r_1/c_1 >
1550 : ... > r_4/c_4. Then, if c_5, ... c_9 are the CU costs of the
1551 : transactions in the second bundle, we also require that r_4/c_4 >
1552 : r_5/c_5. For convenience, we'll impose a slightly stronger
1553 : constraint: we want the kth bundle to obey L*(N-k) <= r_i/c_i <
1554 : L*(N+1-k), for fixed constants L and N, real and integer,
1555 : respectively, that we'll determine. For example, this means r_4/c_4
1556 : >= L*N > r_5/c_5. This enables us to group the transactions in the
1557 : same bundle more easily.
1558 :
1559 : For convenience in the math below, we'll set j=N-k and relabel the
1560 : transactions from the jth bundle c_0, ... c_4.
1561 : From above, we know that Lj <= r_4/c_4. We'd like to make it as
1562 :      close as possible given that r_4 is an integer.  Thus, put
1563 : r_4 = ceil( c_4 * Lj ). r_4 is clearly an integer, and it satisfies
1564 : the required inequality because:
1565 : r_4/c_4 = ceil( c_4 * Lj)/c_4 >= c_4*Lj / c_4 >= Lj.
1566 :
1567 : Following in the same spirit, put r_3 = ceil( c_3 * (r_4+1)/c_4 ).
1568 : Again, r_3 is clearly an integer, and
1569 : r_3/c_3 = ceil(c_3*(r_4+1)/c_4)/c_3
1570 : >= (c_3*(r_4+1))/(c_3 * c_4)
1571 : >= r_4/c_4 + 1/c_4
1572 : > r_4/c_4.
1573 : Following the pattern, we put
1574 : r_2 = ceil( c_2 * (r_3+1)/c_3 )
1575 : r_1 = ceil( c_1 * (r_2+1)/c_2 )
1576 : r_0 = ceil( c_0 * (r_1+1)/c_1 )
1577 :      which work for the same reason as r_3.
1578 :
1579 : We now need for r_0 to satisfy the final inequality with L, and
1580 : we'll use this to guide our choice of L. Theoretically, r_0 can be
1581 : expressed in terms of L, j, and c_0, ... c_4, but that's a truly
1582 :      inscrutable expression.  Instead, we need some bounds so we can get
1583 : rid of all the ceil using the property that x <= ceil(x) < x+1.
1584 : c_4 * Lj <= r_4 < c_4 * Lj + 1
1585 : The lower bound on r_3 is easy:
1586 : r_3 >= c_3 * (c_4 * Lj + 1)/c_4 = c_3 * Lj + c_3/c_4
1587 : For the upper bound,
1588 : r_3 < 1 + c_3*(r_4+1)/c_4 < 1 + c_3*(c_4*Lj+1 + 1)/c_4
1589 : = 1 + c_3 * Lj + 2*c_3/c_4
1590 : Continuing similarly gives
1591 : c_2*Lj + c_2/c_3 + c_2/c_4 <= r_2
1592 :        c_1*Lj + c_1/c_2 + c_1/c_3 + c_1/c_4 <= r_1
1593 : c_0*Lj + c_0/c_1 + c_0/c_2 + c_0/c_3 + c_0/c_4 <= r_0
1594 : and
1595 : r_2 < 1 + c_2*Lj + 2c_2/c_3 + 2c_2/c_4
1596 : r_1 < 1 + c_1*Lj + 2c_1/c_2 + 2c_1/c_3 + 2c_1/c_4
1597 : r_0 < 1 + c_0*Lj + 2c_0/c_1 + 2c_0/c_2 + 2c_0/c_3 + 2c_0/c_4.
1598 :
1599 : Setting L(j+1)>=(1 + c_0*Lj+2c_0/c_1+2c_0/c_2+2c_0/c_3+2c_0/c_4)/c_0
1600 : is then sufficient to ensure the whole sequence of 5 fits between Lj
1601 : and L(j+1). Simplifying gives
1602 :        L >= 1/c_0 + 2/c_1 + 2/c_2 + 2/c_3 + 2/c_4
1603 : but L must be a constant and not depend on individual values of c_i,
1604 : so, given that c_i >= 1020, we set L = 9/1020.
1605 :
1606 : Now all that remains is to determine N. It's a bit unfortunate
1607 : that we require N, since it limits our capacity, but it's necessary
1608 : in any system that tries to compute priorities to enforce a FIFO
1609 : order. If we've inserted more than N bundles without ever having
1610 : the bundle treap go empty, we'll briefly break the FIFO ordering as
1611 : we underflow.
1612 :
1613 : Thus, we'd like to make N as big as possible, avoiding overflow.
1614 : r_0, ..., r_4 are all uints, and taking the bounds from above,
1615 :      given that for any i, i', c_i/c_{i'} < 1527, we have
1616 : r_i < 1 + 1556782 * Lj + 8*1527.
1617 : To avoid overflow, we assert the right-hand side is < 2^32, which
1618 : implies N <= 312671.
1619 :
1620 : We want to use a fixed point representation for L so that the
1621 : entire computation can be done with integer arithmetic. We can do
1622 : the arithmetic as ulongs, which means defining L' >= L * 2^s, and
1623 : we compute ceil( c_4*Lj ) as floor( (c_4 * L' * j + 2^s - 1)/2^s ),
1624 : so c_4 * L' * j + 2^s should fit in a ulong. With j<=N, this gives
1625 : s<=32, so we set s=32, which means L' = 37896771 >= 9/1020 * 2^32.
1626 : Note that 1 + 1556782 * L' * N + 8*1527 + 2^32 is approximately
1627 : 2^63.999993.
1628 :
1629 : Note that this is all checked by a proof of the code translated
1630 : into Z3. Unfortunately CBMC was too slow to prove this code
1631 : directly. */
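 :
 :   /* Rough numeric sanity check (illustrative, not a proof):
 :      L' = ceil( (9/1020) * 2^32 ) = 37896771, and with the worst case
 :      c_i = 1556782 and j = N = 312671, the bound
 :      1 + c_0*(L'/2^32)*N + 8*1527 comes to roughly 4.29496e9, just under
 :      2^32 ~= 4.29497e9, so the assigned rewards still fit in a uint. */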
1632 279 : #define BUNDLE_L_PRIME 37896771UL
1633 279 : #define BUNDLE_N 312671UL
1634 :
1635 132 : if( FD_UNLIKELY( pack->relative_bundle_idx>BUNDLE_N ) ) {
1636 0 : FD_LOG_WARNING(( "Too many bundles inserted without allowing pending bundles to go empty. "
1637 0 : "Ordering of bundles may be incorrect." ));
1638 0 : pack->relative_bundle_idx = 1UL;
1639 0 : }
1640 132 : ulong bundle_idx = fd_ulong_if( initializer_bundle, 0UL, pack->relative_bundle_idx );
1641 132 : insert_bundle_impl( pack, bundle_idx, txn_cnt, (fd_pack_ord_txn_t * *)bundle, expires_at );
1642 : /* if IB this is max( 1, x ), which is x. Otherwise, this is max(x,
1643 : x+1) which is x++ */
1644 132 : pack->relative_bundle_idx = fd_ulong_max( bundle_idx+1UL, pack->relative_bundle_idx );
1645 :
1646 132 : return (0) | (replaces<<1) | ((!!nonce_txn_cnt)<<2);
1647 375 : }
1648 : static inline void
1649 : insert_bundle_impl( fd_pack_t * pack,
1650 : ulong bundle_idx,
1651 : ulong txn_cnt,
1652 : fd_pack_ord_txn_t * * bundle,
1653 132 : ulong expires_at ) {
1654 132 : ulong prev_reward = ((BUNDLE_L_PRIME * (BUNDLE_N - bundle_idx))) - 1UL;
1655 132 : ulong prev_cost = 1UL<<32;
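 :
 :   /* Seeding prev_reward = L'*j - 1 and prev_cost = 2^32 makes the first
 :      iteration below compute r_4 = ceil( c_4 * L' * j / 2^32 ), the
 :      fixed-point form of r_4 = ceil( c_4 * L * j ) from the long comment
 :      above; later iterations compute r_i = ceil( c_i*(r_{i+1}+1)/c_{i+1} ). */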
1656 :
1657 : /* Assign last to first */
1658 678 : for( ulong i=0UL; i<txn_cnt; i++ ) {
1659 546 : fd_pack_ord_txn_t * ord = bundle[ txn_cnt-1UL - i ];
1660 546 : ord->rewards = (uint)(((ulong)ord->compute_est * (prev_reward + 1UL) + prev_cost-1UL)/prev_cost);
1661 546 : ord->root = FD_ORD_TXN_ROOT_PENDING_BUNDLE;
1662 546 : prev_reward = ord->rewards;
1663 546 : prev_cost = ord->compute_est;
1664 :
1665 : /* The penalty information isn't used for bundles. */
1666 546 : ushort penalties [ FD_TXN_ACCT_ADDR_MAX ];
1667 546 : uchar penalty_idx[ FD_TXN_ACCT_ADDR_MAX ];
1668 546 : populate_bitsets( pack, ord, penalties, penalty_idx );
1669 :
1670 546 : treap_ele_insert( pack->pending_bundles, ord, pack->pool );
1671 546 : pack->pending_txn_cnt++;
1672 :
1673 546 : if( FD_UNLIKELY( ord->txn->flags & FD_TXN_P_FLAGS_DURABLE_NONCE ) ) noncemap_ele_insert( pack->noncemap, ord, pack->pool );
1674 546 : sig2txn_ele_insert( pack->signature_map, ord, pack->pool );
1675 :
1676 546 : fd_pack_expq_t temp[ 1 ] = {{ .expires_at = expires_at, .txn = ord }};
1677 546 : expq_insert( pack->expiration_q, temp );
1678 546 : }
1679 :
1680 132 : }
1681 :
1682 : void const *
1683 0 : fd_pack_peek_bundle_meta( fd_pack_t const * pack ) {
1684 0 : int ib_state = pack->initializer_bundle_state;
1685 0 : if( FD_UNLIKELY( (ib_state==FD_PACK_IB_STATE_PENDING) | (ib_state==FD_PACK_IB_STATE_FAILED) ) ) return NULL;
1686 :
1687 0 : treap_rev_iter_t _cur=treap_rev_iter_init( pack->pending_bundles, pack->pool );
1688 0 : if( FD_UNLIKELY( treap_rev_iter_done( _cur ) ) ) return NULL; /* empty */
1689 :
1690 0 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pack->pool );
1691 0 : int is_ib = !!(cur->txn->flags & FD_TXN_P_FLAGS_INITIALIZER_BUNDLE);
1692 0 : if( FD_UNLIKELY( is_ib ) ) return NULL;
1693 :
1694 0 : return (void const *)((uchar const *)pack->bundle_meta + (ulong)_cur * pack->bundle_meta_sz);
1695 0 : }
1696 :
1697 : void
1698 3 : fd_pack_set_initializer_bundles_ready( fd_pack_t * pack ) {
1699 3 : pack->initializer_bundle_state = FD_PACK_IB_STATE_READY;
1700 3 : }
1701 :
1702 : void
1703 752907 : fd_pack_metrics_write( fd_pack_t const * pack ) {
1704 752907 : ulong pending_regular = treap_ele_cnt( pack->pending );
1705 752907 : ulong pending_votes = treap_ele_cnt( pack->pending_votes );
1706 752907 : ulong pending_bundle = treap_ele_cnt( pack->pending_bundles );
1707 752907 : ulong conflicting = pack->pending_txn_cnt - pending_votes - pending_bundle - treap_ele_cnt( pack->pending );
1708 752907 : FD_MGAUGE_SET( PACK, AVAILABLE_TRANSACTIONS_ALL, pack->pending_txn_cnt );
1709 752907 : FD_MGAUGE_SET( PACK, AVAILABLE_TRANSACTIONS_REGULAR, pending_regular );
1710 752907 : FD_MGAUGE_SET( PACK, AVAILABLE_TRANSACTIONS_VOTES, pending_votes );
1711 752907 : FD_MGAUGE_SET( PACK, AVAILABLE_TRANSACTIONS_CONFLICTING, conflicting );
1712 752907 : FD_MGAUGE_SET( PACK, AVAILABLE_TRANSACTIONS_BUNDLES, pending_bundle );
1713 752907 : FD_MGAUGE_SET( PACK, SMALLEST_PENDING_TRANSACTION, pack->pending_smallest->cus );
1714 :
1715 752907 : FD_MCNT_ENUM_COPY( PACK, TRANSACTION_SCHEDULE, pack->sched_results );
1716 752907 : }
1717 :
1718 : void
1719 0 : fd_pack_get_sched_metrics( fd_pack_t const * pack, ulong * metrics ) {
1720 0 : fd_memcpy( metrics, pack->sched_results, sizeof(pack->sched_results) );
1721 0 : }
1722 :
1723 : typedef struct {
1724 : ushort clear_rw_bit;
1725 : ushort clear_w_bit;
1726 : } release_result_t;
1727 :
1728 : static inline release_result_t
1729 : release_bit_reference( fd_pack_t * pack,
1730 17844495 : fd_acct_addr_t const * acct ) {
1731 :
1732 17844495 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( pack->acct_to_bitset, *acct, NULL );
1733 17844495 :   FD_TEST( q ); /* q==NULL should not be possible */
1734 :
1735 17844495 : q->ref_cnt--;
1736 :
1737 17844495 : if( FD_UNLIKELY( q->ref_cnt==0UL ) ) {
1738 13305680 : ushort bit = q->bit;
1739 13305680 : bitset_map_remove( pack->acct_to_bitset, q );
1740 13305680 : if( FD_LIKELY( bit<FD_PACK_BITSET_MAX ) ) pack->bitset_avail[ ++(pack->bitset_avail_cnt) ] = bit;
1741 :
1742 13305680 : fd_pack_addr_use_t * use = acct_uses_query( pack->acct_in_use, *acct, NULL );
1743 13305680 : if( FD_LIKELY( use ) ) {
1744 12809616 : use->in_use_by |= FD_PACK_IN_USE_BIT_CLEARED;
1745 12809616 : release_result_t ret = { .clear_rw_bit = bit,
1746 12809616 : .clear_w_bit = fd_ushort_if( !!(use->in_use_by & FD_PACK_IN_USE_WRITABLE), bit, FD_PACK_BITSET_MAX ) };
1747 12809616 : return ret;
1748 12809616 : }
1749 13305680 : }
1750 5034879 : release_result_t ret = { .clear_rw_bit = FD_PACK_BITSET_MAX, .clear_w_bit = FD_PACK_BITSET_MAX };
1751 5034879 : return ret;
1752 17844495 : }
1753 :
1754 : typedef struct {
1755 : ulong cus_scheduled;
1756 : ulong txns_scheduled;
1757 : ulong bytes_scheduled;
1758 : } sched_return_t;
1759 :
1760 : static inline sched_return_t
1761 : fd_pack_schedule_impl( fd_pack_t * pack,
1762 : treap_t * sched_from,
1763 : ulong cu_limit,
1764 : ulong txn_limit,
1765 : ulong byte_limit,
1766 : ulong bank_tile,
1767 : fd_pack_smallest_t * smallest_in_treap,
1768 : ulong * use_by_bank_txn,
1769 1505721 : fd_txn_p_t * out ) {
1770 :
1771 1505721 : fd_pack_ord_txn_t * pool = pack->pool;
1772 1505721 : fd_pack_addr_use_t * acct_in_use = pack->acct_in_use;
1773 1505721 : fd_pack_addr_use_t * writer_costs = pack->writer_costs;
1774 :
1775 1505721 : fd_pack_addr_use_t ** written_list = pack->written_list;
1776 1505721 : ulong written_list_cnt = pack->written_list_cnt;
1777 1505721 : ulong written_list_max = pack->written_list_max;
1778 :
1779 1505721 : FD_PACK_BITSET_DECLARE( bitset_rw_in_use );
1780 1505721 : FD_PACK_BITSET_DECLARE( bitset_w_in_use );
1781 1505721 : FD_PACK_BITSET_COPY( bitset_rw_in_use, pack->bitset_rw_in_use );
1782 1505721 : FD_PACK_BITSET_COPY( bitset_w_in_use, pack->bitset_w_in_use );
1783 :
1784 1505721 : fd_pack_addr_use_t * use_by_bank = pack->use_by_bank [bank_tile];
1785 1505721 : ulong use_by_bank_cnt = pack->use_by_bank_cnt[bank_tile];
1786 :
1787 1505721 : ulong max_write_cost_per_acct = pack->lim->max_write_cost_per_acct;
1788 :
1789 1505721 : ushort compressed_slot_number = pack->compressed_slot_number;
1790 :
1791 1505721 : ulong txns_scheduled = 0UL;
1792 1505721 : ulong cus_scheduled = 0UL;
1793 1505721 : ulong bytes_scheduled = 0UL;
1794 :
1795 1505721 : ulong bank_tile_mask = 1UL << bank_tile;
1796 :
1797 1505721 : ulong fast_path = 0UL;
1798 1505721 : ulong slow_path = 0UL;
1799 1505721 : ulong cu_limit_c = 0UL;
1800 1505721 : ulong byte_limit_c = 0UL;
1801 1505721 : ulong write_limit_c = 0UL;
1802 1505721 : ulong skip_c = 0UL;
1803 :
1804 1505721 : ulong min_cus = ULONG_MAX;
1805 1505721 : ulong min_bytes = ULONG_MAX;
1806 :
1807 1505721 : if( FD_UNLIKELY( (cu_limit<smallest_in_treap->cus) | (txn_limit==0UL) | (byte_limit<smallest_in_treap->bytes) ) ) {
1808 814176 : sched_return_t to_return = { .cus_scheduled = 0UL, .txns_scheduled = 0UL, .bytes_scheduled = 0UL };
1809 814176 : return to_return;
1810 814176 : }
1811 :
1812 691545 : treap_rev_iter_t prev = treap_idx_null();
1813 23924139 : for( treap_rev_iter_t _cur=treap_rev_iter_init( sched_from, pool ); !treap_rev_iter_done( _cur ); _cur=prev ) {
1814 : /* Capture next so that we can delete while we iterate. */
1815 23838804 : prev = treap_rev_iter_next( _cur, pool );
1816 :
1817 23838804 : # if FD_HAS_X86
1818 23838804 : _mm_prefetch( &(pool[ prev ].prev), _MM_HINT_T0 );
1819 23838804 : # endif
1820 :
1821 23838804 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pool );
1822 :
1823 23838804 : min_cus = fd_ulong_min( min_cus, cur->compute_est );
1824 23838804 : min_bytes = fd_ulong_min( min_bytes, cur->txn->payload_sz );
1825 :
1826 23838804 : ulong conflicts = 0UL;
1827 :
1828 23838804 : if( FD_UNLIKELY( cur->compute_est>cu_limit ) ) {
1829 : /* Too big to be scheduled at the moment, but might be okay for
1830 : the next microblock, so we don't want to delay it. */
1831 0 : cu_limit_c++;
1832 0 : continue;
1833 0 : }
1834 :
1835 : /* Likely? Unlikely? */
1836 23838804 : if( FD_LIKELY( !FD_PACK_BITSET_INTERSECT4_EMPTY( bitset_rw_in_use, bitset_w_in_use, cur->w_bitset, cur->rw_bitset ) ) ) {
1837 10752822 : fast_path++;
1838 10752822 : continue;
1839 10752822 : }
1840 :
1841 13085982 : if( FD_UNLIKELY( cur->skip==compressed_slot_number ) ) {
1842 0 : skip_c++;
1843 0 : continue;
1844 0 : }
1845 :
1846 :     /* If skip>FD_PACK_SKIP_CNT but not compressed_slot_number, it means
1847 :        it's the compressed slot number of a previous slot.  We don't
1848 :        care unless we're going to update the value though, so we don't
1849 :        need to eagerly reset it to FD_PACK_SKIP_CNT.
1850 : compressed_slot_number is a ushort, so it's possible for it to
1851 : roll over, but the transaction lifetime is much shorter than
1852 : that, so it won't be a problem. */
1853 :
1854 13085982 : if( FD_UNLIKELY( cur->txn->payload_sz>byte_limit ) ) {
1855 6 : byte_limit_c++;
1856 6 : continue;
1857 6 : }
1858 :
1859 :
1860 13085976 : fd_txn_t const * txn = TXN(cur->txn);
1861 13085976 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, cur->txn->payload );
1862 13085976 : fd_acct_addr_t const * alt_adj = cur->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1863 : /* Check conflicts between this transaction's writable accounts and
1864 : current readers */
1865 13085976 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
1866 27342129 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1867 :
1868 14256156 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
1869 :
1870 14256156 : fd_pack_addr_use_t * in_wcost_table = acct_uses_query( writer_costs, acct, NULL );
1871 14256156 : if( FD_UNLIKELY( in_wcost_table && in_wcost_table->total_cost+cur->compute_est > max_write_cost_per_acct ) ) {
1872 : /* Can't be scheduled until the next block */
1873 3 : conflicts = ULONG_MAX;
1874 3 : break;
1875 3 : }
1876 :
1877 14256153 : fd_pack_addr_use_t * use = acct_uses_query( acct_in_use, acct, NULL );
1878 14256153 : if( FD_UNLIKELY( use ) ) conflicts |= use->in_use_by; /* break? */
1879 14256153 : }
1880 :
1881 13085976 : if( FD_UNLIKELY( conflicts==ULONG_MAX ) ) {
1882 : /* The logic for how to adjust skip is a bit complicated, and we
1883 : want to do it branchlessly. Let psc=FD_PACK_SKIP_CNT,
1884 : Before After
1885 : 1 compressed_slot_number
1886 : x in [2, psc] x-1
1887 : x where x>psc psc-1
1888 :
1889 :        Set A=min(x, psc), B=min(A-2, compressed_slot_number-1), and
1890 : note that compressed_slot_number is in [psc+1, USHORT_MAX].
1891 : Then:
1892 : x A A-2 B B+1
1893 : 1 1 USHORT_MAX csn-1 csn
1894 : x in [2, psc] x x-2 x-2 x-1
1895 : x where x>psc psc psc-2 psc-2 psc-1
1896 : So B+1 is the desired value. */
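 :
 :     /* Worked instance (values purely illustrative): with psc=5 and
 :        compressed_slot_number=1000, skip=1 becomes 1000 (deferred for
 :        the rest of this slot), skip=3 becomes 2, and skip=9 becomes 4. */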
1897 3 : cur->skip = (ushort)(1+fd_ushort_min( (ushort)(compressed_slot_number-1),
1898 3 : (ushort)(fd_ushort_min( cur->skip, FD_PACK_SKIP_CNT )-2) ) );
1899 3 : write_limit_c++;
1900 3 : continue;
1901 3 : }
1902 :
1903 13085973 : if( FD_UNLIKELY( conflicts ) ) {
1904 6 : slow_path++;
1905 6 : continue;
1906 6 : }
1907 :
1908 : /* Check conflicts between this transaction's readonly accounts and
1909 : current writers */
1910 13085967 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_READONLY );
1911 16657371 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1912 :
1913 3571404 : fd_acct_addr_t const * acct = ACCT_ITER_TO_PTR( iter );
1914 3571404 : if( fd_pack_unwritable_contains( acct ) ) continue; /* No need to track sysvars because they can't be writable */
1915 :
1916 2576115 : fd_pack_addr_use_t * use = acct_uses_query( acct_in_use, *acct, NULL );
1917 2576115 : if( use ) conflicts |= (use->in_use_by & FD_PACK_IN_USE_WRITABLE) ? use->in_use_by : 0UL;
1918 2576115 : }
1919 :
1920 13085967 : if( FD_UNLIKELY( conflicts ) ) {
1921 0 : slow_path++;
1922 0 : continue;
1923 0 : }
1924 :
1925 : /* Include this transaction in the microblock! */
1926 13085967 : FD_PACK_BITSET_OR( bitset_rw_in_use, cur->rw_bitset );
1927 13085967 : FD_PACK_BITSET_OR( bitset_w_in_use, cur->w_bitset );
1928 :
1929 13085967 : if(
1930 4361989 : #if FD_HAS_AVX512 && FD_PACK_USE_NON_TEMPORAL_MEMCPY
1931 4361989 : FD_LIKELY( cur->txn->payload_sz>=1024UL )
1932 : #else
1933 8723978 : 0
1934 8723978 : #endif
1935 13085967 : ) {
1936 4224 : #if FD_HAS_AVX512 && FD_PACK_USE_NON_TEMPORAL_MEMCPY
1937 4224 : _mm512_stream_si512( (void*)(out->payload+ 0UL), _mm512_load_epi64( cur->txn->payload+ 0UL ) );
1938 4224 : _mm512_stream_si512( (void*)(out->payload+ 64UL), _mm512_load_epi64( cur->txn->payload+ 64UL ) );
1939 4224 : _mm512_stream_si512( (void*)(out->payload+ 128UL), _mm512_load_epi64( cur->txn->payload+ 128UL ) );
1940 4224 : _mm512_stream_si512( (void*)(out->payload+ 192UL), _mm512_load_epi64( cur->txn->payload+ 192UL ) );
1941 4224 : _mm512_stream_si512( (void*)(out->payload+ 256UL), _mm512_load_epi64( cur->txn->payload+ 256UL ) );
1942 4224 : _mm512_stream_si512( (void*)(out->payload+ 320UL), _mm512_load_epi64( cur->txn->payload+ 320UL ) );
1943 4224 : _mm512_stream_si512( (void*)(out->payload+ 384UL), _mm512_load_epi64( cur->txn->payload+ 384UL ) );
1944 4224 : _mm512_stream_si512( (void*)(out->payload+ 448UL), _mm512_load_epi64( cur->txn->payload+ 448UL ) );
1945 4224 : _mm512_stream_si512( (void*)(out->payload+ 512UL), _mm512_load_epi64( cur->txn->payload+ 512UL ) );
1946 4224 : _mm512_stream_si512( (void*)(out->payload+ 576UL), _mm512_load_epi64( cur->txn->payload+ 576UL ) );
1947 4224 : _mm512_stream_si512( (void*)(out->payload+ 640UL), _mm512_load_epi64( cur->txn->payload+ 640UL ) );
1948 4224 : _mm512_stream_si512( (void*)(out->payload+ 704UL), _mm512_load_epi64( cur->txn->payload+ 704UL ) );
1949 4224 : _mm512_stream_si512( (void*)(out->payload+ 768UL), _mm512_load_epi64( cur->txn->payload+ 768UL ) );
1950 4224 : _mm512_stream_si512( (void*)(out->payload+ 832UL), _mm512_load_epi64( cur->txn->payload+ 832UL ) );
1951 4224 : _mm512_stream_si512( (void*)(out->payload+ 896UL), _mm512_load_epi64( cur->txn->payload+ 896UL ) );
1952 4224 : _mm512_stream_si512( (void*)(out->payload+ 960UL), _mm512_load_epi64( cur->txn->payload+ 960UL ) );
1953 4224 : _mm512_stream_si512( (void*)(out->payload+1024UL), _mm512_load_epi64( cur->txn->payload+1024UL ) );
1954 4224 : _mm512_stream_si512( (void*)(out->payload+1088UL), _mm512_load_epi64( cur->txn->payload+1088UL ) );
1955 4224 : _mm512_stream_si512( (void*)(out->payload+1152UL), _mm512_load_epi64( cur->txn->payload+1152UL ) );
1956 4224 : _mm512_stream_si512( (void*)(out->payload+1216UL), _mm512_load_epi64( cur->txn->payload+1216UL ) );
1957 : /* Copied out to 1280 bytes, which copies some other fields we needed to
1958 : copy anyway. */
1959 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, payload_sz )+sizeof(((fd_txn_p_t*)NULL)->payload_sz )<=1280UL, nt_memcpy );
1960 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, blockhash_slot )+sizeof(((fd_txn_p_t*)NULL)->blockhash_slot)<=1280UL, nt_memcpy );
1961 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, scheduler_arrival_time_nanos )+sizeof(((fd_txn_p_t*)NULL)->scheduler_arrival_time_nanos )<=1280UL, nt_memcpy );
1962 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, source_tpu )+sizeof(((fd_txn_p_t*)NULL)->source_tpu )<=1280UL, nt_memcpy );
1963 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, source_ipv4 )+sizeof(((fd_txn_p_t*)NULL)->source_ipv4 )<=1280UL, nt_memcpy );
1964 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, flags )+sizeof(((fd_txn_p_t*)NULL)->flags )<=1280UL, nt_memcpy );
1965 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, _ ) <=1280UL, nt_memcpy );
1966 4224 : const ulong offset_into_txn = 1280UL - offsetof(fd_txn_p_t, _ );
1967 4224 : fd_memcpy( offset_into_txn+(uchar *)TXN(out), offset_into_txn+(uchar const *)txn,
1968 4224 : fd_ulong_max( offset_into_txn, fd_txn_footprint( txn->instr_cnt, txn->addr_table_lookup_cnt ) )-offset_into_txn );
1969 4224 : #endif
1970 13081743 : } else {
1971 13081743 : fd_memcpy( out->payload, cur->txn->payload, cur->txn->payload_sz );
1972 13081743 : fd_memcpy( TXN(out), txn, fd_txn_footprint( txn->instr_cnt, txn->addr_table_lookup_cnt ) );
1973 13081743 : out->payload_sz = cur->txn->payload_sz;
1974 13081743 : out->pack_cu.requested_exec_plus_acct_data_cus = cur->txn->pack_cu.requested_exec_plus_acct_data_cus;
1975 13081743 : out->pack_cu.non_execution_cus = cur->txn->pack_cu.non_execution_cus;
1976 13081743 : out->scheduler_arrival_time_nanos = cur->txn->scheduler_arrival_time_nanos;
1977 13081743 : out->source_tpu = cur->txn->source_tpu;
1978 13081743 : out->source_ipv4 = cur->txn->source_ipv4;
1979 13081743 : out->flags = cur->txn->flags;
1980 13081743 : }
1981 13085967 : out++;
1982 :
1983 13085967 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
1984 27342105 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1985 14256138 : fd_acct_addr_t acct_addr = *ACCT_ITER_TO_PTR( iter );
1986 :
1987 14256138 : fd_pack_addr_use_t * in_wcost_table = acct_uses_query( writer_costs, acct_addr, NULL );
1988 14256138 : if( !in_wcost_table ) {
1989 793968 : in_wcost_table = acct_uses_insert( writer_costs, acct_addr );
1990 793968 : in_wcost_table->total_cost = 0UL;
1991 793968 : written_list[ written_list_cnt ] = in_wcost_table;
1992 793968 : written_list_cnt = fd_ulong_min( written_list_cnt+1UL, written_list_max-1UL );
1993 793968 : }
1994 14256138 : in_wcost_table->total_cost += cur->compute_est;
1995 :
1996 14256138 : fd_pack_addr_use_t * use = acct_uses_insert( acct_in_use, acct_addr );
1997 14256138 : use->in_use_by = bank_tile_mask | FD_PACK_IN_USE_WRITABLE;
1998 :
1999 14256138 : use_by_bank[use_by_bank_cnt++] = *use;
2000 :
2001 : /* If there aren't any more references to this account in the
2002 : heap, it can't cause any conflicts. That means we actually
2003 : don't need to record that we are using it, which is good
2004 : because we want to release the bit. */
2005 14256138 : release_result_t ret = release_bit_reference( pack, &acct_addr );
2006 14256138 : FD_PACK_BITSET_CLEARN( bitset_rw_in_use, ret.clear_rw_bit );
2007 14256138 : FD_PACK_BITSET_CLEARN( bitset_w_in_use, ret.clear_w_bit );
2008 14256138 : }
2009 13085967 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_READONLY );
2010 16657371 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2011 :
2012 3571404 : fd_acct_addr_t acct_addr = *ACCT_ITER_TO_PTR( iter );
2013 :
2014 3571404 : if( fd_pack_unwritable_contains( &acct_addr ) ) continue; /* No need to track sysvars because they can't be writable */
2015 :
2016 2576115 : fd_pack_addr_use_t * use = acct_uses_query( acct_in_use, acct_addr, NULL );
2017 2576115 : if( !use ) { use = acct_uses_insert( acct_in_use, acct_addr ); use->in_use_by = 0UL; }
2018 :
2019 2576115 : if( !(use->in_use_by & bank_tile_mask) ) use_by_bank[use_by_bank_cnt++] = *use;
2020 2576115 : use->in_use_by |= bank_tile_mask;
2021 2576115 : use->in_use_by &= ~FD_PACK_IN_USE_BIT_CLEARED;
2022 :
2023 :
2024 2576115 : release_result_t ret = release_bit_reference( pack, &acct_addr );
2025 2576115 : FD_PACK_BITSET_CLEARN( bitset_rw_in_use, ret.clear_rw_bit );
2026 2576115 : FD_PACK_BITSET_CLEARN( bitset_w_in_use, ret.clear_w_bit );
2027 2576115 : }
2028 :
2029 13085967 : txns_scheduled += 1UL; txn_limit -= 1UL;
2030 13085967 : cus_scheduled += cur->compute_est; cu_limit -= cur->compute_est;
2031 13085967 : bytes_scheduled += cur->txn->payload_sz; byte_limit -= cur->txn->payload_sz;
2032 :
2033 13085967 : *(use_by_bank_txn++) = use_by_bank_cnt;
2034 :
2035 13085967 : if( FD_UNLIKELY( cur->txn->flags & FD_TXN_P_FLAGS_DURABLE_NONCE ) ) noncemap_ele_remove_fast( pack->noncemap, cur, pack->pool );
2036 13085967 : sig2txn_ele_remove_fast( pack->signature_map, cur, pool );
2037 :
2038 13085967 : cur->root = FD_ORD_TXN_ROOT_FREE;
2039 13085967 : expq_remove( pack->expiration_q, cur->expq_idx );
2040 13085967 : treap_idx_remove( sched_from, _cur, pool );
2041 13085967 : trp_pool_idx_release( pool, _cur );
2042 13085967 : pack->pending_txn_cnt--;
2043 :
2044 13085967 : if( FD_UNLIKELY( (cu_limit<smallest_in_treap->cus) | (txn_limit==0UL) | (byte_limit<smallest_in_treap->bytes) ) ) break;
2045 13085967 : }
2046 :
2047 691545 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_TAKEN_IDX ] += txns_scheduled;
2048 691545 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_CU_LIMIT_IDX ] += cu_limit_c;
2049 691545 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_FAST_PATH_IDX ] += fast_path;
2050 691545 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_BYTE_LIMIT_IDX ] += byte_limit_c;
2051 691545 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_WRITE_COST_IDX ] += write_limit_c;
2052 691545 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_SLOW_PATH_IDX ] += slow_path;
2053 691545 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_DEFER_SKIP_IDX ] += skip_c;
2054 :
2055 : /* If we scanned the whole treap and didn't break early, we now have a
2056 : better estimate of the smallest. */
2057 691545 : if( FD_UNLIKELY( treap_rev_iter_done( prev ) ) ) {
2058 88386 : smallest_in_treap->cus = min_cus;
2059 88386 : smallest_in_treap->bytes = min_bytes;
2060 88386 : }
2061 :
2062 691545 : pack->use_by_bank_cnt[bank_tile] = use_by_bank_cnt;
2063 691545 : FD_PACK_BITSET_COPY( pack->bitset_rw_in_use, bitset_rw_in_use );
2064 691545 : FD_PACK_BITSET_COPY( pack->bitset_w_in_use, bitset_w_in_use );
2065 :
2066 691545 : pack->written_list_cnt = written_list_cnt;
2067 :
2068 691545 : sched_return_t to_return = { .cus_scheduled=cus_scheduled, .txns_scheduled=txns_scheduled, .bytes_scheduled=bytes_scheduled };
2069 691545 : return to_return;
2070 1505721 : }
2071 :
2072 : int
2073 : fd_pack_microblock_complete( fd_pack_t * pack,
2074 752913 : ulong bank_tile ) {
2075 : /* If the account is in use writably, and it's in use by this banking
2076 : tile, then this banking tile must be the sole writer to it, so it's
2077 : always okay to clear the writable bit. */
2078 752913 : ulong clear_mask = ~((1UL<<bank_tile) | FD_PACK_IN_USE_WRITABLE);
2079 :
2080 : /* If nothing outstanding, bail quickly */
2081 752913 : if( FD_UNLIKELY( !(pack->outstanding_microblock_mask & (1UL<<bank_tile)) ) ) return 0;
2082 :
2083 685575 : FD_PACK_BITSET_DECLARE( bitset_rw_in_use );
2084 685575 : FD_PACK_BITSET_DECLARE( bitset_w_in_use );
2085 685575 : FD_PACK_BITSET_COPY( bitset_rw_in_use, pack->bitset_rw_in_use );
2086 685575 : FD_PACK_BITSET_COPY( bitset_w_in_use, pack->bitset_w_in_use );
2087 :
2088 685575 : fd_pack_addr_use_t * base = pack->use_by_bank[bank_tile];
2089 :
2090 685575 : fd_pack_ord_txn_t * best = NULL;
2091 685575 : fd_pack_penalty_treap_t * best_penalty = NULL;
2092 685575 : ulong txn_cnt = 0UL;
2093 :
2094 16895910 : for( ulong i=0UL; i<pack->use_by_bank_cnt[bank_tile]; i++ ) {
2095 16210335 : fd_pack_addr_use_t * use = acct_uses_query( pack->acct_in_use, base[i].key, NULL );
2096 16210335 : FD_TEST( use );
2097 16210335 : use->in_use_by &= clear_mask;
2098 :
2099 : /* In order to properly bound the size of bitset_map, we need to
2100 : release the "reference" to the account when we schedule it.
2101 : However, that poses a bit of a problem here, because by the time
2102 : we complete the microblock, that account could have been assigned
2103 : a different bit in the bitset. The scheduling step tells us if
2104 : that is the case, and if so, we know that the bits in
2105 : bitset_w_in_use and bitset_rw_in_use were already cleared as
2106 : necessary.
2107 :
2108 : Note that it's possible for BIT_CLEARED to be set and then unset
2109 : by later uses, but then the account would be in use on other
2110 : banks, so we wouldn't try to observe the old value. For example:
2111 : Suppose bit 0->account A, bit 1->account B, and we have two
2112 : transactions that read A, B. We schedule a microblock to bank 0,
2113 : taking both transactions, which sets the counts for A, B to 0,
2114 : and releases the bits, clearing bits 0 and 1, and setting
2115 : BIT_CLEARED. Then we get two more transactions that read
2116 : accounts C, D, A, B, and they get assigned 0->C, 1->D, 2->A,
2117 : 3->B. We try to schedule a microblock to bank 1 that takes one
2118 : of those transactions. This unsets BIT_CLEARED for A, B.
2119 : Finally, the first microblock completes. Even though the bitset
2120 : map has the new bits for A and B which are "wrong" compared to
2121 : when the transaction was initially scheduled, those bits have
2122 : already been cleared and reset properly in the bitset as needed.
2123 : A and B will still be in use by bank 1, so we won't clear any
2124 : bits. If, on the other hand, the microblock scheduled to bank 1
2125 : completes first, bits 0 and 1 will be cleared for accounts C and
2126 : D, while bits 2 and 3 will remain set, which is correct. Then
2127 : when bank 0 completes, bits 2 and 3 will be cleared. */
2128 16210335 : if( FD_LIKELY( !use->in_use_by ) ) { /* if in_use_by==0, doesn't include BIT_CLEARED */
2129 3408801 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( pack->acct_to_bitset, base[i].key, NULL );
2130 3408801 : FD_TEST( q );
2131 3408801 : FD_PACK_BITSET_CLEARN( bitset_w_in_use, q->bit );
2132 3408801 : FD_PACK_BITSET_CLEARN( bitset_rw_in_use, q->bit );
2133 :
2134 : /* Because this account is no longer in use, it might be possible
2135 : to schedule a transaction that writes to it. Check its
2136 : penalty treap if it has one, and potentially move it to the
2137 : main treap. */
2138 3408801 : fd_pack_penalty_treap_t * p_trp = penalty_map_query( pack->penalty_treaps, base[i].key, NULL );
2139 3408801 : if( FD_UNLIKELY( p_trp ) ) {
2140 752813 : fd_pack_ord_txn_t * best_in_trp = treap_rev_iter_ele( treap_rev_iter_init( p_trp->penalty_treap, pack->pool ), pack->pool );
2141 752813 : if( FD_UNLIKELY( !best || COMPARE_WORSE( best, best_in_trp ) ) ) {
2142 301626 : best = best_in_trp;
2143 301626 : best_penalty = p_trp;
2144 301626 : }
2145 752813 : }
2146 3408801 : }
2147 :
2148 16210335 : if( FD_LIKELY( !(use->in_use_by & ~FD_PACK_IN_USE_BIT_CLEARED) ) ) acct_uses_remove( pack->acct_in_use, use );
2149 :
2150 16210335 : if( FD_UNLIKELY( i+1UL==pack->use_by_bank_txn[ bank_tile ][ txn_cnt ] ) ) {
2151 13082157 : txn_cnt++;
2152 13082157 : if( FD_LIKELY( best ) ) {
2153 : /* move best to the main treap */
2154 301626 : treap_ele_remove( best_penalty->penalty_treap, best, pack->pool );
2155 301626 : best->root = FD_ORD_TXN_ROOT_PENDING;
2156 301626 : treap_ele_insert( pack->pending, best, pack->pool );
2157 :
2158 301626 : pack->pending_smallest->cus = fd_ulong_min( pack->pending_smallest->cus, best->compute_est );
2159 301626 : pack->pending_smallest->bytes = fd_ulong_min( pack->pending_smallest->bytes, best->txn_e->txnp->payload_sz );
2160 :
2161 301626 : if( FD_UNLIKELY( !treap_ele_cnt( best_penalty->penalty_treap ) ) ) {
2162 2892 : treap_delete( treap_leave( best_penalty->penalty_treap ) );
2163 : /* Removal invalidates any pointers we got from
2164 : penalty_map_query, but we immediately set these to NULL, so
2165 : we're not keeping any pointers around. */
2166 2892 : penalty_map_remove( pack->penalty_treaps, best_penalty );
2167 2892 : }
2168 301626 : best = NULL;
2169 301626 : best_penalty = NULL;
2170 301626 : }
2171 13082157 : }
2172 16210335 : }
2173 :
2174 685575 : pack->use_by_bank_cnt[bank_tile] = 0UL;
2175 :
2176 685575 : FD_PACK_BITSET_COPY( pack->bitset_rw_in_use, bitset_rw_in_use );
2177 685575 : FD_PACK_BITSET_COPY( pack->bitset_w_in_use, bitset_w_in_use );
2178 :
2179 : /* outstanding_microblock_mask never has the writable bit set, so we
2180 : don't care about clearing it here either. */
2181 685575 : pack->outstanding_microblock_mask &= clear_mask;
2182 685575 : return 1;
2183 685575 : }
2184 :
2185 752688 : #define TRY_BUNDLE_NO_READY_BUNDLES 0
2186 6 : #define TRY_BUNDLE_HAS_CONFLICTS (-1)
2187 6 : #define TRY_BUNDLE_DOES_NOT_FIT (-2)
2188 6 : #define TRY_BUNDLE_SUCCESS(n) ( n) /* schedule bundle with n transactions */
2189 : static inline int
2190 : fd_pack_try_schedule_bundle( fd_pack_t * pack,
2191 : ulong bank_tile,
2192 752694 : fd_txn_p_t * out ) {
2193 752694 : int state = pack->initializer_bundle_state;
2194 752694 : if( FD_UNLIKELY( (state==FD_PACK_IB_STATE_PENDING) | (state==FD_PACK_IB_STATE_FAILED ) ) ) return TRY_BUNDLE_NO_READY_BUNDLES;
2195 :
2196 752694 : fd_pack_ord_txn_t * pool = pack->pool;
2197 752694 : treap_t * bundles = pack->pending_bundles;
2198 :
2199 752694 : int require_ib;
2200 752694 : if( FD_UNLIKELY( state==FD_PACK_IB_STATE_NOT_INITIALIZED ) ) { require_ib = 1; }
2201 752694 : if( FD_LIKELY ( state==FD_PACK_IB_STATE_READY ) ) { require_ib = 0; }
2202 :
2203 752694 : treap_rev_iter_t _cur = treap_rev_iter_init( bundles, pool );
2204 752694 : ulong bundle_idx = ULONG_MAX;
2205 :
2206 :   /* Skip any that we've marked as not fitting in this block */
2207 752694 : while( FD_UNLIKELY( !treap_rev_iter_done( _cur ) && treap_rev_iter_ele( _cur, pool )->skip==pack->compressed_slot_number ) ) {
2208 0 : _cur = treap_rev_iter_next( _cur, pool );
2209 0 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_DEFER_SKIP_IDX ]++;
2210 0 : }
2211 :
2212 752694 : if( FD_UNLIKELY( treap_rev_iter_done( _cur ) ) ) return TRY_BUNDLE_NO_READY_BUNDLES;
2213 :
2214 6 : treap_rev_iter_t _txn0 = _cur;
2215 6 : fd_pack_ord_txn_t * txn0 = treap_rev_iter_ele( _txn0, pool );
2216 6 : int is_ib = !!(txn0->txn->flags & FD_TXN_P_FLAGS_INITIALIZER_BUNDLE);
2217 6 : bundle_idx = RC_TO_REL_BUNDLE_IDX( txn0->rewards, txn0->compute_est );
2218 :
2219 6 : if( FD_UNLIKELY( require_ib & !is_ib ) ) return TRY_BUNDLE_NO_READY_BUNDLES;
2220 :
2221 : /* At this point, we have our candidate bundle, so we'll schedule it
2222 : if we can. If we can't, we won't schedule anything. */
2223 :
2224 :
2225 6 : fd_pack_addr_use_t * bundle_temp_inserted[ FD_PACK_MAX_TXN_PER_BUNDLE * FD_TXN_ACCT_ADDR_MAX ];
2226 6 : ulong bundle_temp_inserted_cnt = 0UL;
2227 :
2228 6 : ulong bank_tile_mask = 1UL << bank_tile;
2229 :
2230 6 : int doesnt_fit = 0;
2231 6 : int has_conflict = 0;
2232 6 : ulong txn_cnt = 0UL;
2233 :
2234 6 : ulong cu_limit = pack->lim->max_cost_per_block - pack->cumulative_block_cost;
2235 6 : ulong byte_limit = pack->lim->max_data_bytes_per_block - pack->data_bytes_consumed;
2236 6 : ulong microblock_limit = pack->lim->max_microblocks_per_block - pack->microblock_cnt;
2237 :
2238 6 : FD_PACK_BITSET_DECLARE( bitset_rw_in_use );
2239 6 : FD_PACK_BITSET_DECLARE( bitset_w_in_use );
2240 6 : FD_PACK_BITSET_COPY( bitset_rw_in_use, pack->bitset_rw_in_use );
2241 6 : FD_PACK_BITSET_COPY( bitset_w_in_use, pack->bitset_w_in_use );
2242 :
2243 :     /* last_use_in_txn_cnt[i+1] keeps track of the number of accounts that
2244 : have their last reference in transaction i of the bundle. This
2245 : esoteric value is important for computing use_by_bank_txn.
2246 : last_use_in_txn_cnt[0] is garbage. */
2247 6 : ulong last_use_in_txn_cnt[ 1UL+FD_PACK_MAX_TXN_PER_BUNDLE ] = { 0UL };
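  /* Illustrative worked example (editorial sketch, not from the original
     source): consider a 3-transaction bundle where account A is referenced
     by transactions 0 and 2 and account B only by transaction 1.  A's last
     reference is in transaction 2, so last_use_in_txn_cnt[3] ends at 1, and
     B's last reference is in transaction 1, so last_use_in_txn_cnt[2] ends
     at 1.  The prefix sum taken further below then yields
     use_by_bank_txn = { 0, 0, 1 }: accounts whose last use is in
     transaction k are written starting at offset use_by_bank_txn[k] of
     use_by_bank, so B lands in slot 0, A lands in slot 1, and
     use_by_bank_cnt becomes 2. */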
2248 :
2249 6 : fd_pack_addr_use_t null_use[1] = {{{{ 0 }}, { 0 }}};
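  /* Editorial note: null_use is a zeroed sentinel passed as the default
     element to acct_uses_query below.  When an address is absent from a
     table, the query returns null_use instead of NULL, so fields like
     carried_cost, total_cost, and in_use_by read as 0 without an extra
     branch, and the test in_bundle_temp==null_use is how the code detects
     "not found". */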
2250 :
2251 24 : while( !(doesnt_fit | has_conflict) & !treap_rev_iter_done( _cur ) ) {
2252 18 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pool );
2253 18 : ulong this_bundle_idx = RC_TO_REL_BUNDLE_IDX( cur->rewards, cur->compute_est );
2254 18 : if( FD_UNLIKELY( this_bundle_idx!=bundle_idx ) ) break;
2255 :
2256 18 : if( FD_UNLIKELY( cur->compute_est>cu_limit ) ) {
2257 0 : doesnt_fit = 1;
2258 0 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_CU_LIMIT_IDX ]++;
2259 0 : break;
2260 0 : }
2261 18 : cu_limit -= cur->compute_est;
2262 :
2263 : /* Each transaction in a bundle turns into a microblock */
2264 18 : if( FD_UNLIKELY( microblock_limit==0UL ) ) {
2265 0 : doesnt_fit = 1;
2266 0 : FD_MCNT_INC( PACK, MICROBLOCK_PER_BLOCK_LIMIT, 1UL );
2267 0 : break;
2268 0 : }
2269 18 : microblock_limit--;
2270 :
2271 18 : if( FD_UNLIKELY( cur->txn->payload_sz+MICROBLOCK_DATA_OVERHEAD>byte_limit ) ) {
2272 0 : doesnt_fit = 1;
2273 0 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_BYTE_LIMIT_IDX ]++;
2274 0 : break;
2275 0 : }
2276 18 : byte_limit -= cur->txn->payload_sz + MICROBLOCK_DATA_OVERHEAD;
2277 :
2278 18 : if( FD_UNLIKELY( !FD_PACK_BITSET_INTERSECT4_EMPTY( pack->bitset_rw_in_use, pack->bitset_w_in_use, cur->w_bitset, cur->rw_bitset ) ) ) {
2279 0 : has_conflict = 1;
2280 0 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_FAST_PATH_IDX ]++;
2281 0 : break;
2282 0 : }
2283 :
2284 : /* Don't update the actual in-use bitset, because the transactions
2285 : in the bundle are allowed to conflict with each other. */
2286 18 : FD_PACK_BITSET_OR( bitset_rw_in_use, cur->rw_bitset );
2287 18 : FD_PACK_BITSET_OR( bitset_w_in_use, cur->w_bitset );
2288 :
2289 :
2290 18 : fd_txn_t const * txn = TXN(cur->txn);
2291 18 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, cur->txn->payload );
2292 18 : fd_acct_addr_t const * alt_adj = cur->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
2293 :
2294 : /* Check conflicts between this transaction's writable accounts and
2295 : current readers */
2296 18 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
2297 108 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2298 :
2299 90 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
2300 :
2301 90 : fd_pack_addr_use_t * in_bundle_temp = acct_uses_query( pack->bundle_temp_map, acct, null_use );
2302 90 : ulong current_cost = acct_uses_query( pack->writer_costs, acct, null_use )->total_cost;
2303 90 : ulong carried_cost = (ulong)in_bundle_temp->carried_cost;
2304 90 : if( FD_UNLIKELY( current_cost + carried_cost + cur->compute_est > pack->lim->max_write_cost_per_acct ) ) {
2305 0 : doesnt_fit = 1;
2306 0 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_WRITE_COST_IDX ]++;
2307 0 : break;
2308 0 : }
2309 :
2310 90 : if( FD_LIKELY( in_bundle_temp==null_use ) ) { /* Not in temp bundle table yet */
2311 30 : in_bundle_temp = acct_uses_insert( pack->bundle_temp_map, acct );
2312 30 : in_bundle_temp->_ = 0UL;
2313 30 : bundle_temp_inserted[ bundle_temp_inserted_cnt++ ] = in_bundle_temp;
2314 30 : }
2315 90 : in_bundle_temp->carried_cost += (uint)cur->compute_est; /* < 2^21, but >0 */
2316 90 : in_bundle_temp->ref_cnt++;
2317 90 : last_use_in_txn_cnt[ in_bundle_temp->last_use_in ]--;
2318 90 : in_bundle_temp->last_use_in = (ushort)(txn_cnt+1UL);
2319 90 : last_use_in_txn_cnt[ in_bundle_temp->last_use_in ]++;
2320 :
2321 90 : if( FD_UNLIKELY( acct_uses_query( pack->acct_in_use, acct, null_use )->in_use_by ) ) {
2322 0 : has_conflict = 1;
2323 0 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_SLOW_PATH_IDX ]++;
2324 0 : break;
2325 0 : }
2326 90 : }
2327 18 : if( has_conflict | doesnt_fit ) break;
2328 :
2329 : /* Check conflicts between this transaction's readonly accounts and
2330 : current writers */
2331 18 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_READONLY );
2332 126 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2333 :
2334 108 : fd_acct_addr_t const * acct = ACCT_ITER_TO_PTR( iter );
2335 108 : if( fd_pack_unwritable_contains( acct ) ) continue; /* No need to track sysvars because they can't be writable */
2336 :
2337 54 : fd_pack_addr_use_t * in_bundle_temp = acct_uses_query( pack->bundle_temp_map, *acct, null_use );
2338 54 : if( FD_LIKELY( in_bundle_temp==null_use ) ) { /* Not in temp bundle table yet */
2339 18 : in_bundle_temp = acct_uses_insert( pack->bundle_temp_map, *acct );
2340 18 : in_bundle_temp->_ = 0UL;
2341 18 : bundle_temp_inserted[ bundle_temp_inserted_cnt++ ] = in_bundle_temp;
2342 18 : }
2343 54 : in_bundle_temp->ref_cnt++;
2344 54 : last_use_in_txn_cnt[ in_bundle_temp->last_use_in ]--;
2345 54 : in_bundle_temp->last_use_in = (ushort)(txn_cnt+1UL);
2346 54 : last_use_in_txn_cnt[ in_bundle_temp->last_use_in ]++;
2347 :
2348 54 : if( FD_UNLIKELY( acct_uses_query( pack->acct_in_use, *acct, null_use )->in_use_by & FD_PACK_IN_USE_WRITABLE ) ) {
2349 0 : has_conflict = 1;
2350 0 : pack->sched_results[ FD_METRICS_ENUM_PACK_TXN_SCHEDULE_V_SLOW_PATH_IDX ]++;
2351 0 : break;
2352 0 : }
2353 54 : }
2354 :
2355 18 : if( has_conflict | doesnt_fit ) break;
2356 :
2357 18 : txn_cnt++;
2358 18 : _cur = treap_rev_iter_next( _cur, pool );
2359 18 : }
2360 6 : int retval = fd_int_if( doesnt_fit, TRY_BUNDLE_DOES_NOT_FIT,
2361 6 : fd_int_if( has_conflict, TRY_BUNDLE_HAS_CONFLICTS, TRY_BUNDLE_SUCCESS( (int)txn_cnt ) ) );
2362 :
2363 6 : if( FD_UNLIKELY( retval<=0 ) ) {
2364 0 : for( ulong i=0UL; i<bundle_temp_inserted_cnt; i++ ) {
2365 0 : acct_uses_remove( pack->bundle_temp_map, bundle_temp_inserted[ bundle_temp_inserted_cnt-i-1UL ] );
2366 0 : }
2367 0 : FD_TEST( acct_uses_key_cnt( pack->bundle_temp_map )==0UL );
2368 :
2369 0 : if( FD_UNLIKELY( retval==TRY_BUNDLE_DOES_NOT_FIT ) ) {
2370 : /* Decrement the skip count for the bundle we just tried. */
2371 :
2372 0 : for( _cur=_txn0; !treap_rev_iter_done( _cur ); _cur=treap_rev_iter_next( _cur, pool ) ) {
2373 0 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pool );
2374 0 : ulong this_bundle_idx = RC_TO_REL_BUNDLE_IDX( cur->rewards, cur->compute_est );
2375 0 : if( FD_UNLIKELY( this_bundle_idx!=bundle_idx ) ) break;
2376 :
2377 : /* See fd_pack_schedule_impl for this line */
2378 0 : cur->skip = (ushort)(1+fd_ushort_min( (ushort)(pack->compressed_slot_number-1),
2379 0 : (ushort)(fd_ushort_min( cur->skip, FD_PACK_SKIP_CNT )-2) ) );
2380 0 : }
2381 0 : }
2382 0 : return retval;
2383 0 : }
2384 :
2385 : /* This bundle passed validation, so now we'll take it! */
2386 6 : pack->outstanding_microblock_mask |= bank_tile_mask;
2387 :
2388 6 : treap_rev_iter_t _end = _cur;
2389 6 : treap_rev_iter_t _next;
2390 :
2391 :   /* We'll carefully and incrementally construct use_by_bank and
2392 : use_by_bank_txn based on the contents of bundle_temp and
2393 : last_use_in_txn_cnt. */
2394 6 : fd_pack_addr_use_t * use_by_bank = pack->use_by_bank [bank_tile];
2395 6 : ulong * use_by_bank_txn = pack->use_by_bank_txn[bank_tile];
2396 6 : ulong cum_sum = 0UL;
2397 24 : for( ulong k=0UL; k<txn_cnt; k++ ) { use_by_bank_txn[k] = cum_sum; cum_sum += last_use_in_txn_cnt[ k+1UL ]; }
2398 6 : pack->use_by_bank_cnt[bank_tile] = cum_sum;
2399 :
2400 :
2401 24 : for( _cur=_txn0; _cur!=_end; _cur=_next ) {
2402 18 : _next = treap_rev_iter_next( _cur, pool );
2403 :
2404 18 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pool );
2405 18 : fd_txn_t const * txn = TXN(cur->txn);
2406 18 : fd_memcpy( out->payload, cur->txn->payload, cur->txn->payload_sz );
2407 18 : fd_memcpy( TXN(out), txn, fd_txn_footprint( txn->instr_cnt, txn->addr_table_lookup_cnt ) );
2408 18 : out->payload_sz = cur->txn->payload_sz;
2409 18 : out->pack_cu.requested_exec_plus_acct_data_cus = cur->txn->pack_cu.requested_exec_plus_acct_data_cus;
2410 18 : out->pack_cu.non_execution_cus = cur->txn->pack_cu.non_execution_cus;
2411 18 : out->scheduler_arrival_time_nanos = cur->txn->scheduler_arrival_time_nanos;
2412 18 : out->source_tpu = cur->txn->source_tpu;
2413 18 : out->source_ipv4 = cur->txn->source_ipv4;
2414 18 : out->flags = cur->txn->flags;
2415 18 : out++;
2416 :
2417 18 : pack->cumulative_block_cost += cur->compute_est;
2418 18 : pack->data_bytes_consumed += cur->txn->payload_sz + MICROBLOCK_DATA_OVERHEAD;
2419 18 : pack->microblock_cnt += 1UL;
2420 :
2421 18 : if( FD_UNLIKELY( cur->txn->flags & FD_TXN_P_FLAGS_DURABLE_NONCE ) ) noncemap_ele_remove_fast( pack->noncemap, cur, pack->pool );
2422 18 : sig2txn_ele_remove_fast( pack->signature_map, cur, pack->pool );
2423 :
2424 18 : cur->root = FD_ORD_TXN_ROOT_FREE;
2425 18 : expq_remove( pack->expiration_q, cur->expq_idx );
2426 18 : treap_idx_remove( pack->pending_bundles, _cur, pack->pool );
2427 18 : trp_pool_idx_release( pack->pool, _cur );
2428 18 : pack->pending_txn_cnt--;
2429 18 : }
2430 :
2431 :
2432 54 : for( ulong i=0UL; i<bundle_temp_inserted_cnt; i++ ) {
2433 : /* In order to clear bundle_temp_map with the typical trick, we need
2434 : to iterate through bundle_temp_inserted backwards. */
2435 48 : fd_pack_addr_use_t * addr_use = bundle_temp_inserted[ bundle_temp_inserted_cnt-i-1UL ];
2436 :
2437 48 : int any_writers = addr_use->carried_cost>0U; /* Did any transaction in this bundle write lock this account address? */
2438 :
2439 48 : if( FD_LIKELY( any_writers ) ) { /* UNLIKELY? */
2440 30 : fd_pack_addr_use_t * in_wcost_table = acct_uses_query( pack->writer_costs, addr_use->key, NULL );
2441 30 : if( !in_wcost_table ) {
2442 15 : in_wcost_table = acct_uses_insert( pack->writer_costs, addr_use->key );
2443 15 : in_wcost_table->total_cost = 0UL;
2444 15 : pack->written_list[ pack->written_list_cnt ] = in_wcost_table;
2445 15 : pack->written_list_cnt = fd_ulong_min( pack->written_list_cnt+1UL, pack->written_list_max-1UL );
2446 15 : }
2447 30 : in_wcost_table->total_cost += (ulong)addr_use->carried_cost;
2448 30 : }
2449 :
2450 : /* in_use_by must be set before releasing the bit reference */
2451 48 : fd_pack_addr_use_t * use = acct_uses_query( pack->acct_in_use, addr_use->key, NULL );
2452 48 : if( !use ) { use = acct_uses_insert( pack->acct_in_use, addr_use->key ); use->in_use_by = 0UL; }
2453 48 : use->in_use_by |= bank_tile_mask | fd_ulong_if( any_writers, FD_PACK_IN_USE_WRITABLE, 0UL );
2454 48 : use->in_use_by &= ~FD_PACK_IN_USE_BIT_CLEARED;
2455 :
2456 48 : use_by_bank[ use_by_bank_txn[ addr_use->last_use_in-1UL ]++ ] = *use;
2457 :
2458 192 : for( ulong k=0UL; k<(ulong)addr_use->ref_cnt; k++ ) {
2459 144 : release_result_t ret = release_bit_reference( pack, &(addr_use->key) );
2460 144 : FD_PACK_BITSET_CLEARN( bitset_rw_in_use, ret.clear_rw_bit );
2461 144 : FD_PACK_BITSET_CLEARN( bitset_w_in_use, ret.clear_w_bit );
2462 144 : }
2463 :
2464 48 : acct_uses_remove( pack->bundle_temp_map, addr_use );
2465 48 : }
2466 :
2467 6 : FD_PACK_BITSET_COPY( pack->bitset_rw_in_use, bitset_rw_in_use );
2468 6 : FD_PACK_BITSET_COPY( pack->bitset_w_in_use, bitset_w_in_use );
2469 :
2470 6 : if( FD_UNLIKELY( is_ib ) ) {
2471 0 : pack->initializer_bundle_state = FD_PACK_IB_STATE_PENDING;
2472 0 : }
2473 6 : return retval;
2474 6 : }
2475 :
2476 :
2477 : ulong
2478 : fd_pack_schedule_next_microblock( fd_pack_t * pack,
2479 : ulong total_cus,
2480 : float vote_fraction,
2481 : ulong bank_tile,
2482 : int schedule_flags,
2483 752913 : fd_txn_p_t * out ) {
2484 :
2485 : /* TODO: Decide if these are exactly how we want to handle limits */
2486 752913 : total_cus = fd_ulong_min( total_cus, pack->lim->max_cost_per_block - pack->cumulative_block_cost );
2487 752913 : ulong vote_cus = fd_ulong_min( (ulong)((float)total_cus * vote_fraction),
2488 752913 : pack->lim->max_vote_cost_per_block - pack->cumulative_vote_cost );
2489 752913 : ulong vote_reserved_txns = fd_ulong_min( vote_cus/FD_PACK_SIMPLE_VOTE_COST,
2490 752913 : (ulong)((float)pack->lim->max_txn_per_microblock * vote_fraction) );
2491 :
2492 :
2493 752913 : if( FD_UNLIKELY( (pack->microblock_cnt>=pack->lim->max_microblocks_per_block) ) ) {
2494 0 : FD_MCNT_INC( PACK, MICROBLOCK_PER_BLOCK_LIMIT, 1UL );
2495 0 : return 0UL;
2496 0 : }
2497 752913 : if( FD_UNLIKELY( pack->data_bytes_consumed+MICROBLOCK_DATA_OVERHEAD+FD_TXN_MIN_SERIALIZED_SZ>pack->lim->max_data_bytes_per_block) ) {
2498 0 : FD_MCNT_INC( PACK, DATA_PER_BLOCK_LIMIT, 1UL );
2499 0 : return 0UL;
2500 0 : }
2501 :
2502 752913 : ulong * use_by_bank_txn = pack->use_by_bank_txn[ bank_tile ];
2503 :
2504 752913 : ulong cu_limit = total_cus - vote_cus;
2505 752913 : ulong txn_limit = pack->lim->max_txn_per_microblock - vote_reserved_txns;
2506 752913 : ulong scheduled = 0UL;
2507 752913 : ulong byte_limit = pack->lim->max_data_bytes_per_block - pack->data_bytes_consumed - MICROBLOCK_DATA_OVERHEAD;
2508 :
2509 752913 : sched_return_t status = {0}, status1 = {0};
2510 :
2511 752913 : if( FD_LIKELY( schedule_flags & FD_PACK_SCHEDULE_VOTE ) ) {
2512 : /* Schedule vote transactions */
2513 752814 : status1= fd_pack_schedule_impl( pack, pack->pending_votes, vote_cus, vote_reserved_txns, byte_limit, bank_tile, pack->pending_votes_smallest, use_by_bank_txn, out+scheduled );
2514 :
2515 752814 : scheduled += status1.txns_scheduled;
2516 752814 : pack->cumulative_vote_cost += status1.cus_scheduled;
2517 752814 : pack->cumulative_block_cost += status1.cus_scheduled;
2518 752814 : pack->data_bytes_consumed += status1.bytes_scheduled;
2519 752814 : byte_limit -= status1.bytes_scheduled;
2520 752814 : use_by_bank_txn += status1.txns_scheduled;
2521 : /* Add any remaining CUs/txns to the non-vote limits */
2522 752814 : txn_limit += vote_reserved_txns - status1.txns_scheduled;
2523 752814 : cu_limit += vote_cus - status1.cus_scheduled;
2524 752814 : }
2525 :
2526 :   /* Bundles can't mix with votes, so only try to schedule a bundle if we
2527 : didn't get any votes. */
2528 752913 : if( FD_UNLIKELY( !!(schedule_flags & FD_PACK_SCHEDULE_BUNDLE) & (status1.txns_scheduled==0UL) ) ) {
2529 752694 : int bundle_result = fd_pack_try_schedule_bundle( pack, bank_tile, out );
2530 752694 : if( FD_UNLIKELY( bundle_result>0 ) ) return (ulong)bundle_result;
2531 752688 : if( FD_UNLIKELY( bundle_result==TRY_BUNDLE_HAS_CONFLICTS ) ) return 0UL;
2532 : /* in the NO_READY_BUNDLES or DOES_NOT_FIT case, we schedule like
2533 : normal. */
2534 : /* We have the early returns here because try_schedule_bundle does
2535 :        the bookkeeping internally, since the calculations are a bit
2536 : different in that case. */
2537 752688 : }
2538 :
2539 :
2540 : /* Fill any remaining space with non-vote transactions */
2541 752907 : if( FD_LIKELY( schedule_flags & FD_PACK_SCHEDULE_TXN ) ) {
2542 752907 : status = fd_pack_schedule_impl( pack, pack->pending, cu_limit, txn_limit, byte_limit, bank_tile, pack->pending_smallest, use_by_bank_txn, out+scheduled );
2543 :
2544 752907 : scheduled += status.txns_scheduled;
2545 752907 : pack->cumulative_block_cost += status.cus_scheduled;
2546 752907 : pack->data_bytes_consumed += status.bytes_scheduled;
2547 752907 : }
2548 :
2549 752907 : ulong nonempty = (ulong)(scheduled>0UL);
2550 752907 : pack->microblock_cnt += nonempty;
2551 752907 : pack->outstanding_microblock_mask |= nonempty << bank_tile;
2552 752907 : pack->data_bytes_consumed += nonempty * MICROBLOCK_DATA_OVERHEAD;
2553 :
2554 : /* Update metrics counters */
2555 752907 : fd_pack_metrics_write( pack );
2556 752907 : FD_MGAUGE_SET( PACK, CUS_CONSUMED_IN_BLOCK, pack->cumulative_block_cost );
2557 :
2558 752907 : fd_histf_sample( pack->txn_per_microblock, scheduled );
2559 752907 : fd_histf_sample( pack->vote_per_microblock, status1.txns_scheduled );
2560 :
2561 250969 : #if FD_HAS_AVX512 && FD_PACK_USE_NON_TEMPORAL_MEMCPY
2562 250969 : _mm_sfence();
2563 250969 : #endif
2564 :
2565 752907 : return scheduled;
2566 752913 : }
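/* Hedged usage sketch (editorial; the driver variables and the buffer size
   are assumptions, not part of this file).  A bank tile that has finished
   its previous microblock might request the next one with all three
   schedule flags set:

     fd_txn_p_t out[ MAX_TXN_PER_MICROBLOCK ];   // hypothetical constant, >= pack->lim->max_txn_per_microblock
     ulong n = fd_pack_schedule_next_microblock( pack, remaining_cus, 0.25f, bank_tile,
                                                 FD_PACK_SCHEDULE_VOTE | FD_PACK_SCHEDULE_BUNDLE | FD_PACK_SCHEDULE_TXN,
                                                 out );

   n transactions (possibly zero) are written to out, and pack's block-level
   accounting is updated to include them. */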
2567 :
2568 274530 : ulong fd_pack_bank_tile_cnt ( fd_pack_t const * pack ) { return pack->bank_tile_cnt; }
2569 0 : ulong fd_pack_current_block_cost( fd_pack_t const * pack ) { return pack->cumulative_block_cost; }
2570 :
2571 :
2572 : void
2573 0 : fd_pack_set_block_limits( fd_pack_t * pack, fd_pack_limits_t const * limits ) {
2574 0 : FD_TEST( limits->max_cost_per_block >= FD_PACK_MAX_COST_PER_BLOCK_LOWER_BOUND );
2575 0 : FD_TEST( limits->max_vote_cost_per_block >= FD_PACK_MAX_VOTE_COST_PER_BLOCK_LOWER_BOUND );
2576 0 : FD_TEST( limits->max_write_cost_per_acct >= FD_PACK_MAX_WRITE_COST_PER_ACCT_LOWER_BOUND );
2577 :
2578 0 : pack->lim->max_microblocks_per_block = limits->max_microblocks_per_block;
2579 0 : pack->lim->max_data_bytes_per_block = limits->max_data_bytes_per_block;
2580 0 : pack->lim->max_cost_per_block = limits->max_cost_per_block;
2581 0 : pack->lim->max_vote_cost_per_block = limits->max_vote_cost_per_block;
2582 0 : pack->lim->max_write_cost_per_acct = limits->max_write_cost_per_acct;
2583 0 : }
2584 :
2585 : void
2586 0 : fd_pack_get_block_limits( fd_pack_t * pack, fd_pack_limits_usage_t * opt_limits_usage, fd_pack_limits_t * opt_limits ) {
2587 0 : if( FD_LIKELY( opt_limits_usage ) ) {
2588 0 : opt_limits_usage->block_cost = pack->cumulative_block_cost;
2589 0 : opt_limits_usage->vote_cost = pack->cumulative_vote_cost;
2590 0 : opt_limits_usage->block_data_bytes = pack->data_bytes_consumed;
2591 0 : opt_limits_usage->microblocks = pack->microblock_cnt;
2592 0 : }
2593 0 : if( FD_LIKELY( opt_limits ) ) fd_memcpy( opt_limits, pack->lim, sizeof(fd_pack_limits_t) );
2594 0 : }
2595 :
2596 : void
2597 0 : fd_pack_get_top_writers( fd_pack_t const * pack, fd_pack_addr_use_t top_writers[static FD_PACK_TOP_WRITERS_CNT] ) {
2598 0 : fd_memcpy( top_writers, pack->top_writers, sizeof(pack->top_writers) );
2599 0 : }
2600 :
2601 : void
2602 0 : fd_pack_get_pending_smallest( fd_pack_t * pack, fd_pack_smallest_t * opt_pending_smallest, fd_pack_smallest_t * opt_votes_smallest ) {
2603 0 : if( FD_LIKELY( opt_pending_smallest ) ) fd_memcpy( opt_pending_smallest, pack->pending_smallest, sizeof(fd_pack_smallest_t) );
2604 0 : if( FD_LIKELY( opt_votes_smallest ) ) fd_memcpy( opt_votes_smallest, pack->pending_votes_smallest, sizeof(fd_pack_smallest_t) );
2605 0 : }
2606 :
2607 : void
2608 : fd_pack_rebate_cus( fd_pack_t * pack,
2609 6 : fd_pack_rebate_t const * rebate ) {
2610 6 : if( FD_UNLIKELY( (rebate->ib_result!=0) & (pack->initializer_bundle_state==FD_PACK_IB_STATE_PENDING ) ) ) {
2611 0 : pack->initializer_bundle_state = fd_int_if( rebate->ib_result==1, FD_PACK_IB_STATE_READY, FD_PACK_IB_STATE_FAILED );
2612 0 : }
2613 :
2614 6 : pack->cumulative_block_cost -= rebate->total_cost_rebate;
2615 6 : pack->cumulative_vote_cost -= rebate->vote_cost_rebate;
2616 6 : pack->data_bytes_consumed -= rebate->data_bytes_rebate;
2617 6 : pack->cumulative_rebated_cus += rebate->total_cost_rebate;
2618 : /* For now, we want to ignore the microblock count rebate. There are
2619 : 3 places the microblock count is kept (here, in the pack tile, and
2620 : in the PoH tile), and they all need to count microblocks that end
2621 : up being empty in the same way. It would be better from a
2622 : DoS-resistance perspective for them all not to count empty
2623 : microblocks towards the total, but there's a race condition:
2624 : suppose pack schedules a microblock containing one transaction that
2625 : doesn't land on chain, the slot ends, and then pack informs PoH of
2626 : the number of microblocks before the final rebate comes through.
2627 : This isn't unsolvable, but it's pretty gross, so it's probably
2628 : better to just not apply the rebate for now. */
2629 6 : (void)rebate->microblock_cnt_rebate;
2630 :
2631 6 : fd_pack_addr_use_t * writer_costs = pack->writer_costs;
2632 18 : for( ulong i=0UL; i<rebate->writer_cnt; i++ ) {
2633 12 : fd_pack_addr_use_t * in_wcost_table = acct_uses_query( writer_costs, rebate->writer_rebates[i].key, NULL );
2634 12 : if( FD_UNLIKELY( !in_wcost_table ) ) FD_LOG_ERR(( "Rebate to unknown written account" ));
2635 12 : in_wcost_table->total_cost -= rebate->writer_rebates[i].rebate_cus;
2636 : /* Important: Even if this is 0, don't delete it from the table so
2637 : that the insert order doesn't get messed up. */
2638 12 : }
2639 6 : }
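/* Editorial note, inferred from the branch at the top of fd_pack_rebate_cus:
   a non-zero ib_result only matters while an initializer bundle is PENDING;
   ib_result==1 moves the state to READY (normal bundles may then be
   scheduled) and any other non-zero value moves it to FAILED. */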
2640 :
2641 :
2642 : ulong
2643 : fd_pack_expire_before( fd_pack_t * pack,
2644 15 : ulong expire_before ) {
2645 15 : expire_before = fd_ulong_max( expire_before, pack->expire_before );
2646 15 : ulong deleted_cnt = 0UL;
2647 15 : fd_pack_expq_t * prq = pack->expiration_q;
2648 327 : while( (expq_cnt( prq )>0UL) & (prq->expires_at<expire_before) ) {
2649 312 : fd_pack_ord_txn_t * expired = prq->txn;
2650 :
2651 : /* fd_pack_delete_transaction also removes it from the heap */
2652 :     /* delete_transaction also removes it from the expiration heap */
2653 : time, so this loop will end up deleting them all, even with
2654 : delete_full_bundle set to 0. */
2655 312 : ulong _delete_cnt = delete_transaction( pack, expired, 0, 1 );
2656 312 : deleted_cnt += _delete_cnt;
2657 312 : FD_TEST( _delete_cnt );
2658 312 : }
2659 :
2660 15 : pack->expire_before = expire_before;
2661 15 : return deleted_cnt;
2662 15 : }
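/* Editorial usage sketch (caller-side names are assumptions): a caller
   might advance the expiration horizon during housekeeping, e.g.

     ulong dropped = fd_pack_expire_before( pack, expiration_floor );

   which deletes every resident transaction whose expires_at is strictly
   below the floor and returns how many were deleted.  Passing a smaller
   value than a previous call is harmless: the floor is clamped so it never
   moves backwards. */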
2663 :
2664 : void
2665 2646 : fd_pack_end_block( fd_pack_t * pack ) {
2666 : /* rounded division */
2667 2646 : ulong pct_cus_per_block = (pack->cumulative_block_cost*100UL + (pack->lim->max_cost_per_block>>1))/pack->lim->max_cost_per_block;
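  /* e.g. (editorial): with cumulative_block_cost = 30,000,000 and
     max_cost_per_block = 48,000,000 this computes
     (3,000,000,000 + 24,000,000) / 48,000,000 = 63, i.e. 62.5% rounded to
     the nearest whole percent. */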
2668 2646 : fd_histf_sample( pack->pct_cus_per_block, pct_cus_per_block );
2669 2646 : fd_histf_sample( pack->net_cus_per_block, pack->cumulative_block_cost );
2670 2646 : fd_histf_sample( pack->rebated_cus_per_block, pack->cumulative_rebated_cus );
2671 2646 : fd_histf_sample( pack->scheduled_cus_per_block, pack->cumulative_rebated_cus + pack->cumulative_block_cost );
2672 :
2673 2646 : pack->microblock_cnt = 0UL;
2674 2646 : pack->data_bytes_consumed = 0UL;
2675 2646 : pack->cumulative_block_cost = 0UL;
2676 2646 : pack->cumulative_vote_cost = 0UL;
2677 2646 : pack->cumulative_rebated_cus = 0UL;
2678 2646 : pack->outstanding_microblock_mask = 0UL;
2679 :
2680 2646 : pack->initializer_bundle_state = FD_PACK_IB_STATE_NOT_INITIALIZED;
2681 :
2682 2646 : acct_uses_clear( pack->acct_in_use );
2683 2646 : memset( pack->top_writers, 0, sizeof(pack->top_writers) );
2684 :
2685 2646 : if( FD_LIKELY( pack->written_list_cnt<pack->written_list_max-1UL ) ) {
2686 : /* The less dangerous way of doing this is to instead record the
2687 : keys we inserted and do a query followed by a delete for each
2688 : key. The downside of that is that keys are 32 bytes and a
2689 : pointer is only 8 bytes, plus the computational cost for the
2690 : query.
2691 :
2692 : However, if we're careful, we can pull this off. We require two
2693 : things. First, we started from an empty map and did nothing but
2694 : insert and update. In particular, no deletions. Second, we have
2695 : to be careful to delete in the opposite order that we inserted.
2696 : This is essentially like unwinding the inserts we did. The
2697 : common case is that the element after the one we delete will be
2698 : empty, so we'll hit that case. It's possible that there's
2699 : another independent probe sequence that will be entirely intact
2700 : starting in the element after, but we'll never hit the MAP_MOVE
2701 : case. */
2702 779283 : for( ulong i=0UL; i<pack->written_list_cnt; i++ ) {
2703 776637 : fd_pack_addr_use_t * writer = pack->written_list[ pack->written_list_cnt - 1UL - i ];
2704 : /* build a small max heap with the top writer costs */
2705 776637 : if( FD_UNLIKELY( !fd_pack_unwritable_contains( &writer->key ) && !FD_PACK_TOP_WRITERS_SORT_BEFORE( pack->top_writers[ FD_PACK_TOP_WRITERS_CNT-1UL ], (*writer) ) ) ) {
2706 776607 : pack->top_writers[ FD_PACK_TOP_WRITERS_CNT-1UL ] = *writer;
2707 776607 : fd_pack_writer_cost_sort_insert( pack->top_writers, FD_PACK_TOP_WRITERS_CNT );
2708 776607 : }
2709 :
2710 : /* Clearing the cost field here is unnecessary (since it gets
2711 : cleared on insert), but makes debugging a bit easier. */
2712 776637 : writer->total_cost = 0UL;
2713 776637 : acct_uses_remove( pack->writer_costs, writer );
2714 776637 : }
2715 2646 : } else {
2716 0 : acct_uses_clear( pack->writer_costs );
2717 0 : }
2718 2646 : pack->written_list_cnt = 0UL;
2719 :
2720 :   /* compressed_slot_number is always > FD_PACK_SKIP_CNT, so +1 is the max
2721 :      unless it wraps the ushort, in which case we reset to FD_PACK_SKIP_CNT+1. */
2722 2646 : pack->compressed_slot_number = fd_ushort_max( (ushort)(pack->compressed_slot_number+1), (ushort)(FD_PACK_SKIP_CNT+1) );
2723 :
2724 2646 : FD_PACK_BITSET_CLEAR( pack->bitset_rw_in_use );
2725 2646 : FD_PACK_BITSET_CLEAR( pack->bitset_w_in_use );
2726 :
2727 9234 : for( ulong i=0UL; i<pack->bank_tile_cnt; i++ ) pack->use_by_bank_cnt[i] = 0UL;
2728 :
2729 : /* If our stake is low and we don't become leader often, end_block
2730 :      might get called only about once an hour, which feels too
2731 : infrequent to do anything related to metrics. However, we only
2732 : update the histograms when we are leader, so this is actually a
2733 : good place to copy them. */
2734 2646 : FD_MHIST_COPY( PACK, TOTAL_TRANSACTIONS_PER_MICROBLOCK_COUNT, pack->txn_per_microblock );
2735 2646 : FD_MHIST_COPY( PACK, VOTES_PER_MICROBLOCK_COUNT, pack->vote_per_microblock );
2736 :
2737 2646 : FD_MGAUGE_SET( PACK, CUS_CONSUMED_IN_BLOCK, 0UL );
2738 2646 : FD_MHIST_COPY( PACK, CUS_SCHEDULED, pack->scheduled_cus_per_block );
2739 2646 : FD_MHIST_COPY( PACK, CUS_REBATED, pack->rebated_cus_per_block );
2740 2646 : FD_MHIST_COPY( PACK, CUS_NET, pack->net_cus_per_block );
2741 2646 : FD_MHIST_COPY( PACK, CUS_PCT, pack->pct_cus_per_block );
2742 2646 : }
2743 :
2744 : static void
2745 : release_tree( treap_t * treap,
2746 : sig2txn_t * signature_map,
2747 : noncemap_t * noncemap,
2748 9 : fd_pack_ord_txn_t * pool ) {
2749 9 : treap_fwd_iter_t next;
2750 18 : for( treap_fwd_iter_t it=treap_fwd_iter_init( treap, pool ); !treap_fwd_iter_done( it ); it=next ) {
2751 9 : next = treap_fwd_iter_next( it, pool );
2752 9 : ulong idx = treap_fwd_iter_idx( it );
2753 9 : pool[ idx ].root = FD_ORD_TXN_ROOT_FREE;
2754 9 : treap_idx_remove ( treap, idx, pool );
2755 9 : sig2txn_idx_remove_fast( signature_map, idx, pool );
2756 9 : trp_pool_idx_release ( pool, idx );
2757 9 : if( pool[ idx ].txn->flags & FD_TXN_P_FLAGS_DURABLE_NONCE ) {
2758 9 : noncemap_idx_remove_fast( noncemap, idx, pool );
2759 9 : }
2760 9 : }
2761 9 : }
2762 :
2763 : void
2764 3 : fd_pack_clear_all( fd_pack_t * pack ) {
2765 3 : pack->pending_txn_cnt = 0UL;
2766 3 : pack->microblock_cnt = 0UL;
2767 3 : pack->cumulative_block_cost = 0UL;
2768 3 : pack->cumulative_vote_cost = 0UL;
2769 3 : pack->cumulative_rebated_cus = 0UL;
2770 :
2771 3 : pack->pending_smallest->cus = ULONG_MAX;
2772 3 : pack->pending_smallest->bytes = ULONG_MAX;
2773 3 : pack->pending_votes_smallest->cus = ULONG_MAX;
2774 3 : pack->pending_votes_smallest->bytes = ULONG_MAX;
2775 :
2776 3 : release_tree( pack->pending, pack->signature_map, pack->noncemap, pack->pool );
2777 3 : release_tree( pack->pending_votes, pack->signature_map, pack->noncemap, pack->pool );
2778 3 : release_tree( pack->pending_bundles, pack->signature_map, pack->noncemap, pack->pool );
2779 :
2780 3 : ulong const pool_max = trp_pool_max( pack->pool );
2781 132 : for( ulong i=0UL; i<pool_max; i++ ) {
2782 129 : if( FD_UNLIKELY( pack->pool[ i ].root!=FD_ORD_TXN_ROOT_FREE ) ) {
2783 0 : fd_pack_ord_txn_t * const del = pack->pool + i;
2784 0 : fd_txn_t * txn = TXN( del->txn );
2785 0 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, del->txn->payload );
2786 0 : fd_acct_addr_t const * alt_adj = del->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
2787 0 : fd_acct_addr_t penalty_acct = *ACCT_IDX_TO_PTR( FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( del->root ) );
2788 0 : fd_pack_penalty_treap_t * penalty_treap = penalty_map_query( pack->penalty_treaps, penalty_acct, NULL );
2789 0 : FD_TEST( penalty_treap );
2790 0 : release_tree( penalty_treap->penalty_treap, pack->signature_map, pack->noncemap, pack->pool );
2791 0 : }
2792 129 : }
2793 :
2794 3 : pack->compressed_slot_number = (ushort)(FD_PACK_SKIP_CNT+1);
2795 :
2796 3 : expq_remove_all( pack->expiration_q );
2797 :
2798 3 : acct_uses_clear( pack->acct_in_use );
2799 3 : acct_uses_clear( pack->writer_costs );
2800 :
2801 3 : penalty_map_clear( pack->penalty_treaps );
2802 :
2803 3 : FD_PACK_BITSET_CLEAR( pack->bitset_rw_in_use );
2804 3 : FD_PACK_BITSET_CLEAR( pack->bitset_w_in_use );
2805 3 : bitset_map_clear( pack->acct_to_bitset );
2806 3 : pack->bitset_avail[ 0 ] = FD_PACK_BITSET_SLOWPATH;
2807 1027 : for( ulong i=0UL; i<FD_PACK_BITSET_MAX; i++ ) pack->bitset_avail[ i+1UL ] = (ushort)i;
2808 3 : pack->bitset_avail_cnt = FD_PACK_BITSET_MAX;
2809 :
2810 6 : for( ulong i=0UL; i<pack->bank_tile_cnt; i++ ) pack->use_by_bank_cnt[i] = 0UL;
2811 3 : }
2812 :
2813 :
2814 : /* If delete_full_bundle is non-zero and the transaction to delete is
2815 : part of a bundle, the rest of the bundle it is part of will be
2816 : deleted as well.
2817 : If move_from_penalty_treap is non-zero and the transaction to delete
2818 : is in the pending treap, move the best transaction in any of the
2819 : conflicting penalty treaps to the pending treap (if there is one). */
2820 : static ulong
2821 : delete_transaction( fd_pack_t * pack,
2822 : fd_pack_ord_txn_t * containing,
2823 : int delete_full_bundle,
2824 495471 : int move_from_penalty_treap ) {
2825 :
2826 495471 : fd_txn_t * txn = TXN( containing->txn );
2827 495471 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, containing->txn->payload );
2828 495471 : fd_acct_addr_t const * alt_adj = containing->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
2829 :
2830 495471 : treap_t * root = NULL;
2831 495471 : int root_idx = containing->root;
2832 495471 : fd_pack_penalty_treap_t * penalty_treap = NULL;
2833 495471 : switch( root_idx & FD_ORD_TXN_ROOT_TAG_MASK ) {
2834 0 : case FD_ORD_TXN_ROOT_FREE: FD_LOG_CRIT(( "Double free detected" ));
2835 492291 : case FD_ORD_TXN_ROOT_PENDING: root = pack->pending; break;
2836 0 : case FD_ORD_TXN_ROOT_PENDING_VOTE: root = pack->pending_votes; break;
2837 519 : case FD_ORD_TXN_ROOT_PENDING_BUNDLE: root = pack->pending_bundles; break;
2838 2661 : case FD_ORD_TXN_ROOT_PENALTY( 0 ): {
2839 2661 : fd_acct_addr_t penalty_acct = *ACCT_IDX_TO_PTR( FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( root_idx ) );
2840 2661 : penalty_treap = penalty_map_query( pack->penalty_treaps, penalty_acct, NULL );
2841 2661 : FD_TEST( penalty_treap );
2842 2661 : root = penalty_treap->penalty_treap;
2843 2661 : break;
2844 2661 : }
2845 495471 : }
2846 :
2847 495471 : ulong delete_cnt = 0UL;
2848 495471 : if( FD_UNLIKELY( delete_full_bundle & (root==pack->pending_bundles) ) ) {
2849 : /* When we delete, the structure of the treap may move around, but
2850 :        pointers into the pool will remain valid */
2851 123 : fd_pack_ord_txn_t * bundle_ptrs[ FD_PACK_MAX_TXN_PER_BUNDLE-1UL ];
2852 123 : fd_pack_ord_txn_t * pool = pack->pool;
2853 123 : ulong cnt = 0UL;
2854 123 : ulong bundle_idx = RC_TO_REL_BUNDLE_IDX( containing->rewards, containing->compute_est );
2855 :
2856 : /* Iterate in both directions from the current transaction */
2857 123 : for( treap_fwd_iter_t _cur=treap_fwd_iter_next( (treap_fwd_iter_t)treap_idx_fast( containing, pool ), pool );
2858 426 : !treap_fwd_iter_done( _cur ); _cur=treap_fwd_iter_next( _cur, pool ) ) {
2859 303 : fd_pack_ord_txn_t * cur = treap_fwd_iter_ele( _cur, pool );
2860 303 : if( FD_LIKELY( bundle_idx==RC_TO_REL_BUNDLE_IDX( cur->rewards, cur->compute_est ) ) ) {
2861 303 : bundle_ptrs[ cnt++ ] = cur;
2862 303 : } else {
2863 0 : break;
2864 0 : }
2865 303 : FD_TEST( cnt<FD_PACK_MAX_TXN_PER_BUNDLE );
2866 303 : }
2867 :
2868 123 : for( treap_rev_iter_t _cur=treap_rev_iter_next( (treap_rev_iter_t)treap_idx_fast( containing, pool ), pool );
2869 216 : !treap_rev_iter_done( _cur ); _cur=treap_rev_iter_next( _cur, pool ) ) {
2870 93 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pool );
2871 93 : if( FD_LIKELY( bundle_idx==RC_TO_REL_BUNDLE_IDX( cur->rewards, cur->compute_est ) ) ) {
2872 93 : bundle_ptrs[ cnt++ ] = cur;
2873 93 : } else {
2874 0 : break;
2875 0 : }
2876 93 : FD_TEST( cnt<FD_PACK_MAX_TXN_PER_BUNDLE );
2877 93 : }
2878 :
2879 : /* Delete them each, setting delete_full_bundle to 0 to avoid
2880 : infinite recursion. */
2881 519 : for( ulong k=0UL; k<cnt; k++ ) delete_cnt += delete_transaction( pack, bundle_ptrs[ k ], 0, 0 );
2882 123 : }
2883 :
2884 :
2885 495471 : if( FD_UNLIKELY( move_from_penalty_treap & (root==pack->pending) ) ) {
2886 :
2887 492285 : fd_pack_ord_txn_t * best = NULL;
2888 492285 : fd_pack_penalty_treap_t * best_penalty = NULL;
2889 :
2890 492285 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
2891 986394 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2892 494109 : fd_pack_penalty_treap_t * p_trp = penalty_map_query( pack->penalty_treaps, *ACCT_ITER_TO_PTR( iter ), NULL );
2893 494109 : if( FD_UNLIKELY( p_trp ) ) {
2894 1289 : fd_pack_ord_txn_t * best_in_trp = treap_rev_iter_ele( treap_rev_iter_init( p_trp->penalty_treap, pack->pool ), pack->pool );
2895 1289 : if( FD_UNLIKELY( !best || COMPARE_WORSE( best, best_in_trp ) ) ) {
2896 672 : best = best_in_trp;
2897 672 : best_penalty = p_trp;
2898 672 : }
2899 1289 : }
2900 494109 : }
2901 :
2902 492285 : if( FD_LIKELY( best ) ) {
2903 : /* move best to the main treap */
2904 672 : treap_ele_remove( best_penalty->penalty_treap, best, pack->pool );
2905 672 : best->root = FD_ORD_TXN_ROOT_PENDING;
2906 672 : treap_ele_insert( pack->pending, best, pack->pool );
2907 :
2908 672 : pack->pending_smallest->cus = fd_ulong_min( pack->pending_smallest->cus, best->compute_est );
2909 672 : pack->pending_smallest->bytes = fd_ulong_min( pack->pending_smallest->bytes, best->txn_e->txnp->payload_sz );
2910 :
2911 672 : if( FD_UNLIKELY( !treap_ele_cnt( best_penalty->penalty_treap ) ) ) {
2912 9 : treap_delete( treap_leave( best_penalty->penalty_treap ) );
2913 9 : penalty_map_remove( pack->penalty_treaps, best_penalty );
2914 9 : }
2915 672 : }
2916 492285 : }
2917 :
2918 495471 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_ALL );
2919 2004108 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2920 1508637 : if( FD_UNLIKELY( fd_pack_unwritable_contains( ACCT_ITER_TO_PTR( iter ) ) ) ) continue;
2921 :
2922 1012098 : release_result_t ret = release_bit_reference( pack, ACCT_ITER_TO_PTR( iter ) );
2923 1012098 : FD_PACK_BITSET_CLEARN( pack->bitset_rw_in_use, ret.clear_rw_bit );
2924 1012098 : FD_PACK_BITSET_CLEARN( pack->bitset_w_in_use, ret.clear_w_bit );
2925 1012098 : }
2926 :
2927 495471 : if( FD_UNLIKELY( containing->txn->flags & FD_TXN_P_FLAGS_DURABLE_NONCE ) ) {
2928 261 : noncemap_ele_remove_fast( pack->noncemap, containing, pack->pool );
2929 261 : }
2930 495471 : expq_remove( pack->expiration_q, containing->expq_idx );
2931 495471 : containing->root = FD_ORD_TXN_ROOT_FREE;
2932 495471 : treap_ele_remove( root, containing, pack->pool );
2933 495471 : sig2txn_ele_remove_fast( pack->signature_map, containing, pack->pool );
2934 495471 : trp_pool_ele_release( pack->pool, containing );
2935 :
2936 495471 : delete_cnt += 1UL;
2937 495471 : pack->pending_txn_cnt--;
2938 :
2939 495471 : if( FD_UNLIKELY( penalty_treap && treap_ele_cnt( root )==0UL ) ) {
2940 0 : penalty_map_remove( pack->penalty_treaps, penalty_treap );
2941 0 : }
2942 :
2943 495471 : return delete_cnt;
2944 495471 : }
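/* Editorial note: within this file, fd_pack_expire_before calls
   delete_transaction( pack, expired, 0, 1 ), since every transaction in a
   bundle shares an expiration time and will be deleted by the same sweep,
   while fd_pack_delete_transaction below calls
   delete_transaction( pack, pack->pool+idx, 1, 1 ) so that deleting one
   transaction of a bundle removes the whole bundle. */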
2945 :
2946 : ulong
2947 : fd_pack_delete_transaction( fd_pack_t * pack,
2948 180 : fd_ed25519_sig_t const * sig0 ) {
2949 180 : ulong cnt = 0;
2950 180 : ulong next = ULONG_MAX;
2951 180 : for( ulong idx = sig2txn_idx_query_const( pack->signature_map, (wrapped_sig_t const *)sig0, ULONG_MAX, pack->pool );
2952 336 : idx!=ULONG_MAX; idx=next ) {
2953 :     /* Deleting while iterating (and deleting not just this element but
2954 :        perhaps its whole bundle) feels a bit dangerous, but is actually
2955 :        fine because a bundle can't contain two transactions with the same
2956 :        signature.  That means next is not part of the same bundle as idx,
2957 :        so deleting idx will not delete next. */
2958 156 : next = sig2txn_idx_next_const( idx, ULONG_MAX, pack->pool );
2959 156 : cnt += delete_transaction( pack, pack->pool+idx, 1, 1 );
2960 156 : }
2961 :
2962 180 : return cnt;
2963 180 : }
2964 :
2965 :
2966 : int
2967 : fd_pack_verify( fd_pack_t * pack,
2968 438 : void * scratch ) {
2969 : /* Invariants:
2970 : sig2txn_query has exact same contents as all treaps combined
2971 : root matches treap
2972 :        Keys of acct_to_bitset are exactly the union of all accounts in all
2973 :        transactions in treaps, with ref counts maintained appropriately
2974 :        bits in bitset_avail are the complement of bits allocated in
2975 : acct_to_bitset
2976 : expires_at consistent between treap, prq
2977 : use_by_bank does not contain duplicates
2978 : use_by_bank consistent with acct_in_use
2979 : elements in pool but not in a treap have root set to free
2980 : all penalty treaps have at least one transaction
2981 : all elements in penalty treaps are in the one that the root indicates
2982 : */
2983 :
2984 : /* TODO:
2985 : bitset_{r}w_in_use = bitset_map_query( everything in acct_in_use that doesn't have FD_PACK_IN_USE_BIT_CLEARED )
2986 : bitset_w_in_use & bitset_rw_in_use == bitset_w_in_use
2987 : */
2988 316762 : #define VERIFY_TEST( cond, ... ) do { \
2989 316762 : if( FD_UNLIKELY( !(cond) ) ) { \
2990 0 : FD_LOG_WARNING(( __VA_ARGS__ )); \
2991 0 : return -(__LINE__); \
2992 0 : } \
2993 316762 : } while( 0 )
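/* Editorial note: a failed invariant returns -(__LINE__), so a caller can
   tell which check tripped.  A hedged usage sketch (names are assumptions):

     int err = fd_pack_verify( pack, verify_scratch );
     if( FD_UNLIKELY( err ) ) FD_LOG_ERR(( "fd_pack_verify failed (code %i)", err ));

   where verify_scratch must be large enough for the temporary copies of the
   acct_to_bitset and acct_in_use tables made below. */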
2994 :
2995 438 : ulong max_acct_in_treap = pack->pack_depth * FD_TXN_ACCT_ADDR_MAX;
2996 438 : int lg_acct_in_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap ) );
2997 438 : void * _bitset_map_copy = scratch;
2998 438 : void * _bitset_map_orig = bitset_map_leave( pack->acct_to_bitset );
2999 438 : fd_memcpy( _bitset_map_copy, _bitset_map_orig, bitset_map_footprint( lg_acct_in_trp ) );
3000 :
3001 438 : fd_pack_bitset_acct_mapping_t * bitset_copy = bitset_map_join( _bitset_map_copy );
3002 :
3003 : /* Check that each bit is in exactly one place */
3004 438 : FD_PACK_BITSET_DECLARE( processed ); FD_PACK_BITSET_CLEAR( processed );
3005 438 : FD_PACK_BITSET_DECLARE( bit ); FD_PACK_BITSET_CLEAR( bit );
3006 438 : FD_PACK_BITSET_DECLARE( full ); FD_PACK_BITSET_CLEAR( full );
3007 :
3008 438 : if( FD_UNLIKELY( pack->bitset_avail[0]!=FD_PACK_BITSET_SLOWPATH ) ) return -1;
3009 149264 : for( ulong i=1UL; i<=pack->bitset_avail_cnt; i++ ) {
3010 148826 : FD_PACK_BITSET_CLEAR( bit );
3011 148826 : FD_PACK_BITSET_SETN( bit, pack->bitset_avail[ i ] );
3012 148826 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, processed, processed ),
3013 148826 : "bit %hu in avail set twice", pack->bitset_avail[ i ] );
3014 148826 : FD_PACK_BITSET_OR( processed, bit );
3015 148826 : }
3016 :
3017 438 : ulong total_references = 0UL;
3018 1589281206 : for( ulong i=0UL; i<bitset_map_slot_cnt( bitset_copy ); i++ ) {
3019 1589280768 : if( !bitset_map_key_inval( bitset_copy[ i ].key ) ) {
3020 1080 : VERIFY_TEST( bitset_copy[ i ].ref_cnt>0UL, "account address in table with 0 ref count" );
3021 :
3022 1080 : total_references += bitset_copy[ i ].ref_cnt;
3023 :
3024 1080 : FD_PACK_BITSET_CLEAR( bit );
3025 1080 : FD_PACK_BITSET_SETN( bit, bitset_copy[ i ].bit );
3026 1080 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, processed, processed ), "bit %hu used twice", bitset_copy[ i ].bit );
3027 1080 : FD_PACK_BITSET_OR( processed, bit );
3028 1080 : }
3029 1589280768 : }
3030 149942 : for( ulong i=0UL; i<FD_PACK_BITSET_MAX; i++ ) {
3031 149504 : FD_PACK_BITSET_CLEAR( bit );
3032 149504 : FD_PACK_BITSET_SETN( bit, i );
3033 149504 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, processed, processed ), "bit %lu missing", i );
3034 149504 : FD_PACK_BITSET_SETN( full, i );
3035 149504 : }
3036 :
3037 :
3038 438 : fd_pack_ord_txn_t * pool = pack->pool;
3039 438 : treap_t * treaps[ 3 ] = { pack->pending, pack->pending_votes, pack->pending_bundles };
3040 438 : ulong txn_cnt = 0UL;
3041 :
3042 24834264 : for( ulong k=0UL; k<3UL+penalty_map_slot_cnt( pack->penalty_treaps ); k++ ) {
3043 24833826 : treap_t * treap = NULL;
3044 :
3045 24833826 : if( k<3UL ) treap = treaps[ k ];
3046 24832512 : else if( FD_LIKELY( penalty_map_key_inval( pack->penalty_treaps[ k-3UL ].key ) ) ) continue;
3047 0 : else {
3048 0 : treap = pack->penalty_treaps[ k-3UL ].penalty_treap;
3049 0 : VERIFY_TEST( treap_ele_cnt( treap )>0UL, "empty penalty treap in map" );
3050 0 : }
3051 :
3052 1737 : for( treap_rev_iter_t _cur=treap_rev_iter_init( treap, pool ); !treap_rev_iter_done( _cur );
3053 1314 : _cur=treap_rev_iter_next( _cur, pool ) ) {
3054 423 : txn_cnt++;
3055 423 : fd_pack_ord_txn_t const * cur = treap_rev_iter_ele_const( _cur, pool );
3056 423 : fd_txn_t const * txn = TXN(cur->txn);
3057 423 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, cur->txn->payload );
3058 423 : fd_acct_addr_t const * alt_adj = cur->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
3059 :
3060 423 : fd_ed25519_sig_t const * sig0 = fd_txn_get_signatures( txn, cur->txn->payload );
3061 :
3062 423 : fd_pack_ord_txn_t const * in_tbl = sig2txn_ele_query_const( pack->signature_map, (wrapped_sig_t const *)sig0, NULL, pool );
3063 423 : VERIFY_TEST( in_tbl, "signature missing from sig2txn" );
3064 :
3065 423 : VERIFY_TEST( (ulong)(cur->root & FD_ORD_TXN_ROOT_TAG_MASK)==fd_ulong_min( k, 3UL )+1UL, "treap element had bad root" );
3066 423 : if( FD_LIKELY( (cur->root & FD_ORD_TXN_ROOT_TAG_MASK)==FD_ORD_TXN_ROOT_PENALTY(0) ) ) {
3067 0 : fd_acct_addr_t const * penalty_acct = ACCT_IDX_TO_PTR( FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( cur->root ) );
3068 0 : VERIFY_TEST( !memcmp( penalty_acct, pack->penalty_treaps[ k-3UL ].key.b, 32UL ), "transaction in wrong penalty treap" );
3069 0 : }
3070 423 : VERIFY_TEST( cur->expires_at>=pack->expire_before, "treap element expired" );
3071 :
3072 423 : fd_pack_expq_t const * eq = pack->expiration_q + cur->expq_idx;
3073 423 : VERIFY_TEST( eq->txn==cur, "expq inconsistent" );
3074 423 : VERIFY_TEST( eq->expires_at==cur->expires_at, "expq expires_at inconsistent" );
3075 :
3076 423 : FD_PACK_BITSET_DECLARE( complement );
3077 423 : FD_PACK_BITSET_COPY( complement, full );
3078 423 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
3079 1413 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
3080 990 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
3081 :
3082 990 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( bitset_copy, acct, NULL );
3083 990 : VERIFY_TEST( q, "account in transaction missing from bitset mapping" );
3084 990 : VERIFY_TEST( q->ref_cnt>0UL, "account in transaction ref_cnt already 0" );
3085 990 : q->ref_cnt--;
3086 990 : total_references--;
3087 :
3088 990 : FD_PACK_BITSET_CLEAR( bit );
3089 990 : FD_PACK_BITSET_SETN( bit, q->bit );
3090 990 : if( q->bit<FD_PACK_BITSET_MAX ) {
3091 597 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, cur->rw_bitset, cur->rw_bitset ), "missing from rw bitset" );
3092 597 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, cur->w_bitset, cur->w_bitset ), "missing from w bitset" );
3093 597 : }
3094 990 : FD_PACK_BITSET_CLEARN( complement, q->bit );
3095 990 : }
3096 423 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( complement, complement, cur->w_bitset, cur->w_bitset ), "extra in w bitset" );
3097 :
3098 423 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_READONLY );
3099 1836 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
3100 :
3101 1413 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
3102 1413 : if( FD_UNLIKELY( fd_pack_unwritable_contains( &acct ) ) ) continue;
3103 888 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( bitset_copy, acct, NULL );
3104 888 : VERIFY_TEST( q, "account in transaction missing from bitset mapping" );
3105 888 : VERIFY_TEST( q->ref_cnt>0UL, "account in transaction ref_cnt already 0" );
3106 888 : q->ref_cnt--;
3107 888 : total_references--;
3108 :
3109 888 : FD_PACK_BITSET_CLEAR( bit );
3110 888 : FD_PACK_BITSET_SETN( bit, q->bit );
3111 888 : if( q->bit<FD_PACK_BITSET_MAX ) {
3112 879 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, cur->rw_bitset, cur->rw_bitset ), "missing from rw bitset" );
3113 879 : }
3114 888 : FD_PACK_BITSET_CLEARN( complement, q->bit );
3115 888 : }
3116 423 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( complement, complement, cur->rw_bitset, cur->rw_bitset ), "extra in rw bitset" );
3117 423 : }
3118 1314 : }
3119 :
3120 438 : bitset_map_leave( bitset_copy );
3121 438 : VERIFY_TEST( txn_cnt==pack->pending_txn_cnt, "txn_cnt" );
3122 :
3123 438 : VERIFY_TEST( total_references==0UL, "extra references in bitset mapping" );
3124 438 : ulong sig2txn_key_cnt = 0UL;
3125 438 : for( sig2txn_iter_t iter = sig2txn_iter_init( pack->signature_map, pool );
3126 861 : !sig2txn_iter_done( iter, pack->signature_map, pool );
3127 438 : iter = sig2txn_iter_next( iter, pack->signature_map, pool ) ) {
3128 423 : sig2txn_key_cnt++;
3129 423 : }
3130 438 : VERIFY_TEST( txn_cnt==sig2txn_key_cnt, "extra signatures in sig2txn" );
3131 438 : VERIFY_TEST( !sig2txn_verify( pack->signature_map, trp_pool_max( pool ), pool ), "sig2txn corrupt" );
3132 :
3133 : /* Count noncemap keys */
3134 438 : ulong noncemap_key_cnt = 0UL;
3135 438 : for( noncemap_iter_t iter = noncemap_iter_init( pack->noncemap, pool );
3136 486 : !noncemap_iter_done( iter, pack->noncemap, pool );
3137 438 : iter = noncemap_iter_next( iter, pack->noncemap, pool ) ) {
3138 48 : noncemap_key_cnt++;
3139 : /* Ensure element is in pool */
3140 48 : fd_pack_ord_txn_t const * ord = noncemap_iter_ele_const( iter, pack->noncemap, pool );
3141 48 : VERIFY_TEST( ord->txn->flags & FD_TXN_P_FLAGS_DURABLE_NONCE, "invalid entry in noncemap" );
3142 :
3143 : /* Although pack allows multiple transactions with the same
3144 : signature in sig2txn (MAP_MULTI==1), the noncemap checks prevent
3145 : multiple nonce transactions with the same signature. */
3146 48 : wrapped_sig_t sig = FD_LOAD( wrapped_sig_t, fd_txn_get_signatures( TXN( ord->txn ), ord->txn->payload ) );
3147 48 : VERIFY_TEST( ord==sig2txn_ele_query_const( pack->signature_map, &sig, NULL, pool ), "noncemap and sig2txn desynced" );
3148 48 : }
3149 438 : VERIFY_TEST( txn_cnt>=noncemap_key_cnt, "phantom txns in noncemap" );
3150 438 : VERIFY_TEST( !noncemap_verify( pack->noncemap, trp_pool_max( pool ), pool ), "noncemap corrupt" );
3151 :
3152 438 : ulong slots_found = 0UL;
3153 438 : ulong const pool_max = trp_pool_max( pool );
3154 3890922 : for( ulong i=0UL; i<pool_max; i++ ) {
3155 3890484 : fd_pack_ord_txn_t * ord = pack->pool + i;
3156 3890484 : if( ord->root!=FD_ORD_TXN_ROOT_FREE ) slots_found++;
3157 3890484 : }
3158 438 : VERIFY_TEST( slots_found==txn_cnt, "phantom slots in pool" );
3159 :
3160 438 : bitset_map_join( _bitset_map_orig );
3161 :
3162 438 : int lg_uses_tbl_sz = acct_uses_lg_slot_cnt( pack->acct_in_use );
3163 :
3164 438 : void * _acct_in_use_copy = scratch;
3165 438 : void * _acct_in_use_orig = acct_uses_leave( pack->acct_in_use );
3166 438 : fd_memcpy( _acct_in_use_copy, _acct_in_use_orig, acct_uses_footprint( lg_uses_tbl_sz ) );
3167 :
3168 438 : fd_pack_addr_use_t * acct_in_use_copy = acct_uses_join( _acct_in_use_copy );
3169 :
3170 438 : FD_PACK_BITSET_DECLARE( w_complement );
3171 438 : FD_PACK_BITSET_DECLARE( rw_complement );
3172 438 : FD_PACK_BITSET_COPY( w_complement, full );
3173 438 : FD_PACK_BITSET_COPY( rw_complement, full );
3174 :
3175 438 : FD_PACK_BITSET_DECLARE( rw_bitset ); FD_PACK_BITSET_COPY( rw_bitset, pack->bitset_rw_in_use );
3176 438 : FD_PACK_BITSET_DECLARE( w_bitset ); FD_PACK_BITSET_COPY( w_bitset, pack->bitset_w_in_use );
3177 :
3178 :
3179 438 : ulong const EMPTY_MASK = ~(FD_PACK_IN_USE_WRITABLE | FD_PACK_IN_USE_BIT_CLEARED);
3180 :
3181 12255 : for( ulong bank=0UL; bank<pack->bank_tile_cnt; bank++ ) {
3182 :
3183 11817 : fd_pack_addr_use_t const * base = pack->use_by_bank[ bank ];
3184 11817 : ulong bank_mask = 1UL << bank;
3185 :
3186 12672 : for( ulong i=0UL; i<pack->use_by_bank_cnt[ bank ]; i++ ) {
3187 855 : fd_pack_addr_use_t * use = acct_uses_query( acct_in_use_copy, base[i].key, NULL );
3188 855 : VERIFY_TEST( use, "acct in use by bank not in acct_in_use, or in uses_by_bank twice" );
3189 :
3190 855 : VERIFY_TEST( use->in_use_by & bank_mask, "acct in uses_by_bank doesn't have corresponding bit set in acct_in_use, or it was in the list twice" );
3191 :
3192 855 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( pack->acct_to_bitset, base[i].key, NULL );
3193 : /* The normal case is that the acct->bit mapping is preserved
3194 : while in use by other transactions in the pending list. This
3195 : might not always happen though. It's okay for the mapping to
3196 : get deleted while the acct is in use, which is noted with
3197 : BIT_CLEARED. If that is set, the mapping may not exist, or it
3198 : may have been re-created, perhaps with a different bit. */
3199 855 : if( q==NULL ) VERIFY_TEST( use->in_use_by & FD_PACK_IN_USE_BIT_CLEARED, "acct in use not in acct_to_bitset, but not marked as cleared" );
3200 0 : else if( !(use->in_use_by & FD_PACK_IN_USE_BIT_CLEARED) ) {
3201 0 : FD_PACK_BITSET_CLEAR( bit );
3202 0 : FD_PACK_BITSET_SETN( bit, q->bit );
3203 0 : if( q->bit<FD_PACK_BITSET_MAX ) {
3204 0 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, rw_bitset, rw_bitset ), "missing from rw bitset" );
3205 0 : if( use->in_use_by & FD_PACK_IN_USE_WRITABLE ) {
3206 0 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, w_bitset, w_bitset ), "missing from w bitset" );
3207 0 : FD_PACK_BITSET_CLEARN( w_complement, q->bit );
3208 0 : }
3209 0 : }
3210 0 : FD_PACK_BITSET_CLEARN( rw_complement, q->bit );
3211 0 : }
3212 855 : if( use->in_use_by & FD_PACK_IN_USE_WRITABLE ) VERIFY_TEST( (use->in_use_by & EMPTY_MASK)==bank_mask, "writable, but in use by multiple" );
3213 :
3214 855 : use->in_use_by &= ~bank_mask;
3215 855 : if( !(use->in_use_by & EMPTY_MASK) ) acct_uses_remove( acct_in_use_copy, use );
3216 855 : }
3217 11817 : }
3218 438 : VERIFY_TEST( acct_uses_key_cnt( acct_in_use_copy )==0UL, "stray uses in acct_in_use" );
3219 438 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( rw_complement, rw_complement, rw_bitset, rw_bitset ), "extra in rw bitset" );
3220 438 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( w_complement, w_complement, w_bitset, w_bitset ), "extra in w bitset" );
3221 :
3222 438 : acct_uses_leave( acct_in_use_copy );
3223 :
3224 438 : acct_uses_join( _acct_in_use_orig );
3225 438 : return 0;
3226 438 : }
3227 :
3228 3 : void * fd_pack_leave ( fd_pack_t * pack ) { FD_COMPILER_MFENCE(); return (void *)pack; }
3229 3 : void * fd_pack_delete( void * mem ) { FD_COMPILER_MFENCE(); return mem; }
|