Line data Source code
1 : #define FD_UNALIGNED_ACCESS_STYLE 0
2 : #include "fd_pack.h"
3 : #include "fd_pack_cost.h"
4 : #include "fd_compute_budget_program.h"
5 : #include "fd_pack_bitset.h"
6 : #include "fd_pack_unwritable.h"
7 : #include "fd_chkdup.h"
8 : #include "fd_pack_tip_prog_blacklist.h"
9 : #include <math.h> /* for sqrt */
10 : #include <stddef.h> /* for offsetof */
11 : #include "../metrics/fd_metrics.h"
12 :
13 : #define FD_PACK_USE_NON_TEMPORAL_MEMCPY 1
14 :
15 : /* Declare a bunch of helper structs used for pack-internal data
16 : structures. */
17 : typedef struct {
18 : fd_ed25519_sig_t sig;
19 : } wrapped_sig_t;
20 :
21 : /* fd_pack_ord_txn_t: An fd_txn_p_t with information required to order
22 : it by priority. */
23 : struct fd_pack_private_ord_txn {
24 : /* It's important that there be no padding here (asserted below)
25 : because the code casts back and forth from pointers to this element
26 : to pointers to the whole struct. */
27 : union {
28 : fd_txn_p_t txn[1]; /* txn is an alias for txn_e->txnp */
29 : fd_txn_e_t txn_e[1];
30 : struct{ uchar _sig_cnt; wrapped_sig_t sig; };
31 : };
32 :
33 : /* Since this struct can be in one of several trees, it's helpful to
34 : store which tree. This should be one of the FD_ORD_TXN_ROOT_*
35 : values. */
36 : int root;
37 :
38 : /* The sig2txn map_chain fields */
39 : ushort sigmap_next;
40 : ushort sigmap_prev;
41 :
42 : /* Each transaction is inserted with an expiration "time." This code
43 : doesn't care about the units (blocks, rdtsc tick, ns, etc.), and
44 : doesn't require transactions to be inserted in expiration date
45 : order. */
46 : ulong expires_at;
47 : /* expq_idx: When this object is part of one of the treaps, it's
48 : also in the expiration priority queue. This field (which is
49 : manipulated behind the scenes by the fd_prq code) stores where so
50 : that if we delete this transaction, we can also delete it from the
51 : expiration priority queue. */
52 : ulong expq_idx;
53 :
54 : /* We want rewards*compute_est to fit in a ulong so that r1/c1 < r2/c2 can be
55 : computed as r1*c2 < r2*c1, with the product fitting in a ulong.
56 : compute_est has a small natural limit of mid-20 bits. rewards doesn't have
57 : a natural limit, so there is some argument to be made for raising the
58 : limit for rewards to 40ish bits. The struct has better packing with
59 : uint/uint though. */
60 : uint __attribute__((aligned(64))) /* We want the treap fields and the bitsets
61 : to be on the same double cache line pair */
62 : rewards; /* in Lamports */
63 : uint compute_est; /* in compute units */
64 :
65 : /* The treap fields */
66 : ushort left;
67 : ushort right;
68 : ushort parent;
69 : ushort prio;
70 : ushort prev;
71 : ushort next;
72 :
73 : /* skip: if we skip this transaction more than FD_PACK_SKIP_CNT times
74 : for reasons that won't go away until the end of the block, then we
75 : want to skip it very quickly. If skip is in [1, FD_PACK_SKIP_CNT],
76 : then that means we have to skip it `skip` more times before taking
77 : any action. If skip>FD_PACK_SKIP_CNT, then it is a compressed slot
78 : number during which it should be skipped, and we'll skip it until
79 : the compressed slot reaches a new value. skip is never 0. */
80 : ushort skip;
81 :
82 : FD_PACK_BITSET_DECLARE( rw_bitset ); /* all accts this txn references */
83 : FD_PACK_BITSET_DECLARE( w_bitset ); /* accts this txn write-locks */
84 :
85 : };
86 : typedef struct fd_pack_private_ord_txn fd_pack_ord_txn_t;
87 :
88 : /* What we want is that the payload starts at byte 0 of
89 : fd_pack_ord_txn_t so that the trick with the signature map works
90 : properly. GCC and Clang seem to disagree on the rules of offsetof.
91 : */
92 : FD_STATIC_ASSERT( offsetof( fd_pack_ord_txn_t, txn )==0UL, fd_pack_ord_txn_t );
93 : FD_STATIC_ASSERT( offsetof( fd_pack_ord_txn_t, sig )==1UL, fd_pack_ord_txn_t );
94 : #if FD_USING_CLANG
95 : FD_STATIC_ASSERT( offsetof( fd_txn_p_t, payload )==0UL, fd_pack_ord_txn_t );
96 : #else
97 : FD_STATIC_ASSERT( offsetof( fd_pack_ord_txn_t, txn->payload )==0UL, fd_pack_ord_txn_t );
98 : FD_STATIC_ASSERT( offsetof( fd_pack_ord_txn_t, txn_e->txnp )==0UL, fd_pack_ord_txn_t );
99 : #endif
100 :
101 : /* FD_ORD_TXN_ROOT is essentially a small union packed into an int. The low
102 : byte is the "tag". The higher 3 bytes depend on the low byte. */
103 4451673 : #define FD_ORD_TXN_ROOT_TAG_MASK 0xFF
104 15761382 : #define FD_ORD_TXN_ROOT_FREE 0
105 18006344 : #define FD_ORD_TXN_ROOT_PENDING 1
106 13276737 : #define FD_ORD_TXN_ROOT_PENDING_VOTE 2
107 0 : #define FD_ORD_TXN_ROOT_PENDING_BUNDLE 3
108 329301 : #define FD_ORD_TXN_ROOT_PENALTY( idx ) (4 | (idx)<<8)
109 :
110 : /* if root & TAG_MASK == PENALTY, then PENALTY_ACCT_IDX(root) gives the index
111 : in the transaction's list of account addresses of which penalty treap the
112 : transaction is in. */
113 : #define FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( root ) (((root) & 0xFF00)>>8)
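/* Worked example (illustrative): a transaction parked in the penalty
   treap keyed by its account address at index 3 has
     root = FD_ORD_TXN_ROOT_PENALTY( 3 ) = (4 | (3<<8)) = 0x0304,
   so (root & FD_ORD_TXN_ROOT_TAG_MASK)==4 identifies the penalty case,
   and FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( 0x0304 )==3 recovers the
   account index. */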
114 :
115 28430171 : #define FD_PACK_IN_USE_WRITABLE (0x8000000000000000UL)
116 15395222 : #define FD_PACK_IN_USE_BIT_CLEARED (0x4000000000000000UL)
117 :
118 : /* Each non-empty microblock we schedule also has an overhead of 48
119 : bytes that counts towards shred limits. That comes from the 32 byte
120 : hash, the hash count (8 bytes) and the transaction count (8 bytes).
121 : We don't have to pay this overhead if the microblock is empty, since
122 : those microblocks get dropped. */
123 1482126 : #define MICROBLOCK_DATA_OVERHEAD 48UL
124 :
125 : /* Keep track of accounts that are written to in each block so that we
126 : can reset the writer costs to 0. If the number of accounts that are
127 : written to is above or equal to this, we'll just clear the whole
128 : writer cost map instead of only removing the elements we increased. */
129 1353 : #define DEFAULT_WRITTEN_LIST_MAX 16384UL
130 :
131 : /* fd_pack_addr_use_t: Used for three distinct purposes:
132 : - to record that an address is in use and can't be used again until
133 : certain microblocks finish execution
134 : - to keep track of the cost of all transactions that write to the
135 : specified account.
136 : - to keep track of the write cost for accounts referenced by
137 : transactions in a bundle and which transactions use which
138 : accounts.
139 : Making these separate structs might make it more clear, but then
140 : they'd have identical shape and result in several fd_map_dynamic sets
141 : of functions with identical code. It doesn't seem like the compiler
142 : is very good at merging code like that, so in order to reduce code
143 : bloat, we'll just combine them. */
144 : struct fd_pack_private_addr_use_record {
145 : fd_acct_addr_t key; /* account address */
146 : union {
147 : ulong _;
148 : ulong in_use_by; /* Bitmask indicating which banks */
149 : ulong total_cost; /* In cost units/CUs */
150 : struct { uint carried_cost; /* In cost units */
151 : ushort ref_cnt; /* In transactions */
152 : ushort last_use_in; }; /* In transactions */
153 : };
154 : };
155 : typedef struct fd_pack_private_addr_use_record fd_pack_addr_use_t;
156 :
157 :
158 : /* fd_pack_expq_t: An element of an fd_prq to sort the transactions by
159 : timeout. This structure has several invariants for entries
160 : corresponding to pending transactions:
161 : expires_at == txn->expires_at
162 : txn->expq_idx is the index of this structure
163 : Notice that prq is an array-based heap, which means the indexes of
164 : elements change. The PRQ_TMP_ST macro is hijacked to keep that
165 : invariant up to date.
166 :
167 : Note: this could be easier if fd_heap supported deleting from the
168 : middle, but that's not possible with the current design of fd_heap,
169 : which omits a parent pointer for improved performance. */
170 : struct fd_pack_expq {
171 : ulong expires_at;
172 : fd_pack_ord_txn_t * txn;
173 : };
174 : typedef struct fd_pack_expq fd_pack_expq_t;
175 :
176 :
177 : /* fd_pack_bitset_acct_mapping_t: An element of an fd_map_dynamic that
178 : maps an account address to the number of transactions that are
179 : referencing it and the bit that is reserved to indicate it in the
180 : bitset, if any. */
181 : struct fd_pack_bitset_acct_mapping {
182 : fd_acct_addr_t key; /* account address */
183 : ulong ref_cnt;
184 :
185 : /* first_instance and first_instance_was_write are only valid when
186 : bit==FD_PACK_BITSET_FIRST_INSTANCE, which is set when ref_cnt
187 : transitions from 0 to 1. These just exist to implement the
188 : optimization that accounts referenced a single time aren't
189 : allocated a bit, which seems to be an important optimization. */
190 : fd_pack_ord_txn_t * first_instance;
191 : int first_instance_was_write;
192 :
193 : /* bit is in [0, FD_PACK_BITSET_MAX) U
194 : { FD_PACK_BITSET_FIRST_INSTANCE, FD_PACK_BITSET_SLOWPATH }. */
195 : ushort bit;
196 : };
197 : typedef struct fd_pack_bitset_acct_mapping fd_pack_bitset_acct_mapping_t;
198 :
199 :
200 :
201 : /* pack maintains a small state machine related to initializer bundles.
202 : See the header file for more details about it, but it's
203 : also summarized here:
204 : * NOT_INITIALIZED: The starting state for each block
205 : * PENDING: an initializer bundle has been scheduled, but pack has
206 : not observed its result yet, so we don't know if it was successful
207 : or not.
208 : * FAILED: the most recently scheduled initializer bundle failed
209 : for reasons other than already being executed. Most commonly, this
210 : could be because of a bug in the code that generated the
211 : initializer bundle, a lack of fee payer balance, or an expired
212 : blockhash.
213 : * READY: the most recently scheduled initializer bundle succeeded
214 : and normal bundles can be scheduled in this slot. */
215 2646 : #define FD_PACK_IB_STATE_NOT_INITIALIZED 0
216 0 : #define FD_PACK_IB_STATE_PENDING 1
217 0 : #define FD_PACK_IB_STATE_FAILED 2
218 0 : #define FD_PACK_IB_STATE_READY 3
219 :
220 :
221 : /* Returns 1 if x.rewards/x.compute < y.rewards/y.compute. Not robust. */
222 89075672 : #define COMPARE_WORSE(x,y) ( ((ulong)((x)->rewards)*(ulong)((y)->compute_est)) < ((ulong)((y)->rewards)*(ulong)((x)->compute_est)) )
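/* Illustrative check of the cross-multiplication trick: with
   x = { rewards=5000, compute_est=2000 } (2.5 lamports/CU) and
   y = { rewards=6000, compute_est=3000 } (2.0 lamports/CU),
   COMPARE_WORSE(x,y) evaluates 5000UL*3000UL < 6000UL*2000UL, i.e.
   15,000,000 < 12,000,000, which is false: x is not worse than y.
   Because rewards and compute_est are uints, each product fits
   comfortably in a ulong. */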
223 :
224 : /* Declare all the data structures */
225 :
226 :
227 : /* Define the big max-"heap" that we pull transactions off to schedule.
228 : The priority is given by reward/compute. We may want to add in some
229 : additional terms at a later point. In order to cheaply remove nodes,
230 : we actually use a treap. */
231 : #define POOL_NAME trp_pool
232 1566 : #define POOL_T fd_pack_ord_txn_t
233 : #define POOL_IDX_T ushort
234 29587584 : #define POOL_NEXT parent
235 : #include "../../util/tmpl/fd_pool.c"
236 :
237 : #define TREAP_T fd_pack_ord_txn_t
238 : #define TREAP_NAME treap
239 : #define TREAP_QUERY_T void * /* We don't use query ... */
240 : #define TREAP_CMP(a,b) (__extension__({ (void)(a); (void)(b); -1; })) /* which means we don't need to give a real
241 : implementation to cmp either */
242 180417143 : #define TREAP_IDX_T ushort
243 : #define TREAP_OPTIMIZE_ITERATION 1
244 89075672 : #define TREAP_LT COMPARE_WORSE
245 : #include "../../util/tmpl/fd_treap.c"
246 :
247 :
248 : #define MAP_NAME sig2txn
249 : #define MAP_OPTIMIZE_RANDOM_ACCESS_REMOVAL 1
250 : #define MAP_MULTI 1
251 13581267 : #define MAP_ELE_T fd_pack_ord_txn_t
252 36541751 : #define MAP_PREV sigmap_prev
253 34999496 : #define MAP_NEXT sigmap_next
254 13582308 : #define MAP_IDX_T ushort
255 : #define MAP_KEY_T wrapped_sig_t
256 26651824 : #define MAP_KEY sig
257 33 : #define MAP_KEY_EQ(k0,k1) (!memcmp( (k0),(k1), FD_TXN_SIGNATURE_SZ) )
258 26651881 : #define MAP_KEY_HASH(key,seed) fd_hash( (seed), (key), 64UL )
259 : #include "../../util/tmpl/fd_map_chain.c"
260 :
261 :
262 : static const fd_acct_addr_t null_addr = { 0 };
263 :
264 : #define MAP_NAME acct_uses
265 94466112 : #define MAP_T fd_pack_addr_use_t
266 111463057 : #define MAP_KEY_T fd_acct_addr_t
267 318360085 : #define MAP_KEY_NULL null_addr
268 : #if FD_HAS_AVX
269 111463057 : # define MAP_KEY_INVAL(k) _mm256_testz_si256( wb_ldu( (k).b ), wb_ldu( (k).b ) )
270 : #else
271 : # define MAP_KEY_INVAL(k) MAP_KEY_EQUAL(k, null_addr)
272 : #endif
273 77449020 : #define MAP_KEY_EQUAL(k0,k1) (!memcmp((k0).b,(k1).b, FD_TXN_ACCT_ADDR_SZ))
274 : #define MAP_KEY_EQUAL_IS_SLOW 1
275 : #define MAP_MEMOIZE 0
276 94473326 : #define MAP_KEY_HASH(key) ((uint)fd_ulong_hash( fd_ulong_load_8( (key).b ) ))
277 : #include "../../util/tmpl/fd_map_dynamic.c"
278 :
279 :
280 : #define MAP_NAME bitset_map
281 52401902 : #define MAP_T fd_pack_bitset_acct_mapping_t
282 65749212 : #define MAP_KEY_T fd_acct_addr_t
283 873155377 : #define MAP_KEY_NULL null_addr
284 : #if FD_HAS_AVX
285 65749212 : # define MAP_KEY_INVAL(k) _mm256_testz_si256( wb_ldu( (k).b ), wb_ldu( (k).b ) )
286 : #else
287 : # define MAP_KEY_INVAL(k) MAP_KEY_EQUAL(k, null_addr)
288 : #endif
289 39093307 : #define MAP_KEY_EQUAL(k0,k1) (!memcmp((k0).b,(k1).b, FD_TXN_ACCT_ADDR_SZ))
290 : #define MAP_KEY_EQUAL_IS_SLOW 1
291 : #define MAP_MEMOIZE 0
292 52429901 : #define MAP_KEY_HASH(key) ((uint)fd_ulong_hash( fd_ulong_load_8( (key).b ) ))
293 : #include "../../util/tmpl/fd_map_dynamic.c"
294 :
295 :
296 : /* Since transactions can also expire, we also maintain a parallel
297 : priority queue. This means elements are simultaneously part of the
298 : treap (ordered by priority) and the expiration queue (ordered by
299 : expiration). It's tempting to use the priority field of the treap
300 : for this purpose, but that can result in degenerate treaps in some
301 : cases. */
302 : #define PRQ_NAME expq
303 32820278 : #define PRQ_T fd_pack_expq_t
304 27144834 : #define PRQ_TIMEOUT_T ulong
305 27144834 : #define PRQ_TIMEOUT expires_at
306 15888426 : #define PRQ_TMP_ST(p,t) do { \
307 15888426 : (p)[0] = (t); \
308 15888426 : t.txn->expq_idx = (ulong)((p)-heap); \
309 15888426 : } while( 0 )
310 : #include "../../util/tmpl/fd_prq.c"
311 :
312 : /* fd_pack_smallest: We want to keep track of the smallest transaction
313 : in each treap. That way, if we know the amount of space left in the
314 : block is less than the smallest transaction in the heap, we can just
315 : skip the heap. Since transactions can be deleted, etc. maintaining
316 : this precisely is hard, but we can maintain a conservative value
317 : fairly cheaply. Since the CU limit or the byte limit can be the one
318 : that matters, we keep track of the smallest by both. */
319 : struct fd_pack_smallest {
320 : ulong cus;
321 : ulong bytes;
322 : };
323 : typedef struct fd_pack_smallest fd_pack_smallest_t;
324 :
325 :
326 : /* With realistic traffic patterns, we often see many, many transactions
327 : competing for the same writable account. Since only one of these can
328 : execute at a time, we sometimes waste lots of scheduling time going
329 : through them one at a time. To combat that, when a transaction
330 : writes to an account with more than PENALTY_TREAP_THRESHOLD
331 : references (readers or writers), instead of inserting it into the
332 : main treap, we insert it into a penalty treap for that specific hot
333 : account address. These transactions are not immediately available
334 : for scheduling. Then, when a transaction that writes to the hot
335 : address completes, we move the most lucrative transaction from the
336 : penalty treap to the main treap, making it available for scheduling.
337 : This policy may slightly violate the price-time priority scheduling
338 : approach pack normally uses: if the most lucrative transaction
339 : competing for hot state arrives after PENALTY_TREAP_THRESHOLD has
340 : been hit, it may be scheduled second instead of first. However, if
341 : the account is in use at the time the new transaction arrives, it
342 : will be scheduled next, as desired. This minor difference seems
343 : reasonable to reduce complexity.
344 :
345 : fd_pack_penalty_treap is one account-specific penalty treap. All the
346 : transactions in the penalty_treap treap write to key.
347 :
348 : penalty_map is the fd_map_dynamic that maps accounts to their
349 : respective penalty treaps. */
350 : struct fd_pack_penalty_treap {
351 : fd_acct_addr_t key;
352 : treap_t penalty_treap[1];
353 : };
354 : typedef struct fd_pack_penalty_treap fd_pack_penalty_treap_t;
355 :
356 : #define MAP_NAME penalty_map
357 4227885 : #define MAP_T fd_pack_penalty_treap_t
358 4229742 : #define MAP_KEY_T fd_acct_addr_t
359 13437909 : #define MAP_KEY_NULL null_addr
360 : #if FD_HAS_AVX
361 4229742 : # define MAP_KEY_INVAL(k) _mm256_testz_si256( wb_ldu( (k).b ), wb_ldu( (k).b ) )
362 : #else
363 : # define MAP_KEY_INVAL(k) MAP_KEY_EQUAL(k, null_addr)
364 : #endif
365 4223940 : #define MAP_KEY_EQUAL(k0,k1) (!memcmp((k0).b,(k1).b, FD_TXN_ACCT_ADDR_SZ))
366 : #define MAP_KEY_EQUAL_IS_SLOW 1
367 : #define MAP_MEMOIZE 0
368 4226841 : #define MAP_KEY_HASH(key) ((uint)fd_ulong_hash( fd_ulong_load_8( (key).b ) ))
369 : #include "../../util/tmpl/fd_map_dynamic.c"
370 :
371 : /* PENALTY_TREAP_THRESHOLD: How many references to an account do we
372 : allow before subsequent transactions that write to the account go to
373 : the penalty treap. */
374 29525271 : #define PENALTY_TREAP_THRESHOLD 64UL
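/* For example (values illustrative), when populate_bitsets below sees a
   writable account whose ref_cnt is already 100, that account
   contributes max( 100, PENALTY_TREAP_THRESHOLD )-PENALTY_TREAP_THRESHOLD
   = 36 penalty "points", while an account with 64 or fewer references
   contributes 0. */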
375 :
376 :
377 : /* FD_PACK_SKIP_CNT: How many times we'll skip a transaction (for
378 : reasons other than account conflicts) before we won't consider it
379 : until the next slot. For performance reasons, this doesn't reset at
380 : the end of a slot, so e.g. we might skip twice in slot 1, then three
381 : times in slot 2, which would be enough to prevent considering it
382 : until slot 3. The main reason this is not 1 is that some skips that
383 : seem permanent until the end of the slot can actually go away based
384 : on rebates. */
385 13584870 : #define FD_PACK_SKIP_CNT 5UL
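/* Restating the skip-field encoding documented in
   fd_pack_private_ord_txn with concrete values: skip==3 (in
   [1, FD_PACK_SKIP_CNT]) means the transaction must be skipped 3 more
   times before pack takes further action, while
   skip==compressed_slot_number (always > FD_PACK_SKIP_CNT, see below)
   means the transaction is ignored until compressed_slot_number
   advances, i.e. roughly until the next slot. */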
386 :
387 : /* Finally, we can now declare the main pack data structure */
388 : struct fd_pack_private {
389 : ulong pack_depth;
390 : ulong bundle_meta_sz; /* if 0, bundles are disabled */
391 : ulong bank_tile_cnt;
392 :
393 : fd_pack_limits_t lim[1];
394 :
395 : ulong pending_txn_cnt; /* Summed across all treaps */
396 : ulong microblock_cnt; /* How many microblocks have we
397 : generated in this block? */
398 : ulong data_bytes_consumed; /* How much data is in this block so
399 : far? */
400 : fd_rng_t * rng;
401 :
402 : ulong cumulative_block_cost;
403 : ulong cumulative_vote_cost;
404 :
405 : /* expire_before: Any transactions with expires_at strictly less than
406 : the current expire_before are removed from the available pending
407 : transactions. Here, "expire" is used as a verb: cause all
408 : transactions before this time to expire. */
409 : ulong expire_before;
410 :
411 : /* outstanding_microblock_mask: a bitmask indicating which banking
412 : tiles have outstanding microblocks, i.e. fd_pack has generated a
413 : microblock for that banking tile and the banking tile has not yet
414 : notified fd_pack that it has completed it. */
415 : ulong outstanding_microblock_mask;
416 :
417 : /* The actual footprint for the pool and maps is allocated
418 : in the same order in which they are declared immediately following
419 : the struct. I.e. these pointers point to memory not far after the
420 : struct. The trees are just pointers into the pool so don't take up
421 : more space. */
422 :
423 : fd_pack_ord_txn_t * pool;
424 :
425 : /* Treaps (sorted by priority) of pending transactions. We store the
426 : pending simple votes and transactions that come from bundles
427 : separately. */
428 : treap_t pending[1];
429 : treap_t pending_votes[1];
430 : treap_t pending_bundles[1];
431 :
432 : /* penalty_treaps: an fd_map_dynamic mapping hotly contended account
433 : addresses to treaps of transactions that write to them. We try not
434 : to allow more than roughly PENALTY_TREAP_THRESHOLD transactions in
435 : the main treap that write to each account, though this is not
436 : exact. */
437 : fd_pack_penalty_treap_t * penalty_treaps;
438 :
439 : /* initializer_bundle_state: The current state of the initializer
440 : bundle state machine. One of the FD_PACK_IB_STATE_* values. See
441 : the long comment in the header and the comments attached to the
442 : respective values for a discussion of what each state means and the
443 : transitions between them. */
444 : int initializer_bundle_state;
445 :
446 : /* pending_bundle_cnt: the number of bundles in pending_bundles. */
447 : ulong pending_bundle_cnt;
448 :
449 : /* relative_bundle_idx: the number of bundles that have been inserted
450 : since the last time pending_bundles was empty. See the long
451 : comment about encoding this index in the rewards field of each
452 : transaction in the bundle, and why it is important that this reset
453 : to 0 as frequently as possible. */
454 : ulong relative_bundle_idx;
455 :
456 : /* pending{_votes}_smallest: keep a conservative estimate of the
457 : smallest transaction (by cost units and by bytes) in each heap.
458 : Both CUs and bytes should be set to ULONG_MAX if the treap is
459 : empty. */
460 : fd_pack_smallest_t pending_smallest[1];
461 : fd_pack_smallest_t pending_votes_smallest[1];
462 :
463 : /* expiration_q: At the same time that a transaction is in exactly one
464 : of the above treaps, it is also in the expiration queue, sorted by
465 : its expiration time. This enables deleting all transactions that
466 : have expired, regardless of which treap they are in. */
467 : fd_pack_expq_t * expiration_q;
468 :
469 : /* acct_in_use: Map from account address to bitmask indicating which
470 : bank tiles are using the account and whether that use is read or
471 : write (msb). */
472 : fd_pack_addr_use_t * acct_in_use;
473 :
474 : /* bitset_{w, rw}_in_use stores a subset of the information in
475 : acct_in_use using the compressed set format explained at the top of
476 : this file. rw_in_use stores accounts in use for read or write
477 : while w_in_use stores only those in use for write. */
478 : FD_PACK_BITSET_DECLARE( bitset_rw_in_use );
479 : FD_PACK_BITSET_DECLARE( bitset_w_in_use );
480 :
481 : /* writer_costs: Map from account addresses to the sum of costs of
482 : transactions that write to the account. Used for enforcing limits
483 : on the max write cost per account per block. */
484 : fd_pack_addr_use_t * writer_costs;
485 :
486 : /* At the end of every slot, we have to clear out writer_costs. The
487 : map is large, but typically very sparsely populated. As an
488 : optimization, we keep track of the elements of the map that we've
489 : actually used, up to a maximum. If we use more than the maximum,
490 : we revert to the old way of just clearing the whole map.
491 :
492 : written_list indexed [0, written_list_cnt).
493 : written_list_cnt in [0, written_list_max).
494 :
495 : written_list_cnt==written_list_max-1 means that the list may be
496 : incomplete and should be ignored. */
497 : fd_pack_addr_use_t * * written_list;
498 : ulong written_list_cnt;
499 : ulong written_list_max;
500 :
501 :
502 : sig2txn_t * signature_map; /* Stores pointers into pool for deleting by signature */
503 :
504 : /* bundle_temp_map: A fd_map_dynamic (although it could be an fd_map)
505 : used during fd_pack_try_schedule_bundle to store information about
506 : what accounts are used by transactions in the bundle. It's empty
507 : (in a map sense) outside of calls to try_schedule_bundle, and each
508 : call to try_schedule_bundle clears it after use. If bundles are
509 : disabled, this is a valid fd_map_dynamic, but it's as small as
510 : convenient and remains empty. */
511 : fd_pack_addr_use_t * bundle_temp_map;
512 :
513 :
514 : /* use_by_bank: An array of size (max_txn_per_microblock *
515 : FD_TXN_ACCT_ADDR_MAX) for each banking tile. Only the MSB of
516 : in_use_by is relevant. Addressed use_by_bank[i][j] where i is in
517 : [0, bank_tile_cnt) and j is in [0, use_by_bank_cnt[i]). Used
518 : mostly for clearing the proper bits of acct_in_use when a
519 : microblock finishes.
520 :
521 : use_by_bank_txn: indexed [i][j], where i is in [0, bank_tile_cnt)
522 : and j is in [0, max_txn_per_microblock). Transaction j in the
523 : microblock currently scheduled to bank i uses account addresses in
523 : use_by_bank[i][k] where k is in [0, use_by_bank_txn[i][j]). For
524 : example, if use_by_bank_txn[i][0] = 2 and use_by_bank_txn[i][1] = 3, then
525 : all the accounts that the first transaction in the outstanding
526 : microblock for bank i uses are contained in the set
528 : { use_by_bank[i][0], use_by_bank[i][1] },
529 : and all the accounts in the second transaction in the microblock
530 : are in the set
531 : { use_by_bank[i][0], use_by_bank[i][1], use_by_bank[i][2] }.
532 : Each transaction writes to at least one account (the fee payer)
533 : that no other transaction scheduled to the bank uses, which means
534 : that use_by_bank_txn[i][j] - use_by_bank_txn[i][j-1] >= 1 (with 0
535 : for use_by_bank_txn[i][-1]). This means we can stop iterating when
536 : use_by_bank_txn[i][j] == use_by_bank_cnt[i]. */
537 : fd_pack_addr_use_t * use_by_bank [ FD_PACK_MAX_BANK_TILES ];
538 : ulong use_by_bank_cnt[ FD_PACK_MAX_BANK_TILES ];
539 : ulong * use_by_bank_txn[ FD_PACK_MAX_BANK_TILES ];
540 :
541 : fd_histf_t txn_per_microblock [ 1 ];
542 : fd_histf_t vote_per_microblock[ 1 ];
543 :
544 : fd_histf_t scheduled_cus_per_block[ 1 ];
545 : fd_histf_t rebated_cus_per_block [ 1 ];
546 : fd_histf_t net_cus_per_block [ 1 ];
547 : ulong cumulative_rebated_cus;
548 :
549 :
550 : /* compressed_slot_number: a number in (FD_PACK_SKIP_CNT, USHORT_MAX]
551 : that advances each time we start packing for a new slot. */
552 : ushort compressed_slot_number;
553 :
554 : /* bitset_avail: a stack of which bits are not currently reserved and
555 : can be used to represent an account address.
556 : Indexed [0, bitset_avail_cnt]. Element 0 is fixed at
557 : FD_PACK_BITSET_SLOWPATH. */
558 : ushort bitset_avail[ 1UL+FD_PACK_BITSET_MAX ];
559 : ulong bitset_avail_cnt;
560 :
561 : /* acct_to_bitset: an fd_map_dynamic that maps acct addresses to the
562 : reference count, which bit, etc. */
563 : fd_pack_bitset_acct_mapping_t * acct_to_bitset;
564 :
565 : /* chkdup: scratch memory chkdup needs for its internal processing */
566 : fd_chkdup_t chkdup[ 1 ];
567 :
568 : /* bundle_meta: an array, parallel to the pool, with each element
569 : having size bundle_meta_sz. I.e. if pool[i] has an associated
570 : bundle meta, it's located at bundle_meta[j] for j in
571 : [i*bundle_meta_sz, (i+1)*bundle_meta_sz). */
572 : void * bundle_meta;
573 : };
574 :
575 : typedef struct fd_pack_private fd_pack_t;
576 :
577 : FD_STATIC_ASSERT( offsetof(fd_pack_t, pending_txn_cnt)==FD_PACK_PENDING_TXN_CNT_OFF, txn_cnt_off );
578 :
579 : /* Forward-declare some helper functions */
580 : int delete_transaction( fd_pack_t * pack, fd_pack_ord_txn_t * txn, int delete_full_bundle, int move_from_penalty_treap );
581 : static inline void insert_bundle_impl( fd_pack_t * pack, ulong bundle_idx, ulong txn_cnt, fd_pack_ord_txn_t * * bundle, ulong expires_at );
582 :
583 : FD_FN_PURE ulong
584 : fd_pack_footprint( ulong pack_depth,
585 : ulong bundle_meta_sz,
586 : ulong bank_tile_cnt,
587 309 : fd_pack_limits_t const * limits ) {
588 309 : if( FD_UNLIKELY( (bank_tile_cnt==0) | (bank_tile_cnt>FD_PACK_MAX_BANK_TILES) ) ) return 0UL;
589 309 : if( FD_UNLIKELY( pack_depth<4UL ) ) return 0UL;
590 :
591 309 : int enable_bundles = !!bundle_meta_sz;
592 309 : ulong l;
593 309 : ulong extra_depth = fd_ulong_if( enable_bundles, 1UL+2UL*FD_PACK_MAX_TXN_PER_BUNDLE, 1UL ); /* space for use between init and fini */
594 309 : ulong max_acct_in_treap = pack_depth * FD_TXN_ACCT_ADDR_MAX;
595 309 : ulong max_txn_per_mblk = fd_ulong_max( limits->max_txn_per_microblock,
596 309 : fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE, 0UL ) );
597 309 : ulong max_acct_in_flight = bank_tile_cnt * (FD_TXN_ACCT_ADDR_MAX * max_txn_per_mblk + 1UL);
598 309 : ulong max_txn_in_flight = bank_tile_cnt * max_txn_per_mblk;
599 :
600 309 : ulong max_w_per_block = fd_ulong_min( limits->max_cost_per_block / FD_PACK_COST_PER_WRITABLE_ACCT,
601 309 : max_txn_per_mblk * limits->max_microblocks_per_block * FD_TXN_ACCT_ADDR_MAX );
602 309 : ulong written_list_max = fd_ulong_min( max_w_per_block>>1, DEFAULT_WRITTEN_LIST_MAX );
603 309 : ulong bundle_temp_accts = fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE*FD_TXN_ACCT_ADDR_MAX, 1UL );
604 309 : ulong sig_chain_cnt = sig2txn_chain_cnt_est( pack_depth );
605 :
606 : /* log base 2, but with a 2* so that the hash table stays sparse */
607 309 : int lg_uses_tbl_sz = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_flight ) );
608 309 : int lg_max_writers = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_w_per_block ) );
609 309 : int lg_acct_in_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap ) );
610 309 : int lg_penalty_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap/PENALTY_TREAP_THRESHOLD ) );
611 309 : int lg_bundle_temp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*bundle_temp_accts ) );
612 :
613 309 : l = FD_LAYOUT_INIT;
614 309 : l = FD_LAYOUT_APPEND( l, FD_PACK_ALIGN, sizeof(fd_pack_t) );
615 309 : l = FD_LAYOUT_APPEND( l, trp_pool_align (), trp_pool_footprint ( pack_depth+extra_depth ) ); /* pool */
616 309 : l = FD_LAYOUT_APPEND( l, penalty_map_align(), penalty_map_footprint( lg_penalty_trp ) ); /* penalty_treaps */
617 309 : l = FD_LAYOUT_APPEND( l, expq_align (), expq_footprint ( pack_depth ) ); /* expiration prq */
618 309 : l = FD_LAYOUT_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_uses_tbl_sz ) ); /* acct_in_use */
619 309 : l = FD_LAYOUT_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_max_writers ) ); /* writer_costs */
620 309 : l = FD_LAYOUT_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t*)*written_list_max ); /* written_list */
621 309 : l = FD_LAYOUT_APPEND( l, sig2txn_align (), sig2txn_footprint ( sig_chain_cnt ) ); /* signature_map */
622 309 : l = FD_LAYOUT_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_bundle_temp ) ); /* bundle_temp_map*/
623 309 : l = FD_LAYOUT_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t)*max_acct_in_flight ); /* use_by_bank */
624 309 : l = FD_LAYOUT_APPEND( l, 32UL, sizeof(ulong)*max_txn_in_flight ); /* use_by_bank_txn*/
625 309 : l = FD_LAYOUT_APPEND( l, bitset_map_align(), bitset_map_footprint( lg_acct_in_trp ) ); /* acct_to_bitset */
626 309 : l = FD_LAYOUT_APPEND( l, 64UL, (pack_depth+extra_depth)*bundle_meta_sz ); /* bundle_meta */
627 309 : return FD_LAYOUT_FINI( l, FD_PACK_ALIGN );
628 309 : }
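/* Usage sketch (illustrative only; the caller-side variable names are
   hypothetical): a caller typically sizes and constructs a pack object
   as

     ulong       fp   = fd_pack_footprint( depth, bundle_meta_sz, bank_tile_cnt, &limits );
     void      * mem  = ...;  // FD_PACK_ALIGN-aligned region of at least fp bytes
     fd_pack_t * pack = fd_pack_join( fd_pack_new( mem, depth, bundle_meta_sz, bank_tile_cnt, &limits, rng ) );

   Note that fd_pack_footprint returns 0 for invalid parameters (e.g.
   bank_tile_cnt of 0 or pack_depth<4), which the caller should check
   before allocating. */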
629 :
630 : void *
631 : fd_pack_new( void * mem,
632 : ulong pack_depth,
633 : ulong bundle_meta_sz,
634 : ulong bank_tile_cnt,
635 : fd_pack_limits_t const * limits,
636 522 : fd_rng_t * rng ) {
637 :
638 522 : int enable_bundles = !!bundle_meta_sz;
639 522 : ulong extra_depth = fd_ulong_if( enable_bundles, 1UL+2UL*FD_PACK_MAX_TXN_PER_BUNDLE, 1UL );
640 522 : ulong max_acct_in_treap = pack_depth * FD_TXN_ACCT_ADDR_MAX;
641 522 : ulong max_txn_per_mblk = fd_ulong_max( limits->max_txn_per_microblock,
642 522 : fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE, 0UL ) );
643 522 : ulong max_acct_in_flight = bank_tile_cnt * (FD_TXN_ACCT_ADDR_MAX * max_txn_per_mblk + 1UL);
644 522 : ulong max_txn_in_flight = bank_tile_cnt * max_txn_per_mblk;
645 :
646 522 : ulong max_w_per_block = fd_ulong_min( limits->max_cost_per_block / FD_PACK_COST_PER_WRITABLE_ACCT,
647 522 : max_txn_per_mblk * limits->max_microblocks_per_block * FD_TXN_ACCT_ADDR_MAX );
648 522 : ulong written_list_max = fd_ulong_min( max_w_per_block>>1, DEFAULT_WRITTEN_LIST_MAX );
649 522 : ulong bundle_temp_accts = fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE*FD_TXN_ACCT_ADDR_MAX, 1UL );
650 522 : ulong sig_chain_cnt = sig2txn_chain_cnt_est( pack_depth );
651 :
652 : /* log base 2, but with a 2* so that the hash table stays sparse */
653 522 : int lg_uses_tbl_sz = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_flight ) );
654 522 : int lg_max_writers = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_w_per_block ) );
655 522 : int lg_acct_in_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap ) );
656 522 : int lg_penalty_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap/PENALTY_TREAP_THRESHOLD ) );
657 522 : int lg_bundle_temp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*bundle_temp_accts ) );
658 :
659 522 : FD_SCRATCH_ALLOC_INIT( l, mem );
660 522 : fd_pack_t * pack = FD_SCRATCH_ALLOC_APPEND( l, FD_PACK_ALIGN, sizeof(fd_pack_t) );
661 : /* The pool has one extra element that is used between insert_init and
662 : cancel/fini. */
663 522 : void * _pool = FD_SCRATCH_ALLOC_APPEND( l, trp_pool_align(), trp_pool_footprint ( pack_depth+extra_depth ) );
664 522 : void * _penalty_map = FD_SCRATCH_ALLOC_APPEND( l, penalty_map_align(), penalty_map_footprint( lg_penalty_trp ) );
665 522 : void * _expq = FD_SCRATCH_ALLOC_APPEND( l, expq_align(), expq_footprint ( pack_depth ) );
666 522 : void * _uses = FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_uses_tbl_sz ) );
667 522 : void * _writer_cost = FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_max_writers ) );
668 522 : void * _written_lst = FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t*)*written_list_max );
669 522 : void * _sig_map = FD_SCRATCH_ALLOC_APPEND( l, sig2txn_align(), sig2txn_footprint ( sig_chain_cnt ) );
670 522 : void * _bundle_temp = FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint( lg_bundle_temp ) );
671 522 : void * _use_by_bank = FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t)*max_acct_in_flight );
672 522 : void * _use_by_txn = FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(ulong)*max_txn_in_flight );
673 522 : void * _acct_bitset = FD_SCRATCH_ALLOC_APPEND( l, bitset_map_align(), bitset_map_footprint( lg_acct_in_trp ) );
674 522 : void * bundle_meta = FD_SCRATCH_ALLOC_APPEND( l, 64UL, (pack_depth+extra_depth)*bundle_meta_sz );
675 :
676 0 : pack->pack_depth = pack_depth;
677 522 : pack->bundle_meta_sz = bundle_meta_sz;
678 522 : pack->bank_tile_cnt = bank_tile_cnt;
679 522 : pack->lim[0] = *limits;
680 522 : pack->pending_txn_cnt = 0UL;
681 522 : pack->microblock_cnt = 0UL;
682 522 : pack->data_bytes_consumed = 0UL;
683 522 : pack->rng = rng;
684 522 : pack->cumulative_block_cost = 0UL;
685 522 : pack->cumulative_vote_cost = 0UL;
686 522 : pack->expire_before = 0UL;
687 522 : pack->outstanding_microblock_mask = 0UL;
688 522 : pack->cumulative_rebated_cus = 0UL;
689 :
690 :
691 522 : trp_pool_new( _pool, pack_depth+extra_depth );
692 :
693 522 : fd_pack_ord_txn_t * pool = trp_pool_join( _pool );
694 522 : treap_seed( pool, pack_depth+extra_depth, fd_rng_ulong( rng ) );
695 2180148 : for( ulong i=0UL; i<pack_depth+extra_depth; i++ ) pool[i].root = FD_ORD_TXN_ROOT_FREE;
696 :
697 522 : (void)trp_pool_leave( pool );
698 :
699 522 : penalty_map_new( _penalty_map, lg_penalty_trp );
700 :
701 : /* These treaps can have at most pack_depth elements at any moment,
702 : but they come from a pool of size pack_depth+extra_depth. */
703 522 : treap_new( (void*)pack->pending, pack_depth+extra_depth );
704 522 : treap_new( (void*)pack->pending_votes, pack_depth+extra_depth );
705 522 : treap_new( (void*)pack->pending_bundles, pack_depth+extra_depth );
706 :
707 522 : pack->pending_smallest->cus = ULONG_MAX;
708 522 : pack->pending_smallest->bytes = ULONG_MAX;
709 522 : pack->pending_votes_smallest->cus = ULONG_MAX;
710 522 : pack->pending_votes_smallest->bytes = ULONG_MAX;
711 :
712 522 : expq_new( _expq, pack_depth );
713 :
714 522 : FD_PACK_BITSET_CLEAR( pack->bitset_rw_in_use );
715 522 : FD_PACK_BITSET_CLEAR( pack->bitset_w_in_use );
716 :
717 522 : acct_uses_new( _uses, lg_uses_tbl_sz );
718 522 : acct_uses_new( _writer_cost, lg_max_writers );
719 522 : acct_uses_new( _bundle_temp, lg_bundle_temp );
720 :
721 522 : pack->written_list = _written_lst;
722 522 : pack->written_list_cnt = 0UL;
723 522 : pack->written_list_max = written_list_max;
724 :
725 522 : sig2txn_new( _sig_map, sig_chain_cnt, fd_rng_ulong( rng ) );
726 :
727 522 : fd_pack_addr_use_t * use_by_bank = (fd_pack_addr_use_t *)_use_by_bank;
728 522 : ulong * use_by_bank_txn = (ulong *)_use_by_txn;
729 6771 : for( ulong i=0UL; i<bank_tile_cnt; i++ ) {
730 6249 : pack->use_by_bank [i] = use_by_bank + i*(FD_TXN_ACCT_ADDR_MAX*max_txn_per_mblk+1UL);
731 6249 : pack->use_by_bank_cnt[i] = 0UL;
732 6249 : pack->use_by_bank_txn[i] = use_by_bank_txn + i*max_txn_per_mblk;
733 6249 : pack->use_by_bank_txn[i][0] = 0UL;
734 6249 : }
735 26637 : for( ulong i=bank_tile_cnt; i<FD_PACK_MAX_BANK_TILES; i++ ) {
736 26115 : pack->use_by_bank [i] = NULL;
737 26115 : pack->use_by_bank_cnt[i] = 0UL;
738 26115 : pack->use_by_bank_txn[i] = NULL;
739 26115 : }
740 :
741 522 : fd_histf_new( pack->txn_per_microblock, FD_MHIST_MIN( PACK, TOTAL_TRANSACTIONS_PER_MICROBLOCK_COUNT ),
742 522 : FD_MHIST_MAX( PACK, TOTAL_TRANSACTIONS_PER_MICROBLOCK_COUNT ) );
743 522 : fd_histf_new( pack->vote_per_microblock, FD_MHIST_MIN( PACK, VOTES_PER_MICROBLOCK_COUNT ),
744 522 : FD_MHIST_MAX( PACK, VOTES_PER_MICROBLOCK_COUNT ) );
745 :
746 522 : fd_histf_new( pack->scheduled_cus_per_block, FD_MHIST_MIN( PACK, CUS_SCHEDULED ),
747 522 : FD_MHIST_MAX( PACK, CUS_SCHEDULED ) );
748 522 : fd_histf_new( pack->rebated_cus_per_block, FD_MHIST_MIN( PACK, CUS_REBATED ),
749 522 : FD_MHIST_MAX( PACK, CUS_REBATED ) );
750 522 : fd_histf_new( pack->net_cus_per_block, FD_MHIST_MIN( PACK, CUS_NET ),
751 522 : FD_MHIST_MAX( PACK, CUS_NET ) );
752 :
753 522 : pack->compressed_slot_number = (ushort)(FD_PACK_SKIP_CNT+1);
754 :
755 522 : pack->bitset_avail[ 0 ] = FD_PACK_BITSET_SLOWPATH;
756 178698 : for( ulong i=0UL; i<FD_PACK_BITSET_MAX; i++ ) pack->bitset_avail[ i+1UL ] = (ushort)i;
757 522 : pack->bitset_avail_cnt = FD_PACK_BITSET_MAX;
758 :
759 522 : bitset_map_new( _acct_bitset, lg_acct_in_trp );
760 :
761 522 : fd_chkdup_new( pack->chkdup, rng );
762 :
763 522 : pack->bundle_meta = bundle_meta;
764 :
765 522 : return mem;
766 522 : }
767 :
768 : fd_pack_t *
769 522 : fd_pack_join( void * mem ) {
770 522 : FD_SCRATCH_ALLOC_INIT( l, mem );
771 522 : fd_pack_t * pack = FD_SCRATCH_ALLOC_APPEND( l, FD_PACK_ALIGN, sizeof(fd_pack_t) );
772 :
773 0 : int enable_bundles = !!pack->bundle_meta_sz;
774 522 : ulong pack_depth = pack->pack_depth;
775 522 : ulong extra_depth = fd_ulong_if( enable_bundles, 1UL+2UL*FD_PACK_MAX_TXN_PER_BUNDLE, 1UL );
776 522 : ulong bank_tile_cnt = pack->bank_tile_cnt;
777 522 : ulong max_txn_per_microblock = fd_ulong_max( pack->lim->max_txn_per_microblock,
778 522 : fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE, 0UL ) );
779 :
780 522 : ulong max_acct_in_treap = pack_depth * FD_TXN_ACCT_ADDR_MAX;
781 522 : ulong max_acct_in_flight = bank_tile_cnt * (FD_TXN_ACCT_ADDR_MAX * max_txn_per_microblock + 1UL);
782 522 : ulong max_txn_in_flight = bank_tile_cnt * max_txn_per_microblock;
783 522 : ulong max_w_per_block = fd_ulong_min( pack->lim->max_cost_per_block / FD_PACK_COST_PER_WRITABLE_ACCT,
784 522 : max_txn_per_microblock * pack->lim->max_microblocks_per_block * FD_TXN_ACCT_ADDR_MAX );
785 522 : ulong written_list_max = fd_ulong_min( max_w_per_block>>1, DEFAULT_WRITTEN_LIST_MAX );
786 522 : ulong bundle_temp_accts = fd_ulong_if( enable_bundles, FD_PACK_MAX_TXN_PER_BUNDLE*FD_TXN_ACCT_ADDR_MAX, 1UL );
787 522 : ulong sig_chain_cnt = sig2txn_chain_cnt_est( pack_depth );
788 :
789 522 : int lg_uses_tbl_sz = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_flight ) );
790 522 : int lg_max_writers = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_w_per_block ) );
791 522 : int lg_acct_in_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap ) );
792 522 : int lg_penalty_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap/PENALTY_TREAP_THRESHOLD ) );
793 522 : int lg_bundle_temp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*bundle_temp_accts ) );
794 :
795 :
796 522 : pack->pool = trp_pool_join( FD_SCRATCH_ALLOC_APPEND( l, trp_pool_align(), trp_pool_footprint ( pack_depth+extra_depth ) ) );
797 522 : pack->penalty_treaps= penalty_map_join(FD_SCRATCH_ALLOC_APPEND( l, penalty_map_align(),penalty_map_footprint( lg_penalty_trp ) ) );
798 522 : pack->expiration_q = expq_join ( FD_SCRATCH_ALLOC_APPEND( l, expq_align(), expq_footprint ( pack_depth ) ) );
799 522 : pack->acct_in_use = acct_uses_join( FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint ( lg_uses_tbl_sz ) ) );
800 522 : pack->writer_costs = acct_uses_join( FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint ( lg_max_writers ) ) );
801 522 : /* */ FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t*)*written_list_max );
802 522 : pack->signature_map = sig2txn_join( FD_SCRATCH_ALLOC_APPEND( l, sig2txn_align(), sig2txn_footprint ( sig_chain_cnt ) ) );
803 522 : pack->bundle_temp_map=acct_uses_join( FD_SCRATCH_ALLOC_APPEND( l, acct_uses_align(), acct_uses_footprint ( lg_bundle_temp ) ) );
804 522 : /* */ FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(fd_pack_addr_use_t)*max_acct_in_flight );
805 522 : /* */ FD_SCRATCH_ALLOC_APPEND( l, 32UL, sizeof(ulong)*max_txn_in_flight );
806 522 : pack->acct_to_bitset= bitset_map_join( FD_SCRATCH_ALLOC_APPEND( l, bitset_map_align(), bitset_map_footprint( lg_acct_in_trp ) ) );
807 522 : /* */ FD_SCRATCH_ALLOC_APPEND( l, 64UL, (pack_depth+extra_depth)*pack->bundle_meta_sz );
808 :
809 522 : FD_MGAUGE_SET( PACK, PENDING_TRANSACTIONS_HEAP_SIZE, pack->pack_depth );
810 522 : return pack;
811 522 : }
812 :
813 :
814 : /* Returns 0 on failure, 1 on success for a vote, 2 on success for a
815 : non-vote. */
816 : static int
817 : fd_pack_estimate_rewards_and_compute( fd_txn_e_t * txne,
818 13581810 : fd_pack_ord_txn_t * out ) {
819 13581810 : fd_txn_t * txn = TXN(txne->txnp);
820 13581810 : ulong sig_rewards = FD_PACK_FEE_PER_SIGNATURE * txn->signature_cnt; /* Easily in [5000, 635000] */
821 :
822 13581810 : ulong requested_execution_cus;
823 13581810 : ulong priority_rewards;
824 13581810 : ulong precompile_sigs;
825 13581810 : ulong requested_loaded_accounts_data_cost;
826 13581810 : ulong cost_estimate = fd_pack_compute_cost( txn, txne->txnp->payload, &txne->txnp->flags, &requested_execution_cus, &priority_rewards, &precompile_sigs, &requested_loaded_accounts_data_cost );
827 :
828 13581810 : if( FD_UNLIKELY( !cost_estimate ) ) return 0;
829 :
830 : /* precompile_sigs <= 16320, so after the addition,
831 : sig_rewards < 83,000,000 */
832 13581807 : sig_rewards += FD_PACK_FEE_PER_SIGNATURE * precompile_sigs;
833 :
834 : /* No fancy CU estimation in this version of pack
835 : for( ulong i=0UL; i<(ulong)txn->instr_cnt; i++ ) {
836 : uchar prog_id_idx = txn->instr[ i ].program_id;
837 : fd_acct_addr_t const * acct_addr = fd_txn_get_acct_addrs( txn, txnp->payload ) + (ulong)prog_id_idx;
838 : }
839 : */
840 13581807 : out->rewards = (priority_rewards < (UINT_MAX - sig_rewards)) ? (uint)(sig_rewards + priority_rewards) : UINT_MAX;
841 13581807 : out->compute_est = (uint)cost_estimate;
842 13581807 : out->txn->pack_cu.requested_exec_plus_acct_data_cus = (uint)(requested_execution_cus + requested_loaded_accounts_data_cost);
843 13581807 : out->txn->pack_cu.non_execution_cus = (uint)(cost_estimate - requested_execution_cus - requested_loaded_accounts_data_cost);
844 :
845 13581807 : return fd_int_if( txne->txnp->flags & FD_TXN_P_FLAGS_IS_SIMPLE_VOTE, 1, 2 );
846 13581810 : }
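/* Illustrative example of the saturation above: with sig_rewards of
   5,000 lamports and priority_rewards of 10,000 lamports, rewards is
   stored exactly as 15,000; if priority_rewards were large enough that
   the sum reached UINT_MAX, rewards would instead saturate at
   UINT_MAX. */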
847 :
848 : /* Can the fee payer afford to pay a transaction with the specified
849 : price? Returns 1 if so, 0 otherwise. This is just a stub that
850 : always returns 1 for now, and the real check is deferred to the bank
851 : tile. In general, this function can't be totally accurate, because
852 : the transactions immediately prior to this one can affect the balance
853 : of this fee payer, but a simple check here may be helpful for
854 : reducing spam. */
855 : static int
856 : fd_pack_can_fee_payer_afford( fd_acct_addr_t const * acct_addr,
857 13581807 : ulong price /* in lamports */) {
858 13581807 : (void)acct_addr;
859 13581807 : (void)price;
860 13581807 : return 1;
861 13581807 : }
862 :
863 :
864 :
865 :
866 :
867 13704210 : fd_txn_e_t * fd_pack_insert_txn_init( fd_pack_t * pack ) { return trp_pool_ele_acquire( pack->pool )->txn_e; }
868 122400 : void fd_pack_insert_txn_cancel( fd_pack_t * pack, fd_txn_e_t * txn ) { trp_pool_ele_release( pack->pool, (fd_pack_ord_txn_t*)txn ); }
869 :
870 15 : #define REJECT( reason ) do { \
871 15 : trp_pool_ele_release( pack->pool, ord ); \
872 15 : return FD_PACK_INSERT_REJECT_ ## reason; \
873 15 : } while( 0 )
874 :
875 : /* These require txn, accts, and alt_adj to be defined as per usual */
876 329301 : #define ACCT_IDX_TO_PTR( idx ) (__extension__( { \
877 329301 : ulong __idx = (idx); \
878 329301 : fd_ptr_if( __idx<fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM ), accts, alt_adj )+__idx; \
879 329301 : }))
880 71258429 : #define ACCT_ITER_TO_PTR( iter ) (__extension__( { \
881 71258429 : ulong __idx = fd_txn_acct_iter_idx( iter ); \
882 71258429 : fd_ptr_if( __idx<fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM ), accts, alt_adj )+__idx; \
883 71258429 : }))
884 :
885 :
886 : /* Tries to find the worst transaction in any treap in pack. If that
887 : transaction's score is worse than or equal to threshold_score, it
888 : deletes it and returns 1. If it's higher than threshold_score, it
889 : returns 0. To force this function to delete the worst transaction if
890 : there are any eligible ones, pass FLT_MAX as threshold_score. */
891 : static inline int
892 : delete_worst( fd_pack_t * pack,
893 : float threshold_score,
894 494592 : int is_vote ) {
895 : /* If the tree is full, we want to see if this is better than the
896 : worst element in the pool before inserting. If the new transaction
897 : is better than that one, we'll delete it and insert the new
898 : transaction. Otherwise, we'll throw away this transaction.
899 :
900 : We want to bias the definition of "worst" here to provide better
901 : quality of service. For example, if the pool is filled with
902 : transactions that all write to the same account or are all votes,
903 : we want to bias towards treating one of those transactions as the
904 : worst, even if they pay slightly higher fees per compute unit,
905 : since we know we won't actually be able to schedule them all.
906 :
907 : This is a tricky task, however. All our notions of priority and
908 : better/worse are based on static information about the transaction,
909 : and there's not an easy way to take into account global
910 : information, for example, how many other transactions contend with
911 : this one. One idea is to build a heap (not a treap, since we only
912 : need pop-min, insert, and delete) with one element for each element
913 : in the pool, with a "delete me" score that's related but not
914 : identical to the normal score. This would allow building in some
915 : global information. The downside is that the global information
916 : that gets integrated is static. E.g. if you bias a transaction's
917 : "delete me" score to make it more likely to be deleted because
918 : there are many conflicting transactions in the pool, the score
919 : stays biased, even if the global conditions change (unless you come
920 : up with some complicated re-scoring scheme). This can work, since
921 : when the pool is full, the global bias factors are unlikely to
922 : change significantly at the relevant timescales.
923 :
924 : However, rather than this, we implement a simpler probabilistic
925 : scheme. We'll sample M transactions, find the worst transaction in
926 : each of the M treaps, compute a "delete me" score for those <= M
927 : transactions, and delete the worst. If one penalty treap is
928 : starting to get big, then it becomes very likely that the random
929 : sample will find it and choose to delete a transaction from it.
930 :
931 : The exact formula for the "delete me" score should be the matter of
932 : some more intense quantitative research. For now, we'll just use
933 : this:
934 :
935 : Treap with N transactions Scale Factor
936 : Pending 1.0 unless inserting a vote and votes < 25%
937 : Pending votes 1.0 until 75% of depth, then 0
938 : Penalty treap 1.0 at <= 100 transactions, then sqrt(100/N)
939 : Pending bundles inf (since the rewards value is fudged)
940 :
941 : We'll also use M=8. */
942 :
943 494592 : float worst_score = FLT_MAX;
944 494592 : fd_pack_ord_txn_t * worst = NULL;
945 4451328 : for( ulong i=0UL; i<8UL; i++ ) {
946 3956736 : uint pool_max = (uint)trp_pool_max( pack->pool );
947 3956736 : ulong sample_i = fd_rng_uint_roll( pack->rng, pool_max );
948 :
949 3956736 : fd_pack_ord_txn_t * sample = &pack->pool[ sample_i ];
950 : /* Presumably if we're calling this, the pool is almost entirely
951 : full, so the probability of choosing a free one is small. If
952 : it does happen, find the first one that isn't free. */
953 3957731 : while( FD_UNLIKELY( sample->root==FD_ORD_TXN_ROOT_FREE ) ) sample = &pack->pool[ (++sample_i)%pool_max ];
954 :
955 3956736 : int root_idx = sample->root;
956 3956736 : float multiplier = 0.0f; /* The smaller this is, the more biased we'll be to deleting it */
957 3956736 : treap_t * treap;
958 3956736 : switch( root_idx & FD_ORD_TXN_ROOT_TAG_MASK ) {
959 0 : default:
960 0 : case FD_ORD_TXN_ROOT_FREE: {
961 0 : FD_TEST( 0 );
962 0 : return -1; /* Can't be hit */
963 0 : }
964 3935077 : case FD_ORD_TXN_ROOT_PENDING: {
965 3935077 : treap = pack->pending;
966 3935077 : ulong vote_cnt = treap_ele_cnt( pack->pending_votes );
967 3935077 : if( FD_LIKELY( !is_vote || (vote_cnt>=pack->pack_depth/4UL ) ) ) multiplier = 1.0f;
968 3935077 : break;
969 0 : }
970 0 : case FD_ORD_TXN_ROOT_PENDING_VOTE: {
971 0 : treap = pack->pending_votes;
972 0 : ulong vote_cnt = treap_ele_cnt( pack->pending_votes );
973 0 : if( FD_LIKELY( is_vote || (vote_cnt<=3UL*pack->pack_depth/4UL ) ) ) multiplier = 1.0f;
974 0 : break;
975 0 : }
976 0 : case FD_ORD_TXN_ROOT_PENDING_BUNDLE: {
977 : /* We don't have a way to tell how much these actually pay in
978 : rewards, so we just assume they are very high. */
979 0 : treap = pack->pending_bundles;
980 : /* We cap rewards at UINT_MAX lamports for estimation, and min
981 : CUs is about 1000, which means rewards/compute < 5e6.
982 : FLT_MAX is around 3e38. That means, 1e20*rewards/compute is
983 : much less than FLT_MAX, so we won't have any issues with
984 : overflow. On the other hand, if rewards==1 lamport and
985 : compute is 2 million CUs, 1e20*1/2e6 is still higher than any
986 : normal transaction. */
987 0 : multiplier = 1e20f;
988 0 : break;
989 0 : }
990 21659 : case FD_ORD_TXN_ROOT_PENALTY( 0 ): {
991 21659 : fd_txn_t * txn = TXN( sample->txn );
992 21659 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, sample->txn->payload );
993 21659 : fd_acct_addr_t const * alt_adj = sample->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
994 21659 : fd_acct_addr_t penalty_acct = *ACCT_IDX_TO_PTR( FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( root_idx ) );
995 21659 : fd_pack_penalty_treap_t * q = penalty_map_query( pack->penalty_treaps, penalty_acct, NULL );
996 21659 : FD_TEST( q );
997 21659 : ulong cnt = treap_ele_cnt( q->penalty_treap );
998 21659 : treap = q->penalty_treap;
999 :
1000 21659 : multiplier = sqrtf( 100.0f / (float)fd_ulong_max( 100UL, cnt ) );
1001 21659 : break;
1002 21659 : }
1003 3956736 : }
1004 : /* Get the worst from the sampled treap */
1005 3956736 : treap_fwd_iter_t _cur=treap_fwd_iter_init( treap, pack->pool );
1006 3956736 : FD_TEST( !treap_fwd_iter_done( _cur ) ); /* It can't be empty because we just sampled an element from it. */
1007 3956736 : sample = treap_fwd_iter_ele( _cur, pack->pool );
1008 :
1009 3956736 : float score = multiplier * (float)sample->rewards / (float)sample->compute_est;
1010 3956736 : worst = fd_ptr_if( score<worst_score, sample, worst );
1011 3956736 : worst_score = fd_float_if( worst_score<score, worst_score, score );
1012 3956736 : }
1013 :
1014 494592 : if( FD_UNLIKELY( !worst ) ) return 0;
1015 494592 : if( FD_UNLIKELY( threshold_score<worst_score ) ) return 0;
1016 :
1017 494592 : delete_transaction( pack, worst, 1, 1 );
1018 494592 : return 1;
1019 494592 : }
1020 :
1021 : static inline int
1022 : validate_transaction( fd_pack_t * pack,
1023 : fd_pack_ord_txn_t const * ord,
1024 : fd_txn_t const * txn,
1025 : fd_acct_addr_t const * accts,
1026 : fd_acct_addr_t const * alt_adj,
1027 13581807 : int check_bundle_blacklist ) {
1028 13581807 : int writes_to_sysvar = 0;
1029 13581807 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
1030 28344054 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1031 14762247 : writes_to_sysvar |= fd_pack_unwritable_contains( ACCT_ITER_TO_PTR( iter ) );
1032 14762247 : }
1033 :
1034 13581807 : int bundle_blacklist = 0;
1035 13581807 : if( FD_UNLIKELY( check_bundle_blacklist ) ) {
1036 0 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_ALL );
1037 0 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1038 0 : bundle_blacklist |= (3==fd_pack_tip_prog_check_blacklist( ACCT_ITER_TO_PTR( iter ) ));
1039 0 : }
1040 0 : }
1041 :
1042 13581807 : fd_acct_addr_t const * alt = ord->txn_e->alt_accts;
1043 13581807 : fd_chkdup_t * chkdup = pack->chkdup;
1044 13581807 : ulong imm_cnt = fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1045 13581807 : ulong alt_cnt = fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_ALT );
1046 :
1047 : /* Throw out transactions ... */
1048 : /* ... that are unfunded */
1049 13581807 : if( FD_UNLIKELY( !fd_pack_can_fee_payer_afford( accts, ord->rewards ) ) ) return FD_PACK_INSERT_REJECT_UNAFFORDABLE;
1050 : /* ... that are so big they'll never run */
1051 13581807 : if( FD_UNLIKELY( ord->compute_est >= pack->lim->max_cost_per_block ) ) return FD_PACK_INSERT_REJECT_TOO_LARGE;
1052 : /* ... that load too many accounts (ignoring 9LZdXeKGeBV6hRLdxS1rHbHoEUsKqesCC2ZAPTPKJAbK) */
1053 13581807 : if( FD_UNLIKELY( fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_ALL )>64UL ) ) return FD_PACK_INSERT_REJECT_ACCOUNT_CNT;
1054 : /* ... that duplicate an account address */
1055 13581804 : if( FD_UNLIKELY( fd_chkdup_check( chkdup, accts, imm_cnt, alt, alt_cnt ) ) ) return FD_PACK_INSERT_REJECT_DUPLICATE_ACCT;
1056 : /* ... that try to write to a sysvar */
1057 13581801 : if( FD_UNLIKELY( writes_to_sysvar ) ) return FD_PACK_INSERT_REJECT_WRITES_SYSVAR;
1058 : /* ... that use an account that violates bundle rules */
1059 13581708 : if( FD_UNLIKELY( bundle_blacklist & 1 ) ) return FD_PACK_INSERT_REJECT_BUNDLE_BLACKLIST;
1060 :
1061 13581708 : return 0;
1062 13581708 : }
1063 :
1064 :
1065 :
1066 : /* returns cumulative penalty "points", i.e. the sum of the populated
1067 : section of penalties (which also tells the caller how much of the
1068 : array is populated). */
1069 : static inline ulong
1070 : populate_bitsets( fd_pack_t * pack,
1071 : fd_pack_ord_txn_t * ord,
1072 : ushort penalties [ static FD_TXN_ACCT_ADDR_MAX ],
1073 13581696 : uchar penalty_idx[ static FD_TXN_ACCT_ADDR_MAX ] ) {
1074 13581696 : FD_PACK_BITSET_CLEAR( ord->rw_bitset );
1075 13581696 : FD_PACK_BITSET_CLEAR( ord->w_bitset );
1076 :
1077 13581696 : fd_txn_t * txn = TXN(ord->txn);
1078 13581696 : uchar * payload = ord->txn->payload;
1079 :
1080 13581696 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, payload );
1081 : /* alt_adj is the pointer to the ALT expansion, adjusted so that if
1082 : account address n is the first that comes from the ALT, it can be
1083 : accessed with alt_adj[n]. */
1084 13581696 : fd_acct_addr_t const * alt_adj = ord->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1085 :
1086 13581696 : ulong cumulative_penalty = 0UL;
1087 13581696 : ulong penalty_i = 0UL;
1088 :
1089 13581696 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
1090 28343655 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1091 14761959 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
1092 14761959 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( pack->acct_to_bitset, acct, NULL );
1093 14761959 : if( FD_UNLIKELY( q==NULL ) ) {
1094 13278973 : q = bitset_map_insert( pack->acct_to_bitset, acct );
1095 13278973 : q->ref_cnt = 0UL;
1096 13278973 : q->first_instance = ord;
1097 13278973 : q->first_instance_was_write = 1;
1098 13278973 : q->bit = FD_PACK_BITSET_FIRST_INSTANCE;
1099 13278973 : } else if( FD_UNLIKELY( q->bit == FD_PACK_BITSET_FIRST_INSTANCE ) ) {
1100 6695 : q->bit = pack->bitset_avail[ pack->bitset_avail_cnt ];
1101 6695 : pack->bitset_avail_cnt = fd_ulong_if( !!pack->bitset_avail_cnt, pack->bitset_avail_cnt-1UL, 0UL );
1102 :
1103 6695 : FD_PACK_BITSET_SETN( q->first_instance->rw_bitset, q->bit );
1104 6695 : if( q->first_instance_was_write ) FD_PACK_BITSET_SETN( q->first_instance->w_bitset, q->bit );
1105 6695 : }
1106 14761959 : ulong penalty = fd_ulong_max( q->ref_cnt, PENALTY_TREAP_THRESHOLD )-PENALTY_TREAP_THRESHOLD;
1107 14761959 : if( FD_UNLIKELY( penalty ) ) {
1108 1212867 : penalties [ penalty_i ] = (ushort)penalty;
1109 1212867 : penalty_idx[ penalty_i ] = (uchar )fd_txn_acct_iter_idx( iter );
1110 1212867 : penalty_i++;
1111 1212867 : cumulative_penalty += penalty;
1112 1212867 : }
1113 :
1114 14761959 : q->ref_cnt++;
1115 14761959 : FD_PACK_BITSET_SETN( ord->rw_bitset, q->bit );
1116 14761959 : FD_PACK_BITSET_SETN( ord->w_bitset , q->bit );
1117 14761959 : }
1118 :
1119 13581696 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_READONLY );
1120 18155118 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1121 :
1122 4573422 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
1123 4573422 : if( FD_UNLIKELY( fd_pack_unwritable_contains( &acct ) ) ) continue;
1124 :
1125 3081654 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( pack->acct_to_bitset, acct, NULL );
1126 3081654 : if( FD_UNLIKELY( q==NULL ) ) {
1127 34920 : q = bitset_map_insert( pack->acct_to_bitset, acct );
1128 34920 : q->ref_cnt = 0UL;
1129 34920 : q->first_instance = ord;
1130 34920 : q->first_instance_was_write = 0;
1131 34920 : q->bit = FD_PACK_BITSET_FIRST_INSTANCE;
1132 3046734 : } else if( FD_UNLIKELY( q->bit == FD_PACK_BITSET_FIRST_INSTANCE ) ) {
1133 11499 : q->bit = pack->bitset_avail[ pack->bitset_avail_cnt ];
1134 11499 : pack->bitset_avail_cnt = fd_ulong_if( !!pack->bitset_avail_cnt, pack->bitset_avail_cnt-1UL, 0UL );
1135 :
1136 11499 : FD_PACK_BITSET_SETN( q->first_instance->rw_bitset, q->bit );
1137 11499 : if( q->first_instance_was_write ) FD_PACK_BITSET_SETN( q->first_instance->w_bitset, q->bit );
1138 11499 : }
1139 :
1140 3081654 : q->ref_cnt++;
1141 3081654 : FD_PACK_BITSET_SETN( ord->rw_bitset, q->bit );
1142 3081654 : }
1143 13581696 : return cumulative_penalty;
1144 13581696 : }
1145 :
1146 : int
1147 : fd_pack_insert_txn_fini( fd_pack_t * pack,
1148 : fd_txn_e_t * txne,
1149 13581810 : ulong expires_at ) {
1150 :
1151 13581810 : fd_pack_ord_txn_t * ord = (fd_pack_ord_txn_t *)txne;
1152 :
1153 13581810 : fd_txn_t * txn = TXN(txne->txnp);
1154 13581810 : uchar * payload = txne->txnp->payload;
1155 :
1156 13581810 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, payload );
1157 : /* alt_adj is the pointer to the ALT expansion, adjusted so that if
1158 : account address n is the first that comes from the ALT, it can be
1159 :    accessed with alt_adj[n]. */
1160 13581810 : fd_acct_addr_t const * alt_adj = ord->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1161 :
1162 13581810 : int est_result = fd_pack_estimate_rewards_and_compute( txne, ord );
1163 13581810 : if( FD_UNLIKELY( !est_result ) ) REJECT( ESTIMATION_FAIL );
1164 :
1165 13581807 : ord->expires_at = expires_at;
1166 13581807 : int is_vote = est_result==1;
1167 :
1168 13581807 : int validation_result = validate_transaction( pack, ord, txn, accts, alt_adj, !!pack->bundle_meta_sz );
1169 13581807 : if( FD_UNLIKELY( validation_result ) ) {
1170 99 : trp_pool_ele_release( pack->pool, ord );
1171 99 : return validation_result;
1172 99 : }
1173 :
1174 : /* Reject any transactions that have already expired */
1175 13581708 : if( FD_UNLIKELY( expires_at<pack->expire_before ) ) REJECT( EXPIRED );
1176 :
1177 13581696 : int replaces = 0;
1178 13581696 : if( FD_UNLIKELY( pack->pending_txn_cnt == pack->pack_depth ) ) {
1179 494592 : float threshold_score = (float)ord->rewards/(float)ord->compute_est;
1180 494592 : if( FD_UNLIKELY( !delete_worst( pack, threshold_score, is_vote ) ) ) REJECT( PRIORITY );
1181 494592 : replaces = 1;
1182 494592 : }
1183 :
1184 13581696 : ord->txn->flags &= ~(FD_TXN_P_FLAGS_BUNDLE | FD_TXN_P_FLAGS_INITIALIZER_BUNDLE);
1185 13581696 : ord->skip = FD_PACK_SKIP_CNT;
1186 :
1187 : /* At this point, we know we have space to insert the transaction and
1188 : we've committed to insert it. */
1189 :
1190 : /* Since the pool uses ushorts, the size of the pool is < USHORT_MAX.
1191 : Each transaction can reference an account at most once, which means
1192 : that the total number of references for an account is < USHORT_MAX.
1193 : If these were ulongs, the array would be 512B, which is kind of a
1194 : lot to zero out.*/
1195 13581696 : ushort penalties[ FD_TXN_ACCT_ADDR_MAX ] = {0};
1196 13581696 : uchar penalty_idx[ FD_TXN_ACCT_ADDR_MAX ];
1197 13581696 : ulong cumulative_penalty = populate_bitsets( pack, ord, penalties, penalty_idx );
1198 :
1199 13581696 : treap_t * insert_into = pack->pending;
1200 :
1201 13581696 : if( FD_UNLIKELY( cumulative_penalty && !is_vote ) ) { /* Optimize for high parallelism case */
1202 : /* Compute a weighted random choice */
1203 304959 : ulong roll = (ulong)fd_rng_uint_roll( pack->rng, (uint)cumulative_penalty ); /* cumulative_penalty < USHORT_MAX*64 < UINT_MAX */
1204 304959 : ulong i = 0UL;
1205 : /* Find the right one. This can be done in O(log N), but I imagine
1206 :        N is normally so small that it doesn't matter. */
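 :     /* Illustrative note (editor's addition, not in the original
 :        source): with penalties {3, 1, 4} and cumulative_penalty 8, a
 :        roll in [0,3) selects index 0, a roll of 3 selects index 1, and
 :        a roll in [4,8) selects index 2, so each penalized account is
 :        chosen with probability proportional to its penalty. */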
1207 758372 : while( roll>=penalties[i] ) roll -= (ulong)penalties[i++];
1208 :
1209 304959 : fd_acct_addr_t penalty_acct = *ACCT_IDX_TO_PTR( penalty_idx[i] );
1210 304959 : fd_pack_penalty_treap_t * q = penalty_map_query( pack->penalty_treaps, penalty_acct, NULL );
1211 304959 : if( FD_UNLIKELY( q==NULL ) ) {
1212 2901 : q = penalty_map_insert( pack->penalty_treaps, penalty_acct );
1213 2901 : treap_new( q->penalty_treap, pack->pack_depth );
1214 2901 : }
1215 304959 : insert_into = q->penalty_treap;
1216 304959 : ord->root = FD_ORD_TXN_ROOT_PENALTY( penalty_idx[i] );
1217 13276737 : } else {
1218 13276737 : ord->root = fd_int_if( is_vote, FD_ORD_TXN_ROOT_PENDING_VOTE, FD_ORD_TXN_ROOT_PENDING );
1219 :
1220 13276737 : fd_pack_smallest_t * smallest = fd_ptr_if( is_vote, &pack->pending_votes_smallest[0], pack->pending_smallest );
1221 13276737 : smallest->cus = fd_ulong_min( smallest->cus, ord->compute_est );
1222 13276737 : smallest->bytes = fd_ulong_min( smallest->bytes, txne->txnp->payload_sz );
1223 13276737 : }
1224 :
1225 13581696 : pack->pending_txn_cnt++;
1226 :
1227 13581696 : sig2txn_ele_insert( pack->signature_map, ord, pack->pool );
1228 :
1229 13581696 : fd_pack_expq_t temp[ 1 ] = {{ .expires_at = expires_at, .txn = ord }};
1230 13581696 : expq_insert( pack->expiration_q, temp );
1231 :
1232 13581696 : if( FD_LIKELY( is_vote ) ) {
1233 37596 : treap_ele_insert( pack->pending_votes, ord, pack->pool );
1234 37596 : return replaces ? FD_PACK_INSERT_ACCEPT_VOTE_REPLACE : FD_PACK_INSERT_ACCEPT_VOTE_ADD;
1235 13544100 : } else {
1236 13544100 : treap_ele_insert( insert_into, ord, pack->pool );
1237 13544100 : return replaces ? FD_PACK_INSERT_ACCEPT_NONVOTE_REPLACE : FD_PACK_INSERT_ACCEPT_NONVOTE_ADD;
1238 13544100 : }
1239 13581696 : }
1240 : #undef REJECT
1241 :
1242 : fd_txn_e_t * const *
1243 : fd_pack_insert_bundle_init( fd_pack_t * pack,
1244 : fd_txn_e_t * * bundle,
1245 0 : ulong txn_cnt ) {
1246 0 : FD_TEST( txn_cnt<=FD_PACK_MAX_TXN_PER_BUNDLE );
1247 0 : FD_TEST( trp_pool_free( pack->pool )>=txn_cnt );
1248 0 : for( ulong i=0UL; i<txn_cnt; i++ ) bundle[ i ] = trp_pool_ele_acquire( pack->pool )->txn_e;
1249 0 : return bundle;
1250 0 : }
1251 :
1252 : void
1253 : fd_pack_insert_bundle_cancel( fd_pack_t * pack,
1254 : fd_txn_e_t * const * bundle,
1255 0 : ulong txn_cnt ) {
1256 : /* There's no real reason these have to be released in reverse, but it
1257 : seems fitting to release them in the opposite order they were
1258 : acquired. */
1259 0 : for( ulong i=0UL; i<txn_cnt; i++ ) trp_pool_ele_release( pack->pool, (fd_pack_ord_txn_t*)bundle[ txn_cnt-1UL-i ] );
1260 0 : }
1261 :
1262 : /* Explained below */
1263 : #define BUNDLE_L_PRIME 37896771UL
1264 : #define BUNDLE_N 312671UL
1265 0 : #define RC_TO_REL_BUNDLE_IDX( r, c ) (BUNDLE_N - ((ulong)(r) * 1UL<<32)/((ulong)(c) * BUNDLE_L_PRIME))
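 : /* Editor's note on the macro above: insert_bundle_impl (below) assigns
 :    rewards so that every transaction of the kth pending bundle obeys
 :    c*BUNDLE_L_PRIME*(BUNDLE_N-k) <= r<<32 < c*BUNDLE_L_PRIME*(BUNDLE_N+1-k),
 :    so the integer division recovers BUNDLE_N-k exactly, and subtracting
 :    it from BUNDLE_N gives back the relative bundle index k.  The full
 :    derivation is in the long comment in fd_pack_insert_bundle_fini. */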
1266 :
1267 : int
1268 : fd_pack_insert_bundle_fini( fd_pack_t * pack,
1269 : fd_txn_e_t * const * bundle,
1270 : ulong txn_cnt,
1271 : ulong expires_at,
1272 : int initializer_bundle,
1273 0 : void const * bundle_meta ) {
1274 :
1275 0 : int err = 0;
1276 :
1277 0 : ulong pending_b_txn_cnt = treap_ele_cnt( pack->pending_bundles );
1278 : /* We want to prevent bundles from consuming the whole treap, but in
1279 : general, we assume bundles are lucrative. We'll set the policy
1280 :      general, we assume bundles are lucrative. Our policy is to cap
1281 :      bundles at half of the pack depth. We assume that the
1282 : sense to drop an earlier bundle for this one. That means that
1283 : really, the best thing to do is drop this one. */
1284 0 : if( FD_UNLIKELY( (!initializer_bundle)&(pending_b_txn_cnt+txn_cnt>pack->pack_depth/2UL) ) ) err = FD_PACK_INSERT_REJECT_PRIORITY;
1285 :
1286 0 : if( FD_UNLIKELY( expires_at<pack->expire_before ) ) err = FD_PACK_INSERT_REJECT_EXPIRED;
1287 :
1288 :
1289 0 : for( ulong i=0UL; (i<txn_cnt) && !err; i++ ) {
1290 0 : fd_pack_ord_txn_t * ord = (fd_pack_ord_txn_t *)bundle[ i ];
1291 :
1292 0 : fd_txn_t const * txn = TXN(bundle[ i ]->txnp);
1293 0 : uchar const * payload = bundle[ i ]->txnp->payload;
1294 :
1295 0 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, payload );
1296 0 : fd_acct_addr_t const * alt_adj = ord->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1297 :
1298 0 : int est_result = fd_pack_estimate_rewards_and_compute( bundle[ i ], ord );
1299 0 : if( FD_UNLIKELY( !est_result ) ) { err = FD_PACK_INSERT_REJECT_ESTIMATION_FAIL; break; }
1300 :
1301 0 : bundle[ i ]->txnp->flags |= FD_TXN_P_FLAGS_BUNDLE;
1302 0 : bundle[ i ]->txnp->flags &= ~FD_TXN_P_FLAGS_INITIALIZER_BUNDLE;
1303 0 : bundle[ i ]->txnp->flags |= fd_uint_if( initializer_bundle, FD_TXN_P_FLAGS_INITIALIZER_BUNDLE, 0 );
1304 0 : ord->expires_at = expires_at;
1305 :
1306 0 : int validation_result = validate_transaction( pack, ord, txn, accts, alt_adj, !initializer_bundle );
1307 0 : if( FD_UNLIKELY( validation_result ) ) { err = validation_result; break; }
1308 0 : }
1309 :
1310 0 : if( FD_UNLIKELY( err ) ) {
1311 0 : fd_pack_insert_bundle_cancel( pack, bundle, txn_cnt );
1312 0 : return err;
1313 0 : }
1314 :
1315 0 : if( FD_UNLIKELY( initializer_bundle && pending_b_txn_cnt>0UL ) ) {
1316 0 : treap_rev_iter_t _cur=treap_rev_iter_init( pack->pending_bundles, pack->pool );
1317 0 : FD_TEST( !treap_rev_iter_done( _cur ) );
1318 0 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pack->pool );
1319 0 : int is_ib = !!(cur->txn->flags & FD_TXN_P_FLAGS_INITIALIZER_BUNDLE);
1320 :
1321 : /* Delete the previous IB if there is one */
1322 0 : if( FD_UNLIKELY( is_ib && 0UL==RC_TO_REL_BUNDLE_IDX( cur->rewards, cur->compute_est ) ) ) delete_transaction( pack, cur, 1, 0 );
1323 0 : }
1324 :
1325 0 : int replaces = 0;
1326 0 : while( FD_UNLIKELY( pack->pending_txn_cnt+txn_cnt > pack->pack_depth ) ) {
1327 0 : if( FD_UNLIKELY( !delete_worst( pack, FLT_MAX, 0 ) ) ) {
1328 0 : fd_pack_insert_bundle_cancel( pack, bundle, txn_cnt );
1329 0 : return FD_PACK_INSERT_REJECT_PRIORITY;
1330 0 : }
1331 0 : replaces = 1;
1332 0 : }
1333 :
1334 0 : if( FD_UNLIKELY( !pending_b_txn_cnt ) ) {
1335 0 : pack->relative_bundle_idx = 1UL;
1336 0 : }
1337 :
1338 0 : if( FD_LIKELY( bundle_meta ) ) {
1339 0 : memcpy( (uchar *)pack->bundle_meta + (ulong)((fd_pack_ord_txn_t *)bundle[0]-pack->pool)*pack->bundle_meta_sz, bundle_meta, pack->bundle_meta_sz );
1340 0 : }
1341 :
1342 : /* We put bundles in a treap just like all the other transactions, but
1343 : we actually want to sort them in a very specific order; the order
1344 : within the bundle is determined at bundle creation time, and the
1345 : order among the bundles is FIFO. However, it's going to be a pain
1346 : to use a different sorting function for this treap, since it's
1347 : fixed as part of the treap creation for performance. Don't fear
1348 : though; we can pull a cool math trick out of the bag to shoehorn
1349 : the order we'd like into the sort function we need, and to get even
1350 : more.
1351 :
1352 : Recall that the sort function is r_i/c_i, smallest to largest,
1353 : where r_i is the rewards and c_i is the cost units. r_i and c_i
1354 : are both uints, and the comparison is done by cross-multiplication
1355 : as ulongs. We actually use the c_i value for testing if
1356 : transactions fit, etc. so let's assume that's fixed, and we know
1357 : it's in the range [1020, 1,556,782].
1358 :
1359 : This means, if c_0, c_1, ... c_4 are the CU costs of the
1360 : transactions in the first bundle, we require r_0/c_0 > r_1/c_1 >
1361 : ... > r_4/c_4. Then, if c_5, ... c_9 are the CU costs of the
1362 : transactions in the second bundle, we also require that r_4/c_4 >
1363 : r_5/c_5. For convenience, we'll impose a slightly stronger
1364 : constraint: we want the kth bundle to obey L*(N-k) <= r_i/c_i <
1365 : L*(N+1-k), for fixed constants L and N, real and integer,
1366 : respectively, that we'll determine. For example, this means r_4/c_4
1367 : >= L*N > r_5/c_5. This enables us to group the transactions in the
1368 : same bundle more easily.
1369 :
1370 : For convenience in the math below, we'll set j=N-k and relabel the
1371 : transactions from the jth bundle c_0, ... c_4.
1372 : From above, we know that Lj <= r_4/c_4. We'd like to make it as
1373 :      close as possible given that r_4 is an integer. Thus, put
1374 : r_4 = ceil( c_4 * Lj ). r_4 is clearly an integer, and it satisfies
1375 : the required inequality because:
1376 : r_4/c_4 = ceil( c_4 * Lj)/c_4 >= c_4*Lj / c_4 >= Lj.
1377 :
1378 : Following in the same spirit, put r_3 = ceil( c_3 * (r_4+1)/c_4 ).
1379 : Again, r_3 is clearly an integer, and
1380 : r_3/c_3 = ceil(c_3*(r_4+1)/c_4)/c_3
1381 : >= (c_3*(r_4+1))/(c_3 * c_4)
1382 : >= r_4/c_4 + 1/c_4
1383 : > r_4/c_4.
1384 : Following the pattern, we put
1385 : r_2 = ceil( c_2 * (r_3+1)/c_3 )
1386 : r_1 = ceil( c_1 * (r_2+1)/c_2 )
1387 : r_0 = ceil( c_0 * (r_1+1)/c_1 )
1388 :      which work for the same reason as r_3 does.
1389 :
1390 : We now need for r_0 to satisfy the final inequality with L, and
1391 : we'll use this to guide our choice of L. Theoretically, r_0 can be
1392 : expressed in terms of L, j, and c_0, ... c_4, but that's a truly
1393 :      inscrutable expression. Instead, we need some bounds so we can get
1394 : rid of all the ceil using the property that x <= ceil(x) < x+1.
1395 : c_4 * Lj <= r_4 < c_4 * Lj + 1
1396 : The lower bound on r_3 is easy:
1397 : r_3 >= c_3 * (c_4 * Lj + 1)/c_4 = c_3 * Lj + c_3/c_4
1398 : For the upper bound,
1399 : r_3 < 1 + c_3*(r_4+1)/c_4 < 1 + c_3*(c_4*Lj+1 + 1)/c_4
1400 : = 1 + c_3 * Lj + 2*c_3/c_4
1401 : Continuing similarly gives
1402 : c_2*Lj + c_2/c_3 + c_2/c_4 <= r_2
1403 :        c_1*Lj + c_1/c_2 + c_1/c_3 + c_1/c_4 <= r_1
1404 : c_0*Lj + c_0/c_1 + c_0/c_2 + c_0/c_3 + c_0/c_4 <= r_0
1405 : and
1406 : r_2 < 1 + c_2*Lj + 2c_2/c_3 + 2c_2/c_4
1407 : r_1 < 1 + c_1*Lj + 2c_1/c_2 + 2c_1/c_3 + 2c_1/c_4
1408 : r_0 < 1 + c_0*Lj + 2c_0/c_1 + 2c_0/c_2 + 2c_0/c_3 + 2c_0/c_4.
1409 :
1410 : Setting L(j+1)>=(1 + c_0*Lj+2c_0/c_1+2c_0/c_2+2c_0/c_3+2c_0/c_4)/c_0
1411 : is then sufficient to ensure the whole sequence of 5 fits between Lj
1412 : and L(j+1). Simplifying gives
1413 :        L >= 1/c_0 + 2/c_1 + 2/c_2 + 2/c_3 + 2/c_4
1414 : but L must be a constant and not depend on individual values of c_i,
1415 : so, given that c_i >= 1020, we set L = 9/1020.
1416 :
1417 : Now all that remains is to determine N. It's a bit unfortunate
1418 : that we require N, since it limits our capacity, but it's necessary
1419 : in any system that tries to compute priorities to enforce a FIFO
1420 : order. If we've inserted more than N bundles without ever having
1421 : the bundle treap go empty, we'll briefly break the FIFO ordering as
1422 : we underflow.
1423 :
1424 : Thus, we'd like to make N as big as possible, avoiding overflow.
1425 : r_0, ..., r_4 are all uints, and taking the bounds from above,
1426 :      given that for any i, i', c_i/c_{i'} < 1527, we have
1427 : r_i < 1 + 1556782 * Lj + 8*1527.
1428 : To avoid overflow, we assert the right-hand side is < 2^32, which
1429 : implies N <= 312671.
1430 :
1431 : We want to use a fixed point representation for L so that the
1432 : entire computation can be done with integer arithmetic. We can do
1433 : the arithmetic as ulongs, which means defining L' >= L * 2^s, and
1434 : we compute ceil( c_4*Lj ) as floor( (c_4 * L' * j + 2^s - 1)/2^s ),
1435 : so c_4 * L' * j + 2^s should fit in a ulong. With j<=N, this gives
1436 : s<=32, so we set s=32, which means L' = 37896771 >= 9/1020 * 2^32.
1437 : Note that 1 + 1556782 * L' * N + 8*1527 + 2^32 is approximately
1438 : 2^63.999993.
1439 :
1440 : Note that this is all checked by a proof of the code translated
1441 : into Z3. Unfortunately CBMC was too slow to prove this code
1442 : directly. */
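 :   /* Worked example (editor's addition, not from the original source):
 :      for a two-transaction bundle with c_0 = c_1 = 1020 placed at
 :      index j, the recurrence gives r_1 = ceil( c_1 * L' * j / 2^32 )
 :      and then r_0 = ceil( c_0*(r_1+1)/c_1 ) = r_1 + 1, so
 :      r_0/c_0 = (r_1+1)/1020 > r_1/1020 = r_1/c_1 >= Lj: the
 :      transactions sort in bundle order and the whole bundle stays at
 :      or above the Lj boundary, as required. */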
1443 0 : #define BUNDLE_L_PRIME 37896771UL
1444 0 : #define BUNDLE_N 312671UL
1445 :
1446 0 : if( FD_UNLIKELY( pack->relative_bundle_idx>BUNDLE_N ) ) {
1447 0 : FD_LOG_WARNING(( "Too many bundles inserted without allowing pending bundles to go empty. "
1448 0 : "Ordering of bundles may be incorrect." ));
1449 0 : pack->relative_bundle_idx = 1UL;
1450 0 : }
1451 0 : ulong bundle_idx = fd_ulong_if( initializer_bundle, 0UL, pack->relative_bundle_idx );
1452 0 : insert_bundle_impl( pack, bundle_idx, txn_cnt, (fd_pack_ord_txn_t * *)bundle, expires_at );
1453 :   /* If IB, this is max( 1, x ), which is x since x>=1. Otherwise,
1454 :      this is max( x+1, x ), which is x+1, i.e. it increments x. */
1455 0 : pack->relative_bundle_idx = fd_ulong_max( bundle_idx+1UL, pack->relative_bundle_idx );
1456 :
1457 0 : return replaces ? FD_PACK_INSERT_ACCEPT_NONVOTE_REPLACE : FD_PACK_INSERT_ACCEPT_NONVOTE_ADD;
1458 :
1459 0 : }
1460 : static inline void
1461 : insert_bundle_impl( fd_pack_t * pack,
1462 : ulong bundle_idx,
1463 : ulong txn_cnt,
1464 : fd_pack_ord_txn_t * * bundle,
1465 0 : ulong expires_at ) {
1466 0 : ulong prev_reward = ((BUNDLE_L_PRIME * (BUNDLE_N - bundle_idx))) - 1UL;
1467 0 : ulong prev_cost = 1UL<<32;
1468 :
1469 : /* Assign last to first */
1470 0 : for( ulong i=0UL; i<txn_cnt; i++ ) {
1471 0 : fd_pack_ord_txn_t * ord = bundle[ txn_cnt-1UL - i ];
1472 0 : ord->rewards = (uint)(((ulong)ord->compute_est * (prev_reward + 1UL) + prev_cost-1UL)/prev_cost);
1473 0 : ord->root = FD_ORD_TXN_ROOT_PENDING_BUNDLE;
1474 0 : prev_reward = ord->rewards;
1475 0 : prev_cost = ord->compute_est;
1476 :
1477 : /* The penalty information isn't used for bundles. */
1478 0 : ushort penalties [ FD_TXN_ACCT_ADDR_MAX ];
1479 0 : uchar penalty_idx[ FD_TXN_ACCT_ADDR_MAX ];
1480 0 : populate_bitsets( pack, ord, penalties, penalty_idx );
1481 :
1482 0 : treap_ele_insert( pack->pending_bundles, ord, pack->pool );
1483 0 : pack->pending_txn_cnt++;
1484 :
1485 0 : sig2txn_ele_insert( pack->signature_map, ord, pack->pool );
1486 :
1487 0 : fd_pack_expq_t temp[ 1 ] = {{ .expires_at = expires_at, .txn = ord }};
1488 0 : expq_insert( pack->expiration_q, temp );
1489 0 : }
1490 :
1491 0 : }
1492 :
1493 : void const *
1494 0 : fd_pack_peek_bundle_meta( fd_pack_t const * pack ) {
1495 0 : int ib_state = pack->initializer_bundle_state;
1496 0 : if( FD_UNLIKELY( (ib_state==FD_PACK_IB_STATE_PENDING) | (ib_state==FD_PACK_IB_STATE_FAILED) ) ) return NULL;
1497 :
1498 0 : treap_rev_iter_t _cur=treap_rev_iter_init( pack->pending_bundles, pack->pool );
1499 0 : if( FD_UNLIKELY( treap_rev_iter_done( _cur ) ) ) return NULL; /* empty */
1500 :
1501 0 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pack->pool );
1502 0 : int is_ib = !!(cur->txn->flags & FD_TXN_P_FLAGS_INITIALIZER_BUNDLE);
1503 0 : if( FD_UNLIKELY( is_ib ) ) return NULL;
1504 :
1505 0 : return (void const *)((uchar const *)pack->bundle_meta + (ulong)_cur * pack->bundle_meta_sz);
1506 0 : }
1507 :
1508 : void
1509 0 : fd_pack_set_initializer_bundles_ready( fd_pack_t * pack ) {
1510 0 : pack->initializer_bundle_state = FD_PACK_IB_STATE_READY;
1511 0 : }
1512 :
1513 : void
1514 741063 : fd_pack_metrics_write( fd_pack_t const * pack ) {
1515 741063 : ulong pending_regular = treap_ele_cnt( pack->pending );
1516 741063 : ulong pending_votes = treap_ele_cnt( pack->pending_votes );
1517 741063 : ulong pending_bundle = treap_ele_cnt( pack->pending_bundles );
1518 741063 : ulong conflicting = pack->pending_txn_cnt - pending_votes - pending_bundle - treap_ele_cnt( pack->pending );
1519 741063 : FD_MGAUGE_SET( PACK, AVAILABLE_TRANSACTIONS_ALL, pack->pending_txn_cnt );
1520 741063 : FD_MGAUGE_SET( PACK, AVAILABLE_TRANSACTIONS_REGULAR, pending_regular );
1521 741063 : FD_MGAUGE_SET( PACK, AVAILABLE_TRANSACTIONS_VOTES, pending_votes );
1522 741063 : FD_MGAUGE_SET( PACK, AVAILABLE_TRANSACTIONS_CONFLICTING, conflicting );
1523 741063 : FD_MGAUGE_SET( PACK, AVAILABLE_TRANSACTIONS_BUNDLES, pending_bundle );
1524 741063 : FD_MGAUGE_SET( PACK, SMALLEST_PENDING_TRANSACTION, pack->pending_smallest->cus );
1525 741063 : }
1526 :
1527 : typedef struct {
1528 : ushort clear_rw_bit;
1529 : ushort clear_w_bit;
1530 : } release_result_t;
1531 :
1532 : static inline release_result_t
1533 : release_bit_reference( fd_pack_t * pack,
1534 17842689 : fd_acct_addr_t const * acct ) {
1535 :
1536 17842689 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( pack->acct_to_bitset, *acct, NULL );
1537 17842689 :   FD_TEST( q ); /* q==NULL should not be possible */
1538 :
1539 17842689 : q->ref_cnt--;
1540 :
1541 17842689 : if( FD_UNLIKELY( q->ref_cnt==0UL ) ) {
1542 13312969 : ushort bit = q->bit;
1543 13312969 : bitset_map_remove( pack->acct_to_bitset, q );
1544 13312969 : if( FD_LIKELY( bit<FD_PACK_BITSET_MAX ) ) pack->bitset_avail[ ++(pack->bitset_avail_cnt) ] = bit;
1545 :
1546 13312969 : fd_pack_addr_use_t * use = acct_uses_query( pack->acct_in_use, *acct, NULL );
1547 13312969 : if( FD_LIKELY( use ) ) {
1548 12818537 : use->in_use_by |= FD_PACK_IN_USE_BIT_CLEARED;
1549 12818537 : release_result_t ret = { .clear_rw_bit = bit,
1550 12818537 : .clear_w_bit = fd_ushort_if( !!(use->in_use_by & FD_PACK_IN_USE_WRITABLE), bit, FD_PACK_BITSET_MAX ) };
1551 12818537 : return ret;
1552 12818537 : }
1553 13312969 : }
1554 5024152 : release_result_t ret = { .clear_rw_bit = FD_PACK_BITSET_MAX, .clear_w_bit = FD_PACK_BITSET_MAX };
1555 5024152 : return ret;
1556 17842689 : }
1557 :
1558 : typedef struct {
1559 : ulong cus_scheduled;
1560 : ulong txns_scheduled;
1561 : ulong bytes_scheduled;
1562 : } sched_return_t;
1563 :
1564 : static inline sched_return_t
1565 : fd_pack_schedule_impl( fd_pack_t * pack,
1566 : treap_t * sched_from,
1567 : ulong cu_limit,
1568 : ulong txn_limit,
1569 : ulong byte_limit,
1570 : ulong bank_tile,
1571 : fd_pack_smallest_t * smallest_in_treap,
1572 : ulong * use_by_bank_txn,
1573 1482126 : fd_txn_p_t * out ) {
1574 :
1575 1482126 : fd_pack_ord_txn_t * pool = pack->pool;
1576 1482126 : fd_pack_addr_use_t * acct_in_use = pack->acct_in_use;
1577 1482126 : fd_pack_addr_use_t * writer_costs = pack->writer_costs;
1578 :
1579 1482126 : fd_pack_addr_use_t ** written_list = pack->written_list;
1580 1482126 : ulong written_list_cnt = pack->written_list_cnt;
1581 1482126 : ulong written_list_max = pack->written_list_max;
1582 :
1583 1482126 : FD_PACK_BITSET_DECLARE( bitset_rw_in_use );
1584 1482126 : FD_PACK_BITSET_DECLARE( bitset_w_in_use );
1585 1482126 : FD_PACK_BITSET_COPY( bitset_rw_in_use, pack->bitset_rw_in_use );
1586 1482126 : FD_PACK_BITSET_COPY( bitset_w_in_use, pack->bitset_w_in_use );
1587 :
1588 1482126 : fd_pack_addr_use_t * use_by_bank = pack->use_by_bank [bank_tile];
1589 1482126 : ulong use_by_bank_cnt = pack->use_by_bank_cnt[bank_tile];
1590 :
1591 1482126 : ulong max_write_cost_per_acct = pack->lim->max_write_cost_per_acct;
1592 :
1593 1482126 : ushort compressed_slot_number = pack->compressed_slot_number;
1594 :
1595 1482126 : ulong txns_scheduled = 0UL;
1596 1482126 : ulong cus_scheduled = 0UL;
1597 1482126 : ulong bytes_scheduled = 0UL;
1598 :
1599 1482126 : ulong bank_tile_mask = 1UL << bank_tile;
1600 :
1601 1482126 : ulong fast_path = 0UL;
1602 1482126 : ulong slow_path = 0UL;
1603 1482126 : ulong cu_limit_c = 0UL;
1604 1482126 : ulong byte_limit_c = 0UL;
1605 1482126 : ulong write_limit_c = 0UL;
1606 1482126 : ulong skip_c = 0UL;
1607 :
1608 1482126 : ulong min_cus = ULONG_MAX;
1609 1482126 : ulong min_bytes = ULONG_MAX;
1610 :
1611 1482126 : if( FD_UNLIKELY( (cu_limit<smallest_in_treap->cus) | (txn_limit==0UL) | (byte_limit<smallest_in_treap->bytes) ) ) {
1612 802272 : sched_return_t to_return = { .cus_scheduled = 0UL, .txns_scheduled = 0UL, .bytes_scheduled = 0UL };
1613 802272 : return to_return;
1614 802272 : }
1615 :
1616 679854 : treap_rev_iter_t prev = treap_idx_null();
1617 23737056 : for( treap_rev_iter_t _cur=treap_rev_iter_init( sched_from, pool ); !treap_rev_iter_done( _cur ); _cur=prev ) {
1618 : /* Capture next so that we can delete while we iterate. */
1619 23654820 : prev = treap_rev_iter_next( _cur, pool );
1620 :
1621 23654820 : # if FD_HAS_X86
1622 23654820 : _mm_prefetch( &(pool[ prev ].prev), _MM_HINT_T0 );
1623 23654820 : # endif
1624 :
1625 23654820 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pool );
1626 :
1627 23654820 : min_cus = fd_ulong_min( min_cus, cur->compute_est );
1628 23654820 : min_bytes = fd_ulong_min( min_bytes, cur->txn->payload_sz );
1629 :
1630 23654820 : ulong conflicts = 0UL;
1631 :
1632 23654820 : if( FD_UNLIKELY( cur->compute_est>cu_limit ) ) {
1633 : /* Too big to be scheduled at the moment, but might be okay for
1634 : the next microblock, so we don't want to delay it. */
1635 0 : cu_limit_c++;
1636 0 : continue;
1637 0 : }
1638 :
1639 : /* Likely? Unlikely? */
1640 23654820 : if( FD_LIKELY( !FD_PACK_BITSET_INTERSECT4_EMPTY( bitset_rw_in_use, bitset_w_in_use, cur->w_bitset, cur->rw_bitset ) ) ) {
1641 10568505 : fast_path++;
1642 10568505 : continue;
1643 10568505 : }
1644 :
1645 13086315 : if( FD_UNLIKELY( cur->skip==compressed_slot_number ) ) {
1646 0 : skip_c++;
1647 0 : continue;
1648 0 : }
1649 :
1650 :     /* If skip>FD_PACK_SKIP_CNT but not compressed_slot_number, it means
1651 : it's the compressed slot number of a previous slot. We don't
1652 : care unless we're going to update the value though, so we don't
1653 :        need to eagerly reset it to FD_PACK_SKIP_CNT.
1654 : compressed_slot_number is a ushort, so it's possible for it to
1655 : roll over, but the transaction lifetime is much shorter than
1656 : that, so it won't be a problem. */
1657 :
1658 13086315 : if( FD_UNLIKELY( cur->txn->payload_sz>byte_limit ) ) {
1659 6 : byte_limit_c++;
1660 6 : continue;
1661 6 : }
1662 :
1663 :
1664 13086309 : fd_txn_t const * txn = TXN(cur->txn);
1665 13086309 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, cur->txn->payload );
1666 13086309 : fd_acct_addr_t const * alt_adj = cur->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
1667 : /* Check conflicts between this transaction's writable accounts and
1668 : current readers */
1669 13086309 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
1670 27342864 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1671 :
1672 14256561 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
1673 :
1674 14256561 : fd_pack_addr_use_t * in_wcost_table = acct_uses_query( writer_costs, acct, NULL );
1675 14256561 : if( FD_UNLIKELY( in_wcost_table && in_wcost_table->total_cost+cur->compute_est > max_write_cost_per_acct ) ) {
1676 : /* Can't be scheduled until the next block */
1677 6 : conflicts = ULONG_MAX;
1678 6 : break;
1679 6 : }
1680 :
1681 14256555 : fd_pack_addr_use_t * use = acct_uses_query( acct_in_use, acct, NULL );
1682 14256555 : if( FD_UNLIKELY( use ) ) conflicts |= use->in_use_by; /* break? */
1683 14256555 : }
1684 :
1685 13086309 : if( FD_UNLIKELY( conflicts==ULONG_MAX ) ) {
1686 : /* The logic for how to adjust skip is a bit complicated, and we
1687 : want to do it branchlessly.
1688 : Before After
1689 : 1 compressed_slot_number
1690 : x in [2, 5] x-1
1691 : x where x>5 4
1692 :
1693 : Set A=min(x, 5), B=min(A-2, compressed_slot_number-1), and
1694 : note that compressed_slot_number is in [6, USHORT_MAX].
1695 : Then:
1696 : x A A-2 B B+1
1697 : 1 1 USHORT_MAX csn-1 csn
1698 : x in [2, 5] x x-2 x-2 x-1
1699 : x where x>5 5 3 3 4
1700 : So B+1 is the desired value. */
1701 6 : cur->skip = (ushort)(1+fd_ushort_min( (ushort)(compressed_slot_number-1),
1702 6 : (ushort)(fd_ushort_min( cur->skip, FD_PACK_SKIP_CNT )-2) ) );
1703 6 : write_limit_c++;
1704 6 : continue;
1705 6 : }
1706 :
1707 13086303 : if( FD_UNLIKELY( conflicts ) ) {
1708 6 : slow_path++;
1709 6 : continue;
1710 6 : }
1711 :
1712 : /* Check conflicts between this transaction's readonly accounts and
1713 : current writers */
1714 13086297 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_READONLY );
1715 16658427 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1716 :
1717 3572130 : fd_acct_addr_t const * acct = ACCT_ITER_TO_PTR( iter );
1718 3572130 : if( fd_pack_unwritable_contains( acct ) ) continue; /* No need to track sysvars because they can't be writable */
1719 :
1720 2576685 : fd_pack_addr_use_t * use = acct_uses_query( acct_in_use, *acct, NULL );
1721 2576685 : if( use ) conflicts |= (use->in_use_by & FD_PACK_IN_USE_WRITABLE) ? use->in_use_by : 0UL;
1722 2576685 : }
1723 :
1724 13086297 : if( FD_UNLIKELY( conflicts ) ) {
1725 0 : slow_path++;
1726 0 : continue;
1727 0 : }
1728 :
1729 : /* Include this transaction in the microblock! */
1730 13086297 : FD_PACK_BITSET_OR( bitset_rw_in_use, cur->rw_bitset );
1731 13086297 : FD_PACK_BITSET_OR( bitset_w_in_use, cur->w_bitset );
1732 :
1733 13086297 : if(
1734 4362099 : #if FD_HAS_AVX512 && FD_PACK_USE_NON_TEMPORAL_MEMCPY
1735 4362099 : FD_LIKELY( cur->txn->payload_sz>=1024UL )
1736 : #else
1737 8724198 : 0
1738 8724198 : #endif
1739 13086297 : ) {
1740 4224 : #if FD_HAS_AVX512 && FD_PACK_USE_NON_TEMPORAL_MEMCPY
1741 4224 : _mm512_stream_si512( (void*)(out->payload+ 0UL), _mm512_load_epi64( cur->txn->payload+ 0UL ) );
1742 4224 : _mm512_stream_si512( (void*)(out->payload+ 64UL), _mm512_load_epi64( cur->txn->payload+ 64UL ) );
1743 4224 : _mm512_stream_si512( (void*)(out->payload+ 128UL), _mm512_load_epi64( cur->txn->payload+ 128UL ) );
1744 4224 : _mm512_stream_si512( (void*)(out->payload+ 192UL), _mm512_load_epi64( cur->txn->payload+ 192UL ) );
1745 4224 : _mm512_stream_si512( (void*)(out->payload+ 256UL), _mm512_load_epi64( cur->txn->payload+ 256UL ) );
1746 4224 : _mm512_stream_si512( (void*)(out->payload+ 320UL), _mm512_load_epi64( cur->txn->payload+ 320UL ) );
1747 4224 : _mm512_stream_si512( (void*)(out->payload+ 384UL), _mm512_load_epi64( cur->txn->payload+ 384UL ) );
1748 4224 : _mm512_stream_si512( (void*)(out->payload+ 448UL), _mm512_load_epi64( cur->txn->payload+ 448UL ) );
1749 4224 : _mm512_stream_si512( (void*)(out->payload+ 512UL), _mm512_load_epi64( cur->txn->payload+ 512UL ) );
1750 4224 : _mm512_stream_si512( (void*)(out->payload+ 576UL), _mm512_load_epi64( cur->txn->payload+ 576UL ) );
1751 4224 : _mm512_stream_si512( (void*)(out->payload+ 640UL), _mm512_load_epi64( cur->txn->payload+ 640UL ) );
1752 4224 : _mm512_stream_si512( (void*)(out->payload+ 704UL), _mm512_load_epi64( cur->txn->payload+ 704UL ) );
1753 4224 : _mm512_stream_si512( (void*)(out->payload+ 768UL), _mm512_load_epi64( cur->txn->payload+ 768UL ) );
1754 4224 : _mm512_stream_si512( (void*)(out->payload+ 832UL), _mm512_load_epi64( cur->txn->payload+ 832UL ) );
1755 4224 : _mm512_stream_si512( (void*)(out->payload+ 896UL), _mm512_load_epi64( cur->txn->payload+ 896UL ) );
1756 4224 : _mm512_stream_si512( (void*)(out->payload+ 960UL), _mm512_load_epi64( cur->txn->payload+ 960UL ) );
1757 4224 : _mm512_stream_si512( (void*)(out->payload+1024UL), _mm512_load_epi64( cur->txn->payload+1024UL ) );
1758 4224 : _mm512_stream_si512( (void*)(out->payload+1088UL), _mm512_load_epi64( cur->txn->payload+1088UL ) );
1759 4224 : _mm512_stream_si512( (void*)(out->payload+1152UL), _mm512_load_epi64( cur->txn->payload+1152UL ) );
1760 4224 : _mm512_stream_si512( (void*)(out->payload+1216UL), _mm512_load_epi64( cur->txn->payload+1216UL ) );
1761 : /* Copied out to 1280 bytes, which copies some other fields we needed to
1762 : copy anyway. */
1763 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, payload_sz )+sizeof(((fd_txn_p_t*)NULL)->payload_sz )<=1280UL, nt_memcpy );
1764 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, blockhash_slot )+sizeof(((fd_txn_p_t*)NULL)->blockhash_slot)<=1280UL, nt_memcpy );
1765 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, flags )+sizeof(((fd_txn_p_t*)NULL)->flags )<=1280UL, nt_memcpy );
1766 4224 : FD_STATIC_ASSERT( offsetof(fd_txn_p_t, _ ) <=1280UL, nt_memcpy );
1767 4224 : const ulong offset_into_txn = 1280UL - offsetof(fd_txn_p_t, _ );
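 :       /* Editor's note: the streaming copy above already covered the
 :          first offset_into_txn bytes of the parsed fd_txn_t, so this
 :          memcpy only copies whatever remains of the txn descriptor.
 :          The fd_ulong_max guards against a footprint smaller than
 :          offset_into_txn, in which case nothing more is copied. */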
1768 4224 : fd_memcpy( offset_into_txn+(uchar *)TXN(out), offset_into_txn+(uchar const *)txn,
1769 4224 : fd_ulong_max( offset_into_txn, fd_txn_footprint( txn->instr_cnt, txn->addr_table_lookup_cnt ) )-offset_into_txn );
1770 4224 : #endif
1771 13082073 : } else {
1772 13082073 : fd_memcpy( out->payload, cur->txn->payload, cur->txn->payload_sz );
1773 13082073 : fd_memcpy( TXN(out), txn, fd_txn_footprint( txn->instr_cnt, txn->addr_table_lookup_cnt ) );
1774 13082073 : out->payload_sz = cur->txn->payload_sz;
1775 13082073 : out->pack_cu.requested_exec_plus_acct_data_cus = cur->txn->pack_cu.requested_exec_plus_acct_data_cus;
1776 13082073 : out->pack_cu.non_execution_cus = cur->txn->pack_cu.non_execution_cus;
1777 13082073 : out->flags = cur->txn->flags;
1778 13082073 : }
1779 13086297 : out++;
1780 :
1781 13086297 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
1782 27342834 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1783 14256537 : fd_acct_addr_t acct_addr = *ACCT_ITER_TO_PTR( iter );
1784 :
1785 14256537 : fd_pack_addr_use_t * in_wcost_table = acct_uses_query( writer_costs, acct_addr, NULL );
1786 14256537 : if( !in_wcost_table ) {
1787 794048 : in_wcost_table = acct_uses_insert( writer_costs, acct_addr );
1788 794048 : in_wcost_table->total_cost = 0UL;
1789 794048 : written_list[ written_list_cnt ] = in_wcost_table;
1790 794048 : written_list_cnt = fd_ulong_min( written_list_cnt+1UL, written_list_max-1UL );
1791 794048 : }
1792 14256537 : in_wcost_table->total_cost += cur->compute_est;
1793 :
1794 14256537 : fd_pack_addr_use_t * use = acct_uses_insert( acct_in_use, acct_addr );
1795 14256537 : use->in_use_by = bank_tile_mask | FD_PACK_IN_USE_WRITABLE;
1796 :
1797 14256537 : use_by_bank[use_by_bank_cnt++] = *use;
1798 :
1799 : /* If there aren't any more references to this account in the
1800 : heap, it can't cause any conflicts. That means we actually
1801 : don't need to record that we are using it, which is good
1802 : because we want to release the bit. */
1803 14256537 : release_result_t ret = release_bit_reference( pack, &acct_addr );
1804 14256537 : FD_PACK_BITSET_CLEARN( bitset_rw_in_use, ret.clear_rw_bit );
1805 14256537 : FD_PACK_BITSET_CLEARN( bitset_w_in_use, ret.clear_w_bit );
1806 14256537 : }
1807 13086297 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_READONLY );
1808 16658427 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
1809 :
1810 3572130 : fd_acct_addr_t acct_addr = *ACCT_ITER_TO_PTR( iter );
1811 :
1812 3572130 : if( fd_pack_unwritable_contains( &acct_addr ) ) continue; /* No need to track sysvars because they can't be writable */
1813 :
1814 2576685 : fd_pack_addr_use_t * use = acct_uses_query( acct_in_use, acct_addr, NULL );
1815 2576685 : if( !use ) { use = acct_uses_insert( acct_in_use, acct_addr ); use->in_use_by = 0UL; }
1816 :
1817 2576685 : if( !(use->in_use_by & bank_tile_mask) ) use_by_bank[use_by_bank_cnt++] = *use;
1818 2576685 : use->in_use_by |= bank_tile_mask;
1819 2576685 : use->in_use_by &= ~FD_PACK_IN_USE_BIT_CLEARED;
1820 :
1821 :
1822 2576685 : release_result_t ret = release_bit_reference( pack, &acct_addr );
1823 2576685 : FD_PACK_BITSET_CLEARN( bitset_rw_in_use, ret.clear_rw_bit );
1824 2576685 : FD_PACK_BITSET_CLEARN( bitset_w_in_use, ret.clear_w_bit );
1825 2576685 : }
1826 :
1827 13086297 : txns_scheduled += 1UL; txn_limit -= 1UL;
1828 13086297 : cus_scheduled += cur->compute_est; cu_limit -= cur->compute_est;
1829 13086297 : bytes_scheduled += cur->txn->payload_sz; byte_limit -= cur->txn->payload_sz;
1830 :
1831 13086297 : *(use_by_bank_txn++) = use_by_bank_cnt;
1832 :
1833 13086297 : sig2txn_ele_remove_fast( pack->signature_map, cur, pool );
1834 :
1835 13086297 : cur->root = FD_ORD_TXN_ROOT_FREE;
1836 13086297 : expq_remove( pack->expiration_q, cur->expq_idx );
1837 13086297 : treap_idx_remove( sched_from, _cur, pool );
1838 13086297 : trp_pool_idx_release( pool, _cur );
1839 13086297 : pack->pending_txn_cnt--;
1840 :
1841 13086297 : if( FD_UNLIKELY( (cu_limit<smallest_in_treap->cus) | (txn_limit==0UL) | (byte_limit<smallest_in_treap->bytes) ) ) break;
1842 13086297 : }
1843 :
1844 679854 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_TAKEN, txns_scheduled );
1845 679854 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_CU_LIMIT, cu_limit_c );
1846 679854 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_FAST_PATH, fast_path );
1847 679854 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_BYTE_LIMIT, byte_limit_c );
1848 679854 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_WRITE_COST, write_limit_c );
1849 679854 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_SLOW_PATH, slow_path );
1850 679854 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_DEFER_SKIP, skip_c );
1851 :
1852 : /* If we scanned the whole treap and didn't break early, we now have a
1853 : better estimate of the smallest. */
1854 679854 : if( FD_UNLIKELY( treap_rev_iter_done( prev ) ) ) {
1855 85476 : smallest_in_treap->cus = min_cus;
1856 85476 : smallest_in_treap->bytes = min_bytes;
1857 85476 : }
1858 :
1859 679854 : pack->use_by_bank_cnt[bank_tile] = use_by_bank_cnt;
1860 679854 : FD_PACK_BITSET_COPY( pack->bitset_rw_in_use, bitset_rw_in_use );
1861 679854 : FD_PACK_BITSET_COPY( pack->bitset_w_in_use, bitset_w_in_use );
1862 :
1863 679854 : pack->written_list_cnt = written_list_cnt;
1864 :
1865 679854 : sched_return_t to_return = { .cus_scheduled=cus_scheduled, .txns_scheduled=txns_scheduled, .bytes_scheduled=bytes_scheduled };
1866 679854 : return to_return;
1867 1482126 : }
1868 :
1869 : int
1870 : fd_pack_microblock_complete( fd_pack_t * pack,
1871 741063 : ulong bank_tile ) {
1872 : /* If the account is in use writably, and it's in use by this banking
1873 : tile, then this banking tile must be the sole writer to it, so it's
1874 : always okay to clear the writable bit. */
1875 741063 : ulong clear_mask = ~((1UL<<bank_tile) | FD_PACK_IN_USE_WRITABLE);
1876 :
1877 : /* If nothing outstanding, bail quickly */
1878 741063 : if( FD_UNLIKELY( !(pack->outstanding_microblock_mask & (1UL<<bank_tile)) ) ) return 0;
1879 :
1880 673878 : FD_PACK_BITSET_DECLARE( bitset_rw_in_use );
1881 673878 : FD_PACK_BITSET_DECLARE( bitset_w_in_use );
1882 673878 : FD_PACK_BITSET_COPY( bitset_rw_in_use, pack->bitset_rw_in_use );
1883 673878 : FD_PACK_BITSET_COPY( bitset_w_in_use, pack->bitset_w_in_use );
1884 :
1885 673878 : fd_pack_addr_use_t * base = pack->use_by_bank[bank_tile];
1886 :
1887 673878 : fd_pack_ord_txn_t * best = NULL;
1888 673878 : fd_pack_penalty_treap_t * best_penalty = NULL;
1889 673878 : ulong txn_cnt = 0UL;
1890 :
1891 16884972 : for( ulong i=0UL; i<pack->use_by_bank_cnt[bank_tile]; i++ ) {
1892 16211094 : fd_pack_addr_use_t * use = acct_uses_query( pack->acct_in_use, base[i].key, NULL );
1893 16211094 : FD_TEST( use );
1894 16211094 : use->in_use_by &= clear_mask;
1895 :
1896 : /* In order to properly bound the size of bitset_map, we need to
1897 : release the "reference" to the account when we schedule it.
1898 : However, that poses a bit of a problem here, because by the time
1899 : we complete the microblock, that account could have been assigned
1900 : a different bit in the bitset. The scheduling step tells us if
1901 : that is the case, and if so, we know that the bits in
1902 : bitset_w_in_use and bitset_rw_in_use were already cleared as
1903 : necessary.
1904 :
1905 : Note that it's possible for BIT_CLEARED to be set and then unset
1906 : by later uses, but then the account would be in use on other
1907 : banks, so we wouldn't try to observe the old value. For example:
1908 : Suppose bit 0->account A, bit 1->account B, and we have two
1909 : transactions that read A, B. We schedule a microblock to bank 0,
1910 : taking both transactions, which sets the counts for A, B to 0,
1911 : and releases the bits, clearing bits 0 and 1, and setting
1912 : BIT_CLEARED. Then we get two more transactions that read
1913 : accounts C, D, A, B, and they get assigned 0->C, 1->D, 2->A,
1914 : 3->B. We try to schedule a microblock to bank 1 that takes one
1915 : of those transactions. This unsets BIT_CLEARED for A, B.
1916 : Finally, the first microblock completes. Even though the bitset
1917 : map has the new bits for A and B which are "wrong" compared to
1918 : when the transaction was initially scheduled, those bits have
1919 : already been cleared and reset properly in the bitset as needed.
1920 : A and B will still be in use by bank 1, so we won't clear any
1921 : bits. If, on the other hand, the microblock scheduled to bank 1
1922 : completes first, bits 0 and 1 will be cleared for accounts C and
1923 : D, while bits 2 and 3 will remain set, which is correct. Then
1924 : when bank 0 completes, bits 2 and 3 will be cleared. */
1925 16211094 : if( FD_LIKELY( !use->in_use_by ) ) { /* if in_use_by==0, doesn't include BIT_CLEARED */
1926 3400663 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( pack->acct_to_bitset, base[i].key, NULL );
1927 3400663 : FD_TEST( q );
1928 3400663 : FD_PACK_BITSET_CLEARN( bitset_w_in_use, q->bit );
1929 3400663 : FD_PACK_BITSET_CLEARN( bitset_rw_in_use, q->bit );
1930 :
1931 : /* Because this account is no longer in use, it might be possible
1932 : to schedule a transaction that writes to it. Check its
1933 : penalty treap if it has one, and potentially move it to the
1934 : main treap. */
1935 3400663 : fd_pack_penalty_treap_t * p_trp = penalty_map_query( pack->penalty_treaps, base[i].key, NULL );
1936 3400663 : if( FD_UNLIKELY( p_trp ) ) {
1937 753130 : fd_pack_ord_txn_t * best_in_trp = treap_rev_iter_ele( treap_rev_iter_init( p_trp->penalty_treap, pack->pool ), pack->pool );
1938 753130 : if( FD_UNLIKELY( !best || COMPARE_WORSE( best, best_in_trp ) ) ) {
1939 301625 : best = best_in_trp;
1940 301625 : best_penalty = p_trp;
1941 301625 : }
1942 753130 : }
1943 3400663 : }
1944 :
1945 16211094 : if( FD_LIKELY( !(use->in_use_by & ~FD_PACK_IN_USE_BIT_CLEARED) ) ) acct_uses_remove( pack->acct_in_use, use );
1946 :
1947 16211094 : if( FD_UNLIKELY( i+1UL==pack->use_by_bank_txn[ bank_tile ][ txn_cnt ] ) ) {
1948 13082484 : txn_cnt++;
1949 13082484 : if( FD_LIKELY( best ) ) {
1950 : /* move best to the main treap */
1951 301625 : treap_ele_remove( best_penalty->penalty_treap, best, pack->pool );
1952 301625 : best->root = FD_ORD_TXN_ROOT_PENDING;
1953 301625 : treap_ele_insert( pack->pending, best, pack->pool );
1954 :
1955 301625 : pack->pending_smallest->cus = fd_ulong_min( pack->pending_smallest->cus, best->compute_est );
1956 301625 : pack->pending_smallest->bytes = fd_ulong_min( pack->pending_smallest->bytes, best->txn_e->txnp->payload_sz );
1957 :
1958 301625 : if( FD_UNLIKELY( !treap_ele_cnt( best_penalty->penalty_treap ) ) ) {
1959 2892 : treap_delete( treap_leave( best_penalty->penalty_treap ) );
1960 : /* Removal invalidates any pointers we got from
1961 : penalty_map_query, but we immediately set these to NULL, so
1962 : we're not keeping any pointers around. */
1963 2892 : penalty_map_remove( pack->penalty_treaps, best_penalty );
1964 2892 : }
1965 301625 : best = NULL;
1966 301625 : best_penalty = NULL;
1967 301625 : }
1968 13082484 : }
1969 16211094 : }
1970 :
1971 673878 : pack->use_by_bank_cnt[bank_tile] = 0UL;
1972 :
1973 673878 : FD_PACK_BITSET_COPY( pack->bitset_rw_in_use, bitset_rw_in_use );
1974 673878 : FD_PACK_BITSET_COPY( pack->bitset_w_in_use, bitset_w_in_use );
1975 :
1976 : /* outstanding_microblock_mask never has the writable bit set, so we
1977 : don't care about clearing it here either. */
1978 673878 : pack->outstanding_microblock_mask &= clear_mask;
1979 673878 : return 1;
1980 673878 : }
1981 :
1982 438366 : #define TRY_BUNDLE_NO_READY_BUNDLES 0
1983 0 : #define TRY_BUNDLE_HAS_CONFLICTS (-1)
1984 0 : #define TRY_BUNDLE_DOES_NOT_FIT (-2)
1985 0 : #define TRY_BUNDLE_SUCCESS(n) ( n) /* schedule bundle with n transactions */
1986 : static inline int
1987 : fd_pack_try_schedule_bundle( fd_pack_t * pack,
1988 : ulong bank_tile,
1989 438366 : fd_txn_p_t * out ) {
1990 438366 : int state = pack->initializer_bundle_state;
1991 438366 : if( FD_UNLIKELY( (state==FD_PACK_IB_STATE_PENDING) | (state==FD_PACK_IB_STATE_FAILED ) ) ) return TRY_BUNDLE_NO_READY_BUNDLES;
1992 :
1993 438366 : fd_pack_ord_txn_t * pool = pack->pool;
1994 438366 : treap_t * bundles = pack->pending_bundles;
1995 :
1996 438366 : int require_ib;
1997 438366 : if( FD_UNLIKELY( state==FD_PACK_IB_STATE_NOT_INITIALIZED ) ) { require_ib = 1; }
1998 438366 : if( FD_LIKELY ( state==FD_PACK_IB_STATE_READY ) ) { require_ib = 0; }
1999 :
2000 438366 : treap_rev_iter_t _cur = treap_rev_iter_init( bundles, pool );
2001 438366 : ulong bundle_idx = ULONG_MAX;
2002 :
2003 438366 : if( FD_UNLIKELY( treap_rev_iter_done( _cur ) ) ) return TRY_BUNDLE_NO_READY_BUNDLES;
2004 :
2005 0 : treap_rev_iter_t _txn0 = _cur;
2006 0 : fd_pack_ord_txn_t * txn0 = treap_rev_iter_ele( _txn0, pool );
2007 0 : int is_ib = !!(txn0->txn->flags & FD_TXN_P_FLAGS_INITIALIZER_BUNDLE);
2008 0 : bundle_idx = RC_TO_REL_BUNDLE_IDX( txn0->rewards, txn0->compute_est );
2009 :
2010 0 : if( FD_UNLIKELY( require_ib & !is_ib ) ) return TRY_BUNDLE_NO_READY_BUNDLES;
2011 :
2012 : /* At this point, we have our candidate bundle, so we'll schedule it
2013 : if we can. If we can't, we won't schedule anything. */
2014 :
2015 :
2016 0 : fd_pack_addr_use_t * bundle_temp_inserted[ FD_PACK_MAX_TXN_PER_BUNDLE * FD_TXN_ACCT_ADDR_MAX ];
2017 0 : ulong bundle_temp_inserted_cnt = 0UL;
2018 :
2019 0 : ulong bank_tile_mask = 1UL << bank_tile;
2020 :
2021 0 : int doesnt_fit = 0;
2022 0 : int has_conflict = 0;
2023 0 : ulong txn_cnt = 0UL;
2024 :
2025 0 : ulong cu_limit = pack->lim->max_cost_per_block - pack->cumulative_block_cost;
2026 0 : ulong byte_limit = pack->lim->max_data_bytes_per_block - pack->data_bytes_consumed;
2027 0 : ulong microblock_limit = pack->lim->max_microblocks_per_block - pack->microblock_cnt;
2028 :
2029 0 : FD_PACK_BITSET_DECLARE( bitset_rw_in_use );
2030 0 : FD_PACK_BITSET_DECLARE( bitset_w_in_use );
2031 0 : FD_PACK_BITSET_COPY( bitset_rw_in_use, pack->bitset_rw_in_use );
2032 0 : FD_PACK_BITSET_COPY( bitset_w_in_use, pack->bitset_w_in_use );
2033 :
2034 :   /* last_use_in_txn_cnt[i+1] keeps track of the number of accounts that
2035 : have their last reference in transaction i of the bundle. This
2036 : esoteric value is important for computing use_by_bank_txn.
2037 : last_use_in_txn_cnt[0] is garbage. */
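 :   /* Worked example (editor's addition): in a 3-transaction bundle
 :      where accounts A and B see their last reference in transaction 0,
 :      account D in transaction 1, and account C in transaction 2,
 :      last_use_in_txn_cnt ends up as { garbage, 2, 1, 1 }.  The prefix
 :      sum further below turns that into use_by_bank_txn = { 0, 2, 3 },
 :      and as the accounts are slotted into use_by_bank (grouped by the
 :      transaction that last uses them) it advances to { 2, 3, 4 }, the
 :      running counts that fd_pack_microblock_complete expects. */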
2038 0 : ulong last_use_in_txn_cnt[ 1UL+FD_PACK_MAX_TXN_PER_BUNDLE ] = { 0UL };
2039 :
2040 0 : fd_pack_addr_use_t null_use[1] = {{{{ 0 }}, { 0 }}};
2041 :
2042 0 : while( !(doesnt_fit | has_conflict) & !treap_rev_iter_done( _cur ) ) {
2043 0 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pool );
2044 0 : ulong this_bundle_idx = RC_TO_REL_BUNDLE_IDX( cur->rewards, cur->compute_est );
2045 0 : if( FD_UNLIKELY( this_bundle_idx!=bundle_idx ) ) break;
2046 :
2047 0 : if( FD_UNLIKELY( cur->compute_est>cu_limit ) ) {
2048 0 : doesnt_fit = 1;
2049 0 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_CU_LIMIT, 1UL );
2050 0 : break;
2051 0 : }
2052 0 : cu_limit -= cur->compute_est;
2053 :
2054 : /* Each transaction in a bundle turns into a microblock */
2055 0 : if( FD_UNLIKELY( microblock_limit==0UL ) ) {
2056 0 : doesnt_fit = 1;
2057 0 : FD_MCNT_INC( PACK, MICROBLOCK_PER_BLOCK_LIMIT, 1UL );
2058 0 : break;
2059 0 : }
2060 0 : microblock_limit--;
2061 :
2062 0 : if( FD_UNLIKELY( cur->txn->payload_sz+MICROBLOCK_DATA_OVERHEAD>byte_limit ) ) {
2063 0 : doesnt_fit = 1;
2064 0 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_BYTE_LIMIT, 1UL );
2065 0 : break;
2066 0 : }
2067 0 : byte_limit -= cur->txn->payload_sz + MICROBLOCK_DATA_OVERHEAD;
2068 :
2069 0 : if( FD_UNLIKELY( !FD_PACK_BITSET_INTERSECT4_EMPTY( pack->bitset_rw_in_use, pack->bitset_w_in_use, cur->w_bitset, cur->rw_bitset ) ) ) {
2070 0 : has_conflict = 1;
2071 0 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_FAST_PATH, 1UL );
2072 0 : break;
2073 0 : }
2074 :
2075 : /* Don't update the actual in-use bitset, because the transactions
2076 : in the bundle are allowed to conflict with each other. */
2077 0 : FD_PACK_BITSET_OR( bitset_rw_in_use, cur->rw_bitset );
2078 0 : FD_PACK_BITSET_OR( bitset_w_in_use, cur->w_bitset );
2079 :
2080 :
2081 0 : fd_txn_t const * txn = TXN(cur->txn);
2082 0 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, cur->txn->payload );
2083 0 : fd_acct_addr_t const * alt_adj = cur->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
2084 :
2085 : /* Check conflicts between this transaction's writable accounts and
2086 : current readers */
2087 0 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
2088 0 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2089 :
2090 0 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
2091 :
2092 0 : fd_pack_addr_use_t * in_bundle_temp = acct_uses_query( pack->bundle_temp_map, acct, null_use );
2093 0 : ulong current_cost = acct_uses_query( pack->writer_costs, acct, null_use )->total_cost;
2094 0 : ulong carried_cost = (ulong)in_bundle_temp->carried_cost;
2095 0 : if( FD_UNLIKELY( current_cost + carried_cost + cur->compute_est > pack->lim->max_write_cost_per_acct ) ) {
2096 0 : doesnt_fit = 1;
2097 0 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_WRITE_COST, 1UL );
2098 0 : break;
2099 0 : }
2100 :
2101 0 : if( FD_LIKELY( in_bundle_temp==null_use ) ) { /* Not in temp bundle table yet */
2102 0 : in_bundle_temp = acct_uses_insert( pack->bundle_temp_map, acct );
2103 0 : in_bundle_temp->_ = 0UL;
2104 0 : bundle_temp_inserted[ bundle_temp_inserted_cnt++ ] = in_bundle_temp;
2105 0 : }
2106 0 : in_bundle_temp->carried_cost += (uint)cur->compute_est; /* < 2^21, but >0 */
2107 0 : in_bundle_temp->ref_cnt++;
2108 0 : last_use_in_txn_cnt[ in_bundle_temp->last_use_in ]--;
2109 0 : in_bundle_temp->last_use_in = (ushort)(txn_cnt+1UL);
2110 0 : last_use_in_txn_cnt[ in_bundle_temp->last_use_in ]++;
2111 :
2112 0 : if( FD_UNLIKELY( acct_uses_query( pack->acct_in_use, acct, null_use )->in_use_by ) ) {
2113 0 : has_conflict = 1;
2114 0 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_SLOW_PATH, 1UL );
2115 0 : break;
2116 0 : }
2117 0 : }
2118 0 : if( has_conflict | doesnt_fit ) break;
2119 :
2120 : /* Check conflicts between this transaction's readonly accounts and
2121 : current writers */
2122 0 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_READONLY );
2123 0 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2124 :
2125 0 : fd_acct_addr_t const * acct = ACCT_ITER_TO_PTR( iter );
2126 0 : if( fd_pack_unwritable_contains( acct ) ) continue; /* No need to track sysvars because they can't be writable */
2127 :
2128 0 : fd_pack_addr_use_t * in_bundle_temp = acct_uses_query( pack->bundle_temp_map, *acct, null_use );
2129 0 : if( FD_LIKELY( in_bundle_temp==null_use ) ) { /* Not in temp bundle table yet */
2130 0 : in_bundle_temp = acct_uses_insert( pack->bundle_temp_map, *acct );
2131 0 : in_bundle_temp->_ = 0UL;
2132 0 : bundle_temp_inserted[ bundle_temp_inserted_cnt++ ] = in_bundle_temp;
2133 0 : }
2134 0 : in_bundle_temp->ref_cnt++;
2135 0 : last_use_in_txn_cnt[ in_bundle_temp->last_use_in ]--;
2136 0 : in_bundle_temp->last_use_in = (ushort)(txn_cnt+1UL);
2137 0 : last_use_in_txn_cnt[ in_bundle_temp->last_use_in ]++;
2138 :
2139 0 : if( FD_UNLIKELY( acct_uses_query( pack->acct_in_use, *acct, null_use )->in_use_by & FD_PACK_IN_USE_WRITABLE ) ) {
2140 0 : has_conflict = 1;
2141 0 : FD_MCNT_INC( PACK, TRANSACTION_SCHEDULE_SLOW_PATH, 1UL );
2142 0 : break;
2143 0 : }
2144 0 : }
2145 :
2146 0 : if( has_conflict | doesnt_fit ) break;
2147 :
2148 0 : txn_cnt++;
2149 0 : _cur = treap_rev_iter_next( _cur, pool );
2150 0 : }
2151 0 : int retval = fd_int_if( doesnt_fit, TRY_BUNDLE_DOES_NOT_FIT,
2152 0 : fd_int_if( has_conflict, TRY_BUNDLE_HAS_CONFLICTS, TRY_BUNDLE_SUCCESS( (int)txn_cnt ) ) );
2153 :
2154 0 : if( FD_UNLIKELY( retval<=0 ) ) {
2155 0 : for( ulong i=0UL; i<bundle_temp_inserted_cnt; i++ ) {
2156 0 : acct_uses_remove( pack->bundle_temp_map, bundle_temp_inserted[ bundle_temp_inserted_cnt-i-1UL ] );
2157 0 : }
2158 0 : FD_TEST( acct_uses_key_cnt( pack->bundle_temp_map )==0UL );
2159 0 : return retval;
2160 0 : }
2161 :
2162 : /* This bundle passed validation, so now we'll take it! */
2163 0 : pack->outstanding_microblock_mask |= bank_tile_mask;
2164 :
2165 0 : treap_rev_iter_t _end = _cur;
2166 0 : treap_rev_iter_t _next;
2167 :
2168 : /* We'll carefully incrementally construct use_by_bank and
2169 : use_by_bank_txn based on the contents of bundle_temp and
2170 : last_use_in_txn_cnt. */
2171 0 : fd_pack_addr_use_t * use_by_bank = pack->use_by_bank [bank_tile];
2172 0 : ulong * use_by_bank_txn = pack->use_by_bank_txn[bank_tile];
2173 0 : ulong cum_sum = 0UL;
2174 0 : for( ulong k=0UL; k<txn_cnt; k++ ) { use_by_bank_txn[k] = cum_sum; cum_sum += last_use_in_txn_cnt[ k+1UL ]; }
2175 0 : pack->use_by_bank_cnt[bank_tile] = cum_sum;
2176 :
2177 :
2178 0 : for( _cur=_txn0; _cur!=_end; _cur=_next ) {
2179 0 : _next = treap_rev_iter_next( _cur, pool );
2180 :
2181 0 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pool );
2182 0 : fd_txn_t const * txn = TXN(cur->txn);
2183 0 : fd_memcpy( out->payload, cur->txn->payload, cur->txn->payload_sz );
2184 0 : fd_memcpy( TXN(out), txn, fd_txn_footprint( txn->instr_cnt, txn->addr_table_lookup_cnt ) );
2185 0 : out->payload_sz = cur->txn->payload_sz;
2186 0 : out->pack_cu.requested_exec_plus_acct_data_cus = cur->txn->pack_cu.requested_exec_plus_acct_data_cus;
2187 0 : out->pack_cu.non_execution_cus = cur->txn->pack_cu.non_execution_cus;
2188 0 : out->flags = cur->txn->flags;
2189 0 : out++;
2190 :
2191 0 : pack->cumulative_block_cost += cur->compute_est;
2192 0 : pack->data_bytes_consumed += cur->txn->payload_sz + MICROBLOCK_DATA_OVERHEAD;
2193 0 : pack->microblock_cnt += 1UL;
2194 :
2195 0 : sig2txn_ele_remove_fast( pack->signature_map, cur, pack->pool );
2196 :
2197 0 : cur->root = FD_ORD_TXN_ROOT_FREE;
2198 0 : expq_remove( pack->expiration_q, cur->expq_idx );
2199 0 : treap_idx_remove( pack->pending_bundles, _cur, pack->pool );
2200 0 : trp_pool_idx_release( pack->pool, _cur );
2201 0 : pack->pending_txn_cnt--;
2202 0 : }
2203 :
2204 :
2205 0 : for( ulong i=0UL; i<bundle_temp_inserted_cnt; i++ ) {
2206 : /* In order to clear bundle_temp_map with the typical trick, we need
2207 : to iterate through bundle_temp_inserted backwards. */
2208 0 : fd_pack_addr_use_t * addr_use = bundle_temp_inserted[ bundle_temp_inserted_cnt-i-1UL ];
2209 :
2210 0 : int any_writers = addr_use->carried_cost>0U; /* Did any transaction in this bundle write lock this account address? */
2211 :
2212 0 : if( FD_LIKELY( any_writers ) ) { /* UNLIKELY? */
2213 0 : fd_pack_addr_use_t * in_wcost_table = acct_uses_query( pack->writer_costs, addr_use->key, NULL );
2214 0 : if( !in_wcost_table ) {
2215 0 : in_wcost_table = acct_uses_insert( pack->writer_costs, addr_use->key );
2216 0 : in_wcost_table->total_cost = 0UL;
2217 0 : pack->written_list[ pack->written_list_cnt ] = in_wcost_table;
2218 0 : pack->written_list_cnt = fd_ulong_min( pack->written_list_cnt+1UL, pack->written_list_max-1UL );
2219 0 : }
2220 0 : in_wcost_table->total_cost += (ulong)addr_use->carried_cost;
2221 0 : }
2222 :
2223 : /* in_use_by must be set before releasing the bit reference */
2224 0 : fd_pack_addr_use_t * use = acct_uses_query( pack->acct_in_use, addr_use->key, NULL );
2225 0 : if( !use ) { use = acct_uses_insert( pack->acct_in_use, addr_use->key ); use->in_use_by = 0UL; }
2226 0 : use->in_use_by |= bank_tile_mask | fd_ulong_if( any_writers, FD_PACK_IN_USE_WRITABLE, 0UL );
2227 0 : use->in_use_by &= ~FD_PACK_IN_USE_BIT_CLEARED;
2228 :
2229 0 : use_by_bank[ use_by_bank_txn[ addr_use->last_use_in-1UL ]++ ] = *use;
2230 :
2231 0 : for( ulong k=0UL; k<(ulong)addr_use->ref_cnt; k++ ) {
2232 0 : release_result_t ret = release_bit_reference( pack, &(addr_use->key) );
2233 0 : FD_PACK_BITSET_CLEARN( bitset_rw_in_use, ret.clear_rw_bit );
2234 0 : FD_PACK_BITSET_CLEARN( bitset_w_in_use, ret.clear_w_bit );
2235 0 : }
2236 :
2237 0 : acct_uses_remove( pack->bundle_temp_map, addr_use );
2238 0 : }
2239 :
2240 0 : FD_PACK_BITSET_COPY( pack->bitset_rw_in_use, bitset_rw_in_use );
2241 0 : FD_PACK_BITSET_COPY( pack->bitset_w_in_use, bitset_w_in_use );
2242 :
2243 0 : if( FD_UNLIKELY( is_ib ) ) {
2244 0 : pack->initializer_bundle_state = FD_PACK_IB_STATE_PENDING;
2245 0 : }
2246 0 : return retval;
2247 0 : }
2248 :
2249 :
2250 : ulong
2251 : fd_pack_schedule_next_microblock( fd_pack_t * pack,
2252 : ulong total_cus,
2253 : float vote_fraction,
2254 : ulong bank_tile,
2255 741063 : fd_txn_p_t * out ) {
2256 : /* Because we schedule bundles strictly in order, we need to limit
2257 : bundles to a single bank. Otherwise, if a bundle is executing on a
2258 : certain bank and the next bundle conflicts with it, all the other
2259 : bank tiles will be idle until the first bundle completes. */
2260 741063 : if( FD_LIKELY( bank_tile==0UL ) ) {
2261 : /* Try to schedule a bundle first */
2262 438366 : int bundle_result = fd_pack_try_schedule_bundle( pack, bank_tile, out );
2263 438366 : if( FD_UNLIKELY( bundle_result>0 ) ) return (ulong)bundle_result;
2264 438366 : if( FD_UNLIKELY( bundle_result==TRY_BUNDLE_HAS_CONFLICTS ) ) return 0UL;
2265 : /* in the NO_READY_BUNDLES or DOES_NOT_FIT case, we schedule like
2266 : normal. */
2267 438366 : }
2268 :
2269 : /* TODO: Decide if these are exactly how we want to handle limits */
2270 741063 : total_cus = fd_ulong_min( total_cus, pack->lim->max_cost_per_block - pack->cumulative_block_cost );
2271 741063 : ulong vote_cus = fd_ulong_min( (ulong)((float)total_cus * vote_fraction),
2272 741063 : pack->lim->max_vote_cost_per_block - pack->cumulative_vote_cost );
2273 741063 : ulong vote_reserved_txns = fd_ulong_min( vote_cus/FD_PACK_SIMPLE_VOTE_COST,
2274 741063 : (ulong)((float)pack->lim->max_txn_per_microblock * vote_fraction) );
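      /* Worked example (hypothetical limits, for illustration only): with
         total_cus = 12,000,000 after the clamp above, vote_fraction = 0.25,
         and max_txn_per_microblock = 31, we'd get vote_cus = 3,000,000
         (possibly less if the per-block vote cost budget is nearly spent)
         and vote_reserved_txns = min( 3,000,000/FD_PACK_SIMPLE_VOTE_COST, 7 ).
         Whatever part of this reservation goes unused is handed back to the
         non-vote limits further down. */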
2275 :
2276 :
2277 741063 : if( FD_UNLIKELY( (pack->microblock_cnt>=pack->lim->max_microblocks_per_block) ) ) {
2278 0 : FD_MCNT_INC( PACK, MICROBLOCK_PER_BLOCK_LIMIT, 1UL );
2279 0 : return 0UL;
2280 0 : }
2281 741063 : if( FD_UNLIKELY( pack->data_bytes_consumed+MICROBLOCK_DATA_OVERHEAD+FD_TXN_MIN_SERIALIZED_SZ>pack->lim->max_data_bytes_per_block) ) {
2282 0 : FD_MCNT_INC( PACK, DATA_PER_BLOCK_LIMIT, 1UL );
2283 0 : return 0UL;
2284 0 : }
2285 :
2286 741063 : ulong * use_by_bank_txn = pack->use_by_bank_txn[ bank_tile ];
2287 :
2288 741063 : ulong cu_limit = total_cus - vote_cus;
2289 741063 : ulong txn_limit = pack->lim->max_txn_per_microblock - vote_reserved_txns;
2290 741063 : ulong scheduled = 0UL;
2291 741063 : ulong byte_limit = pack->lim->max_data_bytes_per_block - pack->data_bytes_consumed - MICROBLOCK_DATA_OVERHEAD;
2292 :
2293 741063 : sched_return_t status, status1;
2294 :
2295 : /* Schedule vote transactions */
2296 741063 : status1 = fd_pack_schedule_impl( pack, pack->pending_votes, vote_cus, vote_reserved_txns, byte_limit, bank_tile, pack->pending_votes_smallest, use_by_bank_txn, out+scheduled );
2297 :
2298 741063 : scheduled += status1.txns_scheduled;
2299 741063 : pack->cumulative_vote_cost += status1.cus_scheduled;
2300 741063 : pack->cumulative_block_cost += status1.cus_scheduled;
2301 741063 : pack->data_bytes_consumed += status1.bytes_scheduled;
2302 741063 : byte_limit -= status1.bytes_scheduled;
2303 741063 : use_by_bank_txn += status1.txns_scheduled;
2304 : /* Add any remaining CUs/txns to the non-vote limits */
2305 741063 : txn_limit += vote_reserved_txns - status1.txns_scheduled;
2306 741063 : cu_limit += vote_cus - status1.cus_scheduled;
2307 :
2308 :
2309 : /* Fill any remaining space with non-vote transactions */
2310 741063 : status = fd_pack_schedule_impl( pack, pack->pending, cu_limit, txn_limit, byte_limit, bank_tile, pack->pending_smallest, use_by_bank_txn, out+scheduled );
2311 :
2312 741063 : scheduled += status.txns_scheduled;
2313 741063 : pack->cumulative_block_cost += status.cus_scheduled;
2314 741063 : pack->data_bytes_consumed += status.bytes_scheduled;
2315 :
2316 741063 : ulong nonempty = (ulong)(scheduled>0UL);
2317 741063 : pack->microblock_cnt += nonempty;
2318 741063 : pack->outstanding_microblock_mask |= nonempty << bank_tile;
2319 741063 : pack->data_bytes_consumed += nonempty * MICROBLOCK_DATA_OVERHEAD;
2320 :
2321 : /* Update metrics counters */
2322 741063 : fd_pack_metrics_write( pack );
2323 741063 : FD_MGAUGE_SET( PACK, CUS_CONSUMED_IN_BLOCK, pack->cumulative_block_cost );
2324 :
2325 741063 : fd_histf_sample( pack->txn_per_microblock, scheduled );
2326 741063 : fd_histf_sample( pack->vote_per_microblock, status1.txns_scheduled );
2327 :
2328 247021 : #if FD_HAS_AVX512 && FD_PACK_USE_NON_TEMPORAL_MEMCPY
2329 247021 : _mm_sfence();
2330 247021 : #endif
2331 :
2332 741063 : return scheduled;
2333 741063 : }
2334 :
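/* Usage sketch (illustrative only; send_to_bank, MAX_MICROBLOCK_TXN, and
   remaining_block_cus are hypothetical names): a caller typically invokes
   this once per idle bank tile and later releases the tile with the
   fd_pack_microblock_complete companion from fd_pack.h once the bank
   finishes executing the microblock:

     fd_txn_p_t out[ MAX_MICROBLOCK_TXN ];
     ulong n = fd_pack_schedule_next_microblock( pack, remaining_block_cus,
                                                 0.25f, bank_tile, out );
     if( n ) send_to_bank( bank_tile, out, n );
     ...
     fd_pack_microblock_complete( pack, bank_tile );   // when execution is done
*/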
2335 274788 : ulong fd_pack_bank_tile_cnt ( fd_pack_t const * pack ) { return pack->bank_tile_cnt; }
2336 0 : ulong fd_pack_current_block_cost( fd_pack_t const * pack ) { return pack->cumulative_block_cost; }
2337 :
2338 :
2339 : void
2340 : fd_pack_set_block_limits( fd_pack_t * pack,
2341 : ulong max_microblocks_per_block,
2342 0 : ulong max_data_bytes_per_block ) {
2343 0 : pack->lim->max_microblocks_per_block = max_microblocks_per_block;
2344 0 : pack->lim->max_data_bytes_per_block = max_data_bytes_per_block;
2345 0 : }
2346 :
2347 : void
2348 : fd_pack_rebate_cus( fd_pack_t * pack,
2349 6 : fd_pack_rebate_t const * rebate ) {
2350 6 : if( FD_UNLIKELY( (rebate->ib_result!=0) & (pack->initializer_bundle_state==FD_PACK_IB_STATE_PENDING ) ) ) {
2351 0 : pack->initializer_bundle_state = fd_int_if( rebate->ib_result==1, FD_PACK_IB_STATE_READY, FD_PACK_IB_STATE_FAILED );
2352 0 : }
2353 :
2354 6 : pack->cumulative_block_cost -= rebate->total_cost_rebate;
2355 6 : pack->cumulative_vote_cost -= rebate->vote_cost_rebate;
2356 6 : pack->data_bytes_consumed -= rebate->data_bytes_rebate;
2357 6 : pack->microblock_cnt -= rebate->microblock_cnt_rebate;
2358 6 : pack->cumulative_rebated_cus += rebate->total_cost_rebate;
2359 :
2360 6 : fd_pack_addr_use_t * writer_costs = pack->writer_costs;
2361 18 : for( ulong i=0UL; i<rebate->writer_cnt; i++ ) {
2362 12 : fd_pack_addr_use_t * in_wcost_table = acct_uses_query( writer_costs, rebate->writer_rebates[i].key, NULL );
2363 12 : if( FD_UNLIKELY( !in_wcost_table ) ) FD_LOG_ERR(( "Rebate to unknown written account" ));
2364 12 : in_wcost_table->total_cost -= rebate->writer_rebates[i].rebate_cus;
2365 : /* Important: Even if this is 0, don't delete it from the table so
2366 : that the insert order doesn't get messed up. */
2367 12 : }
2368 6 : }
2369 :
2370 :
2371 : ulong
2372 : fd_pack_expire_before( fd_pack_t * pack,
2373 15 : ulong expire_before ) {
2374 15 : expire_before = fd_ulong_max( expire_before, pack->expire_before );
2375 15 : ulong deleted_cnt = 0UL;
2376 15 : fd_pack_expq_t * prq = pack->expiration_q;
2377 327 : while( (expq_cnt( prq )>0UL) & (prq->expires_at<expire_before) ) {
2378 312 : fd_pack_ord_txn_t * expired = prq->txn;
2379 :
2380 : /* fd_pack_delete_transaction also removes it from the heap */
2381 : /* All the transactions in the same bundle have the same expiration
2382 : time, so this loop will end up deleting them all, even with
2383 : delete_full_bundle set to 0. */
2384 312 : FD_TEST( delete_transaction( pack, expired, 0, 1 ) );
2385 312 : deleted_cnt++;
2386 312 : }
2387 :
2388 15 : pack->expire_before = expire_before;
2389 15 : return deleted_cnt;
2390 15 : }
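/* Usage sketch (illustrative; current_bound is a hypothetical name): the
   bound is made monotone by the fd_ulong_max above and the comparison is
   strict, so transactions with expires_at exactly equal to expire_before
   survive.  The return value counts only the deletions made by this call:

     ulong pruned = fd_pack_expire_before( pack, current_bound );
*/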
2391 :
2392 : void
2393 2646 : fd_pack_end_block( fd_pack_t * pack ) {
2394 2646 : fd_histf_sample( pack->net_cus_per_block, pack->cumulative_block_cost );
2395 2646 : fd_histf_sample( pack->rebated_cus_per_block, pack->cumulative_rebated_cus );
2396 2646 : fd_histf_sample( pack->scheduled_cus_per_block, pack->cumulative_rebated_cus + pack->cumulative_block_cost );
2397 :
2398 2646 : pack->microblock_cnt = 0UL;
2399 2646 : pack->data_bytes_consumed = 0UL;
2400 2646 : pack->cumulative_block_cost = 0UL;
2401 2646 : pack->cumulative_vote_cost = 0UL;
2402 2646 : pack->cumulative_rebated_cus = 0UL;
2403 2646 : pack->outstanding_microblock_mask = 0UL;
2404 :
2405 2646 : pack->initializer_bundle_state = FD_PACK_IB_STATE_NOT_INITIALIZED;
2406 :
2407 2646 : acct_uses_clear( pack->acct_in_use );
2408 :
2409 2646 : if( FD_LIKELY( pack->written_list_cnt<pack->written_list_max-1UL ) ) {
2410 : /* The less dangerous way of doing this is to instead record the
2411 :    keys we inserted and do a query followed by a delete for each
2412 :    key.  The downside of that is that keys are 32 bytes while a
2413 :    pointer is only 8 bytes, plus we'd pay the computational cost
2414 :    of each query.
2415 :
2416 :    However, if we're careful, we can pull this off.  We require
2417 :    two things.  First, the map must have started empty and seen
2418 :    nothing but inserts and updates; in particular, no deletions.
2419 :    Second, we have to delete in the opposite order that we
2420 :    inserted, essentially unwinding the inserts we did.  In the
2421 :    common case the element after the one we delete is empty, so
2422 :    the removal terminates immediately.  It's possible there's
2423 :    another, independent probe sequence that remains entirely
2424 :    intact starting at the element after, but we'll never hit the
2425 :    MAP_MOVE case. */
2426 779364 : for( ulong i=0UL; i<pack->written_list_cnt; i++ ) {
2427 : /* Clearing the cost field here is unnecessary (since it gets
2428 : cleared on insert), but makes debugging a bit easier. */
2429 776718 : pack->written_list[ pack->written_list_cnt - 1UL - i ]->total_cost = 0UL;
2430 776718 : acct_uses_remove( pack->writer_costs, pack->written_list[ pack->written_list_cnt - 1UL - i ] );
2431 776718 : }
2432 2646 : } else {
2433 0 : acct_uses_clear( pack->writer_costs );
2434 0 : }
2435 2646 : pack->written_list_cnt = 0UL;
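/* Informal argument for why the unwind above needs no MAP_MOVE (added
   reasoning, assuming the standard linear-probing scheme): when an element
   was inserted, its slot was empty, and since the map has seen no deletions
   that slot was also empty when every earlier element was inserted, so no
   earlier element's probe sequence passes through it.  Deleting in reverse
   insertion order means everything still in the table is older than the
   element being removed, so freeing its slot can never strand a surviving
   element and the plain remove suffices. */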
2436 :
2437 : /* compressed_slot_number is always > FD_PACK_SKIP_CNT, so the +1 is
2438 :    the max unless it wraps, in which case we reset to FD_PACK_SKIP_CNT+1. */
2439 2646 : pack->compressed_slot_number = fd_ushort_max( (ushort)(pack->compressed_slot_number+1), (ushort)(FD_PACK_SKIP_CNT+1) );
2440 :
2441 2646 : FD_PACK_BITSET_CLEAR( pack->bitset_rw_in_use );
2442 2646 : FD_PACK_BITSET_CLEAR( pack->bitset_w_in_use );
2443 :
2444 9234 : for( ulong i=0UL; i<pack->bank_tile_cnt; i++ ) pack->use_by_bank_cnt[i] = 0UL;
2445 :
2446 : /* If our stake is low and we don't become leader often, end_block
2447 :    might only get called on the order of once per hour, which seems
2448 :    too infrequent a place to do anything metrics-related.  However,
2449 :    we only update the histograms while we are leader, so this is
2450 :    actually a good place to copy them. */
2451 2646 : FD_MHIST_COPY( PACK, TOTAL_TRANSACTIONS_PER_MICROBLOCK_COUNT, pack->txn_per_microblock );
2452 2646 : FD_MHIST_COPY( PACK, VOTES_PER_MICROBLOCK_COUNT, pack->vote_per_microblock );
2453 :
2454 2646 : FD_MGAUGE_SET( PACK, CUS_CONSUMED_IN_BLOCK, 0UL );
2455 2646 : FD_MHIST_COPY( PACK, CUS_SCHEDULED, pack->scheduled_cus_per_block );
2456 2646 : FD_MHIST_COPY( PACK, CUS_REBATED, pack->rebated_cus_per_block );
2457 2646 : FD_MHIST_COPY( PACK, CUS_NET, pack->net_cus_per_block );
2458 2646 : }
2459 :
2460 : static void
2461 : release_tree( treap_t * treap,
2462 : sig2txn_t * signature_map,
2463 0 : fd_pack_ord_txn_t * pool ) {
2464 0 : treap_fwd_iter_t next;
2465 0 : for( treap_fwd_iter_t it=treap_fwd_iter_init( treap, pool ); !treap_fwd_iter_done( it ); it=next ) {
2466 0 : next = treap_fwd_iter_next( it, pool );
2467 0 : ulong idx = treap_fwd_iter_idx( it );
2468 0 : pool[ idx ].root = FD_ORD_TXN_ROOT_FREE;
2469 0 : treap_idx_remove ( treap, idx, pool );
2470 0 : sig2txn_idx_remove_fast( signature_map, idx, pool );
2471 0 : trp_pool_idx_release ( pool, idx );
2472 0 : }
2473 0 : }
2474 :
2475 : void
2476 0 : fd_pack_clear_all( fd_pack_t * pack ) {
2477 0 : pack->pending_txn_cnt = 0UL;
2478 0 : pack->microblock_cnt = 0UL;
2479 0 : pack->cumulative_block_cost = 0UL;
2480 0 : pack->cumulative_vote_cost = 0UL;
2481 0 : pack->cumulative_rebated_cus = 0UL;
2482 :
2483 0 : pack->pending_smallest->cus = ULONG_MAX;
2484 0 : pack->pending_smallest->bytes = ULONG_MAX;
2485 0 : pack->pending_votes_smallest->cus = ULONG_MAX;
2486 0 : pack->pending_votes_smallest->bytes = ULONG_MAX;
2487 :
2488 0 : release_tree( pack->pending, pack->signature_map, pack->pool );
2489 0 : release_tree( pack->pending_votes, pack->signature_map, pack->pool );
2490 0 : release_tree( pack->pending_bundles, pack->signature_map, pack->pool );
2491 :
2492 0 : ulong extra_depth = fd_ulong_if( !!pack->bundle_meta_sz, 1UL+2UL*FD_PACK_MAX_TXN_PER_BUNDLE, 1UL );
2493 0 : for( ulong i=0UL; i<pack->pack_depth+extra_depth; i++ ) {
2494 0 : if( FD_UNLIKELY( pack->pool[ i ].root!=FD_ORD_TXN_ROOT_FREE ) ) {
2495 0 : fd_pack_ord_txn_t * const del = pack->pool + i;
2496 0 : fd_txn_t * txn = TXN( del->txn );
2497 0 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, del->txn->payload );
2498 0 : fd_acct_addr_t const * alt_adj = del->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
2499 0 : fd_acct_addr_t penalty_acct = *ACCT_IDX_TO_PTR( FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( del->root ) );
2500 0 : fd_pack_penalty_treap_t * penalty_treap = penalty_map_query( pack->penalty_treaps, penalty_acct, NULL );
2501 0 : FD_TEST( penalty_treap );
2502 0 : release_tree( penalty_treap->penalty_treap, pack->signature_map, pack->pool );
2503 0 : }
2504 0 : }
2505 :
2506 0 : pack->compressed_slot_number = (ushort)(FD_PACK_SKIP_CNT+1);
2507 :
2508 0 : expq_remove_all( pack->expiration_q );
2509 :
2510 0 : acct_uses_clear( pack->acct_in_use );
2511 0 : acct_uses_clear( pack->writer_costs );
2512 :
2513 0 : penalty_map_clear( pack->penalty_treaps );
2514 :
2515 0 : FD_PACK_BITSET_CLEAR( pack->bitset_rw_in_use );
2516 0 : FD_PACK_BITSET_CLEAR( pack->bitset_w_in_use );
2517 0 : bitset_map_clear( pack->acct_to_bitset );
2518 0 : pack->bitset_avail[ 0 ] = FD_PACK_BITSET_SLOWPATH;
2519 0 : for( ulong i=0UL; i<FD_PACK_BITSET_MAX; i++ ) pack->bitset_avail[ i+1UL ] = (ushort)i;
2520 0 : pack->bitset_avail_cnt = FD_PACK_BITSET_MAX;
2521 :
2522 0 : for( ulong i=0UL; i<pack->bank_tile_cnt; i++ ) pack->use_by_bank_cnt[i] = 0UL;
2523 0 : }
2524 :
2525 :
2526 : /* If delete_full_bundle is non-zero and the transaction to delete is
2527 : part of a bundle, the rest of the bundle it is part of will be
2528 : deleted as well.
2529 : If move_from_penalty_treap is non-zero and the transaction to delete
2530 : is in the pending treap, move the best transaction in any of the
2531 : conflicting penalty treaps to the pending treap (if there is one). */
2532 : int
2533 : delete_transaction( fd_pack_t * pack,
2534 : fd_pack_ord_txn_t * containing,
2535 : int delete_full_bundle,
2536 494937 : int move_from_penalty_treap ) {
2537 :
2538 494937 : fd_txn_t * txn = TXN( containing->txn );
2539 494937 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, containing->txn->payload );
2540 494937 : fd_acct_addr_t const * alt_adj = containing->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
2541 :
2542 494937 : treap_t * root = NULL;
2543 494937 : int root_idx = containing->root;
2544 494937 : fd_pack_penalty_treap_t * penalty_treap = NULL;
2545 494937 : switch( root_idx & FD_ORD_TXN_ROOT_TAG_MASK ) {
2546 0 : case FD_ORD_TXN_ROOT_FREE: /* Should be impossible */ return 0;
2547 492254 : case FD_ORD_TXN_ROOT_PENDING: root = pack->pending; break;
2548 0 : case FD_ORD_TXN_ROOT_PENDING_VOTE: root = pack->pending_votes; break;
2549 0 : case FD_ORD_TXN_ROOT_PENDING_BUNDLE: root = pack->pending_bundles; break;
2550 2683 : case FD_ORD_TXN_ROOT_PENALTY( 0 ): {
2551 2683 : fd_acct_addr_t penalty_acct = *ACCT_IDX_TO_PTR( FD_ORD_TXN_ROOT_PENALTY_ACCT_IDX( root_idx ) );
2552 2683 : penalty_treap = penalty_map_query( pack->penalty_treaps, penalty_acct, NULL );
2553 2683 : FD_TEST( penalty_treap );
2554 2683 : root = penalty_treap->penalty_treap;
2555 2683 : break;
2556 2683 : }
2557 494937 : }
2558 :
2559 494937 : if( FD_UNLIKELY( delete_full_bundle & (root==pack->pending_bundles) ) ) {
2560 : /* When we delete, the structure of the treap may move around, but
2561 : pointers to inside the pool will remain valid */
2562 0 : fd_pack_ord_txn_t * bundle_ptrs[ FD_PACK_MAX_TXN_PER_BUNDLE-1UL ];
2563 0 : fd_pack_ord_txn_t * pool = pack->pool;
2564 0 : ulong cnt = 0UL;
2565 0 : ulong bundle_idx = RC_TO_REL_BUNDLE_IDX( containing->rewards, containing->compute_est );
2566 :
2567 : /* Iterate in both directions from the current transaction */
2568 0 : for( treap_fwd_iter_t _cur=treap_fwd_iter_next( (treap_fwd_iter_t)treap_idx_fast( containing, pool ), pool );
2569 0 : !treap_fwd_iter_done( _cur ); _cur=treap_fwd_iter_next( _cur, pool ) ) {
2570 0 : fd_pack_ord_txn_t * cur = treap_fwd_iter_ele( _cur, pool );
2571 0 : if( FD_LIKELY( bundle_idx==RC_TO_REL_BUNDLE_IDX( cur->rewards, cur->compute_est ) ) ) {
2572 0 : bundle_ptrs[ cnt++ ] = cur;
2573 0 : } else {
2574 0 : break;
2575 0 : }
2576 0 : FD_TEST( cnt<FD_PACK_MAX_TXN_PER_BUNDLE );
2577 0 : }
2578 :
2579 0 : for( treap_rev_iter_t _cur=treap_rev_iter_next( (treap_rev_iter_t)treap_idx_fast( containing, pool ), pool );
2580 0 : !treap_rev_iter_done( _cur ); _cur=treap_rev_iter_next( _cur, pool ) ) {
2581 0 : fd_pack_ord_txn_t * cur = treap_rev_iter_ele( _cur, pool );
2582 0 : if( FD_LIKELY( bundle_idx==RC_TO_REL_BUNDLE_IDX( cur->rewards, cur->compute_est ) ) ) {
2583 0 : bundle_ptrs[ cnt++ ] = cur;
2584 0 : } else {
2585 0 : break;
2586 0 : }
2587 0 : FD_TEST( cnt<FD_PACK_MAX_TXN_PER_BUNDLE );
2588 0 : }
2589 :
2590 : /* Delete them each, setting delete_full_bundle to 0 to avoid
2591 : infinite recursion. */
2592 0 : for( ulong k=0UL; k<cnt; k++ ) delete_transaction( pack, bundle_ptrs[ k ], 0, 0 );
2593 0 : }
2594 :
2595 :
2596 494937 : if( FD_UNLIKELY( move_from_penalty_treap & (root==pack->pending) ) ) {
2597 :
2598 492254 : fd_pack_ord_txn_t * best = NULL;
2599 492254 : fd_pack_penalty_treap_t * best_penalty = NULL;
2600 :
2601 492254 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
2602 986230 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2603 493976 : fd_pack_penalty_treap_t * p_trp = penalty_map_query( pack->penalty_treaps, *ACCT_ITER_TO_PTR( iter ), NULL );
2604 493976 : if( FD_UNLIKELY( p_trp ) ) {
2605 1235 : fd_pack_ord_txn_t * best_in_trp = treap_rev_iter_ele( treap_rev_iter_init( p_trp->penalty_treap, pack->pool ), pack->pool );
2606 1235 : if( FD_UNLIKELY( !best || COMPARE_WORSE( best, best_in_trp ) ) ) {
2607 651 : best = best_in_trp;
2608 651 : best_penalty = p_trp;
2609 651 : }
2610 1235 : }
2611 493976 : }
2612 :
2613 492254 : if( FD_LIKELY( best ) ) {
2614 : /* move best to the main treap */
2615 651 : treap_ele_remove( best_penalty->penalty_treap, best, pack->pool );
2616 651 : best->root = FD_ORD_TXN_ROOT_PENDING;
2617 651 : treap_ele_insert( pack->pending, best, pack->pool );
2618 :
2619 651 : pack->pending_smallest->cus = fd_ulong_min( pack->pending_smallest->cus, best->compute_est );
2620 651 : pack->pending_smallest->bytes = fd_ulong_min( pack->pending_smallest->bytes, best->txn_e->txnp->payload_sz );
2621 :
2622 651 : if( FD_UNLIKELY( !treap_ele_cnt( best_penalty->penalty_treap ) ) ) {
2623 9 : treap_delete( treap_leave( best_penalty->penalty_treap ) );
2624 9 : penalty_map_remove( pack->penalty_treaps, best_penalty );
2625 9 : }
2626 651 : }
2627 492254 : }
2628 :
2629 494937 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_ALL );
2630 1999341 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2631 1504404 : if( FD_UNLIKELY( fd_pack_unwritable_contains( ACCT_ITER_TO_PTR( iter ) ) ) ) continue;
2632 :
2633 1009467 : release_result_t ret = release_bit_reference( pack, ACCT_ITER_TO_PTR( iter ) );
2634 1009467 : FD_PACK_BITSET_CLEARN( pack->bitset_rw_in_use, ret.clear_rw_bit );
2635 1009467 : FD_PACK_BITSET_CLEARN( pack->bitset_w_in_use, ret.clear_w_bit );
2636 1009467 : }
2637 494937 : expq_remove( pack->expiration_q, containing->expq_idx );
2638 494937 : containing->root = FD_ORD_TXN_ROOT_FREE;
2639 494937 : treap_ele_remove( root, containing, pack->pool );
2640 494937 : sig2txn_ele_remove_fast( pack->signature_map, containing, pack->pool );
2641 494937 : trp_pool_ele_release( pack->pool, containing );
2642 :
2643 494937 : pack->pending_txn_cnt--;
2644 :
2645 494937 : if( FD_UNLIKELY( penalty_treap && treap_ele_cnt( root )==0UL ) ) {
2646 0 : penalty_map_remove( pack->penalty_treaps, penalty_treap );
2647 0 : }
2648 :
2649 494937 : return 1;
2650 494937 : }
2651 :
2652 : int
2653 : fd_pack_delete_transaction( fd_pack_t * pack,
2654 57 : fd_ed25519_sig_t const * sig0 ) {
2655 57 : int cnt = 0;
2656 57 : ulong next = ULONG_MAX;
2657 57 : for( ulong idx = sig2txn_idx_query_const( pack->signature_map, (wrapped_sig_t const *)sig0, ULONG_MAX, pack->pool );
2658 90 : idx!=ULONG_MAX; idx=next ) {
2659 : /* Deleting while iterating (and possibly deleting not just this
2660 :    element but its whole bundle) feels a bit dangerous, but it is
2661 :    actually fine: a bundle can't contain two transactions with the
2662 :    same signature, so next is never part of the same bundle as idx,
2663 :    which means that deleting idx cannot delete next. */
2664 33 : next = sig2txn_idx_next_const( idx, ULONG_MAX, pack->pool );
2665 33 : cnt += delete_transaction( pack, pack->pool+idx, 1, 1 );
2666 33 : }
2667 :
2668 57 : return cnt;
2669 57 : }
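/* Usage sketch (illustrative): a caller identifies the transaction by its
   first signature, e.g. using the same accessor this file uses elsewhere;
   txn and payload stand for a parsed transaction and its raw bytes:

     fd_ed25519_sig_t const * sig0 = fd_txn_get_signatures( txn, payload );
     int removed = fd_pack_delete_transaction( pack, sig0 );
     // removed counts every deleted transaction, including bundle-mates,
     // since delete_full_bundle and move_from_penalty_treap are set above.
*/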
2670 :
2671 :
2672 : int
2673 : fd_pack_verify( fd_pack_t * pack,
2674 0 : void * scratch ) {
2675 : /* Invariants:
2676 :    the sig2txn map has exactly the same contents as all treaps combined
2677 :    root matches the treap that contains the transaction
2678 :    keys of acct_to_bitset are exactly the union of all accounts in all
2679 :    transactions in the treaps, with ref counts maintained appropriately
2680 :    bits in bitset_avail are the complement of the bits allocated in
2681 :    acct_to_bitset
2682 :    expires_at is consistent between the treap and the prq */
2683 :
2684 : /* TODO:
2685 : bitset_{r}w_in_use = bitset_map_query( everything in acct_in_use that doesn't have FD_PACK_IN_USE_BIT_CLEARED )
2686 : use_by_bank does not contain duplicates
2687 : use_by_bank consistent with acct_in_use
2688 : bitset_w_in_use & bitset_rw_in_use == bitset_w_in_use
2689 : elements in pool but not in a treap have root set to free
2690 : all penalty treaps have at least one transaction */
2691 0 : #define VERIFY_TEST( cond, ... ) do { \
2692 0 : if( FD_UNLIKELY( !(cond) ) ) { \
2693 0 : FD_LOG_WARNING(( __VA_ARGS__ )); \
2694 0 : return -(__LINE__); \
2695 0 : } \
2696 0 : } while( 0 )
2697 :
2698 0 : ulong max_acct_in_treap = pack->pack_depth * FD_TXN_ACCT_ADDR_MAX;
2699 0 : int lg_acct_in_trp = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_treap ) );
2700 0 : void * _bitset_map_copy = scratch;
2701 0 : void * _bitset_map_orig = bitset_map_leave( pack->acct_to_bitset );
2702 0 : fd_memcpy( _bitset_map_copy, _bitset_map_orig, bitset_map_footprint( lg_acct_in_trp ) );
2703 :
2704 0 : fd_pack_bitset_acct_mapping_t * bitset_copy = bitset_map_join( _bitset_map_copy );
2705 :
2706 : /* Check that each bit is in exactly one place */
2707 0 : FD_PACK_BITSET_DECLARE( processed ); FD_PACK_BITSET_CLEAR( processed );
2708 0 : FD_PACK_BITSET_DECLARE( bit ); FD_PACK_BITSET_CLEAR( bit );
2709 0 : FD_PACK_BITSET_DECLARE( full ); FD_PACK_BITSET_CLEAR( full );
2710 :
2711 0 : if( FD_UNLIKELY( pack->bitset_avail[0]!=FD_PACK_BITSET_SLOWPATH ) ) return -1;
2712 0 : for( ulong i=1UL; i<=pack->bitset_avail_cnt; i++ ) {
2713 0 : FD_PACK_BITSET_CLEAR( bit );
2714 0 : FD_PACK_BITSET_SETN( bit, pack->bitset_avail[ i ] );
2715 0 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, processed, processed ),
2716 0 : "bit %hu in avail set twice", pack->bitset_avail[ i ] );
2717 0 : FD_PACK_BITSET_OR( processed, bit );
2718 0 : }
2719 :
2720 0 : ulong total_references = 0UL;
2721 0 : for( ulong i=0UL; i<bitset_map_slot_cnt( bitset_copy ); i++ ) {
2722 0 : if( !bitset_map_key_inval( bitset_copy[ i ].key ) ) {
2723 0 : VERIFY_TEST( bitset_copy[ i ].ref_cnt>0UL, "account address in table with 0 ref count" );
2724 :
2725 0 : total_references += bitset_copy[ i ].ref_cnt;
2726 :
2727 0 : FD_PACK_BITSET_CLEAR( bit );
2728 0 : FD_PACK_BITSET_SETN( bit, bitset_copy[ i ].bit );
2729 0 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, processed, processed ), "bit %hu used twice", bitset_copy[ i ].bit );
2730 0 : FD_PACK_BITSET_OR( processed, bit );
2731 0 : }
2732 0 : }
2733 0 : for( ulong i=0UL; i<FD_PACK_BITSET_MAX; i++ ) {
2734 0 : FD_PACK_BITSET_CLEAR( bit );
2735 0 : FD_PACK_BITSET_SETN( bit, i );
2736 0 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, processed, processed ), "bit %lu missing", i );
2737 0 : FD_PACK_BITSET_SETN( full, i );
2738 0 : }
2739 :
2740 :
2741 0 : fd_pack_ord_txn_t * pool = pack->pool;
2742 0 : treap_t * treaps[ 2 ] = { pack->pending, pack->pending_votes };
2743 0 : ulong txn_cnt = 0UL;
2744 :
2745 0 : for( ulong k=0UL; k<2; k++ ) {
2746 0 : treap_t * treap = treaps[ k ];
2747 :
2748 0 : for( treap_rev_iter_t _cur=treap_rev_iter_init( treap, pool ); !treap_rev_iter_done( _cur );
2749 0 : _cur=treap_rev_iter_next( _cur, pool ) ) {
2750 0 : txn_cnt++;
2751 0 : fd_pack_ord_txn_t const * cur = treap_rev_iter_ele_const( _cur, pool );
2752 0 : fd_txn_t const * txn = TXN(cur->txn);
2753 0 : fd_acct_addr_t const * accts = fd_txn_get_acct_addrs( txn, cur->txn->payload );
2754 0 : fd_acct_addr_t const * alt_adj = cur->txn_e->alt_accts - fd_txn_account_cnt( txn, FD_TXN_ACCT_CAT_IMM );
2755 :
2756 0 : fd_ed25519_sig_t const * sig0 = fd_txn_get_signatures( txn, cur->txn->payload );
2757 :
2758 0 : fd_pack_ord_txn_t const * in_tbl = sig2txn_ele_query_const( pack->signature_map, (wrapped_sig_t const *)sig0, NULL, pool );
2759 0 : VERIFY_TEST( in_tbl, "signature missing from sig2txn" );
2760 0 : VERIFY_TEST( (ulong)(cur->root)==k+1, "treap element had bad root" );
2761 0 : VERIFY_TEST( cur->expires_at>=pack->expire_before, "treap element expired" );
2762 :
2763 0 : fd_pack_expq_t const * eq = pack->expiration_q + cur->expq_idx;
2764 0 : VERIFY_TEST( eq->txn==cur, "expq inconsistent" );
2765 0 : VERIFY_TEST( eq->expires_at==cur->expires_at, "expq expires_at inconsistent" );
2766 :
2767 0 : FD_PACK_BITSET_DECLARE( complement );
2768 0 : FD_PACK_BITSET_COPY( complement, full );
2769 0 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_WRITABLE );
2770 0 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2771 0 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
2772 :
2773 0 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( bitset_copy, acct, NULL );
2774 0 : VERIFY_TEST( q, "account in transaction missing from bitset mapping" );
2775 0 : VERIFY_TEST( q->ref_cnt>0UL, "account in transaction ref_cnt already 0" );
2776 0 : q->ref_cnt--;
2777 0 : total_references--;
2778 :
2779 0 : FD_PACK_BITSET_CLEAR( bit );
2780 0 : FD_PACK_BITSET_SETN( bit, q->bit );
2781 0 : if( q->bit<FD_PACK_BITSET_MAX ) {
2782 0 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, cur->rw_bitset, cur->rw_bitset ), "missing from rw bitset" );
2783 0 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, cur->w_bitset, cur->w_bitset ), "missing from w bitset" );
2784 0 : }
2785 0 : FD_PACK_BITSET_CLEARN( complement, q->bit );
2786 0 : }
2787 0 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( complement, complement, cur->w_bitset, cur->w_bitset ), "extra in w bitset" );
2788 :
2789 0 : for( fd_txn_acct_iter_t iter=fd_txn_acct_iter_init( txn, FD_TXN_ACCT_CAT_READONLY );
2790 0 : iter!=fd_txn_acct_iter_end(); iter=fd_txn_acct_iter_next( iter ) ) {
2791 :
2792 0 : fd_acct_addr_t acct = *ACCT_ITER_TO_PTR( iter );
2793 0 : if( FD_UNLIKELY( fd_pack_unwritable_contains( &acct ) ) ) continue;
2794 0 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( bitset_copy, acct, NULL );
2795 0 : VERIFY_TEST( q, "account in transaction missing from bitset mapping" );
2796 0 : VERIFY_TEST( q->ref_cnt>0UL, "account in transaction ref_cnt already 0" );
2797 0 : q->ref_cnt--;
2798 0 : total_references--;
2799 :
2800 0 : FD_PACK_BITSET_CLEAR( bit );
2801 0 : FD_PACK_BITSET_SETN( bit, q->bit );
2802 0 : if( q->bit<FD_PACK_BITSET_MAX ) {
2803 0 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, cur->rw_bitset, cur->rw_bitset ), "missing from rw bitset" );
2804 0 : }
2805 0 : FD_PACK_BITSET_CLEARN( complement, q->bit );
2806 0 : }
2807 0 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( complement, complement, cur->rw_bitset, cur->rw_bitset ), "extra in rw bitset" );
2808 0 : }
2809 0 : }
2810 :
2811 0 : bitset_map_leave( bitset_copy );
2812 :
2813 0 : VERIFY_TEST( total_references==0UL, "extra references in bitset mapping" );
2814 0 : ulong sig2txn_key_cnt = 0UL;
2815 0 : for( sig2txn_iter_t iter = sig2txn_iter_init( pack->signature_map, pool );
2816 0 : !sig2txn_iter_done( iter, pack->signature_map, pool );
2817 0 : iter = sig2txn_iter_next( iter, pack->signature_map, pool ) ) {
2818 0 : sig2txn_key_cnt++;
2819 0 : }
2820 0 : VERIFY_TEST( txn_cnt==sig2txn_key_cnt, "extra signatures in sig2txn" );
2821 :
2822 0 : bitset_map_join( _bitset_map_orig );
2823 :
2824 0 : ulong max_acct_in_flight = pack->bank_tile_cnt * (FD_TXN_ACCT_ADDR_MAX * pack->lim->max_txn_per_microblock + 1UL);
2825 0 : int lg_uses_tbl_sz = fd_ulong_find_msb( fd_ulong_pow2_up( 2UL*max_acct_in_flight ) );
2826 :
2827 0 : void * _acct_in_use_copy = scratch;
2828 0 : void * _acct_in_use_orig = acct_uses_leave( pack->acct_in_use );
2829 0 : fd_memcpy( _acct_in_use_copy, _acct_in_use_orig, acct_uses_footprint( lg_uses_tbl_sz ) );
2830 :
2831 0 : fd_pack_addr_use_t * acct_in_use_copy = acct_uses_join( _acct_in_use_copy );
2832 :
2833 0 : FD_PACK_BITSET_DECLARE( w_complement );
2834 0 : FD_PACK_BITSET_DECLARE( rw_complement );
2835 0 : FD_PACK_BITSET_COPY( w_complement, full );
2836 0 : FD_PACK_BITSET_COPY( rw_complement, full );
2837 :
2838 0 : FD_PACK_BITSET_DECLARE( rw_bitset ); FD_PACK_BITSET_COPY( rw_bitset, pack->bitset_rw_in_use );
2839 0 : FD_PACK_BITSET_DECLARE( w_bitset ); FD_PACK_BITSET_COPY( w_bitset, pack->bitset_w_in_use );
2840 :
2841 :
2842 0 : ulong const EMPTY_MASK = ~(FD_PACK_IN_USE_WRITABLE | FD_PACK_IN_USE_BIT_CLEARED);
2843 :
2844 0 : for( ulong bank=0UL; bank<pack->bank_tile_cnt; bank++ ) {
2845 :
2846 0 : fd_pack_addr_use_t const * base = pack->use_by_bank[ bank ];
2847 0 : ulong bank_mask = 1UL << bank;
2848 :
2849 0 : for( ulong i=0UL; i<pack->use_by_bank_cnt[ bank ]; i++ ) {
2850 0 : fd_pack_addr_use_t * use = acct_uses_query( acct_in_use_copy, base[i].key, NULL );
2851 0 : VERIFY_TEST( use, "acct in use by bank not in acct_in_use, or in uses_by_bank twice" );
2852 :
2853 0 : VERIFY_TEST( use->in_use_by & bank_mask, "acct in uses_by_bank doesn't have corresponding bit set in acct_in_use, or it was in the list twice" );
2854 :
2855 0 : fd_pack_bitset_acct_mapping_t * q = bitset_map_query( pack->acct_to_bitset, base[i].key, NULL );
2856 : /* The normal case is that the acct->bit mapping is preserved
2857 : while in use by other transactions in the pending list. This
2858 : might not always happen though. It's okay for the mapping to
2859 : get deleted while the acct is in use, which is noted with
2860 : BIT_CLEARED. If that is set, the mapping may not exist, or it
2861 : may have been re-created, perhaps with a different bit. */
2862 0 : if( q==NULL ) VERIFY_TEST( use->in_use_by & FD_PACK_IN_USE_BIT_CLEARED, "acct in use not in acct_to_bitset, but not marked as cleared" );
2863 0 : else if( !(use->in_use_by & FD_PACK_IN_USE_BIT_CLEARED) ) {
2864 0 : FD_PACK_BITSET_CLEAR( bit );
2865 0 : FD_PACK_BITSET_SETN( bit, q->bit );
2866 0 : if( q->bit<FD_PACK_BITSET_MAX ) {
2867 0 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, rw_bitset, rw_bitset ), "missing from rw bitset" );
2868 0 : if( use->in_use_by & FD_PACK_IN_USE_WRITABLE ) {
2869 0 : VERIFY_TEST( !FD_PACK_BITSET_INTERSECT4_EMPTY( bit, bit, w_bitset, w_bitset ), "missing from w bitset" );
2870 0 : FD_PACK_BITSET_CLEARN( w_complement, q->bit );
2871 0 : }
2872 0 : }
2873 0 : FD_PACK_BITSET_CLEARN( rw_complement, q->bit );
2874 0 : }
2875 0 : if( use->in_use_by & FD_PACK_IN_USE_WRITABLE ) VERIFY_TEST( (use->in_use_by & EMPTY_MASK)==bank_mask, "writable, but in use by multiple" );
2876 :
2877 0 : use->in_use_by &= ~bank_mask;
2878 0 : if( !(use->in_use_by & EMPTY_MASK) ) acct_uses_remove( acct_in_use_copy, use );
2879 0 : }
2880 0 : }
2881 0 : VERIFY_TEST( acct_uses_key_cnt( acct_in_use_copy )==0UL, "stray uses in acct_in_use" );
2882 0 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( rw_complement, rw_complement, rw_bitset, rw_bitset ), "extra in rw bitset" );
2883 0 : VERIFY_TEST( FD_PACK_BITSET_INTERSECT4_EMPTY( w_complement, w_complement, w_bitset, w_bitset ), "extra in w bitset" );
2884 :
2885 0 : acct_uses_leave( acct_in_use_copy );
2886 :
2887 0 : acct_uses_join( _acct_in_use_orig );
2888 0 : return 0;
2889 0 : }
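/* Usage sketch (illustrative): scratch must be able to hold a copy of
   whichever internal table is larger (the bitset mapping or acct_in_use,
   both memcpy'd above), and a nonzero return is the negated __LINE__ of
   the failing VERIFY_TEST:

     int err = fd_pack_verify( pack, scratch );
     if( FD_UNLIKELY( err ) ) FD_LOG_ERR(( "fd_pack invariant violated (%i)", err ));
*/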
2890 :
2891 0 : void * fd_pack_leave ( fd_pack_t * pack ) { FD_COMPILER_MFENCE(); return (void *)pack; }
2892 0 : void * fd_pack_delete( void * mem ) { FD_COMPILER_MFENCE(); return mem; }
|