LCOV - code coverage report
Current view: top level - util/alloc - fd_alloc.c               Hit    Total  Coverage
Test: cov.lcov                                        Lines:    490      553    88.6 %
Date: 2024-11-13 11:58:15                         Functions:     31       34    91.2 %

          Line data    Source code
       1             : #include "fd_alloc.h"
       2             : #include "fd_alloc_cfg.h"
       3             : #include "../sanitize/fd_asan.h"
       4             : /* Note: this will still compile on platforms without FD_HAS_ATOMIC.  It
       5             :    should only be used single threaded in those use cases.  (The code
       6             :    does imitate at a very low level the operations required by
       7             :    FD_HAS_ATOMIC but this is to minimize the amount of code differences to
       8             :    test.) */
       9             : 
      10             : /* sizeclass APIs *****************************************************/
      11             : 
      12             : /* fd_alloc_preferred_sizeclass returns the tightest fitting sizeclass
      13             :    for the given footprint.  The caller promises there is at least one
      14             :    possible size class (i.e. that footprint is in
      15             :    [0,FD_ALLOC_FOOTPRINT_SMALL_THRESH]).  The return will be in
      16             :    [0,FD_ALLOC_SIZECLASS_CNT). */
      17             : 
      18             : FD_STATIC_ASSERT( 64UL<FD_ALLOC_SIZECLASS_CNT && FD_ALLOC_SIZECLASS_CNT<=128UL, limits );
      19             : 
      20             : static inline ulong
      21   170946714 : fd_alloc_preferred_sizeclass( ulong footprint ) {
      22   170946714 :   ulong l = 0UL;
      23   170946714 :   ulong h = FD_ALLOC_SIZECLASS_CNT-1UL;
      24             : 
      25             :   /* Fixed count loop without early exit to make it easy for compiler to
      26             :      unroll and nominally eliminate all branches for fast, highly
      27             :      deterministic performance with no consumption of BTB resources.
      28             :      Note that 7 assumes 64<=FD_ALLOC_SIZECLASS_CNT-1<128.  FIXME: check
      29             :      the compiler is doing the right thing here with unrolling and
      30             :      branch elimination. */
      31             : 
      32  1367573712 :   for( ulong r=0UL; r<7UL; r++ ) {
      33             : 
      34             :     /* At this point sizeclasses<l are known to be inadequate and
      35             :        sizeclasses>=h are known to be suitable.  Sizeclasses in [l,h)
      36             :        have not been tested. */
      37             : 
      38  1196626998 :     ulong m = (l+h)>>1; /* Note: no overflow for reasonable sizeclass_cnt and l<=m<=h */
      39  1196626998 :     int   c = (((ulong)fd_alloc_sizeclass_cfg[ m ].block_footprint)>=footprint);
      40  1196626998 :     l = fd_ulong_if( c, l, m+1UL ); /* cmov */
      41  1196626998 :     h = fd_ulong_if( c, m, h     ); /* cmov */
      42  1196626998 :   }
      43             : 
      44   170946714 :   return h;
      45   170946714 : }
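                      : /* For illustration, a minimal sketch of the same fixed-trip-count
                      :    technique over a plain sorted table (the table below is
                      :    hypothetical, not the actual fd_alloc_sizeclass_cfg footprints):
                      : 
                      :      static ulong const tbl[8] = { 16UL,32UL,48UL,64UL,96UL,128UL,192UL,256UL };
                      : 
                      :      static inline ulong
                      :      smallest_fit( ulong footprint ) {   // caller promises footprint<=256
                      :        ulong l = 0UL;
                      :        ulong h = 7UL;
                      :        for( ulong r=0UL; r<3UL; r++ ) {  // 3 probes cover 8 classes, no early exit
                      :          ulong m = (l+h)>>1;
                      :          int   c = (tbl[ m ]>=footprint);
                      :          l = fd_ulong_if( c, l, m+1UL );
                      :          h = fd_ulong_if( c, m, h     );
                      :        }
                      :        return h;  // e.g. smallest_fit( 40UL )==2UL (the 48 byte class)
                      :      }
                      : */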
      46             : 
      47             : /* fd_alloc_preferred_sizeclass_cgroup returns preferred sizeclass
      48             :    concurrency group for a join with the given cgroup_idx.  The caller
      49             :    promises sizeclass is in [0,FD_ALLOC_SIZECLASS_CNT). */
      50             : 
      51             : static inline ulong
      52             : fd_alloc_preferred_sizeclass_cgroup( ulong sizeclass,
      53   182582613 :                                      ulong cgroup_idx ) {
      54   182582613 :   return cgroup_idx & (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].cgroup_mask;
      55   182582613 : }
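                      : /* E.g. with a hypothetical cgroup_mask of 7, cgroup hints 0..7 map
                      :    to 8 distinct active slots and larger hints wrap around (8->0,
                      :    9->1, ...), so any hint value yields a valid concurrency group. */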
      56             : 
      57             : /* fd_alloc_block_set *************************************************/
      58             : 
      59             : /* An fd_alloc_block_set specifies a set of blocks in a superblock. */
      60             : 
      61             : #define SET_NAME fd_alloc_block_set
      62             : #define SET_TYPE ulong
      63             : #define SET_MAX  64
      64             : #include "../tmpl/fd_smallset.c"
      65             : 
      66             : /* fd_alloc_block_set_{add,sub} inserts / removes blocks to / from
      67             :    block set pointed to by set.  The caller promises that blocks are not
      68             :    / are already in the block set.  This operation is a compiler fence.
      69             :    Further, if FD_HAS_ATOMIC, this operation is done atomically.
      70             :    Returns the value of the block_set just before the operation.  Note:
      71             :    atomic add/sub has slightly better asm on x86 than atomic or/and/nand
      72             :    (compiler quality issue, not an architecture issue) and generates the
      73             :    same results provided the caller promises are met. */
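                      : /* Sketch of why add/sub is interchangeable with or/and-not here:
                      :    the caller promises blocks has no bits in common with the bits it
                      :    will flip, so no carries or borrows can propagate.  For example:
                      : 
                      :      (0x28UL + 0x05UL)==0x2dUL==(0x28UL |  0x05UL)  // add matches or
                      :      (0x2dUL - 0x05UL)==0x28UL==(0x2dUL & ~0x05UL)  // sub matches and-not
                      : 
                      :    If the promise were violated, a carry would corrupt neighboring
                      :    bits in ways or/and-not would not. */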
      74             : 
      75             : #if FD_HAS_ATOMIC
      76             : 
      77             : static inline fd_alloc_block_set_t
      78             : fd_alloc_block_set_add( fd_alloc_block_set_t * set,
      79   170814462 :                         fd_alloc_block_set_t   blocks ) {
      80   170814462 :   fd_alloc_block_set_t ret;
      81   170814462 :   FD_COMPILER_MFENCE();
      82   170814462 :   ret = FD_ATOMIC_FETCH_AND_ADD( set, blocks );
      83   170814462 :   FD_COMPILER_MFENCE();
      84   170814462 :   return ret;
      85   170814462 : }
      86             : 
      87             : static inline fd_alloc_block_set_t
      88             : fd_alloc_block_set_sub( fd_alloc_block_set_t * set,
      89   170946714 :                         fd_alloc_block_set_t   blocks ) {
      90   170946714 :   fd_alloc_block_set_t ret;
      91   170946714 :   FD_COMPILER_MFENCE();
      92   170946714 :   ret = FD_ATOMIC_FETCH_AND_SUB( set, blocks );
      93   170946714 :   FD_COMPILER_MFENCE();
      94   170946714 :   return ret;
      95   170946714 : }
      96             : 
      97             : #else
      98             : 
      99             : static inline fd_alloc_block_set_t
     100             : fd_alloc_block_set_add( fd_alloc_block_set_t * set,
     101             :                         fd_alloc_block_set_t   blocks ) {
     102             :   fd_alloc_block_set_t ret;
     103             :   FD_COMPILER_MFENCE();
     104             :   ret = FD_VOLATILE_CONST( *set );
     105             :   FD_VOLATILE( *set ) = ret + blocks;
     106             :   FD_COMPILER_MFENCE();
     107             :   return ret;
     108             : }
     109             : 
     110             : static inline fd_alloc_block_set_t
     111             : fd_alloc_block_set_sub( fd_alloc_block_set_t * set,
     112             :                         fd_alloc_block_set_t   blocks ) {
     113             :   fd_alloc_block_set_t ret;
     114             :   FD_COMPILER_MFENCE();
     115             :   ret = FD_VOLATILE_CONST( *set );
     116             :   FD_VOLATILE( *set ) = ret - blocks;
     117             :   FD_COMPILER_MFENCE();
     118             :   return ret;
     119             : }
     120             : 
     121             : #endif
     122             : 
     123             : /* fd_alloc_superblock ************************************************/
     124             : 
     125     1867701 : #define FD_ALLOC_SUPERBLOCK_ALIGN (16UL) /* Guarantees free_blocks and next_gaddr are on the same cache line */
     126             : 
     127             : struct __attribute__((aligned(FD_ALLOC_SUPERBLOCK_ALIGN))) fd_alloc_superblock {
     128             :   fd_alloc_block_set_t free_blocks; /* which blocks in this superblock are free */
     129             :   ulong                next_gaddr;  /* if on the inactive superblock stack, next inactive superblock or 0, ignored o.w. */
     130             :   /* FIXME: could put a cgroup hint in the lsb of next gaddr given
     131             :      it is already aligned 16. */
     132             :   /* Storage for blocks follows */
     133             : };
     134             : 
     135             : typedef struct fd_alloc_superblock fd_alloc_superblock_t;
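                      : /* Layout sketch (block_cnt and block_footprint come from the
                      :    sizeclass cfg; widths shown are illustrative):
                      : 
                      :      [ free_blocks | next_gaddr ][ block 0 ][ block 1 ]...[ block block_cnt-1 ]
                      :          16 byte header             block_footprint bytes each
                      : 
                      :    so block i starts at (ulong)superblock +
                      :    sizeof(fd_alloc_superblock_t) + i*block_footprint, as used by
                      :    malloc and free below. */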
     136             : 
     137             : /* fd_alloc ***********************************************************/
     138             : 
     139             : /* FD_ALLOC_MAGIC is an ideally unique number that specifies the precise
     140             :    memory layout of a fd_alloc */
     141             : 
     142             : #if FD_HAS_X86 /* Technically platforms with 16B compare-exchange */
     143             : 
     144      219366 : #define FD_ALLOC_MAGIC (0xF17EDA2C37A110C1UL) /* FIRE DANCER ALLOC version 1 */
     145             : 
     146             : #define VOFF_NAME      fd_alloc_vgaddr
     147             : #define VOFF_TYPE      uint128
     148   149842986 : #define VOFF_VER_WIDTH 64
     149             : #include "../tmpl/fd_voff.c"
     150             : 
     151             : #else /* Platforms without 16B compare-exchange */
     152             : 
     153             : #define FD_ALLOC_MAGIC (0xF17EDA2C37A110C0UL) /* FIRE DANCER ALLOC version 0 */
     154             : 
     155             : /* TODO: Overaligning superblocks on these targets to get a wider
     156             :    version width */
     157             : 
     158             : #define VOFF_NAME      fd_alloc_vgaddr
     159             : #define VOFF_TYPE      ulong
     160             : #define VOFF_VER_WIDTH 4
     161             : #include "../tmpl/fd_voff.c"
     162             : 
     163             : #endif
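                      : /* Usage sketch of the generated versioned-offset type (names are
                      :    those instantiated from the fd_voff template above):
                      : 
                      :      fd_alloc_vgaddr_t v = fd_alloc_vgaddr( 42UL, gaddr );  // version 42, offset gaddr
                      :      ulong ver = (ulong)fd_alloc_vgaddr_ver( v );            // ==42UL
                      :      ulong off = (ulong)fd_alloc_vgaddr_off( v );            // ==gaddr
                      : 
                      :    Bumping the version on every successful stack update below is
                      :    what defeats ABA: even if the same gaddr reappears at the top of
                      :    the inactive stack, a stale compare-and-swap sees a different
                      :    version and fails. */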
     164             : 
     165             : struct __attribute__((aligned(FD_ALLOC_ALIGN))) fd_alloc {
     166             :   ulong magic;    /* ==FD_ALLOC_MAGIC */
     167             :   ulong wksp_off; /* Offset of the first byte of this structure from the start of the wksp */
     168             :   ulong tag;      /* tag that will be used by this allocator.  Positive. */
     169             : 
     170             :   /* Padding to 128 byte alignment here */
     171             : 
     172             :   /* active_slot[ sizeclass + FD_ALLOC_SIZECLASS_CNT*cgroup ] is the
     173             :      gaddr of the active superblock for sizeclass allocations done by a
     174             :      member of concurrency group cgroup or 0 if there is no active
     175             :      superblock currently for that sizeclass,cgroup pair.
     176             : 
     177             :      inactive_stack[ sizeclass ] is top of inactive stack for sizeclass
     178             :      or 0 if the stack is empty.  This is versioned offset with a 64-bit
     179             :      version number in the least significant bits and a 64-bit gaddr in
     180             :      the upper bits.  At 64 bits wide, note that version number reuse
     181             :      will not occur on any human timescale.  (FIXME: consider using a
     182             :      ulong to be more portable to platforms without 128-bit CAS support.
     183             :      E.g. 20-bit version and a 44-bit gaddr and restricting maximum
     184             :      supported workspace to ~16 TiB.)
     185             : 
     186             :      Note that this is compact but organized such that concurrent
     187             :      operations from different cgroups are unlikely to create false
     188             :      sharing. */
     189             : 
     190             :   ulong active_slot[ FD_ALLOC_SIZECLASS_CNT*(FD_ALLOC_JOIN_CGROUP_HINT_MAX+1UL) ] __attribute__((aligned(128UL)));
     191             : 
     192             :   /* Padding to 128 byte alignment here */
     193             : 
     194             :   fd_alloc_vgaddr_t inactive_stack[ FD_ALLOC_SIZECLASS_CNT ] __attribute__((aligned(128UL)));
     195             : };
     196             : 
     197             : /* fd_alloc_private_wksp returns the wksp backing alloc.  Assumes alloc
     198             :    is a non-NULL pointer in the caller's address space to the fd_alloc
     199             :    (not a join handle). */
     200             : 
     201             : FD_FN_PURE static inline fd_wksp_t *
     202   293750658 : fd_alloc_private_wksp( fd_alloc_t * alloc ) {
     203   293750658 :   return (fd_wksp_t *)(((ulong)alloc) - alloc->wksp_off);
     204   293750658 : }
     205             : 
     206             : /* fd_alloc_private_active_slot_replace replaces the value currently in
     207             :    the slot pointed to by active_slot with new_superblock_gaddr and
     208             :    returns the superblock_gaddr previously in there.  This is a compiler
     209             :    fence.  If FD_HAS_ATOMIC, this will be done atomically. */
     210             : 
     211             : static inline ulong
     212             : fd_alloc_private_active_slot_replace( ulong * active_slot,
     213   784324590 :                                       ulong   new_superblock_gaddr ) {
     214   784324590 :   ulong old_superblock_gaddr;
     215   784324590 :   FD_COMPILER_MFENCE();
     216   784324590 : # if FD_HAS_ATOMIC
     217   784324590 :   old_superblock_gaddr = FD_ATOMIC_XCHG( active_slot, new_superblock_gaddr );
     218             : # else
     219             :   old_superblock_gaddr = FD_VOLATILE_CONST( *active_slot );
     220             :   FD_VOLATILE( *active_slot ) = new_superblock_gaddr;
     221             : # endif
     222   784324590 :   FD_COMPILER_MFENCE();
     223   784324590 :   return old_superblock_gaddr;
     224   784324590 : }
     225             : 
     226             : /* fd_alloc_private_inactive_stack_push pushes superblock at the
     227             :    workspace global address superblock_gaddr in workspace wksp onto
     228             :    the inactive stack.  This is a compiler fence.  If FD_HAS_ATOMIC,
     229             :    this will be done atomically. */
     230             : 
     231             : static inline void
     232             : fd_alloc_private_inactive_stack_push( fd_alloc_vgaddr_t * inactive_stack,
     233             :                                       fd_wksp_t *         wksp,
     234     1366200 :                                       ulong               superblock_gaddr ) {
     235     1366200 :   fd_alloc_superblock_t * superblock = (fd_alloc_superblock_t *)fd_wksp_laddr_fast( wksp, superblock_gaddr );
     236             : 
     237     1366200 :   for(;;) {
     238             : 
     239             :     /* Read the top of the inactive stack. */
     240             : 
     241     1366200 :     fd_alloc_vgaddr_t old;
     242     1366200 :     FD_COMPILER_MFENCE();
     243     1366200 :     old = FD_VOLATILE_CONST( *inactive_stack );
     244     1366200 :     FD_COMPILER_MFENCE();
     245             : 
     246     1366200 :     ulong top_ver   = (ulong)fd_alloc_vgaddr_ver( old );
     247     1366200 :     ulong top_gaddr = (ulong)fd_alloc_vgaddr_off( old );
     248             : 
     249             :     /* Try to push the top of the inactive stack */
     250             : 
     251     1366200 :     fd_alloc_vgaddr_t new = fd_alloc_vgaddr( top_ver+1UL, superblock_gaddr );
     252             : 
     253     1366200 :     FD_COMPILER_MFENCE();
     254     1366200 :     FD_VOLATILE( superblock->next_gaddr ) = top_gaddr;
     255     1366200 :     FD_COMPILER_MFENCE();
     256             : 
     257     1366200 : #   if FD_HAS_ATOMIC
     258     1366200 :     if( FD_LIKELY( FD_ATOMIC_CAS( inactive_stack, old, new )==old ) ) break;
     259             : #   else
     260             :     if( FD_LIKELY( FD_VOLATILE_CONST( *inactive_stack )==old ) ) { FD_VOLATILE( *inactive_stack ) = new; break; }
     261             : #   endif
     262             : 
     263             :     /* Hmmm ... that failed ... try again */
     264             : 
     265           0 :     FD_SPIN_PAUSE();
     266           0 :   }
     267             : 
     268     1366200 :   FD_COMPILER_MFENCE();
     269     1366200 : }
     270             : 
     271             : /* fd_alloc_private_inactive_stack_pop pops superblock off the top of
     272             :    the inactive stack.  Returns the non-zero wksp superblock gaddr of
     273             :    the popped stack top on success or 0 on failure (i.e. inactive stack
     274             :    is empty).  This is a compiler fence.  If FD_HAS_ATOMIC, this will be
     275             :    done atomically. */
     276             : 
     277             : static inline ulong
     278             : fd_alloc_private_inactive_stack_pop( fd_alloc_vgaddr_t * inactive_stack,
     279   140176095 :                                      fd_wksp_t *         wksp ) {
     280   140176095 :   ulong top_gaddr;
     281             : 
     282   140176095 :   for(;;) {
     283             : 
     284             :     /* Read the top of the inactive stack.  Return if the inactive stack
     285             :        is empty. */
     286             : 
     287   140176095 :     fd_alloc_vgaddr_t old;
     288   140176095 :     FD_COMPILER_MFENCE();
     289   140176095 :     old = FD_VOLATILE_CONST( *inactive_stack );
     290   140176095 :     FD_COMPILER_MFENCE();
     291             : 
     292             :     /**/  top_gaddr = (ulong)fd_alloc_vgaddr_off( old );
     293   140176095 :     ulong top_ver   = (ulong)fd_alloc_vgaddr_ver( old );
     294   140176095 :     if( FD_UNLIKELY( !top_gaddr ) ) break;
     295             : 
     296             :     /* Try to pop the top of the inactive stack. */
     297             : 
     298     1366173 :     fd_alloc_superblock_t * top = (fd_alloc_superblock_t *)fd_wksp_laddr_fast( wksp, top_gaddr );
     299             : 
     300     1366173 :     ulong next_gaddr;
     301     1366173 :     FD_COMPILER_MFENCE();
     302     1366173 :     next_gaddr = FD_VOLATILE_CONST( top->next_gaddr );
     303     1366173 :     FD_COMPILER_MFENCE();
     304             : 
     305     1366173 :     fd_alloc_vgaddr_t new = fd_alloc_vgaddr( top_ver+1UL, next_gaddr );
     306             : 
     307     1366173 : #   if FD_HAS_ATOMIC
     308     1366173 :     if( FD_LIKELY( FD_ATOMIC_CAS( inactive_stack, old, new )==old ) ) break;
     309             : #   else
     310             :     if( FD_LIKELY( FD_VOLATILE_CONST( *inactive_stack )==old ) ) { FD_VOLATILE( *inactive_stack ) = new; break; }
     311             : #   endif
     312             : 
     313             :     /* Hmmm ... that failed ... try again */
     314             : 
     315           0 :     FD_SPIN_PAUSE();
     316           0 :   }
     317             : 
     318   140176095 :   FD_COMPILER_MFENCE();
     319             : 
     320   140176095 :   return top_gaddr;
     321   140176095 : }
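                      : /* The push/pop pair above forms a version-tagged Treiber stack.  A
                      :    sketch of the hazard the version tag prevents (assuming raw,
                      :    untagged pointers): thread A reads top==X with X->next_gaddr==Y
                      :    and stalls; thread B pops X and Y and then pushes X back; A's CAS
                      :    on a raw top pointer would now succeed and install the stale Y.
                      :    With the version bumped on each update, A's CAS fails and A
                      :    retries. */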
     322             : 
     323             : /* fd_alloc_hdr_t *****************************************************/
     324             : 
     325             : /* An fd_alloc_hdr_t is a small header prepended to an allocation that
     326             :    describes the allocation.  Because fd_alloc supports arbitrary
     327             :    allocation alignments, these headers might be stored at unaligned
     328             :    positions. */
     329             : 
     330             : typedef uint fd_alloc_hdr_t;
     331             : 
     332             : /* FD_ALLOC_HDR_LARGE_* give the fd_alloc_hdr_t used for mallocs done by
     333             :    the allocator of last resort (i.e. fd_wksp_alloc).  The least
     334             :    significant 7 bits must be FD_ALLOC_SIZECLASS_LARGE so that free can
     335             :    detect an allocation is done by that allocator.  The most significant
     336             :    24 bits are a magic number to help with analytics.  Bit 7 indicates
     337             :    whether this allocation is a direct user allocation or holds a
     338             :    superblock that aggregates many small allocations together. */
     339             : 
     340           0 : #define FD_ALLOC_HDR_LARGE_DIRECT     ((fd_alloc_hdr_t)(0xFDA11C00U | (fd_alloc_hdr_t)FD_ALLOC_SIZECLASS_LARGE))
     341             : #define FD_ALLOC_HDR_LARGE_SUPERBLOCK ((fd_alloc_hdr_t)(0xFDA11C80U | (fd_alloc_hdr_t)FD_ALLOC_SIZECLASS_LARGE))
     342             : 
     343             : /* fd_alloc_hdr_load loads the header for the allocation whose first
     344             :    byte is at laddr in the caller's address space.  The header will be
     345             :    observed at some point between when this call was made and when it
     346             :    returned, which implies that the allocation at laddr must remain
     347             :    live at least until the caller stops using the header. */
     348             : 
     349             : FD_FN_PURE static inline fd_alloc_hdr_t
     350   171603138 : fd_alloc_hdr_load( void const * laddr ) {
     351   171603138 :   return FD_LOAD( fd_alloc_hdr_t, ((ulong)laddr) - sizeof(fd_alloc_hdr_t) );
     352   171603138 : }
     353             : 
     354             : /* fd_alloc_hdr_sizeclass returns the sizeclass of an allocation
     355             :    described by hdr.  This will be in [0,FD_ALLOC_SIZECLASS_CNT) or
     356             :    FD_ALLOC_SIZECLASS_LARGE.  sizeclass==FD_ALLOC_SIZECLASS_LARGE
     357             :    indicates that the allocation was done via fd_wksp_alloc directly.
     358             :    Small allocations are clustered into superblocks for performance and
     359             :    packing efficiency.  All allocations in a superblock are from the
     360             :    same sizeclass. */
     361             : 
     362   171609420 : FD_FN_CONST static inline ulong fd_alloc_hdr_sizeclass( fd_alloc_hdr_t hdr ) { return (ulong)(hdr & 127U); }
     363             : 
     364             : /* fd_alloc_hdr_{superblock,block} return details about a small
     365             :    allocation whose first byte is at laddr in the caller's address space
     366             :    and is described by hdr.  In particular:
     367             : 
     368             :    fd_alloc_hdr_superblock( hdr, laddr ) returns the location in the
     369             :    caller's address space of the superblock containing the allocation.
     370             :    The lifetime of a superblock is at least as long as the allocation at
     371             :    laddr.
     372             : 
     373             :    fd_alloc_hdr_block_idx( hdr ) returns the superblock block index of the
     374             :    block containing the allocation.  Note that this will be in:
     375             :      [0,fd_alloc_sizeclass_block_cnt(sizeclass))
     376             :    and that:
     377             :      fd_alloc_sizeclass_block_cnt(sizeclass)<=fd_alloc_block_set_max()<=64. */
     378             : 
     379             : FD_FN_CONST static inline fd_alloc_superblock_t *
     380             : fd_alloc_hdr_superblock( fd_alloc_hdr_t hdr,
     381   170815200 :                          void *         laddr ) {
     382   170815200 :   return (fd_alloc_superblock_t *)(((ulong)laddr) - ((ulong)(hdr >> 13)));
     383   170815200 : }
     384             : 
     385   170815209 : FD_FN_CONST static inline ulong fd_alloc_hdr_block_idx( fd_alloc_hdr_t hdr ) { return (ulong)((hdr >> 7) & 63U); }
     386             : 
     387             : /* fd_alloc_hdr_is_large returns 1 if hdr is for a large allocation and
     388             :    0 if not.  fd_alloc_hdr_sizeclass( hdr )==FD_ALLOC_SIZECLASS_LARGE
     389             :    also works but does not test the magic number in the most significant
     390             :    bits. */
     391             : 
     392             : FD_FN_CONST static inline int
     393           0 : fd_alloc_hdr_is_large( fd_alloc_hdr_t hdr ) {
     394           0 :   return fd_uint_clear_bit( hdr, 7 )==FD_ALLOC_HDR_LARGE_DIRECT;
     395           0 : }
     396             : 
     397             : /* fd_alloc_hdr_large_is_superblock returns 1 if hdr (which is assumed
     398             :    to be for a large allocation) is for a large allocation that holds a
     399             :    superblock. */
     400             : 
     401             : FD_FN_CONST static inline int
     402           0 : fd_alloc_hdr_large_is_superblock( fd_alloc_hdr_t hdr ) {
     403           0 :   return fd_uint_extract_bit( hdr, 7 );
     404           0 : }
     405             : 
     406             : /* fd_alloc_hdr_store stores a fd_alloc_hdr_t describing a small
     407             :    sizeclass allocation contained within block of superblock in the
     408             :    sizeof(fd_alloc_hdr_t) bytes immediately preceding the byte pointed
     409             :    to by laddr in the caller's address space.  The caller promises that
     410             :    these bytes are somewhere within the block.
     411             : 
     412             :    fd_alloc_hdr_store_large similarly stores a fd_alloc_hdr_t describing
     413             :    a large allocation. */
     414             : 
     415             : static inline void *
     416             : fd_alloc_hdr_store( void *                  laddr,
     417             :                     fd_alloc_superblock_t * superblock,
     418             :                     ulong                   block_idx,
     419   170946714 :                     ulong                   sizeclass ) {
     420   170946714 :   FD_STORE( fd_alloc_hdr_t, ((ulong)laddr) - sizeof(fd_alloc_hdr_t),
     421   170946714 :             (fd_alloc_hdr_t)( ((((ulong)laddr) - ((ulong)superblock)) << 13) |    /* Bits 31:13 - 19 bit: superblock offset */
     422   170946714 :                               (block_idx                              <<  7) |    /* Bits 12: 7 -  6 bit: block */
     423   170946714 :                               sizeclass                                      ) ); /* Bits  6: 0 -  7 bit: sizeclass */
     424   170946714 :   return laddr;
     425   170946714 : }
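                      : /* Worked example (values illustrative): an allocation starting 48
                      :    bytes into its superblock, in block 2 of sizeclass 9, stores
                      : 
                      :      hdr = (48U<<13) | (2U<<7) | 9U = 0x60109U
                      : 
                      :    from which the accessors above recover superblock==laddr-48,
                      :    block_idx==2 and sizeclass==9. */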
     426             : 
     427             : static inline void *
     428             : fd_alloc_hdr_store_large( void * laddr,
     429      788988 :                           int    is_superblock ) { /* in [0,1] */
     430      788988 :   FD_STORE( fd_alloc_hdr_t, ((ulong)laddr) - sizeof(fd_alloc_hdr_t),
     431      788988 :             FD_ALLOC_HDR_LARGE_DIRECT | (((fd_alloc_hdr_t)is_superblock) << 7) );
     432      788988 :   return laddr;
     433      788988 : }
     434             : 
     435             : /* Misc ***************************************************************/
     436             : 
     437             : /* fd_alloc_private_join_alloc returns the local address of the alloc
     438             :    for a join. */
     439             : 
     440             : FD_FN_CONST fd_alloc_t *
     441   344311854 : fd_alloc_private_join_alloc( fd_alloc_t * join ) {
     442   344311854 :   return (fd_alloc_t *)(((ulong)join) & ~FD_ALLOC_JOIN_CGROUP_HINT_MAX);
     443   344311854 : }
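                      : /* A join handle packs the caller's cgroup hint into the low bits of
                      :    the alloc's local address: alloc is FD_ALLOC_ALIGN aligned, which
                      :    is larger than FD_ALLOC_JOIN_CGROUP_HINT_MAX, so those bits are
                      :    zero in the alloc's address.  Sketch (hint value illustrative):
                      : 
                      :      fd_alloc_t * join = fd_alloc_join_cgroup_hint_set( alloc, 5UL );
                      :      // fd_alloc_join_cgroup_hint  ( join )==5UL
                      :      // fd_alloc_private_join_alloc( join )==alloc
                      : */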
     444             : 
     445             : /* Constructors *******************************************************/
     446             : 
     447             : ulong
     448      219366 : fd_alloc_align( void ) {
     449      219366 :   return alignof(fd_alloc_t);
     450      219366 : }
     451             : 
     452             : ulong
     453      219366 : fd_alloc_footprint( void ) {
     454      219366 :   return sizeof(fd_alloc_t);
     455      219366 : }
     456             : 
     457             : ulong
     458           0 : fd_alloc_superblock_footprint( void ) {
     459           0 :   return sizeof(fd_alloc_superblock_t);
     460           0 : }
     461             : 
     462             : void *
     463             : fd_alloc_new( void * shmem,
     464      219378 :               ulong  tag ) {
     465             : 
     466             :   /* Check input arguments */
     467             : 
     468      219378 :   if( FD_UNLIKELY( !shmem ) ) {
     469           3 :     FD_LOG_WARNING(( "NULL shmem" ));
     470           3 :     return NULL;
     471           3 :   }
     472             : 
     473      219375 :   if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)shmem, alignof(fd_alloc_t) ) ) ) {
     474           3 :     FD_LOG_WARNING(( "misaligned shmem" ));
     475           3 :     return NULL;
     476           3 :   }
     477             : 
     478      219372 :   fd_wksp_t * wksp = fd_wksp_containing( shmem );
     479      219372 :   if( FD_UNLIKELY( !wksp ) ) {
     480           3 :     FD_LOG_WARNING(( "shmem must be in a workspace" ));
     481           3 :     return NULL;
     482           3 :   }
     483             : 
     484      219369 :   if( FD_UNLIKELY( !tag ) ) {
     485           3 :     FD_LOG_WARNING(( "bad tag" ));
     486           3 :     return NULL;
     487           3 :   }
     488             : 
     489      219366 :   fd_alloc_t * alloc = (fd_alloc_t *)shmem;
     490      219366 :   fd_memset( alloc, 0, sizeof(fd_alloc_t) );
     491             : 
     492      219366 :   alloc->wksp_off = (ulong)alloc - (ulong)wksp;
     493      219366 :   alloc->tag      = tag;
     494             : 
     495      219366 :   FD_COMPILER_MFENCE();
     496      219366 :   FD_VOLATILE( alloc->magic ) = FD_ALLOC_MAGIC;
     497      219366 :   FD_COMPILER_MFENCE();
     498             : 
     499      219366 :   return shmem;
     500      219369 : }
     501             : 
     502             : fd_alloc_t *
     503             : fd_alloc_join( void * shalloc,
     504      219414 :                ulong  cgroup_hint ) {
     505      219414 :   fd_alloc_t * alloc = shalloc;
     506             : 
     507      219414 :   if( FD_UNLIKELY( !alloc ) ) {
     508           3 :     FD_LOG_WARNING(( "NULL shalloc" ));
     509           3 :     return NULL;
     510           3 :   }
     511             : 
     512      219411 :   if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)alloc, alignof(fd_alloc_t) ) ) ) {
     513           3 :     FD_LOG_WARNING(( "misaligned shalloc" ));
     514           3 :     return NULL;
     515           3 :   }
     516             : 
     517      219408 :   if( FD_UNLIKELY( alloc->magic!=FD_ALLOC_MAGIC ) ) {
     518           3 :     FD_LOG_WARNING(( "bad magic" ));
     519           3 :     return NULL;
     520           3 :   }
     521      219405 :   return fd_alloc_join_cgroup_hint_set( alloc, cgroup_hint );
     522      219408 : }
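                      : /* Typical lifecycle sketch (the wksp, the tag value 1UL and the use
                      :    of fd_tile_idx() as a cgroup hint are assumptions about the
                      :    caller's setup):
                      : 
                      :      void *       shmem = fd_wksp_alloc_laddr( wksp, fd_alloc_align(), fd_alloc_footprint(), 1UL );
                      :      fd_alloc_t * join  = fd_alloc_join( fd_alloc_new( shmem, 1UL ), fd_tile_idx() );
                      :      ... use fd_alloc_malloc / fd_alloc_free on join ...
                      :      fd_wksp_free_laddr( fd_alloc_delete( fd_alloc_leave( join ) ) );
                      : */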
     523             : 
     524             : void *
     525      219102 : fd_alloc_leave( fd_alloc_t * join ) {
     526             : 
     527      219102 :   if( FD_UNLIKELY( !join ) ) {
     528           3 :     FD_LOG_WARNING(( "NULL join" ));
     529           3 :     return NULL;
     530           3 :   }
     531      219099 :   return fd_alloc_private_join_alloc( join );
     532      219102 : }
     533             : 
     534             : void *
     535      219069 : fd_alloc_delete( void * shalloc ) {
     536             : 
     537      219069 :   if( FD_UNLIKELY( !shalloc ) ) {
     538           3 :     FD_LOG_WARNING(( "NULL shalloc" ));
     539           3 :     return NULL;
     540           3 :   }
     541             : 
     542      219066 :   if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)shalloc, alignof(fd_alloc_t) ) ) ) {
     543           3 :     FD_LOG_WARNING(( "misaligned shalloc" ));
     544           3 :     return NULL;
     545           3 :   }
     546             : 
     547      219063 :   fd_alloc_t * alloc = (fd_alloc_t *)shalloc;
     548             : 
     549      219063 :   if( FD_UNLIKELY( alloc->magic!=FD_ALLOC_MAGIC ) ) {
     550           3 :     FD_LOG_WARNING(( "bad magic" ));
     551           3 :     return NULL;
     552           3 :   }
     553             : 
     554      219060 :   FD_COMPILER_MFENCE();
     555      219060 :   FD_VOLATILE( alloc->magic ) = 0UL;
     556      219060 :   FD_COMPILER_MFENCE();
     557             : 
     558             :   /* Clean up as much as we can.  For each sizeclass, delete all active
     559             :      superblocks and all inactive superblocks.  We will not be able to
     560             :      clean up any superblocks that are fully allocated (and any
     561             :      outstanding large allocations) as we rely on the application to
     562             :      implicitly track them (because they nominally will eventually call
     563             :      free on them).  So hopefully the application did a good job and
     564             :      cleaned up all their outstanding stuff before calling delete. */
     565             : 
     566      219060 :   fd_wksp_t * wksp = fd_alloc_private_wksp( alloc );
     567             : 
     568    27820620 :   for( ulong sizeclass=0UL; sizeclass<FD_ALLOC_SIZECLASS_CNT; sizeclass++ ) {
     569             : 
     570    27601560 :     ulong cgroup_cnt = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].cgroup_mask + 1UL;
     571   469226520 :     for( ulong cgroup_idx=0UL; cgroup_idx<cgroup_cnt; cgroup_idx++ ) {
     572   441624960 :       ulong superblock_gaddr =
     573   441624960 :         fd_alloc_private_active_slot_replace( alloc->active_slot + sizeclass + FD_ALLOC_SIZECLASS_CNT*cgroup_idx, 0UL );
     574   441624960 :       if( FD_UNLIKELY( superblock_gaddr ) )
     575     1427424 :         fd_alloc_free( alloc, (fd_alloc_superblock_t *)fd_wksp_laddr_fast( wksp, superblock_gaddr ) );
     576   441624960 :     }
     577             : 
     578    27601560 :     fd_alloc_vgaddr_t * inactive_stack = alloc->inactive_stack + sizeclass;
     579    27601575 :     for(;;) {
     580    27601575 :       ulong superblock_gaddr = fd_alloc_private_inactive_stack_pop( inactive_stack, wksp );
     581    27601575 :       if( FD_LIKELY( !superblock_gaddr ) ) break;
     582          15 :       fd_alloc_free( alloc, (fd_alloc_superblock_t *)fd_wksp_laddr_fast( wksp, superblock_gaddr ) );
     583          15 :     }
     584             : 
     585    27601560 :   }
     586             : 
     587      219060 :   return shalloc;
     588      219063 : }
     589             : 
     590             : fd_wksp_t *
     591          36 : fd_alloc_wksp( fd_alloc_t * join ) {
     592          36 :   return FD_LIKELY( join ) ? fd_alloc_private_wksp( fd_alloc_private_join_alloc( join ) ) : NULL;
     593          36 : }
     594             : 
     595             : ulong
     596          15 : fd_alloc_tag( fd_alloc_t * join ) {
     597          15 :   return FD_LIKELY( join ) ? fd_alloc_private_join_alloc( join )->tag : 0UL;
     598          15 : }
     599             : 
     600             : void *
     601             : fd_alloc_malloc_at_least( fd_alloc_t * join,
     602             :                           ulong        align,
     603             :                           ulong        sz,
     604   171412341 :                           ulong *      max ) {
     605             : 
     606   171412341 :   if( FD_UNLIKELY( !max ) ) return NULL;
     607             : 
     608             :   /* Handle default align, NULL alloc, 0 size, non-power-of-two align
     609             :      and unreasonably large sz.  footprint has room for fd_alloc_hdr_t,
     610             :      sz bytes, and enough padding to allow for the arbitrary alignment
     611             :      of blocks in a superblock.  Note that footprint is guaranteed not
     612             :      to overflow when align is a power of 2 (align is at most 2^63,
     613             :      sizeof(fd_alloc_hdr_t) is 4 and we abort if align is not a power of 2).  So we don't
     614             :      need to do elaborate overflow checking. */
     615             : 
     616   171412341 :   fd_alloc_t * alloc = fd_alloc_private_join_alloc( join );
     617             : 
     618   171412341 :   align = fd_ulong_if( !align, FD_ALLOC_MALLOC_ALIGN_DEFAULT, align );
     619             : 
     620             : # if FD_HAS_DEEPASAN
     621             :   /* The header is prepended and needs to be unpoisoned. Ensure that
     622             :      there is padding for the alloc_hdr to be properly aligned. We
     623             :      want to exit silently if the sz passed in is 0. The alignment must be
     624             :      at least 8. */
     625             :   ulong fd_alloc_hdr_footprint = fd_ulong_align_up( sizeof(fd_alloc_hdr_t), FD_ASAN_ALIGN );
     626             :   if( FD_LIKELY(sz && sz < ULONG_MAX) ) {
     627             :     sz = fd_ulong_align_up( sz, FD_ASAN_ALIGN );
     628             :   }
     629             :   align = fd_ulong_if( align < FD_ASAN_ALIGN, FD_ASAN_ALIGN, align );
     630             : # else
     631   171412341 :   ulong fd_alloc_hdr_footprint = sizeof(fd_alloc_hdr_t);
     632   171412341 : # endif
     633             : 
     634   171412341 :   ulong footprint = sz + fd_alloc_hdr_footprint + align - 1UL;
     635             : 
     636   171412341 :   if( FD_UNLIKELY( (!alloc) | (!fd_ulong_is_pow2( align )) | (!sz) | (footprint<=sz) ) ) {
     637          66 :     *max = 0UL;
     638          66 :     return NULL;
     639          66 :   }
     640             : 
     641   171412275 :   fd_wksp_t * wksp = fd_alloc_private_wksp( alloc );
     642             : 
     643             :   /* At this point, alloc is non-NULL and backed by wksp, align is a
     644             :      power-of-2, footprint is a reasonable non-zero value.  If the
     645             :      footprint is large, just allocate the memory directly, prepend the
     646             :      appropriate header and return.  TODO: consider clearing alignment
     647             :      padding for better alloc parameter recovery in diagnostics here? */
     648             : 
     649   171412275 :   if( FD_UNLIKELY( footprint > FD_ALLOC_FOOTPRINT_SMALL_THRESH ) ) {
     650             : 
     651      465561 :     ulong glo;
     652      465561 :     ulong ghi;
     653      465561 :     ulong wksp_gaddr = fd_wksp_alloc_at_least( wksp, 1UL, footprint, alloc->tag, &glo, &ghi );
     654      465561 :     if( FD_UNLIKELY( !wksp_gaddr ) ) {
     655           0 :       *max = 0UL;
     656           0 :       return NULL;
     657           0 :     }
     658             : 
     659      465561 :     ulong alloc_gaddr = fd_ulong_align_up( wksp_gaddr + sizeof(fd_alloc_hdr_t), align );
     660      465561 :     *max = (ghi - glo) - (alloc_gaddr - wksp_gaddr);
     661      465561 :     return fd_alloc_hdr_store_large( fd_wksp_laddr_fast( wksp, alloc_gaddr ), 0 /* !sb */ );
     662      465561 :   }
     663             : 
     664             :   /* At this point, the footprint is small.  Determine the preferred
     665             :      sizeclass for this allocation and the preferred active superblock
     666             :      to use for this sizeclass and join. */
     667             : 
     668   170946714 :   ulong sizeclass = fd_alloc_preferred_sizeclass( footprint );
     669   170946714 :   ulong cgroup    = fd_alloc_preferred_sizeclass_cgroup( sizeclass, fd_alloc_join_cgroup_hint( join ) );
     670             : 
     671   170946714 :   ulong * active_slot = alloc->active_slot + sizeclass + FD_ALLOC_SIZECLASS_CNT*cgroup;
     672             : 
     673             :   /* Try to get exclusive access to the preferred active superblock.
     674             :      Note that all active superblocks have at least one free block.
     675             : 
     676             :      TODO: consider doing something test-and-test-and-set?  E.g.:
     677             :        superblock_gaddr = FD_VOLATILE_CONST( *active_slot );
     678             :        if( FD_LIKELY( superblock_gaddr ) ) superblock_gaddr = fd_alloc_replace_active( active_slot, 0UL );
     679             :      This would avoid an atomic operation if there currently isn't an
     680             :      active superblock for this sizeclass and cgroup. */
     681             : 
     682   170946714 :   ulong superblock_gaddr = fd_alloc_private_active_slot_replace( active_slot, 0UL );
     683             : 
     684             :   /* At this point, if superblock_gaddr is non-zero, we have exclusive
     685             :      access to the superblock and only we can allocate blocks from it.
     686             :      (Other threads could free blocks to it concurrently though.)
     687             : 
     688             :      If superblock_gaddr is zero, there was no preferred active
     689             :      superblock for this sizeclass,cgroup pair when we looked.  So, we
     690             :      try to pop the inactive superblock stack for this sizeclass.  Note
     691             :      that all inactive superblocks also have at least one free block.
     692             : 
     693             :      If that fails, we try to allocate a new superblock to hold this
     694             :      allocation.  If we are able to do so, obviously the new superblock
     695             :      would have at least one free block for this allocation.  (Yes,
     696             :      malloc calls itself recursively.  The base case is the large
     697             :      allocation above.  Note that we expand out the base case explicitly
     698             :      here so we can distinguish user large allocations from gigantic
     699             :      superblock allocations in analytics without having to change the
     700             :      APIs or use the public API as a wrapper.)
     701             : 
     702             :      If that fails, we are in trouble and fail (we are either out of
     703             :      memory or have too much wksp fragmentation). */
     704             : 
     705   170946714 :   if( FD_UNLIKELY( !superblock_gaddr ) ) {
     706             : 
     707     2779116 :     superblock_gaddr = fd_alloc_private_inactive_stack_pop( alloc->inactive_stack + sizeclass, wksp );
     708             : 
     709     2779116 :     if( FD_UNLIKELY( !superblock_gaddr ) ) {
     710             : 
     711     1544274 :       fd_alloc_superblock_t * superblock;
     712             : 
     713     1544274 :       ulong superblock_footprint = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].superblock_footprint;
     714     1544274 :       if( FD_UNLIKELY( superblock_footprint > FD_ALLOC_FOOTPRINT_SMALL_THRESH ) ) {
     715             : 
     716      323427 :         ulong wksp_footprint = superblock_footprint + fd_alloc_hdr_footprint + FD_ALLOC_SUPERBLOCK_ALIGN - 1UL;
     717      323427 :         ulong wksp_gaddr     = fd_wksp_alloc( wksp, 1UL, wksp_footprint, alloc->tag );
     718      323427 :         if( FD_UNLIKELY( !wksp_gaddr ) ) {
     719           0 :           *max = 0UL;
     720           0 :           return NULL;
     721           0 :         }
     722             : 
     723      323427 :         superblock_gaddr = fd_ulong_align_up( wksp_gaddr + fd_alloc_hdr_footprint, FD_ALLOC_SUPERBLOCK_ALIGN );
     724             : #       if FD_HAS_DEEPASAN
     725             :         /* At this point, a new superblock is allocated from the wksp and the header
     726             :            is prepended. The alignment needs to be taken into account: the padding
     727             :            should also be unpoisoned.
     728             : 
     729             :            Due to ASan requiring 8 byte word alignment for poisoning regions, we must
     730             :            guarantee 8 bytes for the header instead of just sizeof(fd_alloc_hdr_t).
     731             :            We have a worst case padding of 15 bytes. Due to forced alignment in
     732             :            fd_wksp_alloc of at least 8 bytes, in the worst case we will use 8 bytes
     733             :            to align up the superblock_gaddr. The remaining 7 padding bytes will be
     734             :            used to safely allow for the superblock_footprint to be aligned up to
     735             :            an 8 byte multiple.  */
     736             :         void * unpoison_laddr = fd_wksp_laddr_fast( wksp, superblock_gaddr - fd_alloc_hdr_footprint );
     737             :         ulong aligned_superblock_footprint = fd_ulong_align_up( superblock_footprint, FD_ASAN_ALIGN );
     738             :         fd_asan_unpoison( unpoison_laddr, aligned_superblock_footprint + fd_alloc_hdr_footprint );
     739             : #       endif
     740             : 
     741      323427 :         superblock = (fd_alloc_superblock_t *)
     742      323427 :           fd_alloc_hdr_store_large( fd_wksp_laddr_fast( wksp, superblock_gaddr ), 1 /* sb */ );
     743             : 
     744     1220847 :       } else {
     745             : 
     746             :         /* TODO: consider having user facing API wrap an internal API so
     747             :            that recursive calls don't have to do redundant error
     748             :            trapping?
     749             : 
     750             :            TODO: consider using at_least semantics to adapt the actual
     751             :            size of the superblock to the location it is stored
     752             :            (currently should be irrelevant as the sizeclasses are
     753             :            designed to tightly nest). */
     754             : 
     755     1220847 :         superblock = fd_alloc_malloc( join, FD_ALLOC_SUPERBLOCK_ALIGN, superblock_footprint );
     756     1220847 :         if( FD_UNLIKELY( !superblock ) ) {
     757           0 :           *max = 0UL;
     758           0 :           return NULL;
     759           0 :         }
     760     1220847 :         superblock_gaddr = fd_wksp_gaddr_fast( wksp, superblock );
     761             : 
     762     1220847 :       }
     763             : 
     764     1544274 :       FD_COMPILER_MFENCE();
     765     1544274 :       FD_VOLATILE( superblock->free_blocks ) = fd_ulong_mask_lsb( (int)(uint)fd_alloc_sizeclass_cfg[ sizeclass ].block_cnt );
     766     1544274 :       FD_VOLATILE( superblock->next_gaddr  ) = 0UL;
     767     1544274 :       FD_COMPILER_MFENCE();
     768             : 
     769     1544274 :     }
     770     2779116 :   }
     771             : 
     772             :   /* At this point, we have a superblock with space for at least one
     773             :      allocation and only we can allocate blocks from it.  Other threads
     774             :      could free blocks in this superblock concurrently though.  As such,
     775             :      we can non-atomically find a set bit in free_blocks (there will be
     776             :      at least one and no other thread will clear it behind our back) but
     777             :      we must atomically clear the bit we found so we don't mess up the
     778             :      other bits that might be concurrently set by free. */
     779             : 
     780   170946714 :   fd_alloc_superblock_t * superblock = (fd_alloc_superblock_t *)fd_wksp_laddr_fast( wksp, superblock_gaddr );
     781             : 
     782   170946714 :   fd_alloc_block_set_t free_blocks;
     783   170946714 :   FD_COMPILER_MFENCE();
     784   170946714 :   free_blocks = FD_VOLATILE_CONST( superblock->free_blocks );
     785   170946714 :   FD_COMPILER_MFENCE();
     786             : 
     787   170946714 :   ulong                block_idx = fd_alloc_block_set_first( free_blocks );
     788   170946714 :   fd_alloc_block_set_t block     = fd_alloc_block_set_ele( block_idx );
     789             : 
     790   170946714 :   free_blocks = fd_alloc_block_set_sub( &superblock->free_blocks, block );
     791             : 
     792             :   /* At this point, free_blocks gives the set of free blocks in the
     793             :      superblock immediately before the allocation occurred. */
     794             : 
     795   170946714 :   if( FD_LIKELY( free_blocks!=block ) ) {
     796             : 
     797             :     /* At this point, we know the superblock has at least one
     798             :        allocated block in it (the one we just allocated) and one free
     799             :        block in it.  And this will hold true until we put this
     800             :        superblock back into circulation.  Specifically, nobody can free
     801             :        the block we just allocated until we return to tell them about it
     802             :        and nobody can allocate any remaining free blocks until we get
     803             :        this superblock back into circulation.  To get this superblock
     804             :        back into circulation, we make it the active superblock for this
     805             :        sizeclass,cgroup pair. */
     806             : 
     807   159310806 :     ulong displaced_superblock_gaddr = fd_alloc_private_active_slot_replace( active_slot, superblock_gaddr );
     808             : 
     809             :     /* And if this displaced a previously active superblock (e.g.
     810             :        another thread made an different superblock the active one while
     811             :        we were doing the above), we add the displaced superblock to the
     812             :        sizeclass's inactive superblocks.  Note that any such displaced
     813             :        superblock also has at least one free block in it (the active
     814             :        superblock always has at least one free block) that nobody can
     815             :        allocate from as, at this point, it is not in circulation.  Thus,
     816             :        pushing it onto the superblock's inactive stack will preserve the
     817             :        invariant that all inactive superblocks have at least one free
     818             :        block. */
     819             : 
     820   159310806 :     if( FD_UNLIKELY( displaced_superblock_gaddr ) )
     821           0 :       fd_alloc_private_inactive_stack_push( alloc->inactive_stack + sizeclass, wksp, displaced_superblock_gaddr );
     822             : 
     823   159310806 :   } //else {
     824             : 
     825             :     /* The superblock had no more free blocks immediately after the
     826             :        allocation occurred.  We should not make this superblock the
     827             :        preferred superblock or push it onto the sizeclass's inactive
     828             :        superblock stack; it would break the invariant that all
     829             :        superblocks in circulation for a sizeclass have at least one
     830             :        free block.
     831             : 
     832             :        And, as this superblock had no free blocks, we don't need to
     833             :        track the superblock anyway as malloc can't use the superblock
     834             :        until some of the blocks in it have been freed.  As such, this
     835             :        superblock will not be used in a malloc until after the next call
     836             :        to free on a block in this superblock returns this superblock to
     837             :        circulation.  Note that this superblock will have at least one
     838             :        allocated block until after this function returns (the block we
     839             :        just allocated) and thus cannot ever be considered as a deletion
     840             :        candidate until after this function returns and this allocation
     841             :        is freed.
     842             : 
     843             :        As discussed in free below, we could update a superblock cgroup
     844             :        hint here (such that when the superblock goes back into
     845             :        circulation, it will be put into circulation as the active
     846             :        superblock for this cgroup to encourage additional mallocs
     847             :        from this thread for good spatial locality).  This doesn't need
     848             :        to be atomic.  Even though a concurrent free on another thread
     849             :        might get this superblock into circulation before this
     850             :        executes (and thus other mallocs might also have occurred that changed
     851             :        the active_hint), it doesn't matter.  So long as the hint is a
     852             :        sane value at all points in time, free will work fine. */
     853             : 
     854             :   //}
     855             : 
     856             :   /* Carve the requested allocation out of the newly allocated block,
     857             :      prepend the allocation header for use by free and return.  TODO:
     858             :      consider clearing alignment padding for better alloc parameter
     859             :      recovery in diagnostics here? */
     860             : 
     861   170946714 :   ulong block_footprint = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].block_footprint;
     862   170946714 :   ulong block_laddr     = (ulong)superblock + sizeof(fd_alloc_superblock_t) + block_idx*block_footprint;
     863   170946714 :   ulong alloc_laddr     = fd_ulong_align_up( block_laddr + fd_alloc_hdr_footprint, align );
     864             : 
     865             : # if FD_HAS_DEEPASAN
     866             :   /* The block and the header must be unpoisoned to accommodate the block
     867             :      footprint. The block footprint is determined by the sizeclass which
     868             :      provides the minimum size that accommodates the footprint which is the
     869             :      sz that's passed in, the padded fd_alloc_hdr, and the worst case amount
     870             :      of alignment bytes. Because sz % FD_ASAN_ALIGN == 0, it is known that
     871             :      we will have unused bytes at the end of the block since alloc_laddr %
     872             :      FD_ASAN_ALIGN == 0. To ensure ASAN alignment, the range of bytes used
     873             :      in the block can be safely rounded down.
     874             :   */
     875             : 
     876             :   void* laddr = (void*)(alloc_laddr - fd_alloc_hdr_footprint);
     877             : 
     878             :   ulong block_hi_addr = block_laddr + block_footprint;
     879             :   ulong block_unpoison_sz = fd_ulong_align_dn( block_hi_addr - alloc_laddr, FD_ASAN_ALIGN );
     880             :   fd_asan_unpoison( laddr, block_unpoison_sz + fd_alloc_hdr_footprint );
     881             : # endif
     882             : 
     883   170946714 :   *max = block_footprint - (alloc_laddr - block_laddr);
     884             : 
     885   170946714 :   return fd_alloc_hdr_store( (void *)alloc_laddr, superblock, block_idx, sizeclass );
     886   170946714 : }
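                      : /* Usage sketch (align and sz values illustrative):
                      : 
                      :      ulong  max;
                      :      void * p = fd_alloc_malloc_at_least( join, 32UL, 100UL, &max );
                      :      // success: p is 32 byte aligned and p[0]..p[max-1] are usable with max>=100
                      :      // failure: p==NULL and max==0
                      :      ...
                      :      fd_alloc_free( join, p );  // free tolerates NULL
                      : */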
     887             : 
     888             : void
     889             : fd_alloc_free( fd_alloc_t * join,
     890   172679754 :                void *       laddr ) {
     891             : 
     892             :   /* Handle NULL alloc and/or NULL laddr */
     893             : 
     894   172679754 :   fd_alloc_t * alloc  = fd_alloc_private_join_alloc( join );
     895   172679754 :   if( FD_UNLIKELY( (!alloc) | (!laddr) ) ) return;
     896             : 
     897             :   /* At this point, we have a non-NULL alloc and a pointer to the first
     898             :      byte to an allocation done by it.  Load the allocation header and
     899             :      extract the sizeclass.  If the sizeclass indicates this is a large
     900             :      allocation, use fd_wksp_free to free this allocation (note that
     901             :      fd_wksp_free works for any byte within the wksp allocation). */
     902             : 
     903   171603138 :   fd_alloc_hdr_t hdr       = fd_alloc_hdr_load( laddr );
     904   171603138 :   ulong          sizeclass = fd_alloc_hdr_sizeclass( hdr );
     905             : 
     906   171603138 :   if( FD_UNLIKELY( sizeclass==FD_ALLOC_SIZECLASS_LARGE ) ) {
     907      788676 :     fd_wksp_t * wksp = fd_alloc_private_wksp( alloc );
     908      788676 :     fd_wksp_free( wksp, fd_wksp_gaddr_fast( wksp, laddr ) );
     909      788676 :     return;
     910      788676 :   }
     911             : 
     912             :   /* At this point, we have a small allocation.  Determine the
     913             :      superblock and block index from the header and then free the block. */
     914             : 
     915   170814462 :   fd_alloc_superblock_t * superblock  = fd_alloc_hdr_superblock( hdr, laddr );
     916   170814462 :   ulong                   block_idx   = fd_alloc_hdr_block_idx( hdr );
     917   170814462 :   fd_alloc_block_set_t    block       = fd_alloc_block_set_ele( block_idx );
     918   170814462 :   fd_alloc_block_set_t    free_blocks = fd_alloc_block_set_add( &superblock->free_blocks, block );
     919             : 
     920             : 
     921             : # if FD_HAS_DEEPASAN
     922             :   /* The portion of the block which is used for the header and the allocation
     923             :      should get poisoned. The alloc's laddr is already at least 8 byte aligned.
     924             :      The 8 bytes prior to the start of the laddr are used by the fd_alloc_hdr_t.
     925             :      These should get poisoned as the block is freed again. The region used by
     926             :      the allocation should also get poisoned: [laddr,block_laddr+block_footprint].
     927             :      However, we know that the size of the initial allocation was also 8 byte
     928             :      aligned so we align down the size of the range to poison safely.  */
     929             :   ulong block_footprint        = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].block_footprint;
     930             :   ulong block_laddr            = (ulong)superblock + sizeof(fd_alloc_superblock_t) + block_idx*block_footprint;
     931             :   ulong block_hi_addr          = fd_ulong_align_dn( block_laddr + block_footprint, FD_ASAN_ALIGN );
     932             :   ulong fd_alloc_hdr_footprint = fd_ulong_align_up( sizeof(fd_alloc_hdr_t), FD_ASAN_ALIGN );
     933             :   ulong fd_alloc_hdr_laddr     = (ulong)laddr - fd_alloc_hdr_footprint;
     934             :   ulong sz                     = block_hi_addr - (ulong)laddr + fd_alloc_hdr_footprint;
     935             :   fd_asan_poison( (void*)fd_alloc_hdr_laddr, sz );
     936             : # endif
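  /* Worked example of the poison arithmetic above (a sketch with
     hypothetical numbers, FD_ASAN_ALIGN==8): block_laddr 0x1000,
     block_footprint 100 and laddr 0x1010 give block_hi_addr 0x1064
     aligned down to 0x1060 and fd_alloc_hdr_footprint 8, so the
     poisoned range is [0x1008,0x1060), i.e. sz 0x58. */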
     937             : 
     938             :   /* At this point, free_blocks is the set of free blocks just before
     939             :      the free. */
     940             : 
     941   170814462 :   ulong free_cnt = fd_alloc_block_set_cnt( free_blocks );
     942   170814462 :   if( FD_UNLIKELY( !free_cnt ) ) {
     943             : 
     944             :     /* The superblock containing this block had no free blocks
     945             :        immediately before we freed the allocation.  Thus, at this point,
     946             :        nobody can allocate any blocks from this superblock (the
     947             :        superblock is neither an active superblock nor on the inactive
     948             :        stack as per the note in malloc above) and we need to get the
     949             :        superblock back into circulation for reuse.  It is okay if other
     950             :        threads concurrently free other blocks in this superblock while
     951             :        we are doing this (they know the superblock is either in
     952             :        circulation or is being put into circulation).
     953             : 
     954             :        Since there is at least one free block in the superblock and
      955             :        nobody can allocate from it until it is in circulation, putting it
     956             :        into circulation preserves the invariant that all superblocks in
     957             :        circulation have at least one free block.
     958             : 
     959             :        We have a bunch of options for putting this superblock back into
     960             :        circulation:
     961             : 
     962             :        - By pushing it onto the inactive stack
     963             :        - By making it the active superblock of the caller's cgroup
     964             :        - By making it the active superblock of the cgroup that most did
     965             :          the most recent malloc from it.
     966             :        - By making it the active superblock based on explicitly provided
     967             :          hint.
     968             :        - ...
     969             : 
      970             :        The first option is simplest to implement and is balanced across
      971             :        the common use cases (single threaded, malloc/free pairs with
      972             :        thread affinity, and pipelined).  E.g. single threaded will take
      973             :        extra time for a superblock to hop from inactive to active and
      974             :        potentially has slightly worse overallocation; similar story for
      975             :        paired.  Cache affinity in these two cases might be slightly
      976             :        degraded by empty superblocks hopping between concurrency groups
      977             :        via the inactive stack.  Pipelined naturally gets the superblock
      978             :        back to the malloc-ing thread, albeit with a brief hop through
      979             :        the inactive stack.
     980             : 
      981             :        The second option is about as simple and optimizes the single
      982             :        threaded and paired use cases, as the free-ing thread is likely
      983             :        the same thread that malloc'd this.  Pipelined is marginally
      984             :        worse as the superblock will have to take two hops before it
      985             :        gets reused again (from the free-ing thread's active superblock
      986             :        to the inactive stack to the malloc-ing thread's active
      987             :        superblock).
     988             : 
      989             :        The third and fourth options can simultaneously optimize all use
      990             :        cases but they require extra plumbing (either under the hood as
      991             :        per the note in malloc above or from the caller to provide the
      992             :        extra context).
     993             : 
     994             :        Currently we do the second option for simplicity and optimal
     995             :        behaviors in the single threaded and paired use cases. */
     996             : 
     997    11635899 :     fd_wksp_t * wksp = fd_alloc_private_wksp( alloc );
     998             : 
     999    11635899 :     ulong   cgroup      = fd_alloc_preferred_sizeclass_cgroup( sizeclass, fd_alloc_join_cgroup_hint( join ) );
    1000    11635899 :     ulong * active_slot = alloc->active_slot + sizeclass + FD_ALLOC_SIZECLASS_CNT*cgroup;
    1001             : 
    1002    11635899 :     ulong displaced_superblock_gaddr = fd_alloc_private_active_slot_replace( active_slot, fd_wksp_gaddr_fast( wksp, superblock ) );
    1003             : 
    1004             :     /* If this displaced an already active superblock, we need to push
    1005             :        the displaced superblock onto the inactive stack (note that the
    1006             :        superblock cannot be the same as the currently active superblock
    1007             :        because the superblock was not in circulation before). */
    1008             : 
    1009    11635899 :     if( FD_UNLIKELY( displaced_superblock_gaddr ) )
    1010     1348251 :       fd_alloc_private_inactive_stack_push( alloc->inactive_stack + sizeclass, wksp, displaced_superblock_gaddr );
    1011    11635899 :     return;
    1012    11635899 :   }
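/* The recirculation above hinges on fd_alloc_private_active_slot_replace
   being a single atomic swap.  A minimal sketch of the assumed
   semantics (hypothetical standalone form; the gcc __atomic builtin
   stands in for whatever FD_HAS_ATOMIC gated primitive the real code
   uses, with a plain read-modify-write in single threaded builds per
   the note at the top of this file). */

static ulong
example_active_slot_replace( ulong * active_slot,
                             ulong   new_gaddr ) {
# if FD_HAS_ATOMIC
  /* Swap new_gaddr in, returning the displaced superblock gaddr (0 if
     the slot was empty) so the caller can push any displaced
     superblock onto the inactive stack */
  return __atomic_exchange_n( active_slot, new_gaddr, __ATOMIC_SEQ_CST );
# else
  ulong old_gaddr = *active_slot; /* no concurrent users in this mode */
  *active_slot    = new_gaddr;
  return old_gaddr;
# endif
}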
    1013             : 
     1014             :   /* At this point, we know the superblock had some free blocks before
     1015             :      our free and thus is currently in circulation. */
    1016             : 
    1017   159178563 :   free_cnt++; /* Number of free blocks immediately after above free */
    1018             : 
    1019   159178563 :   ulong block_cnt = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].block_cnt;
    1020   159178563 :   if( FD_UNLIKELY( free_cnt==block_cnt ) ) {
    1021             : 
    1022             :     /* None of the blocks were in use after the above free.  We might
    1023             :        consider freeing it to reclaim space for other sizeclasses or
    1024             :        large allocations.  But we don't mind having a few totally empty
    1025             :        superblocks in circulation for a sizeclass as this prevents
    1026             :        things like:
    1027             : 
    1028             :          addr = malloc(sz);
    1029             :          free(addr);
    1030             :          addr = malloc(sz);
    1031             :          free(addr)
    1032             :          ...
    1033             :          addr = malloc(sz);
    1034             :          free(addr)
    1035             : 
    1036             :        from repeatedly needing to invoke malloc recursively to recreate
    1037             :        superblock hierarchies that were prematurely freed.
    1038             : 
    1039             :        Regardless, since this superblock is in circulation, we can't be
    1040             :        sure it is safe to delete because something might be malloc-ing
    1041             :        from it concurrently.  Thus, we are going to keep this superblock
    1042             :        in circulation as is.
    1043             : 
    1044             :        But, since we know we have at least 1 completely empty superblock
    1045             :        in circulation now, to prevent the unbounded accumulation of
    1046             :        completely empty superblocks, we will try to get an inactive
    1047             :        superblock and, if that is empty, delete that.
    1048             : 
    1049             :        This is pretty tricky as it is possible other threads are
    1050             :        concurrently trying to pop the inactive stack to do a malloc.  If
    1051             :        we actually unmapped the memory here, such a thread could seg
    1052             :        fault if it stalls after it reads the top of the stack but before
    1053             :        it queries the top for the next_gaddr (and we'd have to use
    1054             :        another strategy).  But that is not an issue here as the
    1055             :        underlying wksp memory is still mapped post-deletion regardless.
    1056             : 
     1057             :        Likewise, though the post deletion top->next_gaddr read will get
     1058             :        a stale value in this scenario, it will highly likely not be
     1059             :        injected into the inactive_stack because the CAS will detect that
     1060             :        inactive_stack top has changed and fail.
     1061             : 
     1062             :        And, lastly, we version inactive_stack top such that, even if a
     1063             :        thread stalls in pop after reading top->next_gaddr, other
     1064             :        threads then do operations that ultimately keep top the same but
     1065             :        change the value of top->next_gaddr, and the stalled thread
     1066             :        resumes, the version number held by the stalled thread will be
     1067             :        stale, causing the CAS to fail.  (There is a theoretical risk of
     1068             :        version number reuse but the version number is wide enough to
     1069             :        make that risk zero on any practical
     1070             :        timescale.) */
    1071             : 
    1072   109694100 :     fd_wksp_t * wksp                     = fd_alloc_private_wksp( alloc );
    1073   109694100 :     ulong       deletion_candidate_gaddr = fd_alloc_private_inactive_stack_pop( alloc->inactive_stack + sizeclass, wksp );
    1074             : 
    1075   109694100 :     if( FD_UNLIKELY( !deletion_candidate_gaddr ) ) return; /* No deletion_candidate, unclear branch prob */
    1076             : 
    1077      130560 :     fd_alloc_superblock_t * deletion_candidate = (fd_alloc_superblock_t *)fd_wksp_laddr_fast( wksp, deletion_candidate_gaddr );
    1078             : 
    1079      130560 :     ulong deletion_candidate_free_blocks;
    1080      130560 :     FD_COMPILER_MFENCE();
    1081      130560 :     deletion_candidate_free_blocks = FD_VOLATILE_CONST( deletion_candidate->free_blocks );
    1082      130560 :     FD_COMPILER_MFENCE();
    1083             : 
    1084      130560 :     if( FD_UNLIKELY( fd_alloc_block_set_cnt( deletion_candidate_free_blocks )==block_cnt ) ) /* Candidate empty -> delete it */
    1085      113367 :       fd_alloc_free( join, deletion_candidate );
    1086       17193 :     else /* Candidate not empty -> return it to circulation */
    1087       17193 :       fd_alloc_private_inactive_stack_push( alloc->inactive_stack + sizeclass, wksp, deletion_candidate_gaddr );
    1088             : 
    1089      130560 :     return;
    1090   109694100 :   }
    1091   159178563 : }
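/* A sketch of the ABA defense described above, under stated
   assumptions: the stack top packs a wide version with the top gaddr
   (as fd_alloc_vgaddr does), a 128 bit CAS is available (e.g. gcc on
   x86-64 with -mcx16) and, unlike the real pop, next_gaddr is passed
   in rather than read from the (potentially stale) top node.  Every
   successful pop bumps the version, so a CAS from a stale snapshot
   fails even if the gaddr half happens to match again. */

typedef unsigned __int128 example_vtop_t; /* [ ver 64 | gaddr 64 ] */

static inline example_vtop_t
example_vtop( ulong ver, ulong gaddr ) {
  return (((example_vtop_t)ver)<<64) | ((example_vtop_t)gaddr);
}

/* Returns 1 on success (gaddr of the popped node in *out_gaddr), 0 if
   the stack was empty or a concurrent operation intervened. */

static int
example_try_pop( example_vtop_t * top,
                 ulong            next_gaddr,
                 ulong *          out_gaddr ) {
  example_vtop_t old   = *top;
  ulong          ver   = (ulong)(old>>64);
  ulong          gaddr = (ulong) old;
  if( !gaddr ) return 0; /* empty stack */
  /* Fails if either the gaddr or the version half changed since the
     read above */
  if( !__sync_bool_compare_and_swap( top, old, example_vtop( ver+1UL, next_gaddr ) ) ) return 0;
  *out_gaddr = gaddr;
  return 1;
}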
    1092             : 
    1093             : void
    1094         399 : fd_alloc_compact( fd_alloc_t * join ) {
    1095         399 :   fd_alloc_t * alloc = fd_alloc_private_join_alloc( join );
    1096         399 :   if( FD_UNLIKELY( !alloc ) ) {
    1097           0 :     FD_LOG_WARNING(( "bad join" ));
    1098           0 :     return;
    1099           0 :   }
    1100             : 
    1101         399 :   fd_wksp_t * wksp = fd_alloc_private_wksp( alloc );
    1102             : 
    1103             :   /* We scan each sizeclass (in monotonically increasing order) for
     1104             :      completely empty superblocks that can thus be freed.  This has the
    1105             :      pleasant side effect that, as smaller empty superblocks get freed,
    1106             :      it can potentially allow for any larger superblocks in which they
    1107             :      are nested to be subsequently freed.  If no other operations are
    1108             :      running concurrently, any remaining gigantic superblocks should
    1109             :      contain at least one application allocation somewhere in them. */
    1110             : 
    1111       50673 :   for( ulong sizeclass=0UL; sizeclass<FD_ALLOC_SIZECLASS_CNT; sizeclass++ ) {
    1112       50274 :     ulong               block_cnt      = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].block_cnt;
    1113       50274 :     ulong               cgroup_cnt     = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].cgroup_mask + 1UL;
    1114       50274 :     fd_alloc_vgaddr_t * inactive_stack = alloc->inactive_stack + sizeclass;
    1115             : 
    1116             :     /* For each active superblock in this sizeclass */
    1117             : 
    1118      854658 :     for( ulong cgroup_idx=0UL; cgroup_idx<cgroup_cnt; cgroup_idx++ ) {
    1119      804384 :       ulong * active_slot      = alloc->active_slot + sizeclass + FD_ALLOC_SIZECLASS_CNT*cgroup_idx;
    1120      804384 :       ulong   superblock_gaddr = fd_alloc_private_active_slot_replace( active_slot, 0UL );
    1121      804384 :       if( !superblock_gaddr ) continue; /* application dependent branch prob */
    1122             : 
    1123        1893 :       fd_alloc_superblock_t * superblock = (fd_alloc_superblock_t *)fd_wksp_laddr_fast( wksp, superblock_gaddr );
    1124             : 
    1125             :       /* At this point, we have atomically acquired the cgroup_idx's
    1126             :          active superblock, there is no active superblock for
    1127             :          cgroup_idx, and it has at least one free block.  Since the
    1128             :          superblock is out of circulation for malloc, nobody will malloc
    1129             :          anything from this superblock behind our back.  And if this
    1130             :          superblock is completely empty, nobody will call free on any
    1131             :          blocks in this superblock behind our back.  Thus we can free
    1132             :          this superblock safely.
    1133             : 
    1134             :          If there are some blocks still allocated, we put the superblock
    1135             :          back into circulation (we know from the above it still has at
    1136             :          least one free block, preserving the invariant).  This might
    1137             :          displace a superblock that another thread made active behind
     1138             :          our back.  We push any such superblock onto the inactive
    1139             :          stack (it also will have at least one free block for the same
    1140             :          reasons). */
    1141             : 
    1142        1893 :       if( fd_alloc_block_set_cnt( superblock->free_blocks )==block_cnt ) fd_alloc_free( join, superblock );
    1143        1827 :       else {
    1144        1827 :         ulong displaced_superblock_gaddr = fd_alloc_private_active_slot_replace( active_slot, superblock_gaddr );
    1145        1827 :         if( FD_UNLIKELY( displaced_superblock_gaddr ) )
    1146           0 :           fd_alloc_private_inactive_stack_push( inactive_stack, wksp, displaced_superblock_gaddr );
    1147        1827 :       }
    1148        1893 :     }
    1149             : 
    1150             :     /* Drain the inactive stack for this sizeclass.  All empty
    1151             :        superblocks found will be freed (safe for the same reasons as
    1152             :        above).  All remaining superblocks will be pushed onto a local
     1153             :        stack (and every one will have at least one free block while
    1154             :        there for the same reasons).  After the inactive stack drain, we
    1155             :        drain the local stack back into the inactive stack to get all
    1156             :        these remaining superblocks back into circulation (also safe for
    1157             :        the same reasons) and with same relative ordering (not required).
    1158             :        We technically don't need to use a lockfree push / pop for the
    1159             :        local stack but no sense in implementing a second version for
    1160             :        this mostly diagnostic / teardown oriented use case. */
    1161             : 
    1162       50274 :     fd_alloc_vgaddr_t local_stack[1];
    1163             : 
    1164       50274 :     local_stack[0] = fd_alloc_vgaddr( 0UL, 0UL );
    1165             : 
    1166       50652 :     for(;;) {
    1167       50652 :       ulong superblock_gaddr = fd_alloc_private_inactive_stack_pop( inactive_stack, wksp );
    1168       50652 :       if( !superblock_gaddr ) break; /* application dependent branch prob */
    1169         378 :       fd_alloc_superblock_t * superblock = (fd_alloc_superblock_t *)fd_wksp_laddr_fast( wksp, superblock_gaddr );
    1170             : 
    1171         378 :       if( fd_alloc_block_set_cnt( superblock->free_blocks )==block_cnt ) fd_alloc_free( join, superblock );
    1172         378 :       else fd_alloc_private_inactive_stack_push( local_stack, wksp, superblock_gaddr );
    1173         378 :     }
    1174             : 
    1175       50652 :     for(;;) {
    1176       50652 :       ulong superblock_gaddr = fd_alloc_private_inactive_stack_pop( local_stack, wksp );
    1177       50652 :       if( !superblock_gaddr ) break; /* application dependent branch prob */
    1178             : 
    1179         378 :       fd_alloc_private_inactive_stack_push( inactive_stack, wksp, superblock_gaddr );
    1180         378 :     }
    1181       50274 :   }
    1182         399 : }
    1183             : 
    1184             : #include "../wksp/fd_wksp_private.h"
    1185             : 
    1186             : int
    1187         204 : fd_alloc_is_empty( fd_alloc_t * join ) {
    1188         204 :   fd_alloc_t * alloc = fd_alloc_private_join_alloc( join );
    1189         204 :   if( FD_UNLIKELY( !alloc ) ) return 0;
    1190             : 
    1191         204 :   fd_alloc_compact( join );
    1192             : 
    1193             :   /* At this point (assuming no concurrent operations on this alloc),
    1194             :      all remaining large allocations contain at least one user
    1195             :      allocation.  Thus if there are any large allocs remaining for this
     1196             :      alloc, we know the alloc is not empty.  Since the wksp alloc that
     1197             :      holds the alloc itself might use the same tag used for large
     1198             :      allocations, we handle that as well.  We do this in a brute force
     1199             :      way to avoid taking a lock (note that this calculation should
     1200             :      really only be done as a non-performance-critical diagnostic and
     1201             :      then on a quiescent system). */
    1202             : 
    1203         204 :   fd_wksp_t * wksp = fd_alloc_private_wksp( alloc );
    1204             : 
    1205         204 :   ulong alloc_lo  = fd_wksp_gaddr_fast( wksp, alloc );
    1206         204 :   ulong alloc_hi  = alloc_lo + FD_ALLOC_FOOTPRINT;
    1207         204 :   ulong alloc_tag = alloc->tag;
    1208             : 
    1209         204 :   ulong                     part_max = wksp->part_max;
    1210         204 :   fd_wksp_private_pinfo_t * pinfo    = fd_wksp_private_pinfo( wksp );
    1211             : 
    1212         204 :   ulong i;
    1213      344316 :   for( i=0UL; i<part_max; i++ ) {
    1214      344304 :     if( FD_LIKELY( pinfo[ i ].tag!=alloc_tag ) ) continue; /* optimize for no leak case */
    1215         195 :     ulong gaddr_lo = pinfo[ i ].gaddr_lo;
    1216         195 :     ulong gaddr_hi = pinfo[ i ].gaddr_hi;
    1217         195 :     if( FD_UNLIKELY( !((gaddr_lo<=alloc_lo) & (alloc_hi<=gaddr_hi)) ) ) break; /* optimize for no leak case */
    1218         195 :   }
    1219             : 
    1220         204 :   return i==part_max;
    1221         204 : }
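/* A typical use of the above is a teardown-time leak check
   (hypothetical caller; assumes the system is quiescent as noted
   above). */

static void
example_leak_check( fd_alloc_t * join ) {
  /* is_empty compacts first, then brute force scans the wksp partition
     table for partitions still carrying the alloc's tag */
  if( FD_UNLIKELY( !fd_alloc_is_empty( join ) ) )
    FD_LOG_WARNING(( "allocations (or leaks) remain at teardown" ));
}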
    1222             : 
    1223             : /**********************************************************************/
    1224             : 
    1225             : #include <stdio.h>
    1226             : 
    1227        8178 : #define TRAP(x) do { int _cnt = (x); if( _cnt<0 ) { return _cnt; } cnt += _cnt; } while(0)
    1228             : 
    1229             : /* fd_alloc_superblock_fprintf pretty prints to the given stream
    1230             :    exhaustive details about the current state of the given superblock in
    1231             :    wksp at superblock_gaddr.  The superblock's parameters are given by
     1232             :    sizeclass, block_cnt, block_footprint.  Diagnostics will be
     1233             :    accumulated in ctr.  Return behavior matches fprintf (i.e. the
    1234             :    number of characters output to stream or a negative error code).
    1235             :    This is meant to be called exclusively from fd_alloc_fprintf and does
    1236             :    no error trapping of its inputs. */
    1237             : 
    1238             : static int
    1239             : fd_alloc_superblock_fprintf( fd_wksp_t * wksp,             /* non-NULL */
    1240             :                              ulong       superblock_gaddr, /* valid gaddr for wksp */
    1241             :                              ulong       sizeclass,        /* valid sizeclass */
    1242             :                              ulong       block_cnt,        /* matches sizeclass cfg */
    1243             :                              ulong       block_footprint,  /* matches sizeclass cfg */
    1244             :                              FILE *      stream,           /* non-NULL */
    1245        1488 :                              ulong *     ctr ) {           /* non-NULL, room for 2 */
    1246        1488 :   int cnt = 0;
    1247             : 
     1248             :   /* Print the superblock header */
    1249             : 
    1250        1488 :   fd_alloc_superblock_t * superblock = fd_wksp_laddr_fast( wksp, superblock_gaddr );
    1251             : 
    1252        1488 :   fd_alloc_block_set_t free_blocks = superblock->free_blocks;
    1253             : 
    1254        1488 :   ulong msb = fd_ulong_shift_right( free_blocks, (int)block_cnt ); /* Note: block_cnt==64 possible */
    1255        1488 :   if( FD_UNLIKELY( msb ) ) ctr[0]++;
    1256        1488 :   TRAP( fprintf( stream, "free_blocks 0x%lx (%s)\n", free_blocks, msb==0UL ? "good" : "bad" ) );
    1257             : 
    1258             :   /* For each block */
    1259             : 
    1260       41337 :   for( ulong block_idx=0UL; block_idx<block_cnt; block_idx++ ) {
    1261       39849 :     ulong gaddr_lo = superblock_gaddr + sizeof(fd_alloc_superblock_t) + block_idx*block_footprint;
    1262       39849 :     ulong gaddr_hi = gaddr_lo + block_footprint;
    1263             : 
    1264       39849 :     if( fd_alloc_block_set_test( free_blocks, block_idx ) ) { /* Free block */
    1265             : 
    1266             :     //TRAP( fprintf( stream, "\t\t\tblock %2lu: gaddr %s:%lu-%s:%lu, malloc_est -, align_est -, sz_est - (free)\n",
    1267             :     //               block_idx, wksp->name, gaddr_lo, wksp->name, gaddr_hi-1UL ) );
    1268             : 
    1269       39111 :     } else { /* Used block */
    1270             : 
    1271             :       /* Search the partition for a plausible fd_alloc_hdr_t.  This
     1272             :          process is nearly identical to the one described for large
    1273             :          allocations below. */
    1274             : 
    1275        6282 :       for( ulong align_est = 1UL << fd_ulong_find_msb( block_footprint - sizeof(fd_alloc_hdr_t) );;) {
    1276        6282 :         ulong   gaddr_est = fd_ulong_align_up( gaddr_lo + sizeof(fd_alloc_hdr_t), align_est );
    1277        6282 :         uchar * laddr_est = (uchar *)fd_wksp_laddr_fast( wksp, gaddr_est );
    1278        6282 :         fd_alloc_hdr_t hdr = FD_LOAD( fd_alloc_hdr_t, laddr_est - sizeof(fd_alloc_hdr_t) );
    1279             : 
    1280        6282 :         if( fd_alloc_hdr_sizeclass ( hdr )           ==sizeclass &&
    1281        6282 :             fd_alloc_hdr_block_idx ( hdr )           ==block_idx &&
    1282        6282 :             fd_alloc_hdr_superblock( hdr, laddr_est )==superblock ) {
    1283         738 :           ulong sz_est = block_footprint - sizeof(fd_alloc_hdr_t) - align_est + 1UL;
    1284         738 :           TRAP( fprintf( stream, "\t\t\tblock %2lu: gaddr %s:%lu-%s:%lu, malloc_est %s:%lu, align_est %lu, sz_est %lu\n",
    1285         738 :                          block_idx, wksp->name, gaddr_lo, wksp->name, gaddr_hi-1UL, wksp->name, gaddr_est, align_est, sz_est ) );
    1286         738 :           ctr[1]++;
    1287         738 :           break;
    1288         738 :         }
    1289             : 
    1290        5544 :         align_est >>= 1;
    1291        5544 :         if( FD_UNLIKELY( !align_est ) ) {
    1292           0 :           TRAP( fprintf( stream, "\t\t\tblock %2lu: gaddr %s:%lu-%s:%lu, malloc_est -, align est -, sz_est - (bad)\n",
    1293           0 :                          block_idx, wksp->name, gaddr_lo, wksp->name, gaddr_hi-1UL ) );
    1294           0 :           ctr[0]++;
    1295           0 :           break;
    1296           0 :         }
    1297        5544 :       }
    1298         738 :     }
    1299       39849 :   }
    1300             : 
    1301        1488 :   return cnt;
    1302        1488 : }
    1303             : 
    1304             : /* fd_alloc_fprintf pretty prints to the given stream exhaustive details
    1305             :    about the current state of the fd_alloc corresponding to the current
     1306             :    local join.  As this function is typically used for debugging,
     1307             :    end-of-program diagnostics, etc, it assumes there are no
     1308             :    concurrent operations on the fd_alloc while running.
    1309             :    might not be able to find all small allocations and determine precise
    1310             :    details about allocations in general.  It will however be able to
    1311             :    find all large allocations assuming the user tagged the structure
    1312             :    properly (and that includes finding all the gigantic superblocks in
    1313             :    which all individual small allocations are contained).  Return
    1314             :    behavior matches fprintf return behavior (i.e. the number of
    1315             :    characters printed to stream or a negative error code). */
    1316             : 
    1317             : int
    1318             : fd_alloc_fprintf( fd_alloc_t * join,
    1319          12 :                   FILE *       stream ) {
    1320          12 :   if( FD_UNLIKELY( !stream ) ) return 0; /* NULL stream, can't print anything */
    1321             : 
    1322          12 :   int cnt = 0;
    1323             : 
    1324          12 :   ulong ctr[6];
    1325          12 :   ctr[0] = 0UL; /* errors detected */
    1326          12 :   ctr[1] = 0UL; /* small alloc found */
    1327          12 :   ctr[2] = 0UL; /* wksp partitions used */
    1328          12 :   ctr[3] = 0UL; /* wksp bytes used */
    1329          12 :   ctr[4] = 0UL; /* wksp partitions used for large alloc */
    1330          12 :   ctr[5] = 0UL; /* wksp bytes used for large alloc */
    1331             : 
    1332          12 :   fd_alloc_t * alloc       = fd_alloc_private_join_alloc( join );
    1333          12 :   ulong        cgroup_hint = fd_alloc_join_cgroup_hint  ( join );
    1334             : 
    1335          12 :   if( FD_UNLIKELY( !alloc ) ) { /* NULL join passed */
    1336             : 
    1337           0 :     TRAP( fprintf( stream, "alloc: gaddr -, join_cgroup_hint %lu, magic 0x0 (bad)\n", cgroup_hint ) );
    1338           0 :     ctr[0]++;
    1339             : 
    1340          12 :   } else { /* Normal join */
    1341             : 
    1342             :     /* Count the space used by alloc metadata. */
    1343             : 
    1344          12 :     ctr[2] += 1UL;
    1345          12 :     ctr[3] += FD_ALLOC_FOOTPRINT;
    1346             : 
    1347          12 :     fd_wksp_t * wksp = fd_alloc_private_wksp( alloc );
    1348             : 
    1349             :     /* Print the summary header */
    1350             : 
    1351          12 :     TRAP( fprintf( stream, "alloc: gaddr %s:%lu, join_cgroup_hint %lu, magic 0x%lx (%s)\n",
    1352          12 :                    wksp->name, fd_wksp_gaddr_fast( wksp, alloc ), cgroup_hint,
    1353          12 :                    alloc->magic, alloc->magic==FD_ALLOC_MAGIC ? "good" : "bad" ) );
    1354          12 :     if( FD_UNLIKELY( alloc->magic!=FD_ALLOC_MAGIC ) ) ctr[0]++;
    1355             : 
    1356             :     /* Print known details about each sizeclass */
    1357             : 
    1358          12 :     ulong block_footprint = 0UL;
    1359        1524 :     for( ulong sizeclass=0UL; sizeclass<FD_ALLOC_SIZECLASS_CNT; sizeclass++ ) {
    1360        1512 :       ulong block_footprint_prev = block_footprint;
    1361        1512 :       /**/  block_footprint      = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].block_footprint;
    1362             : 
    1363        1512 :       ulong superblock_footprint = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].superblock_footprint;
    1364        1512 :       ulong block_cnt            = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].block_cnt;
    1365        1512 :       ulong cgroup_cnt           = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].cgroup_mask + 1UL;
    1366             : 
    1367        1512 :       fd_alloc_vgaddr_t inactive_stack = alloc->inactive_stack[ sizeclass ];
    1368             : 
    1369        1512 :       ulong inactive_stack_ver   = (ulong)fd_alloc_vgaddr_ver( inactive_stack );
    1370        1512 :       ulong inactive_stack_gaddr = (ulong)fd_alloc_vgaddr_off( inactive_stack );
    1371             : 
    1372             :       /* Omit sizeclasses that have no superblocks in circulation */
    1373             : 
    1374        1512 :       int do_print = !!inactive_stack_gaddr;
    1375        1512 :       if( !do_print ) {
    1376        2076 :         for( ulong cgroup_idx=0UL; cgroup_idx<cgroup_cnt; cgroup_idx++ ) {
    1377        2040 :           if( alloc->active_slot[ sizeclass + FD_ALLOC_SIZECLASS_CNT*cgroup_idx ] ) {
    1378        1464 :             do_print = 1;
    1379        1464 :             break;
    1380        1464 :           }
    1381        2040 :         }
    1382        1500 :         if( !do_print ) continue;
    1383        1500 :       }
    1384             : 
    1385             :       /* Print size class header */
    1386             : 
    1387        1476 :       TRAP( fprintf( stream,
    1388        1476 :                      "\tsizeclass %lu: superblock_footprint %lu, block_footprint %lu-%lu, block_cnt %lu, cgroup_cnt %lu\n",
    1389        1476 :                      sizeclass, superblock_footprint, block_footprint_prev+1UL, block_footprint, block_cnt, cgroup_cnt ) );
    1390             : 
    1391             :       /* Print inactive stack top */
    1392             : 
    1393        1476 :       TRAP( fprintf( stream, "\t\tinactive_stack: gaddr %s:%lu, version %lu\n",
    1394        1476 :                      wksp->name, inactive_stack_gaddr, inactive_stack_ver ) );
    1395             : 
    1396             :       /* Print active superblock details */
    1397             : 
    1398        1476 :       ulong superblock_gaddr;
    1399             : 
    1400       25092 :       for( ulong cgroup_idx=0UL; cgroup_idx<cgroup_cnt; cgroup_idx++ ) {
    1401       23616 :         superblock_gaddr = alloc->active_slot[ sizeclass + FD_ALLOC_SIZECLASS_CNT*cgroup_idx ];
    1402       23616 :         if( !superblock_gaddr ) {
     1403             :         //TRAP( fprintf( stream, "\t\tcgroup %2lu active superblock -, next -\n", cgroup_idx ) );
    1404       22143 :           continue;
    1405       22143 :         }
    1406        1473 :         ulong next_gaddr = ((fd_alloc_superblock_t *)fd_wksp_laddr_fast( wksp, superblock_gaddr))->next_gaddr;
    1407        1473 :         TRAP( fprintf( stream, "\t\tsuperblock: cgroup_idx %lu, gaddr %s:%lu, next %s:%lu (ignored), ",
    1408        1473 :                        cgroup_idx, wksp->name, superblock_gaddr, wksp->name, next_gaddr ) );
    1409        1473 :         TRAP( fd_alloc_superblock_fprintf( wksp, superblock_gaddr, sizeclass, block_cnt, block_footprint, stream, ctr ) );
    1410        1473 :       }
    1411             : 
    1412             :       /* Print inactive superblock details */
    1413             : 
    1414        1476 :       superblock_gaddr = inactive_stack_gaddr;
    1415        1491 :       while( superblock_gaddr ) {
    1416          15 :         ulong next_gaddr = ((fd_alloc_superblock_t *)fd_wksp_laddr_fast( wksp, superblock_gaddr))->next_gaddr;
    1417          15 :         TRAP( fprintf( stream, "\t\tsuperblock: cgroup_idx -, gaddr %s:%lu, next %s:%lu, ",
    1418          15 :                        wksp->name, superblock_gaddr, wksp->name, next_gaddr ) );
    1419          15 :         TRAP( fd_alloc_superblock_fprintf( wksp, superblock_gaddr, sizeclass, block_cnt, block_footprint, stream, ctr ) );
    1420          15 :         superblock_gaddr = next_gaddr;
    1421          15 :       }
    1422        1476 :     }
    1423             : 
    1424             :     /* Scan the wksp partition table for partitions that match this
    1425             :        allocation tag.  Like the is_empty diagnostic, we do this in a
     1426             :        brute force way that is not algorithmically efficient to avoid
     1427             :        taking a lock. */
    1428             : 
    1429          12 :     ulong wksp_lo   = wksp->gaddr_lo;
    1430          12 :     ulong wksp_hi   = wksp->gaddr_hi;
    1431             : 
    1432          12 :     ulong alloc_lo  = fd_wksp_gaddr_fast( wksp, alloc );
    1433          12 :     ulong alloc_hi  = alloc_lo + FD_ALLOC_FOOTPRINT;
    1434          12 :     ulong alloc_tag = alloc->tag;
    1435             : 
    1436          12 :     ulong                     part_max = wksp->part_max;
    1437          12 :     fd_wksp_private_pinfo_t * pinfo    = fd_wksp_private_pinfo( wksp );
    1438             : 
    1439          12 :     ulong i;
    1440          48 :     for( i=0UL; i<part_max; i++ ) {
    1441          48 :       if( pinfo[ i ].tag!=alloc_tag ) continue; /* skip ones that don't match */
    1442          12 :       ulong gaddr_lo = pinfo[ i ].gaddr_lo;
    1443          12 :       ulong gaddr_hi = pinfo[ i ].gaddr_hi;
     1444          12 :       if( FD_UNLIKELY( !((wksp_lo<=gaddr_lo) & (gaddr_lo<gaddr_hi) & (gaddr_hi<=wksp_hi)) ) ) break; /* malformed partition */
    1445             : 
    1446             :       /* If the user used the same tag for both the alloc metadata and the
    1447             :          alloc allocations, skip the metadata partition */
    1448             : 
     1449           0 :       if( FD_UNLIKELY( (gaddr_lo<=alloc_lo) & (alloc_hi<=gaddr_hi) ) ) continue; /* skip partition containing metadata */
    1450             : 
    1451           0 :       ulong part_footprint = gaddr_hi - gaddr_lo;
    1452             : 
    1453           0 :       if( FD_UNLIKELY( part_footprint<=sizeof(fd_alloc_hdr_t) ) ) continue; /* skip obviously not an allocation */
    1454             : 
    1455             :       /* Search the partition for a plausible fd_alloc_hdr_t.  There
    1456             :          will be at least one if the partition isn't bad.  We scan
    1457             :          potential alignments in reverse order such that noise in the
    1458             :          alignment padding from older and less aligned large allocs will
    1459             :          not trigger the detection logic.  It is still possible for this
    1460             :          logic to spuriously fire if the user just happened to have some
    1461             :          data that looked like a suitable header (hence these are
     1462             :          estimates).  The estimates could be improved, at a greater
     1463             :          time overhead in fd_alloc, if we zeroed the padding before the
     1464             :          headers as noted above and scanned alignments forward.  Once
     1465             :          we have a plausible location and alignment, we can compute
     1466             :          bounds on how large a size was used.  We use the upper bound
     1467             :          as the size estimate for simplicity (it would take a lot more
     1468             :          space and time overhead in normal operation to track this explicitly). */
    1469             : 
    1470           0 :       for( ulong align_est = 1UL << fd_ulong_find_msb( part_footprint-sizeof(fd_alloc_hdr_t) );;) {
    1471             : 
    1472             :         /* Look at a potential header location */
    1473             : 
    1474           0 :         ulong   gaddr_est = fd_ulong_align_up( gaddr_lo + sizeof(fd_alloc_hdr_t), align_est );
    1475           0 :         uchar * laddr_est = (uchar *)fd_wksp_laddr_fast( wksp, gaddr_est );
    1476           0 :         fd_alloc_hdr_t hdr = FD_LOAD( fd_alloc_hdr_t, laddr_est - sizeof(fd_alloc_hdr_t) );
    1477             : 
    1478             :         /* If it matches what a header at this location should be,
    1479             :            print out the estimated allocation details. */
    1480             : 
    1481           0 :         if( fd_alloc_hdr_is_large( hdr ) ) {
    1482           0 :           int is_superblock = fd_alloc_hdr_large_is_superblock( hdr );
    1483             : 
    1484           0 :           ulong sz_est = part_footprint - sizeof(fd_alloc_hdr_t) - align_est + 1UL;
    1485           0 :           TRAP( fprintf( stream, "\tlarge: gaddr %s:%lu-%s:%lu, malloc_est %s:%lu, align_est %lu, sz_est %lu %s\n",
    1486           0 :                          wksp->name, gaddr_lo, wksp->name, gaddr_hi-1UL, wksp->name, gaddr_est, align_est, sz_est,
    1487           0 :                          is_superblock ? "(superblock)" : "(large)" ) );
    1488           0 :           ctr[2]++;
    1489           0 :           ctr[3] += part_footprint;
    1490           0 :           ctr[4] += fd_ulong_if( is_superblock, 0UL, 1UL            );
    1491           0 :           ctr[5] += fd_ulong_if( is_superblock, 0UL, part_footprint );
    1492             : 
    1493           0 :           break;
    1494           0 :         }
    1495             : 
    1496             :         /* Nope ... try the next.  If no more potential locations, the
    1497             :            partition is corrupt. */
    1498             : 
    1499           0 :         align_est >>= 1;
    1500           0 :         if( FD_UNLIKELY( !align_est ) ) {
    1501           0 :           TRAP( fprintf( stream, "\tlarge: gaddr %s:%lu-%s:%lu, malloc_est -, align_est -, sz_est - (bad)\n",
    1502           0 :                          wksp->name, gaddr_lo, wksp->name, gaddr_hi-1UL ) );
    1503           0 :           ctr[0]++;
    1504           0 :           break;
    1505           0 :         }
    1506           0 :       }
    1507           0 :     }
    1508          12 :   }
    1509             : 
    1510             :   /* Print summary statistics */
    1511             : 
    1512          12 :   TRAP( fprintf( stream,
    1513          12 :                  "\terrors detected       %21lu\n"
    1514          12 :                  "\tsmall alloc found     %21lu\n"
    1515          12 :                  "\twksp part cnt         %21lu\n"
    1516          12 :                  "\twksp used             %21lu\n"
    1517          12 :                  "\tlarge alloc cnt       %21lu\n"
    1518          12 :                  "\tlarge alloc wksp used %21lu\n",
    1519          12 :                  ctr[0], ctr[1], ctr[2], ctr[3], ctr[4], ctr[5] ) );
    1520             : 
    1521          12 :   return cnt;
    1522          12 : }
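/* The alignment probing heuristic used by both the block and the large
   partition scans above, isolated as a sketch (hypothetical names; the
   plausible callback stands in for the sizeclass / block_idx /
   superblock checks the real code does on the candidate
   fd_alloc_hdr_t).  Caller promises footprint>hdr_sz. */

static ulong
example_probe_malloc_off( ulong footprint,                /* region size      */
                          ulong hdr_sz,                   /* header footprint */
                          int (*plausible)( ulong off ) ) {
  /* Probe candidate alignments from largest to smallest; scanning
     large to small avoids triggering on residue from older, less
     aligned allocations left in the padding */
  for( ulong align_est = 1UL << fd_ulong_find_msb( footprint - hdr_sz );
       align_est; align_est >>= 1 ) {
    ulong off_est = fd_ulong_align_up( hdr_sz, align_est );
    if( plausible( off_est ) ) return off_est; /* estimated malloc offset */
  }
  return 0UL; /* no plausible header ... region looks corrupt */
}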
    1523             : 
    1524             : /* Virtual function table
    1525             :    TODO type pun functions instead of using virtual wrappers? */
    1526             : 
    1527             : static void *
    1528             : fd_alloc_malloc_virtual( void * self,
    1529             :                          ulong  align,
    1530      473115 :                          ulong  sz ) {
    1531      473115 :   return fd_alloc_malloc( (fd_alloc_t *)self, align, sz );
    1532      473115 : }
    1533             : 
    1534             : static void
    1535             : fd_alloc_free_virtual( void * self,
    1536     1538184 :                        void * addr ) {
    1537     1538184 :   fd_alloc_free( (fd_alloc_t *)self, addr );
    1538     1538184 : }
    1539             : 
    1540             : const fd_valloc_vtable_t
    1541             : fd_alloc_vtable = {
    1542             :   .malloc = fd_alloc_malloc_virtual,
    1543             :   .free   = fd_alloc_free_virtual
    1544             : };
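/* A minimal example of dispatching through this table (hypothetical
   caller; assumes alloc is a valid local join).  Code written against
   the fd_valloc vtable can allocate from an fd_alloc without knowing
   the concrete allocator type. */

static void *
example_virtual_malloc( fd_alloc_t * alloc,
                        ulong        align,
                        ulong        sz ) {
  return fd_alloc_vtable.malloc( alloc, align, sz );
}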
    1545             : 
    1546             : #undef TRAP

Generated by: LCOV version 1.14