LCOV - code coverage report
Current view: top level - util/shmem - fd_shmem.h (source / functions)
Test: cov.lcov
Date: 2024-11-13 11:58:15
                 Hit    Total   Coverage
Lines:            14       22     63.6 %
Functions:         4     1914      0.2 %

          Line data    Source code
       1             : #ifndef HEADER_fd_src_util_shmem_fd_shmem_h
       2             : #define HEADER_fd_src_util_shmem_fd_shmem_h
       3             : 
       4             : /* APIs for NUMA aware and page size aware manipulation of complex
       5             :    interprocess shared memory topologies.  This API is designed to
       6             :    interoperate with the fd_shmem_cfg command and control script for
       7             :    host configuration.  fd must be booted to use the APIs in this
       8             :    module. */
       9             : 
      10             : #include "../log/fd_log.h"
      11             : 
      12             : /* FD_SHMEM_JOIN_MAX gives the maximum number of unique fd shmem regions
       13             :    that can be mapped concurrently into the thread group's local
      14             :    address space.  Should be positive.  Powers of two minus 1 have good
      15             :    Feng Shui but this is not strictly required. */
      16             : 
      17             : #define FD_SHMEM_JOIN_MAX (255UL)
      18             : 
      19             : /* FD_SHMEM_JOIN_MODE_* are used to specify how a memory region should
      20             :    be initially mapped into the thread group's local address space by
      21             :    fd_shmem_join. */
      22             : 
      23         195 : #define FD_SHMEM_JOIN_MODE_READ_ONLY   (0)
      24      111711 : #define FD_SHMEM_JOIN_MODE_READ_WRITE  (1)
      25             : 
      26             : /* FD_SHMEM_{NUMA,CPU}_MAX give the maximum number of numa nodes and
      27             :    logical cpus supported by fd_shmem.
      28             :    FD_SHMEM_CPU_MAX>=FD_SHMEM_NUMA_MAX>0. */
      29             : 
      30        1050 : #define FD_SHMEM_NUMA_MAX (1024UL)
      31        1140 : #define FD_SHMEM_CPU_MAX  (1024UL)
      32             : 
       33             : /* FD_SHMEM_{UNKNOWN,NORMAL,HUGE,GIGANTIC}_{LG_PAGE_SZ,PAGE_SZ} give the
      34             :    log2 page size / page size on a hosted x86 target.  These are
      35             :    explicit to workaround various compiler limitations in common use
      36             :    cases. */
      37             : 
      38           0 : #define FD_SHMEM_UNKNOWN_LG_PAGE_SZ  (-1)
      39           0 : #define FD_SHMEM_NORMAL_LG_PAGE_SZ   (12)
      40           0 : #define FD_SHMEM_HUGE_LG_PAGE_SZ     (21)
      41           0 : #define FD_SHMEM_GIGANTIC_LG_PAGE_SZ (30)
      42             : 
      43           0 : #define FD_SHMEM_UNKNOWN_PAGE_SZ           (0UL)
      44        6198 : #define FD_SHMEM_NORMAL_PAGE_SZ         (4096UL)
      45        6636 : #define FD_SHMEM_HUGE_PAGE_SZ        (2097152UL)
      46       11253 : #define FD_SHMEM_GIGANTIC_PAGE_SZ (1073741824UL)
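
/* Illustrative note (added, not part of the original header): the page
   size and log2 page size macros above are consistent, i.e. for the
   NORMAL / HUGE / GIGANTIC sizes:

     FD_SHMEM_NORMAL_PAGE_SZ   == 1UL << FD_SHMEM_NORMAL_LG_PAGE_SZ   ==       4096UL
     FD_SHMEM_HUGE_PAGE_SZ     == 1UL << FD_SHMEM_HUGE_LG_PAGE_SZ     ==    2097152UL
     FD_SHMEM_GIGANTIC_PAGE_SZ == 1UL << FD_SHMEM_GIGANTIC_LG_PAGE_SZ == 1073741824UL */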
      47             : 
      48             : /* FD_SHMEM_NAME_MAX gives the maximum number of bytes needed to hold
       49             :    the cstr with the name of an fd_shmem region.  That is, a valid fd_shmem
      50             :    region name will have a strlen in [1,FD_SHMEM_NAME_MAX).  (Harmonized
      51             :    with FD_LOG_NAME_MAX but this is not strictly required.) */
      52             : 
      53      333267 : #define FD_SHMEM_NAME_MAX FD_LOG_NAME_MAX
      54             : 
      55             : /* FD_SHMEM_PAGE_SZ_CSTR_MAX is the size of a buffer large enough to
       56             :    hold a shmem page sz cstr (==strlen("gigantic")+1). */
      57             : 
      58             : #define FD_SHMEM_PAGE_SZ_CSTR_MAX (9UL)
      59             : 
      60             : /* fd_shmem_private_key_t is for internal use (tmpl/fd_map
      61             :    interoperability). */
      62             : 
      63             : struct fd_shmem_private_key {
      64             :   char cstr[ FD_SHMEM_NAME_MAX ];
      65             : };
      66             : 
      67             : typedef struct fd_shmem_private_key fd_shmem_private_key_t;
      68             : 
       69             : /* A fd_shmem_join_info_t is used by various APIs to provide low level
      70             :    details about a join. */
      71             : 
      72             : struct fd_shmem_join_info {
      73             :   long   ref_cnt;  /* Number of joins, -1L indicates a join/leave is in progress.
       74             :                       Will be -1 while the join is in a join/leave func and positive otherwise. */
       75             :   void * join;     /* Local join handle (i.e. what join_func returned).  Will be NULL during a call to the join func. */
      76             :   void * shmem;    /* Location in the thread group local address space of name.  Will be non-NULL and page_sz aligned. */
      77             :   ulong  page_sz;  /* Page size used for the region.  Will be a supported page size (e.g. non-zero integer power-of-two) */
      78             :   ulong  page_cnt; /* Number of pages in the region.  Will be non-zero, page_sz*page_cnt will not overflow */
       79             :   int    mode;     /* Will be in FD_SHMEM_JOIN_MODE_{READ_ONLY,READ_WRITE}.  Attempting to execute in the shmem region, or (if
       80             :                       read-only) to write to it, will fault the thread group. */
      81             :   uint   hash;     /* Will be (uint)fd_hash( 0UL, name, FD_SHMEM_NAME_MAX ) */
      82             :   union {
      83             :     char                   name[ FD_SHMEM_NAME_MAX ]; /* cstr with the region name at join time (guaranteed '\0' terminated) */
       84             :     fd_shmem_private_key_t key;                       /* For easy interoperability with tmpl/fd_map.h */
      85             :   };
      86             : };
      87             : 
      88             : typedef struct fd_shmem_join_info fd_shmem_join_info_t;
      89             : 
      90             : /* A fd_shmem_joinleave_func_t is optionally used by fd_shmem_join /
      91             :    fd_shmem_leave to wrap / unwrap a shared memory region with
      92             :    additional thread group local context when it is mapped / unmapped. */
      93             : 
      94             : typedef void *
      95             : (*fd_shmem_joinleave_func_t)( void *                       context,
      96             :                               fd_shmem_join_info_t const * join_info );
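
/* Example (illustrative sketch, not part of the original header): a
   minimal custom join func.  my_obj_t and MY_OBJ_MAGIC are
   hypothetical; the func validates the mapped region and returns a
   typed handle that subsequent joins will reuse. */

typedef struct { ulong magic; /* ==MY_OBJ_MAGIC */ } my_obj_t;  /* hypothetical wrapped object */

#define MY_OBJ_MAGIC (0x0123456789abcdefUL)                     /* hypothetical */

static void *
my_obj_join_func( void *                       context,    /* user context passed to fd_shmem_join (unused here) */
                  fd_shmem_join_info_t const * join_info ) {
  (void)context;
  my_obj_t * obj = (my_obj_t *)join_info->shmem;
  if( obj->magic!=MY_OBJ_MAGIC ) return NULL;  /* not a my_obj_t region ... fail the join */
  return obj;                                  /* join handle is the typed pointer */
}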
      97             : 
       98             : /* A fd_shmem_info_t is used by various APIs to provide low level details
      99             :    of a shared memory region. */
     100             : 
     101             : struct fd_shmem_info {
     102             :   ulong page_sz;  /* page size of the region, will be a supported page size (e.g. non-zero, integer power of two) */
     103             :   ulong page_cnt; /* number of pages in the region, will be positive, page_sz*page_cnt will not overflow */
     104             : };
     105             : 
     106             : typedef struct fd_shmem_info fd_shmem_info_t;
     107             : 
     108             : FD_PROTOTYPES_BEGIN
     109             : 
     110             : /* User APIs **********************************************************/
     111             : 
     112             : /* fd_shmem_{join,leave} joins/leaves the caller to/from a named fd
     113             :    shared memory region.
     114             : 
     115             :    It is very convenient to be able to join the same region multiple
     116             :    times within a thread group.  And it is safe and reasonably efficient
     117             :    to do so (O(1) but neither lockfree nor ultra HPC).  To facilitate
     118             :    this, when a join requires mapping the region into the thread group's
     119             :    local address space (e.g. the first join to the region in the thread
     120             :    group), this will try to discover the page size that is backing the
      121             :    region (if there are multiple regions with the same name, this will try to
     122             :    join the one backed by the largest page size).  Then the region is
      123             :    mapped into the address space appropriately for the given access mode
     124             :    (FD_SHMEM_JOIN_MODE_{READ_ONLY,READ_WRITE}).  Lastly, any user
     125             :    provided fd_shmem_join_func_t is called on the mapping.
     126             : 
     127             :    A fd_shmem_join_func_t is meant to do any additional local address
     128             :    translations and what not as a one-time upfront cost on behalf of all
     129             :    subsequent joins.  It is called if the underlying shared memory needs
      130             :    to be mapped into the thread group's address space and is ignored
     131             :    otherwise.  The input to a join_func is a pointer to any user context
     132             :    (i.e. the context passed to fd_shmem_join) and a pointer to
     133             :    information about the region (lifetime is the duration of the call
     134             :    and should not be assumed to be longer).
     135             : 
     136             :    On success, a join_func returns the join that wraps the shmem (often
     137             :    just a shmem); it should be one-to-one with shmem (i.e. while a
     138             :    thread group is joined, name cstr / shmem / join uniquely identify
     139             :    the name cstr / shmem / join).  On failure, a join_func returns NULL
     140             :    (ideally without impacting thread group state while logging details
     141             :    about the failure).
     142             : 
     143             :    Pass NULL for join_func if no special handling is needed.  The join
     144             :    handle will be just a pointer to the first byte of the region's local
     145             :    mapping.
     146             : 
     147             :    All joins should be paired with a leave.
     148             : 
     149             :    On success, if opt_info is non-NULL, *opt_info will also provide
     150             :    additional details about the join (i.e. the same details one would
     151             :    get if querying the join atomically with respect to join operations
     152             :    immediately afterward).  On failure, *opt_info is ignored.
     153             : 
     154             :    fd_shmem_leave is just the inverse of this.  It can fail for a few
     155             :    reasons, including if the mmap cannot be close()'d for any reason.
      156             :    It will log extensive details if there is any wonkiness under the
     157             :    hood.  The caller may wish to proceed even if it fails.
     158             : 
     159             :    IMPORTANT!  It is safe to have join/leave functions themselves call
     160             :    fd_shmem_join/fd_shmem_leave to join additional regions as necessary.
     161             :    This allows very complex interdependent shared memory topologies to
     162             :    be constructed in a natural way.  The only restriction (beyond the
     163             :    total number of regions that can be joined) is that there can't be
     164             :    join/leave cycles (e.g. fd_shmem_join("region1") calls join_func
     165             :    region1_join("region1") which calls fd_shmem_join("region2") which
     166             :    calls join_func region2_join("region2") which calls
      167             :    fd_shmem_join("region1")).  Such cycles will be detected, logged and
     168             :    failed. */
     169             : 
     170             : void *
     171             : fd_shmem_join( char const *               name,
     172             :                int                        mode,
     173             :                fd_shmem_joinleave_func_t  join_func,
     174             :                void *                     context,
     175             :                fd_shmem_join_info_t *     opt_info );
     176             : 
     177             : int
     178             : fd_shmem_leave( void *                    join,
     179             :                 fd_shmem_joinleave_func_t leave_func,
     180             :                 void *                    context );
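
/* Example (illustrative sketch, not part of the original header): a
   plain read-write join / leave round trip with no custom join or
   leave funcs.  The region name "demo" and the helper are
   hypothetical; the region is assumed to already exist in a booted fd
   thread group, and the sketch assumes fd_shmem_join returns NULL on
   failure (consistent with the join func contract above). */

static inline void
example_join_leave( void ) {
  fd_shmem_join_info_t info[1];
  void * shmem = fd_shmem_join( "demo", FD_SHMEM_JOIN_MODE_READ_WRITE, NULL, NULL, info );
  if( !shmem ) return;                  /* join failed; details already logged */
  /* ... use info->page_sz*info->page_cnt bytes at shmem ... */
  fd_shmem_leave( shmem, NULL, NULL );  /* every join paired with a leave */
}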
     181             : 
     182             : /* FIXME: CONSIDER OPTION FOR SLIGHTLY MORE ALGO EFFICIENT LEAVE BY NAME
     183             :    VARIANT? */
     184             : 
     185             : /* fd_shmem_join_query_by_{name,join,addr} queries if the cstr pointed
     186             :    by name is already joined by the caller's thread group / the join
     187             :    handle is a valid current join handle / [addr,addr+sz-1] overlaps (at
     188             :    least partially) with a shared memory region of a current join.
     189             : 
     190             :    On success, returns 0 and, if opt_info non-NULL, *opt_info will hold
     191             :    details about the join (as observed at a point between when the call
     192             :    was made and when it returned).  On failure, returns a non-zero
     193             :    strerror friendly error code (these do not log anything so they can
      194             :    be used in situations where the query might fail in normal operation
     195             :    without being excessively chatty in the log).  Reasons for failure
     196             :    include name is not valid (EINVAL) and there is no join currently
     197             :    (ENOENT).
     198             : 
     199             :    For query_by_addr, returns ENOENT if sz is 0 (no overlap with an
     200             :    empty set) and EINVAL if the address range wraps around the end of
     201             :    address space.  If there are multiple joins overlapped by the range,
     202             :    returns 0 and, if opt_info is non-NULL, *opt_info will have details
     203             :    about one of the joins (it is undefined which join).  Note it is
     204             :    impossible for a range to overlap multiple joins when sz==1.
     205             : 
     206             :    query by name is a reasonably fast O(1).  query by join and by addr
     207             :    are theoretically O(FD_SHMEM_JOIN_MAX) but still quite fast
     208             :    practically. */
     209             : 
     210             : int
     211             : fd_shmem_join_query_by_name( char const *           name,
     212             :                              fd_shmem_join_info_t * opt_info );
     213             : 
     214             : int
     215             : fd_shmem_join_query_by_join( void const *           join,
     216             :                              fd_shmem_join_info_t * opt_info );
     217             : 
     218             : int
     219             : fd_shmem_join_query_by_addr( void const *           addr,
     220             :                              ulong                  sz,
     221             :                              fd_shmem_join_info_t * opt_info );
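
/* Example (illustrative sketch, not part of the original header):
   checking whether an arbitrary address is covered by a current join
   without generating log chatter on the common "not found" case.  The
   helper is hypothetical. */

static inline int
example_is_shmem_addr( void const * addr ) {
  fd_shmem_join_info_t info[1];
  int err = fd_shmem_join_query_by_addr( addr, 1UL, info );
  if( !err ) return 1;  /* addr is inside the join named info->name, mapped at info->shmem */
  return 0;             /* e.g. ENOENT ... addr is not covered by any current join */
}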
     222             : 
      223             : /* fd_shmem_join_anonymous treats the region pointed to by mem (which
     224             :    must be non-NULL with page_sz alignment and page_sz*page_cnt
     225             :    footprint) as a shmem join with the local join handle join, cstr name
     226             :    and mode.
     227             : 
     228             :    Other code in the thread group can fd_shmem_join( name, ... ) as
      229             :    though fd_shmem_join_anonymous had done the initial mapping and join for
     230             :    name in the thread group.  This is useful to allow memory regions
     231             :    procured out-of-band (e.g. a private anonymous mmap, interfacing with
     232             :    custom hardware that provides its own functions for getting access to
      233             :    its memory, etc) to be used as a normal join.
     234             : 
      235             :    Returns 0 on success and a non-zero strerror friendly error code on failure
     236             :    (logs details).  Reasons for failure include EINVAL: bad name (NULL /
     237             :    too short / too long / bad characters / already joined), bad join
     238             :    (NULL join / already joined), bad mem (NULL mem / unaligned mem /
     239             :    already joined), unsupported page_sz, zero page cnt, unsupported mode
      240             :    (not FD_SHMEM_JOIN_MODE_{READ_ONLY,READ_WRITE}).
     241             : 
     242             :    This will shadow any named shared memory region in the calling thread
     243             :    group (but not other thread groups).
     244             : 
     245             :    fd_shmem_leave_anonymous is just the inverse of this.  Returns 0 on
     246             :    success and a non-zero strerror friendly error code on failure (logs
     247             :    details on failure).  On success, if opt_info is non-NULL, *opt_info
     248             :    will contain details about the former join (e.g. determine details
     249             :    like the original name, mode, mem, page_sz and page_cnt of the join,
     250             :    ... opt_info->ref_cnt will be zero).  It is untouched otherwise.
     251             :    Reasons for failure include EINVAL: join is obviously not an
     252             :    anonymous join with a reference count of 1.
     253             : 
     254             :    IMPORTANT!  The join will have a ref cnt of 1 on return from
     255             :    join_anonymous.  The final leave of something joined by
      256             :    fd_shmem_join_anonymous should be done only by fd_shmem_leave_anonymous.
     257             :    Conversely, fd_shmem_leave_anonymous should only be used for the
     258             :    final leave of any anonymous join. */
     259             : 
     260             : int
     261             : fd_shmem_join_anonymous( char const * name,
     262             :                          int          mode,
     263             :                          void *       join,
     264             :                          void *       mem,
     265             :                          ulong        page_sz,
     266             :                          ulong        page_cnt );
     267             : 
     268             : int
     269             : fd_shmem_leave_anonymous( void *                 join,
     270             :                           fd_shmem_join_info_t * opt_info );
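
/* Example (illustrative sketch, not part of the original header):
   presenting page_cnt normal pages procured out-of-band (e.g. by a
   private anonymous mmap, not shown) as the named region "anon_demo"
   so other code in this thread group can fd_shmem_join it normally.
   The name and helper are hypothetical; mem itself is used as the join
   handle since no wrapping is needed. */

static inline int
example_publish_anonymous( void * mem,        /* FD_SHMEM_NORMAL_PAGE_SZ aligned, page_cnt pages in footprint */
                           ulong  page_cnt ) {
  return fd_shmem_join_anonymous( "anon_demo", FD_SHMEM_JOIN_MODE_READ_WRITE,
                                  mem, mem, FD_SHMEM_NORMAL_PAGE_SZ, page_cnt );
}

/* ... and, when this thread group is completely done with it, the
   final leave:  fd_shmem_leave_anonymous( mem, NULL ); */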
     271             : 
     272             : /* Administrative APIs ************************************************/
     273             : 
     274             : /* Numa topology API */
     275             : 
     276             : /* fd_shmem_{numa,cpu}_cnt returns the number of numa nodes / logical
      277             :    cpus configured in the system.  numa nodes are indexed in
     278             :    [0,fd_shmem_numa_cnt()) where fd_shmem_numa_cnt() is in
     279             :    [1,FD_SHMEM_NUMA_MAX] and similarly for logical cpus.  This value is
     280             :    determined at thread group boot.  cpu_cnt>=numa_cnt. */
     281             : 
     282             : FD_FN_PURE ulong fd_shmem_numa_cnt( void );
     283             : FD_FN_PURE ulong fd_shmem_cpu_cnt ( void );
     284             : 
     285             : /* fd_shmem_numa_idx returns the closest numa node to the given logical
     286             :    cpu_idx.  Given a cpu_idx in [0,fd_shmem_cpu_cnt()), returns a value
     287             :    in [0,fd_shmem_numa_cnt()).  Returns ULONG_MAX otherwise.  The cpu ->
     288             :    numa mapping is determined at thread group boot. */
     289             : 
     290             : FD_FN_PURE ulong fd_shmem_numa_idx( ulong cpu_idx );
     291             : 
     292             : /* fd_shmem_cpu_idx returns the smallest cpu_idx of a cpu close to
     293             :    numa_idx.  Given a numa_idx in [0,fd_shmem_numa_cnt()), returns a
     294             :    value in [0,fd_shmem_cpu_cnt()).  Returns ULONG_MAX otherwise.  The
     295             :    numa -> cpu mapping is determined at thread group boot. */
     296             : 
     297             : FD_FN_PURE ulong fd_shmem_cpu_idx( ulong numa_idx );
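
/* Example (illustrative sketch, not part of the original header):
   logging the boot time cpu -> numa mapping.  The helper is
   hypothetical and assumes FD_LOG_NOTICE from the included fd_log.h. */

static inline void
example_log_numa_topology( void ) {
  ulong cpu_cnt = fd_shmem_cpu_cnt();
  for( ulong cpu_idx=0UL; cpu_idx<cpu_cnt; cpu_idx++ )
    FD_LOG_NOTICE(( "cpu %lu -> numa %lu", cpu_idx, fd_shmem_numa_idx( cpu_idx ) ));
}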
     298             : 
     299             : /* fd_shmem_numa_validate returns 0 if all the pages in the page_cnt
     300             :    page_sz pages pointed to by mem are on a numa node near cpu_idx and a
     301             :    strerror friendly non-zero error code otherwise (logs details).
      302             :    Pages in mem will be queried (potentially non-atomically) at some
     303             :    point in time between when the call was made and when the call
     304             :    returns. */
     305             : 
     306             : int
     307             : fd_shmem_numa_validate( void const * mem,
     308             :                         ulong        page_sz,
     309             :                         ulong        page_cnt,
     310             :                         ulong        cpu_idx );
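
/* Example (illustrative sketch, not part of the original header):
   checking that a page_cnt huge page region at mem is actually backed
   by memory near cpu_idx.  The helper is hypothetical; returns 1 if
   near, 0 if not (details logged by fd_shmem_numa_validate). */

static inline int
example_mem_is_near_cpu( void const * mem,
                         ulong        page_cnt,
                         ulong        cpu_idx ) {
  return !fd_shmem_numa_validate( mem, FD_SHMEM_HUGE_PAGE_SZ, page_cnt, cpu_idx );
}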
     311             : 
     312             : /* Creation/destruction APIs */
     313             : 
     314             : /* fd_shmem_create_multi creates a shared memory region whose name is
     315             :    given by the cstr pointed to by name backed by page_sz pages.  The
     316             :    region will consist of sub_cnt subregions, indexed [0,sub_cnt).  Each
     317             :    subregion will have page_cnt pages near cpu_idx and the region will
     318             :    be the concatenation of these subregions in the order specified.
     319             :    mode specifies the permissions for this region (the usual POSIX open
     320             :    umask caveats apply).
     321             : 
     322             :    Returns 0 on success and an strerror friendly error code on failure
     323             :    (also logs extensive details on error).  Reasons for failure include
     324             :    name is invalid (EINVAL), page_sz is invalid (EINVAL), page_cnt is
     325             :    zero (EINVAL), cnt*page_sz overflows an off_t (EINVAL), open fails
     326             :    (errno of the open, e.g. region with the same name and page_sz in the
     327             :    thread domain already exists), ftruncate fails (errno of ftruncate,
     328             :    e.g. no suitable memory available near cpu_idx), etc.
     329             : 
     330             :    Note that each page_sz has its own namespace.  As such, names are
     331             :    unique over caller's shared memory domain for a given page_sz.  Names
     332             :    can be reused between two different page_sz (and such will correspond
     333             :    to two unrelated mappings).  Generally, it is a good idea to have
     334             :    unique names over all page_sz but this is not strictly required (the
     335             :    APIs may not work particularly well in this case though).
     336             : 
     337             :    fd_shmem_create is a simple wrapper around fd_shmem_create_multi for
     338             :    applications that just want to create a shared memory region that
     339             :    contains only 1 subregion. */
     340             : 
     341             : int                                                /* 0 on success, strerror compatible error code on failure */
     342             : fd_shmem_create_multi( char const *  name,         /* Should point to cstr with a valid name for a shared memory region */
     343             :                        ulong         page_sz,      /* Should be a FD_SHMEM_{NORMAL,HUGE,GIGANTIC}_PAGE_SZ */
     344             :                        ulong         sub_cnt,      /* Should be positive */
     345             :                        ulong const * sub_page_cnt, /* Indexed [0,sub_cnt), 0 < sum(page_cnt)*page_sz <= ULONG_MAX */
     346             :                        ulong const * sub_cpu_idx,  /* Indexed [0,sub_cnt), each should be in [0,fd_shmem_cpu_cnt()) */
     347             :                        ulong         mode );       /* E.g. 0660 for user rw, group rw, world none */
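
/* Example (illustrative sketch, not part of the original header):
   creating a gigantic page backed region named "demo" (hypothetical)
   out of two subregions, one near cpu 0 and one near cpu 1 (assumed to
   exist), with user/group read-write permissions. */

static inline int
example_create_two_numa( void ) {
  ulong sub_page_cnt[2] = { 1UL, 1UL };  /* one gigantic page per subregion */
  ulong sub_cpu_idx [2] = { 0UL, 1UL };
  return fd_shmem_create_multi( "demo", FD_SHMEM_GIGANTIC_PAGE_SZ, 2UL, sub_page_cnt, sub_cpu_idx, 0660UL );
}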
     348             : 
     349             : /* fd_shmem_update_multi updates a shared memory region created by
     350             :    fd_shmem_create_multi in place, to be as-if it was created with
     351             :    the provided parameters instead.
     352             :    
      353             :    This can be preferable to deleting and recreating the shmem region
      354             :    because it avoids having to zero all of the underlying memory.
      355             : 
      356             :    WARNING: The memory will not be zeroed and the user will be able to
      357             :    read any contents that were in the previous workspace. */
     358             : 
     359             : int
     360             : fd_shmem_update_multi( char const *  name,
     361             :                        ulong         page_sz,
     362             :                        ulong         sub_cnt,
     363             :                        ulong const * sub_page_cnt,
     364             :                        ulong const * sub_cpu_idx,
     365             :                        ulong         mode );
     366             : 
     367             : static inline int
     368             : fd_shmem_create( char const * name,
     369             :                  ulong        page_sz,
     370             :                  ulong        page_cnt,
     371             :                  ulong        cpu_idx,
     372           0 :                  ulong        mode ) {
     373           0 :   return fd_shmem_create_multi( name, page_sz, 1UL, &page_cnt, &cpu_idx, mode );
     374           0 : }
     375             : 
     376             : /* fd_shmem_unlink removes the name of the page_sz backed shared memory
     377             :    region in the thread group's shared memory domain such that it can no
     378             :    longer be mapped into a thread group's address space.  The pages used
      379             :    for that region will be freed once they are no longer in use by any
     380             :    existing thread group.
     381             : 
      382             :    Returns 0 on success and a strerror friendly error code on failure (also
     383             :    logs extensive details on error).  Reasons for failure include name
     384             :    is invalid (EINVAL), page_sz is invalid (EINVAL), unlink failed
     385             :    (error of the unlink, e.g. there is no region backed by page_sz pages
     386             :    in the thread group's shared memory domain currently with that name),
     387             :    etc. */
     388             : 
     389             : int
     390             : fd_shmem_unlink( char const * name,
     391             :                  ulong        page_sz );
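
/* Example (illustrative sketch, not part of the original header): a
   simple single subregion lifecycle ... create a huge page backed
   region named "scratch" (hypothetical) near cpu 0, let other code
   join and use it, then unlink it when no longer needed. */

static inline void
example_create_then_unlink( void ) {
  if( fd_shmem_create( "scratch", FD_SHMEM_HUGE_PAGE_SZ, 4UL, 0UL, 0600UL ) ) return;  /* create failed; details logged */
  /* ... fd_shmem_join / use / fd_shmem_leave elsewhere ... */
  fd_shmem_unlink( "scratch", FD_SHMEM_HUGE_PAGE_SZ );  /* pages freed once no longer in use */
}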
     392             : 
     393             : /* fd_shmem_info returns info about the given page_sz backed shared
      394             :    memory region in the thread group's shared memory domain.  If the
     395             :    page_sz is zero, the page size will be discovered.  If there are
     396             :    multiple regions with different page sizes but the same name, the
     397             :    region backed by the largest (non-atomic) page size will be queried.
     398             : 
     399             :    Returns 0 on success and a strerror friendly error code on failure
     400             :    (logs extensive details on error with the exception of ENOENT / there
     401             :    is no region with that name so that existence checks can be done
     402             :    without generating excessive log chatter).  Reasons for failure
     403             :    include name is invalid (EINVAL), page_sz is invalid (EINVAL), open
     404             :    failed (error of the open, e.g. there is no region), stat failed
     405             :    (error of the stat) or the mounts have been corrupted (EFAULT).
     406             : 
      407             :    On success, if opt_info is non-NULL, *opt_info will contain additional
      408             :    metadata about the region as observed at some point between when the
      409             :    call was made and when it returned.  On failure, *opt_info will not be
      410             :    touched. */
     411             : 
     412             : int
     413             : fd_shmem_info( char const *      name,
     414             :                ulong             page_sz,
     415             :                fd_shmem_info_t * opt_info );
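
/* Example (illustrative sketch, not part of the original header): a
   quiet existence / size probe for a region named "demo"
   (hypothetical), letting fd_shmem_info discover the page size.  The
   helper returns the region footprint in bytes or 0 if there is no
   such region. */

static inline ulong
example_region_footprint( void ) {
  fd_shmem_info_t info[1];
  if( fd_shmem_info( "demo", 0UL, info ) ) return 0UL;  /* e.g. ENOENT, not logged */
  return info->page_sz*info->page_cnt;                  /* will not overflow per the above */
}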
     416             : 
     417             : /* Raw page allocation */
     418             : 
     419             : /* fd_shmem_acquire_multi acquires the page_sz pages to create a memory
     420             :    region for the private use of the caller's thread group.  The region
     421             :    will consist of sub_cnt subregions, indexed [0,sub_cnt).  Each
     422             :    subregion will have page_cnt pages near cpu_idx and the region will
     423             :    be the concatenation of these subregions in the order specified.
     424             :    The lifetime of a page in the allocation is until the thread group
     425             :    terminates or the page is explicitly released.  Returns a pointer to
     426             :    the location in the local address space of the mapped pages on
     427             :    success and NULL on failure (logs details).  Reasons for failure
     428             :    include page_sz is invalid, page_cnt is zero, cnt*page_sz overflows
     429             :    an off_t, etc.
     430             : 
     431             :    fd_shmem_acquire is a simple wrapper around fd_shmem_acquire_multi
      432             :    for applications that just want to create a shared memory region
     433             :    that contains only 1 subregion. */
     434             : 
     435             : void *
     436             : fd_shmem_acquire_multi( ulong         page_sz,       /* Should be a FD_SHMEM_{NORMAL,HUGE,GIGANTIC}_PAGE_SZ */
     437             :                         ulong         sub_cnt,       /* Should be positive */
     438             :                         ulong const * sub_page_cnt,  /* Indexed [0,sub_cnt), 0 < sum(page_cnt)*page_sz <= ULONG_MAX */
     439             :                         ulong const * sub_cpu_idx ); /* Indexed [0,sub_cnt), each should be in [0,fd_shmem_cpu_cnt()) */
     440             : 
     441             : static inline void *
     442             : fd_shmem_acquire( ulong page_sz,
     443             :                   ulong page_cnt,
     444          36 :                   ulong cpu_idx ) {
     445          36 :   return fd_shmem_acquire_multi( page_sz, 1UL, &page_cnt, &cpu_idx );
     446          36 : }
     447             : 
     448             : /* fd_shmem_release releases page_cnt page_sz pages of memory allocated
     449             :    by fd_shmem_acquire.  This always succeeds from the caller's POV but
     450             :    logs details if there is any wonkiness under the hood.  It is fine to
     451             :    release subregions of individual previous acquisitions.
     452             :    
     453             :    Returns 0 if successful, -1 for any errors. */
     454             : 
     455             : int
     456             : fd_shmem_release( void * mem,
     457             :                   ulong  page_sz,
     458             :                   ulong  page_cnt );
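
/* Example (illustrative sketch, not part of the original header):
   acquiring two huge pages near cpu 0 for thread group private use and
   releasing them when done.  The helper is hypothetical. */

static inline void
example_acquire_release( void ) {
  void * mem = fd_shmem_acquire( FD_SHMEM_HUGE_PAGE_SZ, 2UL, 0UL );
  if( !mem ) return;                                     /* acquire failed; details logged */
  /* ... use 2UL*FD_SHMEM_HUGE_PAGE_SZ bytes at mem ... */
  fd_shmem_release( mem, FD_SHMEM_HUGE_PAGE_SZ, 2UL );
}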
     459             : 
     460             : /* Parsing APIs */
     461             : 
     462             : /* fd_shmem_name_len:  If name points at a cstr holding a valid name,
     463             :    returns strlen( name ) (which is guaranteed to be in
     464             :    [1,FD_SHMEM_NAME_MAX)).  Returns 0 otherwise (e.g. name is NULL, name
     465             :    is too short, name is too long, name contains characters other than
     466             :    [0-9,A-Z,a-z,'_','-','.'], name doesn't start with a [0-9,A-Z,a-z],
     467             :    etc). */
     468             : 
     469             : FD_FN_PURE ulong fd_shmem_name_len( char const * name );
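
/* For example (illustrative, not part of the original header):
   fd_shmem_name_len( "demo" )==4UL while fd_shmem_name_len( "" ),
   fd_shmem_name_len( ".demo" ) and fd_shmem_name_len( NULL ) are all
   0UL. */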
     470             : 
      471             : /* fd_shmem_is_page_sz:  Returns 1 if page_sz is a valid shmem page
      472             :    size or 0 otherwise. */
     473             : 
     474             : FD_FN_CONST static inline int
     475        4947 : fd_shmem_is_page_sz( ulong page_sz ) {
     476        4947 :   return (page_sz==FD_SHMEM_NORMAL_PAGE_SZ) | (page_sz==FD_SHMEM_HUGE_PAGE_SZ) | (page_sz==FD_SHMEM_GIGANTIC_PAGE_SZ);
     477        4947 : }
     478             : 
     479             : /* fd_cstr_to_shmem_lg_page_sz:  Convert a cstr pointed to by cstr to
     480             :    a shmem log2 page size (guaranteed to be one of
     481             :    FD_SHMEM_*_LG_PAGE_SZ) via case insensitive comparison with various
     482             :    tokens and (if none match) fd_cstr_to_int.  Returns
     483             :    FD_SHMEM_UNKNOWN_LG_PAGE_SZ (-1 ... the only negative return
     484             :    possible) if it can't figure this out. */
     485             : 
     486             : FD_FN_PURE int
     487             : fd_cstr_to_shmem_lg_page_sz( char const * cstr );
     488             : 
     489             : /* fd_shmem_lg_page_sz_to_cstr:  Return a pointer to a cstr
     490             :    corresponding to a shmem log2 page sz.  The pointer is guaranteed to
     491             :    be non-NULL with an infinite lifetime.  If lg_page_sz is not a valid
     492             :    shmem log2 page size, the cstr will be "unknown".  Otherwise, the
     493             :    returned cstr is guaranteed to be compatible with
     494             :    fd_cstr_to_shmem_lg_page_sz / fd_cstr_to_shmem_page_sz.  strlen of
      495             :    the returned result will be in [1,FD_SHMEM_PAGE_SZ_CSTR_MAX). */
     496             : 
     497             : FD_FN_CONST char const *
     498             : fd_shmem_lg_page_sz_to_cstr( int lg_page_sz );
     499             : 
     500             : /* fd_cstr_to_shmem_page_sz:  Convert a cstr pointed to by cstr to a
     501             :    shmem page size (guaranteed to be one of the FD_SHMEM_*_PAGE_SZ
      502             :    values) via case insensitive comparison with various tokens and (if
      503             :    none match) via fd_cstr_to_ulong.  Returns FD_SHMEM_UNKNOWN_PAGE_SZ
     504             :    (0UL, the only non-integral power of 2 return possible) if it can't
     505             :    figure this out. */
     506             : 
     507             : FD_FN_PURE ulong
     508             : fd_cstr_to_shmem_page_sz( char const * cstr );
     509             : 
     510             : /* fd_shmem_page_sz_to_cstr:  Return a pointer to a cstr corresponding
     511             :    to a shmem page sz.  The pointer is guaranteed to be non-NULL with an
     512             :    infinite lifetime.  If page_sz is not a valid shmem page size, the
     513             :    cstr will be "unknown".  Otherwise, the returned cstr is guaranteed
     514             :    to be compatible with fd_cstr_to_shmem_lg_page_sz /
      515             :    fd_cstr_to_shmem_page_sz.  strlen of the returned result will be in
      516             :    [1,FD_SHMEM_PAGE_SZ_CSTR_MAX). */
     517             : 
     518             : FD_FN_CONST char const *
     519             : fd_shmem_page_sz_to_cstr( ulong page_sz );
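
/* Example (illustrative sketch, not part of the original header):
   round tripping a page size through the conversion APIs above.  The
   helper is hypothetical and assumes "gigantic" is among the accepted
   tokens (consistent with FD_SHMEM_PAGE_SZ_CSTR_MAX above). */

static inline void
example_page_sz_round_trip( void ) {
  ulong        page_sz = fd_cstr_to_shmem_page_sz( "gigantic" );  /* FD_SHMEM_GIGANTIC_PAGE_SZ */
  char const * cstr    = fd_shmem_page_sz_to_cstr( page_sz );     /* "gigantic" */
  int          lg_sz   = fd_cstr_to_shmem_lg_page_sz( cstr );     /* FD_SHMEM_GIGANTIC_LG_PAGE_SZ (30) */
  (void)lg_sz;
}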
     520             : 
     521             : /* These functions are for fd_shmem internal use only. */
     522             : 
     523             : void
     524             : fd_shmem_private_boot( int *    pargc,
     525             :                        char *** pargv );
     526             : 
     527             : void
     528             : fd_shmem_private_halt( void );
     529             : 
     530             : FD_PROTOTYPES_END
     531             : 
     532             : #endif /* HEADER_fd_src_util_shmem_fd_shmem_h */
     533             : 

Generated by: LCOV version 1.14