1 : #ifndef HEADER_fd_src_util_shmem_fd_shmem_h
2 : #define HEADER_fd_src_util_shmem_fd_shmem_h
3 :
4 : /* APIs for NUMA aware and page size aware manipulation of complex
5 : interprocess shared memory topologies. This API is designed to
6 : interoperate with the fd_shmem_cfg command and control script for
7 : host configuration. fd must be booted to use the APIs in this
8 : module. */
9 :
10 : #include "../log/fd_log.h"
11 :
12 : /* FD_SHMEM_JOIN_MAX gives the maximum number of unique fd shmem regions
13 : that can be mapped concurrently into the thread group's local
14 : address space. Should be positive. Powers of two minus 1 have good
15 : Feng Shui but this is not strictly required. */
16 :
17 : #define FD_SHMEM_JOIN_MAX (255UL)
18 :
19 : /* FD_SHMEM_JOIN_MODE_* are used to specify how a memory region should
20 : be initially mapped into the thread group's local address space by
21 : fd_shmem_join. */
22 :
23 6 : #define FD_SHMEM_JOIN_MODE_READ_ONLY (0)
24 4416 : #define FD_SHMEM_JOIN_MODE_READ_WRITE (1)
25 :
26 : /* FD_SHMEM_{NUMA,CPU}_MAX give the maximum number of numa nodes and
27 : logical cpus supported by fd_shmem.
28 : FD_SHMEM_CPU_MAX>=FD_SHMEM_NUMA_MAX>0. */
29 :
30 318 : #define FD_SHMEM_NUMA_MAX (1024UL)
31 1275 : #define FD_SHMEM_CPU_MAX (1024UL)
32 :
33 : /* FD_SHMEM_{UNKNOWN,NORMAL,HUGE,GIGANTIC}_{LG_PAGE_SZ,PAGE_SZ} give the
34 : log2 page size / page size on a hosted x86 target. These are
35 : explicit to work around various compiler limitations in common use
36 : cases. */
37 :
38 0 : #define FD_SHMEM_UNKNOWN_LG_PAGE_SZ (-1)
39 3 : #define FD_SHMEM_NORMAL_LG_PAGE_SZ (12)
40 0 : #define FD_SHMEM_HUGE_LG_PAGE_SZ (21)
41 0 : #define FD_SHMEM_GIGANTIC_LG_PAGE_SZ (30)
42 :
43 27 : #define FD_SHMEM_UNKNOWN_PAGE_SZ (0UL)
44 3693 : #define FD_SHMEM_NORMAL_PAGE_SZ (4096UL)
45 3819 : #define FD_SHMEM_HUGE_PAGE_SZ (2097152UL)
46 9564 : #define FD_SHMEM_GIGANTIC_PAGE_SZ (1073741824UL)
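
/* Illustrative note: each linear page size above is just 1UL shifted
   left by the corresponding log2 page size.  E.g. a hypothetical
   compile time sanity check (FD_STATIC_ASSERT here is assumed to be
   the usual fd_util static assert helper; it is not part of this API):

     FD_STATIC_ASSERT( FD_SHMEM_NORMAL_PAGE_SZ  ==(1UL<<FD_SHMEM_NORMAL_LG_PAGE_SZ  ), normal_page_sz   );
     FD_STATIC_ASSERT( FD_SHMEM_HUGE_PAGE_SZ    ==(1UL<<FD_SHMEM_HUGE_LG_PAGE_SZ    ), huge_page_sz     );
     FD_STATIC_ASSERT( FD_SHMEM_GIGANTIC_PAGE_SZ==(1UL<<FD_SHMEM_GIGANTIC_LG_PAGE_SZ), gigantic_page_sz );
*/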
47 :
48 : /* FD_SHMEM_NAME_MAX gives the maximum number of bytes needed to hold
49 : the cstr with the name of an fd_shmem region. That is, a valid fd_shmem
50 : region name will have a strlen in [1,FD_SHMEM_NAME_MAX). (Harmonized
51 : with FD_LOG_NAME_MAX but this is not strictly required.) */
52 :
53 8646 : #define FD_SHMEM_NAME_MAX FD_LOG_NAME_MAX
54 :
55 : /* FD_SHMEM_PAGE_SZ_CSTR_MAX is the size of a buffer large enough to
56 : hold a shmem page sz cstr (==strlen("gigantic")+1). */
57 :
58 : #define FD_SHMEM_PAGE_SZ_CSTR_MAX (9UL)
59 :
60 : /* fd_shmem_private_key_t is for internal use (tmpl/fd_map
61 : interoperability). */
62 :
63 : struct fd_shmem_private_key {
64 : char cstr[ FD_SHMEM_NAME_MAX ];
65 : };
66 :
67 : typedef struct fd_shmem_private_key fd_shmem_private_key_t;
68 :
69 : /* A fd_shmem_join_info_t is used by various APIs to provide low level
70 : details about a join. */
71 :
72 : struct fd_shmem_join_info {
73 : long ref_cnt; /* Number of joins, -1L indicates a join/leave is in progress.
74 : Will be -1 while the join/leave func is executing and positive otherwise. */
75 : void * join; /* Local join handle (i.e. what join_func returned). Will be NULL in a call to the join func. */
76 : void * shmem; /* Location in the thread group local address space of name. Will be non-NULL and page_sz aligned. */
77 : ulong page_sz; /* Page size used for the region. Will be a supported page size (e.g. non-zero integer power-of-two) */
78 : ulong page_cnt; /* Number of pages in the region. Will be non-zero, page_sz*page_cnt will not overflow */
79 : int mode; /* Will be in FD_SHMEM_JOIN_MODE_{READ_ONLY,READ_WRITE}. Attempting to execute in the shmem region (or to
80 : write to it if read only) will fault the thread group. */
81 : uint hash; /* Will be (uint)fd_hash( 0UL, name, FD_SHMEM_NAME_MAX ) */
82 : union {
83 : char name[ FD_SHMEM_NAME_MAX ]; /* cstr with the region name at join time (guaranteed '\0' terminated) */
84 : fd_shmem_private_key_t key; /* For easy interoperability with tmpl/fd_map.h */
85 : };
86 : };
87 :
88 : typedef struct fd_shmem_join_info fd_shmem_join_info_t;
89 :
90 : /* A fd_shmem_joinleave_func_t is optionally used by fd_shmem_join /
91 : fd_shmem_leave to wrap / unwrap a shared memory region with
92 : additional thread group local context when it is mapped / unmapped. */
93 :
94 : typedef void *
95 : (*fd_shmem_joinleave_func_t)( void * context,
96 : fd_shmem_join_info_t const * join_info );
97 :
98 : /* A fd_shmem_info_t is used by various APIs to provide low level details
99 : of a shared memory region. */
100 :
101 : struct fd_shmem_info {
102 : ulong page_sz; /* page size of the region, will be a supported page size (e.g. non-zero, integer power of two) */
103 : ulong page_cnt; /* number of pages in the region, will be positive, page_sz*page_cnt will not overflow */
104 : };
105 :
106 : typedef struct fd_shmem_info fd_shmem_info_t;
107 :
108 : FD_PROTOTYPES_BEGIN
109 :
110 : /* User APIs **********************************************************/
111 :
112 : /* fd_shmem_{join,leave} joins/leaves the caller to/from a named fd
113 : shared memory region.
114 :
115 : It is very convenient to be able to join the same region multiple
116 : times within a thread group. And it is safe and reasonably efficient
117 : to do so (O(1) but neither lockfree nor ultra HPC). To facilitate
118 : this, when a join requires mapping the region into the thread group's
119 : local address space (e.g. the first join to the region in the thread
120 : group), this will try to discover the page size that is backing the
121 : region (if there are multiple regions with the same name, this will
122 : try to join the one backed by the largest page size). Then the region
123 : is mapped into the address space appropriately for the given access mode
124 : (FD_SHMEM_JOIN_MODE_{READ_ONLY,READ_WRITE}). Lastly, any user
125 : provided fd_shmem_join_func_t is called on the mapping.
126 :
127 : A fd_shmem_join_func_t is meant to do any additional local address
128 : translations and what not as a one-time upfront cost on behalf of all
129 : subsequent joins. It is called if the underlying shared memory needs
130 : to be mapped into the thread group's address space and ignored
131 : otherwise. The input to a join_func is a pointer to any user context
132 : (i.e. the context passed to fd_shmem_join) and a pointer to
133 : information about the region (lifetime is the duration of the call
134 : and should not be assumed to be longer).
135 :
136 : On success, a join_func returns the join that wraps the shmem (often
137 : just the shmem itself); it should be one-to-one with shmem (i.e. while a
138 : thread group is joined, name cstr / shmem / join uniquely identify
139 : the name cstr / shmem / join). On failure, a join_func returns NULL
140 : (ideally without impacting thread group state while logging details
141 : about the failure).
142 :
143 : Pass NULL for join_func if no special handling is needed. The join
144 : handle will be just a pointer to the first byte of the region's local
145 : mapping.
146 :
147 : All joins should be paired with a leave.
148 :
149 : On success, if opt_info is non-NULL, *opt_info will also provide
150 : additional details about the join (i.e. the same details one would
151 : get if querying the join atomically with respect to join operations
152 : immediately afterward). On failure, *opt_info is ignored.
153 :
154 : fd_shmem_leave is just the inverse of this. It can fail for a few
155 : reasons, e.g. if the underlying mapping cannot be closed. It will
156 : log extensive details if there is any wonkiness under the hood. The
157 : caller may wish to proceed even if it fails.
158 :
159 : IMPORTANT! It is safe to have join/leave functions themselves call
160 : fd_shmem_join/fd_shmem_leave to join additional regions as necessary.
161 : This allows very complex interdependent shared memory topologies to
162 : be constructed in a natural way. The only restriction (beyond the
163 : total number of regions that can be joined) is that there can't be
164 : join/leave cycles (e.g. fd_shmem_join("region1") calls join_func
165 : region1_join("region1") which calls fd_shmem_join("region2") which
166 : calls join_func region2_join("region2") which calls
167 : fd_shmem_join("region1")). Such cycles will be detected, logged and
168 : failed.
169 :
170 : If lock_pages is 1, the mapped region will be locked to physical DRAM
171 : when it is mapped in, ensuring that the memory pages will not be swapped
172 : out. Most callers of this function should lock their pages, unless the
173 : region is larger than the physical memory available.
174 :
175 : IMPORTANT: not locking pages can lead to unexpected behavior and
176 : performance degradation, so it is highly recommended to lock pages. */
177 :
178 : void *
179 : fd_shmem_join( char const * name,
180 : int mode,
181 : fd_shmem_joinleave_func_t join_func,
182 : void * context,
183 : fd_shmem_join_info_t * opt_info,
184 : int lock_pages );
185 :
186 : int
187 : fd_shmem_leave( void * join,
188 : fd_shmem_joinleave_func_t leave_func,
189 : void * context );
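
/* E.g. an illustrative join/leave lifecycle with no join_func (the
   region name "demo" is hypothetical and assumed to have already been
   created, e.g. via fd_shmem_cfg or fd_shmem_create below):

     fd_shmem_join_info_t info[1];
     void * shmem = fd_shmem_join( "demo", FD_SHMEM_JOIN_MODE_READ_WRITE,
                                   NULL, NULL, info, 1 );
     if( FD_UNLIKELY( !shmem ) ) FD_LOG_ERR(( "fd_shmem_join failed" )); // details already logged by join
     FD_LOG_NOTICE(( "joined %lu byte(s) at %p", info->page_sz*info->page_cnt, shmem ));
     ... use the region ...
     if( FD_UNLIKELY( fd_shmem_leave( shmem, NULL, NULL ) ) )
       FD_LOG_WARNING(( "fd_shmem_leave failed" ));
*/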
190 :
191 : /* FIXME: CONSIDER OPTION FOR SLIGHTLY MORE ALGO EFFICIENT LEAVE BY NAME
192 : VARIANT? */
193 :
194 : /* fd_shmem_join_query_by_{name,join,addr} queries if the cstr pointed
195 : to by name is already joined by the caller's thread group / the join
196 : handle is a valid current join handle / [addr,addr+sz-1] overlaps (at
197 : least partially) with a shared memory region of a current join.
198 :
199 : On success, returns 0 and, if opt_info non-NULL, *opt_info will hold
200 : details about the join (as observed at a point between when the call
201 : was made and when it returned). On failure, returns a non-zero
202 : strerror friendly error code (these do not log anything so they can
203 : be used in situations where the query might fail in normal operation
204 : without being excessively chatty in the log). Reasons for failure
205 : include name is not valid (EINVAL) and there is no join currently
206 : (ENOENT).
207 :
208 : For query_by_addr, returns ENOENT if sz is 0 (no overlap with an
209 : empty set) and EINVAL if the address range wraps around the end of
210 : address space. If there are multiple joins overlapped by the range,
211 : returns 0 and, if opt_info is non-NULL, *opt_info will have details
212 : about one of the joins (it is undefined which join). Note it is
213 : impossible for a range to overlap multiple joins when sz==1.
214 :
215 : query by name is a reasonably fast O(1). query by join and by addr
216 : are theoretically O(FD_SHMEM_JOIN_MAX) but still quite fast
217 : practically. */
218 :
219 : int
220 : fd_shmem_join_query_by_name( char const * name,
221 : fd_shmem_join_info_t * opt_info );
222 :
223 : int
224 : fd_shmem_join_query_by_join( void const * join,
225 : fd_shmem_join_info_t * opt_info );
226 :
227 : int
228 : fd_shmem_join_query_by_addr( void const * addr,
229 : ulong sz,
230 : fd_shmem_join_info_t * opt_info );
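
/* E.g. an illustrative, log quiet check of whether this thread group
   currently has a join for a (hypothetical) region named "demo":

     fd_shmem_join_info_t info[1];
     int err = fd_shmem_join_query_by_name( "demo", info );
     if( !err ) FD_LOG_NOTICE(( "demo is joined (ref_cnt %li)", info->ref_cnt ));
     else       FD_LOG_NOTICE(( "demo is not joined (err %i)",  err           ));
*/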
231 :
232 : /* fd_shmem_join_anonymous treats the region pointed to by mem (which
233 : must be non-NULL with page_sz alignment and page_sz*page_cnt
234 : footprint) as a shmem join with the local join handle join, cstr name
235 : and mode.
236 :
237 : Other code in the thread group can fd_shmem_join( name, ... ) as
238 : though fd_shmem_join_anonymous had done the mapping and join for
239 : name in the thread group. This is useful to allow memory regions
240 : procured out-of-band (e.g. a private anonymous mmap, interfacing with
241 : custom hardware that provides its own functions for getting access to
242 : its memory, etc) to be used as a normal join.
243 :
244 : Returns 0 on success and a non-zero strerror friendly error code on
245 : failure (logs details). Reasons for failure include EINVAL: bad name
246 : (NULL / too short / too long / bad characters / already joined), bad
247 : join (NULL join / already joined), bad mem (NULL mem / unaligned mem /
248 : already joined), unsupported page_sz, zero page cnt, unsupported mode
249 : (not FD_SHMEM_JOIN_MODE_{READ_ONLY,READ_WRITE}).
250 :
251 : This will shadow any named shared memory region in the calling thread
252 : group (but not other thread groups).
253 :
254 : fd_shmem_leave_anonymous is just the inverse of this. Returns 0 on
255 : success and a non-zero strerror friendly error code on failure (logs
256 : details on failure). On success, if opt_info is non-NULL, *opt_info
257 : will contain details about the former join (e.g. determine details
258 : like the original name, mode, mem, page_sz and page_cnt of the join,
259 : ... opt_info->ref_cnt will be zero). It is untouched otherwise.
260 : Reasons for failure include EINVAL: join is obviously not an
261 : anonymous join with a reference count of 1.
262 :
263 : IMPORTANT! The join will have a ref cnt of 1 on return from
264 : join_anonymous. The final leave of something joined by
265 : fd_shmem_join_anonymous should be done only by fd_shmem_leave_anonymous.
266 : Conversely, fd_shmem_leave_anonymous should only be used for the
267 : final leave of any anonymous join. */
268 :
269 : int
270 : fd_shmem_join_anonymous( char const * name,
271 : int mode,
272 : void * join,
273 : void * mem,
274 : ulong page_sz,
275 : ulong page_cnt );
276 :
277 : int
278 : fd_shmem_leave_anonymous( void * join,
279 : fd_shmem_join_info_t * opt_info );
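
/* E.g. an illustrative sketch that publishes a privately acquired
   region (see fd_shmem_acquire below) to the rest of the thread group
   under the hypothetical name "scratch":

     ulong  page_sz  = FD_SHMEM_HUGE_PAGE_SZ;
     ulong  page_cnt = 4UL;
     void * mem      = fd_shmem_acquire( page_sz, page_cnt, 0UL );   // pages near cpu 0
     if( FD_UNLIKELY( !mem ) ) FD_LOG_ERR(( "fd_shmem_acquire failed" ));
     if( FD_UNLIKELY( fd_shmem_join_anonymous( "scratch", FD_SHMEM_JOIN_MODE_READ_WRITE,
                                               mem, mem, page_sz, page_cnt ) ) )
       FD_LOG_ERR(( "fd_shmem_join_anonymous failed" ));
     ... other code in this thread group can now fd_shmem_join( "scratch", ... ) ...
     if( FD_UNLIKELY( fd_shmem_leave_anonymous( mem, NULL ) ) )
       FD_LOG_WARNING(( "fd_shmem_leave_anonymous failed" ));
     fd_shmem_release( mem, page_sz, page_cnt );
*/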
280 :
281 : /* Administrative APIs ************************************************/
282 :
283 : /* Numa topology API */
284 :
285 : /* fd_shmem_{numa,cpu}_cnt returns the number of numa nodes / logical
286 : cpus configured in the system. numa nodes are indexed in
287 : [0,fd_shmem_numa_cnt()) where fd_shmem_numa_cnt() is in
288 : [1,FD_SHMEM_NUMA_MAX] and similarly for logical cpus. This value is
289 : determined at thread group boot. cpu_cnt>=numa_cnt. */
290 :
291 : FD_FN_PURE ulong fd_shmem_numa_cnt( void );
292 : FD_FN_PURE ulong fd_shmem_cpu_cnt ( void );
293 :
294 : /* fd_shmem_numa_idx returns the closest numa node to the given logical
295 : cpu_idx. Given a cpu_idx in [0,fd_shmem_cpu_cnt()), returns a value
296 : in [0,fd_shmem_numa_cnt()). Returns ULONG_MAX otherwise. The cpu ->
297 : numa mapping is determined at thread group boot. */
298 :
299 : FD_FN_PURE ulong fd_shmem_numa_idx( ulong cpu_idx );
300 :
301 : /* fd_shmem_cpu_idx returns the smallest cpu_idx of a cpu close to
302 : numa_idx. Given a numa_idx in [0,fd_shmem_numa_cnt()), returns a
303 : value in [0,fd_shmem_cpu_cnt()). Returns ULONG_MAX otherwise. The
304 : numa -> cpu mapping is determined at thread group boot. */
305 :
306 : FD_FN_PURE ulong fd_shmem_cpu_idx( ulong numa_idx );
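
/* E.g. an illustrative sketch that logs the boot time cpu -> numa
   mapping:

     ulong cpu_cnt = fd_shmem_cpu_cnt();
     for( ulong cpu_idx=0UL; cpu_idx<cpu_cnt; cpu_idx++ )
       FD_LOG_NOTICE(( "cpu %lu -> numa %lu", cpu_idx, fd_shmem_numa_idx( cpu_idx ) ));
*/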
307 :
308 : /* fd_shmem_numa_validate returns 0 if all the pages in the page_cnt
309 : page_sz pages pointed to by mem are on a numa node near cpu_idx and a
310 : strerror friendly non-zero error code otherwise (logs details).
311 : Pages in mem will be queried (potentially non-atomically) at some
312 : point in time between when the call was made and when the call
313 : returned. */
314 :
315 : int
316 : fd_shmem_numa_validate( void const * mem,
317 : ulong page_sz,
318 : ulong page_cnt,
319 : ulong cpu_idx );
320 :
321 : /* Creation/destruction APIs */
322 :
323 : /* fd_shmem_create_multi creates a shared memory region whose name is
324 : given by the cstr pointed to by name backed by page_sz pages. The
325 : region will consist of sub_cnt subregions, indexed [0,sub_cnt). Each
326 : subregion will have page_cnt pages near cpu_idx and the region will
327 : be the concatenation of these subregions in the order specified.
328 : mode specifies the permissions for this region (the usual POSIX open
329 : umask caveats apply).
330 :
331 : Returns 0 on success and an strerror friendly error code on failure
332 : (also logs extensive details on error). Reasons for failure include
333 : name is invalid (EINVAL), page_sz is invalid (EINVAL), page_cnt is
334 : zero (EINVAL), cnt*page_sz overflows an off_t (EINVAL), open fails
335 : (errno of the open, e.g. region with the same name and page_sz in the
336 : thread domain already exists), ftruncate fails (errno of ftruncate,
337 : e.g. no suitable memory available near cpu_idx), etc.
338 :
339 : Note that each page_sz has its own namespace. As such, names are
340 : unique over caller's shared memory domain for a given page_sz. Names
341 : can be reused between two different page_sz (and such will correspond
342 : to two unrelated mappings). Generally, it is a good idea to have
343 : unique names over all page_sz but this is not strictly required (the
344 : APIs may not work particularly well in this case though).
345 :
346 : fd_shmem_create is a simple wrapper around fd_shmem_create_multi for
347 : applications that just want to create a shared memory region that
348 : contains only 1 subregion. */
349 :
350 : int /* 0 on success, strerror compatible error code on failure */
351 : fd_shmem_create_multi( char const * name, /* Should point to cstr with a valid name for a shared memory region */
352 : ulong page_sz, /* Should be a FD_SHMEM_{NORMAL,HUGE,GIGANTIC}_PAGE_SZ */
353 : ulong sub_cnt, /* Should be positive */
354 : ulong const * sub_page_cnt, /* Indexed [0,sub_cnt), 0 < sum(page_cnt)*page_sz <= ULONG_MAX */
355 : ulong const * sub_cpu_idx, /* Indexed [0,sub_cnt), each should be in [0,fd_shmem_cpu_cnt()) */
356 : ulong mode ); /* E.g. 0660 for user rw, group rw, world none */
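
/* E.g. an illustrative sketch that creates a gigantic page backed
   region with one page near cpu 0 and one page near cpu 1 (the name
   "demo" and the 0660 permissions are arbitrary for the example):

     ulong sub_page_cnt[2] = { 1UL, 1UL };
     ulong sub_cpu_idx [2] = { 0UL, 1UL };
     int   err = fd_shmem_create_multi( "demo", FD_SHMEM_GIGANTIC_PAGE_SZ, 2UL,
                                        sub_page_cnt, sub_cpu_idx, 0660UL );
     if( FD_UNLIKELY( err ) ) FD_LOG_ERR(( "fd_shmem_create_multi failed (%i)", err ));
*/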
357 :
358 : /* fd_shmem_create_multi_unlocked creates a shared memory region whose
359 : name is given by the cstr pointed to by name backed by page_sz pages.
360 : It functions the same as fd_shmem_create_multi, but the pages are not
361 : locked, not pinned to any particular numa node, and have the default numa
362 : mempolicy.
363 :
364 : mode specifies the permissions for this region (the usual POSIX open
365 : umask caveats apply).
366 :
367 : Returns 0 on success and an strerror friendly error code on failure
368 : (also logs extensive details on error). Reasons for failure include
369 : name is invalid (EINVAL), page_sz is invalid (EINVAL), page_cnt is
370 : zero (EINVAL), cnt*page_sz overflows an off_t (EINVAL), open fails
371 : (errno of the open, e.g. region with the same name and page_sz in the
372 : thread domain already exists), ftruncate fails (errno of ftruncate,
373 : e.g. no suitable memory available near cpu_idx), etc.
374 : */
375 :
376 : int
377 : fd_shmem_create_multi_unlocked( char const * name,
378 : ulong page_sz,
379 : ulong page_cnt,
380 : ulong mode );
381 :
382 : /* fd_shmem_update_multi updates a shared memory region created by
383 : fd_shmem_create_multi in place, to be as-if it was created with
384 : the provided parameters instead.
385 :
386 : This can be preferable to deleting and recreating the shmem region
387 : because it prevents needing to zero all of the underlying memory.
388 :
389 : WARNING: The memory returned will not be zeroed and the user will
390 : be able to read any contents that were in the previous workspace. */
391 :
392 : int
393 : fd_shmem_update_multi( char const * name,
394 : ulong page_sz,
395 : ulong sub_cnt,
396 : ulong const * sub_page_cnt,
397 : ulong const * sub_cpu_idx,
398 : ulong mode );
399 :
400 : static inline int
401 : fd_shmem_create( char const * name,
402 : ulong page_sz,
403 : ulong page_cnt,
404 : ulong cpu_idx,
405 0 : ulong mode ) {
406 0 : return fd_shmem_create_multi( name, page_sz, 1UL, &page_cnt, &cpu_idx, mode );
407 0 : }
408 :
409 : /* fd_shmem_unlink removes the name of the page_sz backed shared memory
410 : region in the thread group's shared memory domain such that it can no
411 : longer be mapped into a thread group's address space. The pages used
412 : for that region will be freed once they are no longer in use by any
413 : existing thread group.
414 :
415 : Returns 0 on success and a strerror friendly error code on failure (also
416 : logs extensive details on error). Reasons for failure include name
417 : is invalid (EINVAL), page_sz is invalid (EINVAL), unlink failed
418 : (error of the unlink, e.g. there is no region backed by page_sz pages
419 : in the thread group's shared memory domain currently with that name),
420 : etc. */
421 :
422 : int
423 : fd_shmem_unlink( char const * name,
424 : ulong page_sz );
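
/* E.g. an illustrative sketch that removes the name created in the
   fd_shmem_create_multi example above (pages stay mapped in any thread
   groups that are still joined until they leave):

     int err = fd_shmem_unlink( "demo", FD_SHMEM_GIGANTIC_PAGE_SZ );
     if( FD_UNLIKELY( err ) ) FD_LOG_WARNING(( "fd_shmem_unlink failed (%i)", err ));
*/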
425 :
426 : /* fd_shmem_info returns info about the given page_sz backed shared
427 : memory region in the thread group's shared memory domain. If the
428 : page_sz is zero, the page size will be discovered. If there are
429 : multiple regions with different page sizes but the same name, the
430 : region backed by the largest (non-atomic) page size will be queried.
431 :
432 : Returns 0 on success and a strerror friendly error code on failure
433 : (logs extensive details on error with the exception of ENOENT / there
434 : is no region with that name so that existence checks can be done
435 : without generating excessive log chatter). Reasons for failure
436 : include name is invalid (EINVAL), page_sz is invalid (EINVAL), open
437 : failed (error of the open, e.g. there is no region), stat failed
438 : (error of the stat) or the mounts have been corrupted (EFAULT).
439 :
440 : On success, if opt_info is non-NULL, *opt_info will contain additional
441 : metadata about the region as observed at some point between when the
442 : call was made and when it returned. On failure, *opt_info will not be
443 : touched. */
444 :
445 : int
446 : fd_shmem_info( char const * name,
447 : ulong page_sz,
448 : fd_shmem_info_t * opt_info );
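
/* E.g. an illustrative, log quiet existence check for a hypothetical
   region named "demo" (ENOENT is from <errno.h>):

     fd_shmem_info_t info[1];
     int err = fd_shmem_info( "demo", 0UL, info );  // 0UL ==> discover the page size
     if(      !err        ) FD_LOG_NOTICE(( "demo: %lu page(s) of %lu byte(s)", info->page_cnt, info->page_sz ));
     else if( err==ENOENT ) FD_LOG_NOTICE(( "demo does not exist" ));
     else                   FD_LOG_WARNING(( "fd_shmem_info failed (%i)", err ));
*/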
449 :
450 : /* Raw page allocation */
451 :
452 : /* fd_shmem_acquire_multi acquires the page_sz pages to create a memory
453 : region for the private use of the caller's thread group. The region
454 : will consist of sub_cnt subregions, indexed [0,sub_cnt). Each
455 : subregion will have page_cnt pages near cpu_idx and the region will
456 : be the concatenation of these subregions in the order specified.
457 : The lifetime of a page in the allocation is until the thread group
458 : terminates or the page is explicitly released. Returns a pointer to
459 : the location in the local address space of the mapped pages on
460 : success and NULL on failure (logs details). Reasons for failure
461 : include page_sz is invalid, page_cnt is zero, cnt*page_sz overflows
462 : an off_t, etc.
463 :
464 : fd_shmem_acquire is a simple wrapper around fd_shmem_acquire_multi
465 : for applications that just want to create a shared memory region
466 : that contains only 1 subregion. */
467 :
468 : void *
469 : fd_shmem_acquire_multi( ulong page_sz, /* Should be a FD_SHMEM_{NORMAL,HUGE,GIGANTIC}_PAGE_SZ */
470 : ulong sub_cnt, /* Should be positive */
471 : ulong const * sub_page_cnt, /* Indexed [0,sub_cnt), 0 < sum(page_cnt)*page_sz <= ULONG_MAX */
472 : ulong const * sub_cpu_idx ); /* Indexed [0,sub_cnt), each should be in [0,fd_shmem_cpu_cnt()) */
473 :
474 : static inline void *
475 : fd_shmem_acquire( ulong page_sz,
476 : ulong page_cnt,
477 36 : ulong cpu_idx ) {
478 36 : return fd_shmem_acquire_multi( page_sz, 1UL, &page_cnt, &cpu_idx );
479 36 : }
480 :
481 : /* fd_shmem_release releases page_cnt page_sz pages of memory allocated
482 : by fd_shmem_acquire. This always succeeds from the caller's POV but
483 : logs details if there is any wonkiness under the hood. It is fine to
484 : release subregions of individual previous acquisitions.
485 :
486 : Returns 0 if successful, -1 for any errors. */
487 :
488 : int
489 : fd_shmem_release( void * mem,
490 : ulong page_sz,
491 : ulong page_cnt );
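
/* E.g. an illustrative sketch showing that a single acquisition can be
   released piecemeal (page size, count and cpu choice are arbitrary):

     ulong  page_sz = FD_SHMEM_NORMAL_PAGE_SZ;
     void * mem     = fd_shmem_acquire( page_sz, 4UL, 0UL );
     if( FD_UNLIKELY( !mem ) ) FD_LOG_ERR(( "fd_shmem_acquire failed" ));
     fd_shmem_release( mem,                         page_sz, 2UL ); // release the first two pages
     fd_shmem_release( ((char *)mem) + 2UL*page_sz, page_sz, 2UL ); // release the remaining two
*/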
492 :
493 : /* Parsing APIs */
494 :
495 : /* fd_shmem_name_len: If name points at a cstr holding a valid name,
496 : returns strlen( name ) (which is guaranteed to be in
497 : [1,FD_SHMEM_NAME_MAX)). Returns 0 otherwise (e.g. name is NULL, name
498 : is too short, name is too long, name contains characters other than
499 : [0-9,A-Z,a-z,'_','-','.'], name doesn't start with a [0-9,A-Z,a-z],
500 : etc). */
501 :
502 : FD_FN_PURE ulong fd_shmem_name_len( char const * name );
503 :
504 : /* fd_shmem_is_page_sz: Returns 1 if page_sz is a valid page size
505 : or 0 otherwise. */
506 :
507 : FD_FN_CONST static inline int
508 3171 : fd_shmem_is_page_sz( ulong page_sz ) {
509 3171 : return (page_sz==FD_SHMEM_NORMAL_PAGE_SZ) | (page_sz==FD_SHMEM_HUGE_PAGE_SZ) | (page_sz==FD_SHMEM_GIGANTIC_PAGE_SZ);
510 3171 : }
511 :
512 : /* fd_cstr_to_shmem_lg_page_sz: Convert a cstr pointed to by cstr to
513 : a shmem log2 page size (guaranteed to be one of
514 : FD_SHMEM_*_LG_PAGE_SZ) via case insensitive comparison with various
515 : tokens and (if none match) fd_cstr_to_int. Returns
516 : FD_SHMEM_UNKNOWN_LG_PAGE_SZ (-1 ... the only negative return
517 : possible) if it can't figure this out. */
518 :
519 : FD_FN_PURE int
520 : fd_cstr_to_shmem_lg_page_sz( char const * cstr );
521 :
522 : /* fd_shmem_lg_page_sz_to_cstr: Return a pointer to a cstr
523 : corresponding to a shmem log2 page sz. The pointer is guaranteed to
524 : be non-NULL with an infinite lifetime. If lg_page_sz is not a valid
525 : shmem log2 page size, the cstr will be "unknown". Otherwise, the
526 : returned cstr is guaranteed to be compatible with
527 : fd_cstr_to_shmem_lg_page_sz / fd_cstr_to_shmem_page_sz. strlen of
528 : the returned result will be in [1,FD_SHMEM_PAGE_SZ_CSTR_MAX). */
529 :
530 : FD_FN_CONST char const *
531 : fd_shmem_lg_page_sz_to_cstr( int lg_page_sz );
532 :
533 : /* fd_cstr_to_shmem_page_sz: Convert a cstr pointed to by cstr to a
534 : shmem page size (guaranteed to be one of the FD_SHMEM_*_PAGE_SZ
535 : values) via case insensitive comparison with various tokens and (if
536 : none match) via fd_cstr_to_ulong. Returns FD_SHMEM_UNKNOWN_PAGE_SZ
537 : (0UL, the only non-integral power of 2 return possible) if it can't
538 : figure this out. */
539 :
540 : FD_FN_PURE ulong
541 : fd_cstr_to_shmem_page_sz( char const * cstr );
542 :
543 : /* fd_shmem_page_sz_to_cstr: Return a pointer to a cstr corresponding
544 : to a shmem page sz. The pointer is guaranteed to be non-NULL with an
545 : infinite lifetime. If page_sz is not a valid shmem page size, the
546 : cstr will be "unknown". Otherwise, the returned cstr is guaranteed
547 : to be compatible with fd_cstr_to_shmem_lg_page_sz /
548 : fd_cstr_to_shmem_page_sz. strlen of the returned result will be in
549 : [1,FD_SHMEM_PAGE_SZ_CSTR_MAX). */
550 :
551 : FD_FN_CONST char const *
552 : fd_shmem_page_sz_to_cstr( ulong page_sz );
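
/* E.g. some illustrative conversions (the token spellings "normal",
   "huge" and "gigantic" are assumed here; numeric strings also parse):

     fd_cstr_to_shmem_page_sz   ( "gigantic" );              // expected FD_SHMEM_GIGANTIC_PAGE_SZ
     fd_cstr_to_shmem_page_sz   ( "2097152"  );              // expected FD_SHMEM_HUGE_PAGE_SZ
     fd_cstr_to_shmem_page_sz   ( "bogus"    );              // expected FD_SHMEM_UNKNOWN_PAGE_SZ (0UL)
     fd_cstr_to_shmem_lg_page_sz( "huge"     );              // expected FD_SHMEM_HUGE_LG_PAGE_SZ (21)
     fd_shmem_page_sz_to_cstr   ( FD_SHMEM_NORMAL_PAGE_SZ ); // expected "normal"
*/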
553 :
554 : /* fd_shmem_iter_begin returns a pointer to the first join info in the
555 : process. Returns NULL if there are no joins. */
556 :
557 : fd_shmem_join_info_t const * fd_shmem_iter_begin( void );
558 :
559 : /* fd_shmem_iter_next returns the next join info in the process.
560 : Returns NULL if there are no more joins. */
561 :
562 : fd_shmem_join_info_t const * fd_shmem_iter_next( fd_shmem_join_info_t const * iter );
563 :
564 0 : static inline int fd_shmem_iter_done( fd_shmem_join_info_t const * iter ) {
565 : return (iter == NULL);
566 0 : }
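
/* E.g. an illustrative sketch that logs every region currently joined
   by this thread group:

     for( fd_shmem_join_info_t const * ji = fd_shmem_iter_begin();
          !fd_shmem_iter_done( ji );
          ji = fd_shmem_iter_next( ji ) )
       FD_LOG_NOTICE(( "%s: %lu page(s) of %lu byte(s) at %p",
                       ji->name, ji->page_cnt, ji->page_sz, ji->shmem ));
*/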
567 :
568 : /* These functions are for fd_shmem internal use only. */
569 :
570 : void
571 : fd_shmem_private_boot( int * pargc,
572 : char *** pargv );
573 :
574 : void
575 : fd_shmem_private_halt( void );
576 :
577 : FD_PROTOTYPES_END
578 :
579 : #endif /* HEADER_fd_src_util_shmem_fd_shmem_h */
580 :