1 : /* Generate prototypes, inlines and/or implementations for concurrent
2 : persistent shared element pools. A pool can hold a practically
3 : unbounded number of elements. Acquiring an element from and
4 : releasing an element to a pool are typically fast O(1) time.
5 : Requires small O(1) space per element.
6 :
7 : The current implementation is based on a lockfree stack. Acquire and
8 : release are done via atomic compare-and-swap of the stack top. As
9 : such, concurrent usage requires FD_HAS_ATOMIC support (this can still
10 : be used on platforms without FD_HAS_ATOMIC but it will not be safe
11 : for concurrent usage). Stack top versioning is used to handle ABA.
12 : Versioning has been tweaked to support locked pool operations like
13 : initialization (and thus this can also be used without changes as a
14 : more conventional spin lock based concurrent stack). Unsurprisingly,
15 : the current implementation is equally usable as a concurrent element
16 : stack (though the implementation may be changed in the future to
17 : better support ultra high contention ultra high concurrency ala
18 : fd_alloc).
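
   As an illustrative (non-normative) sketch, with the default
   POOL_IDX_WIDTH of 43 the versioned stack top packs as:

     bits [ 0,43)  element store index of the current stack top (or
                   the null index if the free stack is empty)
     bits [43,64)  version number; its least significant bit is set
                   while the pool is locked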
19 :
20 : The current implementation is optimized for pools with a moderate
21 : number of reasonably localized users (e.g. a handful of cores and
22 : memory on the same NUMA node). Various operations are slightly
23 : faster when the size of a pool element is an integer power of 2.
24 : Operations do much internal integrity checking / bounds checking for
25 : use in high reliability / high security environments.
26 :
27 : This API is designed for tight and flexible composition with treaps,
28 : heaps, lists, maps, etc. Further, a pool can be persisted beyond the
29 : lifetime of the creating process, be used inter-process, be relocated
30 : in memory, be naively serialized/deserialized, be moved between
31 : hosts, use index compression for cache and memory bandwidth
32 : efficiency, etc.
33 :
34 : Typical usage:
35 :
36 : struct myele {
37 : ulong next; // Technically "POOL_IDX_T POOL_NEXT" (default is ulong next), managed by the mypool when in the mypool
38 :
39 : ... next can be located arbitrarily in the element and can be
40 : ... reused for other purposes when the element is not in a
41 : ... mypool. elements are all located in a linear array element
42 : ... store whose lifetime is at least that of the mypool.
43 :
44 : };
45 :
46 : typedef struct myele myele_t;
47 :
48 : #define POOL_NAME mypool
49 : #define POOL_ELE_T myele_t
50 : #include "tmpl/fd_pool_para.c"
51 :
52 : will declare the following APIs as a header only style library in the
53 : compilation unit:
54 :
55 : // A mypool_t is a stack declaration friendly quasi-opaque handle
56 : // used to describe a join to a mypool. E.g. it is fine to do
57 : // "mypool_t join[1];" to allocate a mypool_t but the contents
58 : // should not be used directly.
59 :
60 : typedef struct mypool_private mypool_t;
61 :
62 : // Constructors
63 :
64 : // mypool_ele_max_max returns the maximum element store capacity
65 : // compatible with a mypool.
66 :
67 : ulong mypool_ele_max_max( void );
68 :
69 : // mypool_{align,footprint} return the alignment and footprint
70 : // needed for a memory region to be used as a mypool. align will
71 : // be an integer power-of-two and footprint will be a multiple of
72 : // align.
73 : //
74 : // mypool_new formats a memory region with the appropriate
75 : // alignment and footprint into a mypool. shmem points in the
76 : // caller's address space to the memory region to format. Returns
77 : // shmem on success (mypool has ownership of the memory region) and
78 : // NULL on failure (no changes, logs details). Caller is not
79 : // joined on return. The mypool will be empty and unlocked.
80 : //
81 : // mypool_join joins a mypool. ljoin points to a mypool_t
82 : // compatible memory region in the caller's address space used to
83 : // hold info about the local join, shpool points in the caller's
84 : // address space to the memory region containing the mypool, shele
85 : // points in the caller's address space to mypool's element store,
86 : // and ele_max is the element store's capacity. Returns a handle
87 : // to the caller's local join on success (join has ownership of the
88 : // ljoin region) and NULL on failure (no changes, logs details).
89 : //
90 : // mypool_leave leaves a mypool. join points to a current local
91 : // join. Returns the memory used for the local join (caller has
92 : // ownership on return and caller is no longer joined) on success
93 : // and NULL on failure (no changes, logs details). Use the join
94 : // accessors before leaving to get shpool, shele and ele_max used
95 : // by the join if needed.
96 : //
97 : // mypool_delete unformats a memory region used as a mypool.
98 : // Assumes shpool points in the caller's address space to the
99 : // memory region containing the mypool and that there are no
100 : // current joins globally. Returns shpool on success (caller has
101 : // ownership of the memory region and any elements still in the
102 : // mypool are acquired by the caller) and NULL on failure (no
103 : // changes, logs details).
104 :
105 : ulong mypool_align ( void );
106 : ulong mypool_footprint( void );
107 : void * mypool_new ( void * shmem );
108 : mypool_t * mypool_join ( void * ljoin, void * shpool, void * shele, ulong ele_max );
109 : void * mypool_leave ( mypool_t * join );
110 : void * mypool_delete ( void * shpool );
111 :
112 : // mypool_{shpool,shele,ele_max} return join details. Assumes join
113 : // is a current local join. mypool_{shpool_const,shele_const} are
114 : // const correct versions. The lifetime of the returned pointers
115 : // is the lifetime of the join.
116 :
117 : void const * mypool_shpool_const( mypool_t const * join );
118 : void const * mypool_shele_const ( mypool_t const * join );
119 : ulong mypool_ele_max ( mypool_t const * join );
120 :
121 : void * mypool_shpool( mypool_t * join );
122 : void * mypool_shele ( mypool_t * join );
123 :
124 : // mypool_idx_null returns the element store index used to
125 : // represent null for a mypool.
126 : //
127 : // mypool_idx_is_null returns 1 if an element store index is the
128 : // null index value and 0 otherwise.
129 : //
130 : // mypool_idx returns the element store index for the element
131 : // pointed to by ele in the caller's address space. Assumes join
132 : // is a current local join. If ele is NULL or does not point
133 : // into the element store, returns the element store null index.
134 : //
135 : // mypool_ele returns a pointer in the caller's address space to
136 : // the element whose element store index is ele_idx. If ele_idx is
137 : // the null value or invalid, returns NULL. mypool_ele_const is a
138 : // const correct version.
139 : //
140 : // These are usually not needed but allow translating pointers to
141 : // element store elements from one address space to another.
142 :
143 : ulong mypool_idx_null ( void );
144 : int mypool_idx_is_null( ulong idx );
145 : ulong mypool_idx ( mypool_t const * join, myele_t const * ele );
146 :
147 : myele_t const * mypool_ele_const( mypool_t const * join, ulong ele_idx );
148 : myele_t * mypool_ele ( mypool_t * join, ulong ele_idx );
149 :
150 : // mypool_peek returns a pointer in the local address space to the
151 : // next element to acquire from the mypool or NULL if the mypool
152 : // was empty at some point during the call. mypool_peek_const is a
153 : // const correct version. Because of concurrent operations, unless
154 : // the caller is holding a lock on the mypool, this may not be the
155 : // actual element the caller will acquire next from the mypool.
156 :
157 : myele_t const * mypool_peek_const( mypool_t const * join );
158 : myele_t * mypool_peek ( mypool_t * join );
159 :
160 : // mypool_acquire acquires an element from a mypool. Assumes join
161 : // is a current local join. If the mypool is empty or an error
162 : // occurs, returns sentinel (arbitrary). A non-zero / zero value
163 : // for blocking indicates locked operations on the mypool are / are
164 : // not allowed to block the caller. If _opt_err is not NULL, on
165 : // return, *_opt_err will indicate FD_POOL_SUCCESS (zero) or an
166 : // FD_POOL_ERR code (negative). On success, the returned value
167 : // will be a pointer in the caller's address space to the element
168 : // store element acquired from the mypool. On failure for any
169 : // reason, the value returned will be sentinel and the mypool will
170 : // be unchanged. Reasons for failure:
171 : //
172 : // FD_POOL_ERR_EMPTY: the mypool contained no elements at some
173 : // point during the call.
174 : //
175 : // FD_POOL_ERR_AGAIN: the mypool was locked at some point during
176 : // the call. Never returned for a blocking call (but locking
177 : // operations can then potentially block the caller indefinitely).
178 : //
179 : // FD_POOL_ERR_CORRUPT: memory corruption was detected during the
180 : // call.
181 :
182 : myele_t * mypool_acquire( mypool_t * join, myele_t * sentinel, int blocking, int * _opt_err );
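
     // E.g. a non-blocking acquire attempt using a NULL sentinel (an
     // illustrative sketch):
     //
     //   int       err;
     //   myele_t * ele = mypool_acquire( join, NULL, 0 /* non-blocking */, &err );
     //   if     ( ele                    ) { ... acquired ele ...           }
     //   else if( err==FD_POOL_ERR_EMPTY ) { ... nothing to acquire ...     }
     //   else if( err==FD_POOL_ERR_AGAIN ) { ... locked, try again later ...}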
183 :
184 : // mypool_release releases an element to a mypool. Assumes join
185 : // is a current local join, ele is a pointer in the caller's
186 : // address space to the element, and the element is currently not
187 : // in the mypool. Returns FD_POOL_SUCCESS (zero) on success (the
188 : // element will be in the mypool on return) and an FD_POOL_ERR code
189 : // (negative) on failure (the element will not be in the mypool on
190 : // return). Reasons for failure:
191 : //
192 : // FD_POOL_ERR_INVAL: ele does not point to an element in mypool's
193 : // element store.
194 : //
195 : // FD_POOL_ERR_AGAIN: the mypool was locked at some point during
196 : // the call. Never returned for a blocking call (but locking
197 : // operations can then potentially block the caller indefinitely).
198 : //
199 : // FD_POOL_ERR_CORRUPT: memory corruption was detected during the
200 : // call.
201 :
202 : int mypool_release( mypool_t * join, myele_t * ele, int blocking );
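
     // mypool_is_empty returns 1 if the mypool contained no elements
     // at some point during the call and 0 otherwise. Assumes join
     // is a current local join.

     int mypool_is_empty( mypool_t * join );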
203 :
204 : // mypool_is_locked returns whether or not a mypool is locked.
205 : // Assumes join is a current local join.
206 :
207 : int mypool_is_locked( mypool_t const * join );
208 :
209 : // mypool_lock will lock a mypool (e.g. pausing concurrent acquire
210 : // / release operations). A non-zero / zero value for blocking
211 : // indicates the call should / should not wait to lock the mypool
212 : // if it is currently locked. Returns FD_POOL_SUCCESS on success
213 : // (caller has the lock on return) and FD_POOL_ERR_AGAIN on failure
214 : // (pool was already locked at some point during the call). AGAIN
215 : // is never returned if blocking is requested. Assumes join is a
216 : // current local join.
217 :
218 : int mypool_lock( mypool_t * join, int blocking );
219 :
220 : // mypool_unlock will unlock a mypool (e.g. resuming concurrent
221 : // acquire / release operations). Assumes join is a current local
222 : // join and the caller has a lock on mypool. Guaranteed to
223 : // succeed.
224 :
225 : void mypool_unlock( mypool_t * join );
226 :
227 : // mypool_reset resets the mypool. On return, it will hold all but
228 : // the leading sentinel_cnt elements in the element store (e.g.
229 : // initialization after creation) in ascending order. If
230 : // sentinel_cnt is greater than or equal to the element store
231 : // capacity, the mypool will be empty on return. Thus, on return,
232 : // if sentinel_cnt is zero, every element in the element store will
233 : // be in the mypool and, if sentinel_cnt is ele_max or greater
234 : // (e.g. ULONG_MAX), every element will be removed from the mypool.
235 : // Assumes join is a current local join and the mypool is locked or
236 : // otherwise idle.
237 :
238 : void mypool_reset( mypool_t * join, ulong sentinel_cnt );
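
     // E.g. to safely re-initialize a mypool that might be in
     // concurrent use (an illustrative sketch):
     //
     //   mypool_lock  ( join, 1 /* blocking */ );
     //   mypool_reset ( join, 0UL );  // put every element back in the mypool
     //   mypool_unlock( join );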
239 :
240 : // mypool_verify returns FD_POOL_SUCCESS if join appears to be
241 : // a current local join to a valid mypool and FD_POOL_ERR_CORRUPT
242 : // otherwise (logs details). Assumes join is a current local join
243 : // and the mypool is locked or otherwise idle.
244 :
245 : int mypool_verify( mypool_t const * join );
246 :
247 : // mypool_strerror converts an FD_POOL_SUCCESS / FD_POOL_ERR code
248 : // into a human readable cstr. The lifetime of the returned
249 : // pointer is infinite. The returned pointer is always to a
250 : // non-NULL cstr.
251 :
252 : char const * mypool_strerror( int err );
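
     // For example, a minimal single threaded setup, use and teardown
     // (an illustrative sketch; error handling omitted, my_alloc /
     // my_free are hypothetical allocators that honor the requested
     // alignment):
     //
     //   ulong ele_max = 1024UL;
     //
     //   myele_t * shele  = my_alloc( alignof(myele_t), sizeof(myele_t)*ele_max );
     //   void    * shpool = my_alloc( mypool_align(),   mypool_footprint()      );
     //
     //   mypool_t join[1];
     //   mypool_join( join, mypool_new( shpool ), shele, ele_max );
     //
     //   mypool_reset( join, 0UL ); // a new mypool is empty; put all
     //                              // ele_max elements into it
     //
     //   int       err;
     //   myele_t * ele = mypool_acquire( join, NULL, 1 /* blocking */, &err );
     //   if( ele ) {
     //     ... use ele ...
     //     mypool_release( join, ele, 1 /* blocking */ );
     //   }
     //
     //   mypool_leave ( join   );
     //   mypool_delete( shpool );
     //   my_free( shpool );
     //   my_free( shele  );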
253 :
254 : Do this as often as desired in a compilation unit to get different
255 : types of concurrent pools. Options exist for generating library
256 : header prototypes and/or library implementations for concurrent pools
257 : usable across multiple compilation units. Additional options exist
258 : to use index compression, configure versioning, etc. */
259 :
260 : /* POOL_NAME gives the API prefix to use for pool */
261 :
262 : #ifndef POOL_NAME
263 : #error "Define POOL_NAME"
264 : #endif
265 :
266 : /* POOL_ELE_T is the pool element type. */
267 :
268 : #ifndef POOL_ELE_T
269 : #error "Define POOL_ELE_T"
270 : #endif
271 :
272 : /* POOL_IDX_T is the type used for the next field in the POOL_ELE_T.
273 : Should be a primitive unsigned integer type. Defaults to ulong. A
274 : pool can't use element stores with a capacity that can't be
275 : represented by a POOL_IDX_T. (E.g. if ushort, the maximum capacity
276 : pool compatible element store is 65535 elements.) */
277 :
278 : #ifndef POOL_IDX_T
279 : #define POOL_IDX_T ulong
280 : #endif
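
/* For example (an illustrative sketch), compressing the next field to
   32 bits:

     struct myele { uint next; ... };

     #define POOL_NAME  mypool
     #define POOL_ELE_T myele_t
     #define POOL_IDX_T uint
     #include "tmpl/fd_pool_para.c"

   limits compatible element stores to at most ~4B elements but halves
   the memory touched per next link. */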
281 :
282 : /* POOL_NEXT is the POOL_ELE_T next field */
283 :
284 : #ifndef POOL_NEXT
285 899739 : #define POOL_NEXT next
286 : #endif
287 :
288 : /* POOL_ALIGN gives the alignment required for the pool shared memory.
289 : Default is 128 for double cache line alignment. Should be at least
290 : ulong alignment. */
291 :
292 : #ifndef POOL_ALIGN
293 : #define POOL_ALIGN (128UL)
294 : #endif
295 :
296 : /* POOL_IDX_WIDTH gives the number of bits in a ulong to reserve for
297 : encoding the element store index in a versioned index. Element store
298 : capacity should be representable in this width. Default is 43 bits
299 : (e.g. enough to support a ~1 PiB element store of 128 byte elements).
300 : The versioning width will be 64-POOL_IDX_WIDTH. Since the least
301 : significant bit of the version is used to indicate global locking,
302 : versioning width should be at least 2 and ideally as large as
303 : possible. With the 43 default, version numbers will not be reused
304 : until 2^20 individual operations have been done. */
305 :
306 : #ifndef POOL_IDX_WIDTH
307 66905016 : #define POOL_IDX_WIDTH (43)
308 : #endif
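
/* For example (an illustrative sketch), a pool whose element store
   capacity never exceeds 2^20-1 could reserve more bits for
   versioning:

     #define POOL_IDX_WIDTH (20)

   before including this template, giving 44 version bits (version
   numbers then are not reused until 2^43 individual operations have
   been done). */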
309 :
310 : /* POOL_MAGIC is the magic number to use for the structure to aid in
311 : persistent and/or IPC usage. */
312 :
313 : #ifndef POOL_MAGIC
314 237 : #define POOL_MAGIC (0xf17eda2c37c90010UL) /* firedancer cpool version 0 */
315 : #endif
316 :
317 : /* POOL_IMPL_STYLE controls what to generate:
318 : 0 - local use only
319 : 1 - library header declaration
320 : 2 - library implementation */
321 :
322 : #ifndef POOL_IMPL_STYLE
323 : #define POOL_IMPL_STYLE 0
324 : #endif
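
/* For example (an illustrative sketch, file names hypothetical), to
   share one pool type across compilation units:

     // mypool.h: prototypes only
     #define POOL_NAME       mypool
     #define POOL_ELE_T      myele_t
     #define POOL_IMPL_STYLE 1
     #include "tmpl/fd_pool_para.c"

     // mypool.c: implementations only
     #include "mypool.h"
     #define POOL_NAME       mypool
     #define POOL_ELE_T      myele_t
     #define POOL_IMPL_STYLE 2
     #include "tmpl/fd_pool_para.c"
*/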
325 :
326 : /* POOL_LAZY enables lazy initialization for faster startup if defined
327 : to non-zero. Decreases pool_reset cost from O(ele_max) to O(1), at
328 : the cost of more complex allocation logic. */
329 :
330 : #ifndef POOL_LAZY
331 : #define POOL_LAZY 0
332 : #endif
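
/* E.g. (an illustrative sketch) defining:

     #define POOL_LAZY 1

   before including this template makes pool_reset O(1); elements past
   the reset point are handed out by a lazy bump allocator the first
   time they are acquired. */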
333 :
334 : /* Common pool error codes (FIXME: probably should get around to making
335 : unified error codes and string handling across all util at least so
336 : we don't have to do this in the generator itself) */
337 :
338 12218991 : #define FD_POOL_SUCCESS ( 0)
339 780 : #define FD_POOL_ERR_INVAL (-1)
340 6 : #define FD_POOL_ERR_AGAIN (-2)
341 1276971 : #define FD_POOL_ERR_CORRUPT (-3)
342 300012 : #define FD_POOL_ERR_EMPTY (-4)
343 : //#define FD_POOL_ERR_FULL (-5)
344 : //#define FD_POOL_ERR_KEY (-6)
345 :
346 : /* Implementation *****************************************************/
347 :
348 48914064 : #define POOL_VER_WIDTH (64-POOL_IDX_WIDTH)
349 :
350 : #if POOL_IMPL_STYLE==0 /* local use only */
351 : #define POOL_STATIC FD_FN_UNUSED static
352 : #else /* library header and/or implementation */
353 : #define POOL_STATIC
354 : #endif
355 :
356 76493097 : #define POOL_(n) FD_EXPAND_THEN_CONCAT3(POOL_NAME,_,n)
357 :
358 : #if POOL_IMPL_STYLE!=2 /* need header */
359 :
360 : #include "../bits/fd_bits.h"
361 :
362 : struct __attribute__((aligned(POOL_ALIGN))) POOL_(shmem_private) {
363 :
364 : /* Note: there is no free count because that isn't precisely
365 : knowable in a portable concurrent data structure. (Not enough bits
366 : to squeeze into ver_top for large pools, requiring a 128-bit wide
367 : ver_top would limit supported targets, etc.) */
368 :
369 : ulong magic; /* == POOL_MAGIC */
370 : ulong ver_top; /* Versioned index of the free stack top, top is in [0,ele_max) (not-empty) or is idx_null (empty) */
371 :
372 : # if POOL_LAZY
373 : ulong ver_lazy; /* Versioned index of the next element to lazily initialize, lazy is in [0,ele_max) (not-empty) or is idx_null (empty) */
374 : # endif
375 :
376 : };
377 :
378 : typedef struct POOL_(shmem_private) POOL_(shmem_t);
379 :
380 : struct POOL_(private) {
381 : POOL_(shmem_t) * pool; /* Pool location in the local address space */
382 : POOL_ELE_T * ele; /* Element store location in the local address space, NULL okay if ele_max==0 */
383 : ulong ele_max; /* Element store capacity, in [0,ele_max_max] */
384 : };
385 :
386 : typedef struct POOL_(private) POOL_(t);
387 :
388 : FD_PROTOTYPES_BEGIN
389 :
390 : /* pool_private_vidx packs ver and idx into a versioned idx. ver is
391 : masked to fit into POOL_VER_WIDTH bits. idx is assumed in
392 : [0,ele_max_max].
393 :
394 : pool_private_vidx_{ver,idx} extract the {version,index} from a
395 : versioned index and will fit into {POOL_VER_WIDTH,POOL_IDX_WIDTH}
396 : bits. */
397 :
398 11917695 : FD_FN_CONST static inline ulong POOL_(private_vidx)( ulong ver, ulong idx ) { return (ver<<POOL_IDX_WIDTH) | idx; }
399 :
400 12218838 : FD_FN_CONST static inline ulong POOL_(private_vidx_ver)( ulong ver_idx ) { return ver_idx >> POOL_IDX_WIDTH; }
401 15078804 : FD_FN_CONST static inline ulong POOL_(private_vidx_idx)( ulong ver_idx ) { return (ver_idx << POOL_VER_WIDTH) >> POOL_VER_WIDTH; }
402 :
403 : /* pool_private_{cidx,idx} compress/decompress a 64-bit in-register
404 : index to/from its in-memory representation. */
405 :
406 7539213 : FD_FN_CONST static inline POOL_IDX_T POOL_(private_cidx)( ulong idx ) { return (POOL_IDX_T) idx; }
407 7549053 : FD_FN_CONST static inline ulong POOL_(private_idx) ( ulong cidx ) { return (ulong) cidx; }
408 :
409 : /* pool_private_cas does a ulong FD_ATOMIC_CAS when FD_HAS_ATOMIC
410 : support is available and emulates it when not. Similarly for
411 : pool_private_fetch_and_or. When emulated, the pool will not be safe
412 : to use concurrently (but will still work). */
413 :
414 : static inline ulong
415 : POOL_(private_cas)( ulong volatile * p,
416 : ulong c,
417 11916969 : ulong s ) {
418 11916969 : ulong o;
419 11916969 : FD_COMPILER_MFENCE();
420 11916969 : # if FD_HAS_ATOMIC
421 11916969 : o = FD_ATOMIC_CAS( p, c, s );
422 : # else
423 : o = *p;
424 : *p = fd_ulong_if( o==c, s, o );
425 : # endif
426 11916969 : FD_COMPILER_MFENCE();
427 11916969 : return o;
428 11916969 : }
429 :
430 : static inline ulong
431 : POOL_(private_fetch_and_or)( ulong volatile * p,
432 6 : ulong b ) {
433 6 : ulong x;
434 6 : FD_COMPILER_MFENCE();
435 6 : # if FD_HAS_ATOMIC
436 6 : x = FD_ATOMIC_FETCH_AND_OR( p, b );
437 : # else
438 : x = *p;
439 : *p = x | b;
440 : # endif
441 6 : FD_COMPILER_MFENCE();
442 6 : return x;
443 6 : }
444 :
445 1233 : FD_FN_CONST static inline ulong POOL_(ele_max_max)( void ) { return (ulong)(POOL_IDX_T)(ULONG_MAX >> POOL_VER_WIDTH); }
446 :
447 2886 : FD_FN_CONST static inline ulong POOL_(align) ( void ) { return alignof(POOL_(shmem_t)); }
448 711 : FD_FN_CONST static inline ulong POOL_(footprint)( void ) { return sizeof (POOL_(shmem_t)); }
449 :
450 18 : FD_FN_PURE static inline void const * POOL_(shpool_const)( POOL_(t) const * join ) { return join->pool; }
451 36 : FD_FN_PURE static inline void const * POOL_(shele_const) ( POOL_(t) const * join ) { return join->ele; }
452 4983420 : FD_FN_PURE static inline ulong POOL_(ele_max) ( POOL_(t) const * join ) { return join->ele_max; }
453 :
454 3 : FD_FN_PURE static inline void * POOL_(shpool)( POOL_(t) * join ) { return join->pool; }
455 11319921 : FD_FN_PURE static inline void * POOL_(shele) ( POOL_(t) * join ) { return join->ele; }
456 :
457 18755223 : FD_FN_CONST static inline ulong POOL_(idx_null)( void ) { return (ulong)(POOL_IDX_T)(ULONG_MAX >> POOL_VER_WIDTH); }
458 18754155 : FD_FN_CONST static inline int POOL_(idx_is_null)( ulong idx ) { return idx==POOL_(idx_null)(); }
459 :
460 : FD_FN_PURE static inline ulong
461 : POOL_(idx)( POOL_(t) const * join,
462 16972170 : POOL_ELE_T const * ele ) {
463 16972170 : ulong ele_idx = (ulong)(ele - join->ele);
464 16972170 : return ele_idx<join->ele_max ? ele_idx : POOL_(idx_null)();
465 16972170 : }
466 :
467 : FD_FN_PURE static inline POOL_ELE_T const *
468 : POOL_(ele_const)( POOL_(t) const * join,
469 3201 : ulong ele_idx ) {
470 3201 : POOL_ELE_T const * ele = join->ele;
471 3201 : return (ele_idx < join->ele_max) ? (ele + ele_idx) : NULL;
472 3201 : }
473 :
474 : FD_FN_PURE static inline POOL_ELE_T *
475 : POOL_(ele)( POOL_(t) * join,
476 3348 : ulong ele_idx ) {
477 3348 : POOL_ELE_T * ele = join->ele;
478 3348 : return (ele_idx < join->ele_max) ? (ele + ele_idx) : NULL;
479 3348 : }
480 :
481 : static inline POOL_ELE_T const *
482 36 : POOL_(peek_const)( POOL_(t) const * join ) {
483 36 : POOL_(shmem_t) const * pool = join->pool;
484 36 : POOL_ELE_T const * ele = join->ele;
485 36 : ulong ele_max = join->ele_max;
486 36 : FD_COMPILER_MFENCE();
487 36 : ulong ver_top = pool->ver_top;
488 : # if POOL_LAZY
489 : ulong ver_lazy = pool->ver_lazy;
490 : # endif
491 36 : FD_COMPILER_MFENCE();
492 36 : ulong ele_idx = POOL_(private_vidx_idx)( ver_top );
493 : # if POOL_LAZY
494 30 : if( ele_idx<ele_max ) {
495 0 : return ele + ele_idx;
496 30 : } else {
497 30 : ulong lazy_idx = POOL_(private_vidx_idx)( ver_lazy );
498 30 : return (lazy_idx<ele_max) ? ele + lazy_idx : NULL;
499 30 : }
500 : # else
501 6 : return (ele_idx<ele_max) ? ele + ele_idx : NULL;
502 : # endif
503 36 : }
504 :
505 21 : static inline POOL_ELE_T * POOL_(peek)( POOL_(t) * join ) { return (POOL_ELE_T *)POOL_(peek_const)( join ); }
506 :
507 : static inline int
508 9 : POOL_(is_locked)( POOL_(t) const * join ) {
509 9 : POOL_(shmem_t) const * pool = join->pool;
510 9 : FD_COMPILER_MFENCE();
511 9 : ulong ver_top = pool->ver_top;
512 9 : # if POOL_LAZY
513 9 : ulong ver_lazy = pool->ver_lazy;
514 9 : # endif
515 9 : FD_COMPILER_MFENCE();
516 9 : return
517 9 : (int)(POOL_(private_vidx_ver)( ver_top ) & 1UL)
518 9 : # if POOL_LAZY
519 9 : | (int)(POOL_(private_vidx_ver)( ver_lazy ) & 1UL)
520 9 : # endif
521 9 : ;
522 9 : }
523 :
524 : static inline void
525 3 : POOL_(unlock)( POOL_(t) * join ) {
526 3 : POOL_(shmem_t) * pool = join->pool;
527 3 : FD_COMPILER_MFENCE();
528 3 : pool->ver_top += 1UL<<POOL_IDX_WIDTH;
529 3 : # if POOL_LAZY
530 3 : pool->ver_lazy += 1UL<<POOL_IDX_WIDTH;
531 3 : # endif
532 3 : FD_COMPILER_MFENCE();
533 3 : }
534 :
535 : POOL_STATIC void * POOL_(new) ( void * shmem );
536 : POOL_STATIC POOL_(t) * POOL_(join) ( void * ljoin, void * shpool, void * shele, ulong ele_max );
537 : POOL_STATIC void * POOL_(leave) ( POOL_(t) * join );
538 : POOL_STATIC void * POOL_(delete)( void * shpool );
539 :
540 : POOL_STATIC POOL_ELE_T * POOL_(acquire)( POOL_(t) * join, POOL_ELE_T * sentinel, int blocking, int * _opt_err );
541 :
542 : POOL_STATIC int POOL_(release)( POOL_(t) * join, POOL_ELE_T * ele, int blocking );
543 :
544 : POOL_STATIC int POOL_(is_empty)( POOL_(t) * join );
545 :
546 : POOL_STATIC int POOL_(lock)( POOL_(t) * join, int blocking );
547 :
548 : POOL_STATIC void POOL_(reset)( POOL_(t) * join, ulong sentinel_cnt );
549 :
550 : POOL_STATIC int POOL_(verify)( POOL_(t) const * join );
551 :
552 : POOL_STATIC FD_FN_CONST char const * POOL_(strerror)( int err );
553 :
554 : FD_PROTOTYPES_END
555 :
556 : #endif
557 :
558 : #if POOL_IMPL_STYLE!=1 /* need implementations (assumes header already included) */
559 :
560 : #include "../log/fd_log.h" /* used by constructors and verify (FIXME: Consider making a compile time option) */
561 :
562 : POOL_STATIC void *
563 249 : POOL_(new)( void * shmem ) {
564 249 : POOL_(shmem_t) * pool = (POOL_(shmem_t) *)shmem;
565 :
566 249 : if( FD_UNLIKELY( !pool ) ) {
567 3 : FD_LOG_WARNING(( "NULL shmem" ));
568 3 : return NULL;
569 3 : }
570 :
571 246 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)pool, POOL_(align)() ) ) ) {
572 3 : FD_LOG_WARNING(( "misaligned shmem" ));
573 3 : return NULL;
574 3 : }
575 :
576 243 : pool->ver_top = POOL_(private_vidx)( 0UL, POOL_(idx_null)() );
577 : # if POOL_LAZY
578 114 : pool->ver_lazy = POOL_(private_vidx)( 0UL, POOL_(idx_null)() );
579 : # endif
580 :
581 243 : FD_COMPILER_MFENCE();
582 243 : pool->magic = POOL_MAGIC;
583 243 : FD_COMPILER_MFENCE();
584 :
585 243 : return (void *)pool;
586 246 : }
587 :
588 : POOL_STATIC POOL_(t) *
589 : POOL_(join)( void * ljoin,
590 : void * shpool,
591 : void * shele,
592 690 : ulong ele_max ) {
593 690 : POOL_(t) * join = (POOL_(t) *)ljoin;
594 690 : POOL_(shmem_t) * pool = (POOL_(shmem_t) *)shpool;
595 690 : POOL_ELE_T * ele = (POOL_ELE_T *)shele;
596 :
597 690 : if( FD_UNLIKELY( !join ) ) {
598 3 : FD_LOG_WARNING(( "NULL ljoin" ));
599 3 : return NULL;
600 3 : }
601 :
602 687 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)join, alignof(POOL_(t)) ) ) ) {
603 3 : FD_LOG_WARNING(( "misaligned ljoin" ));
604 3 : return NULL;
605 3 : }
606 :
607 684 : if( FD_UNLIKELY( !pool ) ) {
608 3 : FD_LOG_WARNING(( "NULL shpool" ));
609 3 : return NULL;
610 3 : }
611 :
612 681 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)pool, POOL_(align)() ) ) ) {
613 3 : FD_LOG_WARNING(( "misaligned shpool" ));
614 3 : return NULL;
615 3 : }
616 :
617 678 : if( FD_UNLIKELY( pool->magic!=POOL_MAGIC ) ) {
618 3 : FD_LOG_WARNING(( "bad magic" ));
619 3 : return NULL;
620 3 : }
621 :
622 675 : if( FD_UNLIKELY( (!ele) & (!!ele_max) ) ) {
623 3 : FD_LOG_WARNING(( "NULL shele" ));
624 3 : return NULL;
625 3 : }
626 :
627 672 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)ele, alignof(POOL_ELE_T) ) ) ) {
628 3 : FD_LOG_WARNING(( "misaligned shele" ));
629 3 : return NULL;
630 3 : }
631 :
632 669 : if( FD_UNLIKELY( ele_max>POOL_(ele_max_max)() ) ) {
633 6 : FD_LOG_WARNING(( "bad ele_max" ));
634 6 : return NULL;
635 6 : }
636 :
637 663 : join->pool = pool;
638 663 : join->ele = ele;
639 663 : join->ele_max = ele_max;
640 :
641 663 : return join;
642 669 : }
643 :
644 : POOL_STATIC void *
645 18 : POOL_(leave)( POOL_(t) * join ) {
646 :
647 18 : if( FD_UNLIKELY( !join ) ) {
648 3 : FD_LOG_WARNING(( "NULL join" ));
649 3 : return NULL;
650 3 : }
651 :
652 15 : return (void *)join;
653 18 : }
654 :
655 : POOL_STATIC void *
656 18 : POOL_(delete)( void * shpool ) {
657 18 : POOL_(shmem_t) * pool = (POOL_(shmem_t) *)shpool;
658 :
659 18 : if( FD_UNLIKELY( !pool) ) {
660 3 : FD_LOG_WARNING(( "NULL shpool" ));
661 3 : return NULL;
662 3 : }
663 :
664 15 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)pool, POOL_(align)() ) ) ) {
665 3 : FD_LOG_WARNING(( "misaligned shpool" ));
666 3 : return NULL;
667 3 : }
668 :
669 12 : if( FD_UNLIKELY( pool->magic!=POOL_MAGIC ) ) {
670 3 : FD_LOG_WARNING(( "bad magic" ));
671 3 : return NULL;
672 3 : }
673 :
674 9 : FD_COMPILER_MFENCE();
675 9 : pool->magic = 0UL;
676 9 : FD_COMPILER_MFENCE();
677 :
678 9 : return (void *)pool;
679 12 : }
680 :
681 : #if POOL_LAZY
682 :
683 : static inline POOL_ELE_T *
684 : POOL_(acquire_lazy)( POOL_(t) * join,
685 : POOL_ELE_T * sentinel,
686 : int blocking,
687 1473 : int * _opt_err ) {
688 1473 : POOL_ELE_T * ele0 = join->ele;
689 1473 : ulong ele_max = join->ele_max;
690 1473 : ulong volatile * _l = (ulong volatile *)&join->pool->ver_lazy;
691 :
692 1473 : POOL_ELE_T * ele = sentinel;
693 1473 : int err = FD_POOL_SUCCESS;
694 :
695 1473 : FD_COMPILER_MFENCE();
696 :
697 1473 : for(;;) {
698 1473 : ulong ver_lazy = *_l;
699 :
700 1473 : ulong ver = POOL_(private_vidx_ver)( ver_lazy );
701 1473 : ulong ele_idx = POOL_(private_vidx_idx)( ver_lazy );
702 :
703 1473 : if( FD_LIKELY( !(ver & 1UL) ) ) { /* opt for unlocked */
704 :
705 1473 : if( FD_UNLIKELY( POOL_(idx_is_null)( ele_idx ) ) ) { /* opt for not empty */
706 0 : err = FD_POOL_ERR_EMPTY;
707 0 : break;
708 0 : }
709 :
710 1473 : if( FD_UNLIKELY( ele_idx>=ele_max ) ) { /* opt for not corrupt */
711 0 : err = FD_POOL_ERR_CORRUPT;
712 0 : break;
713 0 : }
714 :
715 1473 : ulong ele_nxt = ele_idx+1UL;
716 1473 : if( FD_UNLIKELY( ele_nxt>=ele_max ) ) ele_nxt = POOL_(idx_null)();
717 :
718 1473 : ulong new_ver_lazy = POOL_(private_vidx)( ver+2UL, ele_nxt );
719 1473 : if( FD_LIKELY( POOL_(private_cas)( _l, ver_lazy, new_ver_lazy )==ver_lazy ) ) { /* opt for low contention */
720 1473 : ele = ele0 + ele_idx;
721 1473 : break;
722 1473 : }
723 1473 : } else if( FD_UNLIKELY( !blocking ) ) { /* opt for blocking */
724 :
725 0 : err = FD_POOL_ERR_AGAIN;
726 0 : break; /* opt for blocking */
727 :
728 0 : }
729 :
730 0 : FD_SPIN_PAUSE();
731 0 : }
732 :
733 1473 : FD_COMPILER_MFENCE();
734 :
735 1473 : fd_int_store_if( !!_opt_err, _opt_err, err );
736 1473 : return ele;
737 1473 : }
738 :
739 : #endif /* POOL_LAZY */
740 :
741 : POOL_STATIC POOL_ELE_T *
742 : POOL_(acquire)( POOL_(t) * join,
743 : POOL_ELE_T * sentinel,
744 : int blocking,
745 6258519 : int * _opt_err ) {
746 6258519 : POOL_ELE_T * ele0 = join->ele;
747 6258519 : ulong ele_max = join->ele_max;
748 6258519 : ulong volatile * _v = (ulong volatile *)&join->pool->ver_top;
749 :
750 6258519 : POOL_ELE_T * ele = sentinel;
751 6258519 : int err = FD_POOL_SUCCESS;
752 :
753 6258519 : FD_COMPILER_MFENCE();
754 :
755 6258519 : for(;;) {
756 6258519 : ulong ver_top = *_v;
757 :
758 6258519 : ulong ver = POOL_(private_vidx_ver)( ver_top );
759 6258519 : ulong ele_idx = POOL_(private_vidx_idx)( ver_top );
760 :
761 6258519 : if( FD_LIKELY( !(ver & 1UL) ) ) { /* opt for unlocked */
762 :
763 6258519 : if( FD_UNLIKELY( POOL_(idx_is_null)( ele_idx ) ) ) { /* opt for not empty */
764 : # if POOL_LAZY
765 1473 : return POOL_(acquire_lazy)( join, sentinel, blocking, _opt_err );
766 0 : # endif
767 300009 : err = FD_POOL_ERR_EMPTY;
768 0 : break;
769 301482 : }
770 :
771 5957037 : if( FD_UNLIKELY( ele_idx>=ele_max ) ) { /* opt for not corrupt */
772 0 : err = FD_POOL_ERR_CORRUPT;
773 0 : break;
774 0 : }
775 :
776 5957037 : ulong ele_nxt = POOL_(private_idx)( ele0[ ele_idx ].POOL_NEXT );
777 :
778 5957037 : if( FD_UNLIKELY( (ele_nxt>=ele_max) & (!POOL_(idx_is_null)( ele_nxt )) ) ) { /* ele_nxt is invalid, opt for valid */
779 : /* It is possible that another thread acquired ele_idx and
780 : repurposed ele_idx's POOL_NEXT (storing something in it that
781 : isn't a valid pool value) between when we read ver_top and
782 : when we read ele_idx's POOL_NEXT above. If so, the pool
783 : version would be changed from what we read above. We thus
784 : only signal ERR_CORRUPT if the version number hasn't changed
785 : since we read it. */
786 :
787 0 : if( FD_UNLIKELY( POOL_(private_vidx_ver)( *_v )==ver ) ) {
788 0 : err = FD_POOL_ERR_CORRUPT;
789 0 : break;
790 0 : }
791 5957037 : } else { /* ele_nxt is valid */
792 5957037 : ulong new_ver_top = POOL_(private_vidx)( ver+2UL, ele_nxt );
793 :
794 5957037 : if( FD_LIKELY( POOL_(private_cas)( _v, ver_top, new_ver_top )==ver_top ) ) { /* opt for low contention */
795 5957037 : ele = ele0 + ele_idx;
796 5957037 : break;
797 5957037 : }
798 5957037 : }
799 5957037 : } else if( FD_UNLIKELY( !blocking ) ) { /* opt for blocking */
800 :
801 0 : err = FD_POOL_ERR_AGAIN;
802 0 : break; /* opt for blocking */
803 :
804 0 : }
805 :
806 0 : FD_SPIN_PAUSE();
807 0 : }
808 :
809 6257046 : FD_COMPILER_MFENCE();
810 :
811 1425495 : fd_int_store_if( !!_opt_err, _opt_err, err );
812 1425495 : return ele;
813 6258519 : }
814 :
815 : POOL_STATIC int
816 : POOL_(release)( POOL_(t) * join,
817 : POOL_ELE_T * ele,
818 5959236 : int blocking ) {
819 5959236 : ulong ele_max = join->ele_max;
820 5959236 : ulong volatile * _v = (ulong volatile *)&join->pool->ver_top;
821 :
822 5959236 : ulong ele_idx = (ulong)(ele - join->ele);
823 5959236 : if( FD_UNLIKELY( ele_idx>=ele_max ) ) return FD_POOL_ERR_INVAL; /* opt for valid call */
824 :
825 5958459 : int err = FD_POOL_SUCCESS;
826 :
827 5958459 : FD_COMPILER_MFENCE();
828 :
829 5958459 : for(;;) {
830 5958459 : ulong ver_top = *_v;
831 :
832 5958459 : ulong ver = POOL_(private_vidx_ver)( ver_top );
833 5958459 : ulong ele_nxt = POOL_(private_vidx_idx)( ver_top );
834 :
835 5958459 : if( FD_LIKELY( !(ver & 1UL) ) ) { /* opt for unlocked */
836 :
837 5958459 : if( FD_UNLIKELY( (ele_nxt>=ele_max) & (!POOL_(idx_is_null)( ele_nxt )) ) ) { /* opt for not corrupt */
838 0 : err = FD_POOL_ERR_CORRUPT;
839 0 : break;
840 0 : }
841 :
842 5958459 : ele->POOL_NEXT = POOL_(private_cidx)( ele_nxt );
843 :
844 5958459 : ulong new_ver_top = POOL_(private_vidx)( ver+2UL, ele_idx );
845 :
846 5958459 : if( FD_LIKELY( POOL_(private_cas)( _v, ver_top, new_ver_top )==ver_top ) ) break; /* opt for low contention */
847 :
848 5958459 : } else if( FD_UNLIKELY( !blocking ) ) { /* opt for blocking */
849 :
850 0 : err = FD_POOL_ERR_AGAIN;
851 0 : break;
852 :
853 0 : }
854 :
855 0 : FD_SPIN_PAUSE();
856 0 : }
857 :
858 5958459 : FD_COMPILER_MFENCE();
859 :
860 5958459 : return err;
861 5959236 : }
862 :
863 : POOL_STATIC int
864 2858799 : POOL_(is_empty)( POOL_(t) * join ) {
865 2858799 : POOL_(shmem_t) const * pool = join->pool;
866 2858799 : FD_COMPILER_MFENCE();
867 2858799 : ulong ver_top = pool->ver_top;
868 : # if POOL_LAZY
869 : ulong ver_lazy = pool->ver_lazy;
870 : # endif
871 2858799 : FD_COMPILER_MFENCE();
872 :
873 2858799 : ulong top_idx = POOL_(private_vidx_idx)( ver_top );
874 : # if POOL_LAZY
875 : ulong ele_max = join->ele_max;
876 2287089 : if( FD_LIKELY( top_idx<ele_max ) ) return 0; /* explicit stack non-empty */
877 666 : ulong lazy_idx = POOL_(private_vidx_idx)( ver_lazy );
878 666 : return !(lazy_idx<ele_max); /* empty only if lazy also empty */
879 : # else
880 571710 : return POOL_(idx_is_null)( top_idx );
881 : # endif
882 2858799 : }
883 :
884 : POOL_STATIC int
885 : POOL_(lock)( POOL_(t) * join,
886 6 : int blocking ) {
887 6 : ulong volatile * _v = (ulong volatile *)&join->pool->ver_top;
888 : # if POOL_LAZY
889 : ulong volatile * _l = (ulong volatile *)&join->pool->ver_lazy;
890 : # endif
891 :
892 6 : int err = FD_POOL_SUCCESS;
893 :
894 6 : FD_COMPILER_MFENCE();
895 :
896 6 : ulong ver_top;
897 6 : for(;;) {
898 :
899 : /* use a test-and-test-and-set style for reduced contention */
900 :
901 6 : ver_top = *_v;
902 6 : if( FD_LIKELY( !(ver_top & (1UL<<POOL_IDX_WIDTH)) ) ) { /* opt for low contention */
903 3 : ver_top = POOL_(private_fetch_and_or)( _v, 1UL<<POOL_IDX_WIDTH );
904 3 : if( FD_LIKELY( !(ver_top & (1UL<<POOL_IDX_WIDTH)) ) ) break; /* opt for low contention */
905 3 : }
906 :
907 3 : if( FD_UNLIKELY( !blocking ) ) { /* opt for blocking */
908 3 : err = FD_POOL_ERR_AGAIN;
909 3 : goto fail;
910 3 : }
911 :
912 0 : FD_SPIN_PAUSE();
913 0 : }
914 :
915 3 : FD_COMPILER_MFENCE();
916 :
917 : # if POOL_LAZY
918 :
919 3 : for(;;) {
920 :
921 : /* use a test-and-test-and-set style for reduced contention */
922 :
923 3 : ulong ver_lazy = *_l;
924 3 : if( FD_LIKELY( !(ver_lazy & (1UL<<POOL_IDX_WIDTH)) ) ) { /* opt for low contention */
925 3 : ver_lazy = POOL_(private_fetch_and_or)( _l, 1UL<<POOL_IDX_WIDTH );
926 3 : if( FD_LIKELY( !(ver_lazy & (1UL<<POOL_IDX_WIDTH)) ) ) break; /* opt for low contention */
927 3 : }
928 :
929 0 : if( FD_UNLIKELY( !blocking ) ) { /* opt for blocking */
930 0 : *_v = POOL_(private_vidx)( POOL_(private_vidx_ver)( ver_top )+2UL, POOL_(private_vidx_idx)( ver_top ) ); /* unlock */
931 0 : err = FD_POOL_ERR_AGAIN;
932 0 : goto fail;
933 0 : }
934 :
935 0 : FD_SPIN_PAUSE();
936 0 : }
937 :
938 3 : FD_COMPILER_MFENCE();
939 :
940 3 : # endif
941 :
942 6 : fail:
943 6 : return err;
944 3 : }
945 :
946 : #if !POOL_LAZY
947 :
948 : POOL_STATIC void
949 : POOL_(reset)( POOL_(t) * join,
950 123 : ulong sentinel_cnt ) {
951 123 : POOL_(shmem_t) * pool = join->pool;
952 123 : POOL_ELE_T * ele = join->ele;
953 123 : ulong ele_max = join->ele_max;
954 :
955 : /* Insert all but the leading sentinel_cnt elements in increasing
956 : order */
957 :
958 123 : ulong ele_top;
959 :
960 123 : if( FD_UNLIKELY( sentinel_cnt>=ele_max ) ) ele_top = POOL_(idx_null)(); /* All items are sentinels */
961 117 : else { /* Note: ele_max at least 1 here */
962 117 : ele_top = sentinel_cnt;
963 1580754 : for( ulong ele_idx=ele_top; ele_idx<(ele_max-1UL); ele_idx++ ) {
964 1580637 : ele[ ele_idx ].POOL_NEXT = POOL_(private_cidx)( ele_idx+1UL );
965 1580637 : }
966 117 : ele[ ele_max-1UL ].POOL_NEXT = POOL_(private_cidx)( POOL_(idx_null)() );
967 117 : }
968 :
969 123 : ulong ver_top = pool->ver_top;
970 123 : ulong ver = POOL_(private_vidx_ver)( ver_top );
971 123 : pool->ver_top = POOL_(private_vidx)( ver, ele_top );
972 123 : }
973 :
974 : #else
975 :
976 : POOL_STATIC void
977 : POOL_(reset)( POOL_(t) * join,
978 123 : ulong sentinel_cnt ) {
979 123 : POOL_(shmem_t) * pool = join->pool;
980 123 : ulong ele_max = join->ele_max;
981 :
982 : /* Assign all but the leading sentinel_cnt elements to the bump
983 : allocator */
984 :
985 123 : ulong ele_top = POOL_(idx_null)();
986 123 : ulong ele_lazy = sentinel_cnt<ele_max ? sentinel_cnt : POOL_(idx_null)();
987 :
988 123 : ulong ver_top = pool->ver_top;
989 123 : ulong ver_lazy = pool->ver_lazy;
990 123 : pool->ver_top = POOL_(private_vidx)( POOL_(private_vidx_ver)( ver_top ), ele_top );
991 123 : pool->ver_lazy = POOL_(private_vidx)( POOL_(private_vidx_ver)( ver_lazy ), ele_lazy );
992 123 : }
993 :
994 : #endif
995 :
996 : POOL_STATIC int
997 531 : POOL_(verify)( POOL_(t) const * join ) {
998 :
999 1597059 : # define POOL_TEST(c) do { \
1000 1597059 : if( FD_UNLIKELY( !(c) ) ) { FD_LOG_WARNING(( "FAIL: %s", #c )); return FD_POOL_ERR_CORRUPT; } \
1001 1597059 : } while(0)
1002 :
1003 : /* Validate join */
1004 :
1005 531 : POOL_TEST( join );
1006 531 : POOL_TEST( fd_ulong_is_aligned( (ulong)join, alignof(POOL_(t)) ) );
1007 :
1008 531 : POOL_(shmem_t) const * pool = join->pool;
1009 531 : POOL_ELE_T const * ele = join->ele;
1010 531 : ulong ele_max = join->ele_max;
1011 :
1012 531 : POOL_TEST( pool );
1013 531 : POOL_TEST( fd_ulong_is_aligned( (ulong)pool, POOL_(align)() ) );
1014 :
1015 531 : POOL_TEST( (!!ele)| (!ele_max) );
1016 531 : POOL_TEST( fd_ulong_is_aligned( (ulong)ele, alignof(POOL_ELE_T) ) );
1017 :
1018 531 : POOL_TEST( ele_max<=POOL_(ele_max_max)() );
1019 :
1020 : /* Validate pool metadata */
1021 :
1022 531 : ulong magic = pool->magic;
1023 531 : ulong ver_top = pool->ver_top;
1024 :
1025 : /* version arbitrary as far as verify is concerned */
1026 531 : ulong ele_idx = POOL_(private_vidx_idx)( ver_top );
1027 :
1028 531 : POOL_TEST( magic==POOL_MAGIC );
1029 :
1030 : /* Validate pool elements */
1031 :
1032 531 : ulong ele_rem = ele_max;
1033 1592547 : while( ele_idx<ele_max ) {
1034 1592016 : POOL_TEST( ele_rem ); ele_rem--; /* no cycles */
1035 1592016 : ele_idx = POOL_(private_idx)( ele[ ele_idx ].POOL_NEXT );
1036 1592016 : }
1037 :
1038 531 : POOL_TEST( POOL_(idx_is_null)( ele_idx ) );
1039 :
1040 : # if POOL_LAZY
1041 264 : ulong lazy_idx = POOL_(private_vidx_idx)( pool->ver_lazy );
1042 264 : ulong lazy_free = POOL_(idx_is_null)( lazy_idx ) ? 0UL : (ele_max-lazy_idx);
1043 264 : POOL_TEST( lazy_free<=ele_rem );
1044 264 : # endif
1045 :
1046 264 : # undef POOL_TEST
1047 :
1048 531 : return FD_POOL_SUCCESS;
1049 264 : }
1050 :
1051 : POOL_STATIC char const *
1052 18 : POOL_(strerror)( int err ) {
1053 18 : switch( err ) {
1054 3 : case FD_POOL_SUCCESS: return "success";
1055 3 : case FD_POOL_ERR_INVAL: return "bad input";
1056 3 : case FD_POOL_ERR_AGAIN: return "try again";
1057 3 : case FD_POOL_ERR_CORRUPT: return "corruption detected";
1058 3 : case FD_POOL_ERR_EMPTY: return "pool empty";
1059 3 : default: break;
1060 18 : }
1061 3 : return "unknown";
1062 18 : }
1063 :
1064 : #endif
1065 :
1066 : #undef POOL_
1067 : #undef POOL_STATIC
1068 : #undef POOL_VER_WIDTH
1069 :
1070 : #undef POOL_LAZY
1071 : #undef POOL_IMPL_STYLE
1072 : #undef POOL_MAGIC
1073 : #undef POOL_IDX_WIDTH
1074 : #undef POOL_ALIGN
1075 : #undef POOL_NEXT
1076 : #undef POOL_IDX_T
1077 : #undef POOL_ELE_T
1078 : #undef POOL_NAME