Line data Source code
1 : #if FD_HAS_THREADS /* THREADS implies HOSTED */
2 : #define _GNU_SOURCE
3 : #endif
4 :
5 : #include "fd_shmem_private.h"
6 :
7 : /* Portable APIs */
8 :
9 : int
10 0 : fd_cstr_to_shmem_lg_page_sz( char const * cstr ) {
11 0 : if( !cstr ) return FD_SHMEM_UNKNOWN_LG_PAGE_SZ;
12 :
13 0 : if( !fd_cstr_casecmp( cstr, "normal" ) ) return FD_SHMEM_NORMAL_LG_PAGE_SZ;
14 0 : if( !fd_cstr_casecmp( cstr, "huge" ) ) return FD_SHMEM_HUGE_LG_PAGE_SZ;
15 0 : if( !fd_cstr_casecmp( cstr, "gigantic" ) ) return FD_SHMEM_GIGANTIC_LG_PAGE_SZ;
16 :
17 0 : int i = fd_cstr_to_int( cstr );
18 0 : if( i==FD_SHMEM_NORMAL_LG_PAGE_SZ ) return FD_SHMEM_NORMAL_LG_PAGE_SZ;
19 0 : if( i==FD_SHMEM_HUGE_LG_PAGE_SZ ) return FD_SHMEM_HUGE_LG_PAGE_SZ;
20 0 : if( i==FD_SHMEM_GIGANTIC_LG_PAGE_SZ ) return FD_SHMEM_GIGANTIC_LG_PAGE_SZ;
21 :
22 0 : return FD_SHMEM_UNKNOWN_LG_PAGE_SZ;
23 0 : }
24 :
25 : char const *
26 0 : fd_shmem_lg_page_sz_to_cstr( int lg_page_sz ) {
27 0 : switch( lg_page_sz ) {
28 0 : case FD_SHMEM_NORMAL_LG_PAGE_SZ: return "normal";
29 0 : case FD_SHMEM_HUGE_LG_PAGE_SZ: return "huge";
30 0 : case FD_SHMEM_GIGANTIC_LG_PAGE_SZ: return "gigantic";
31 0 : default: break;
32 0 : }
33 0 : return "unknown";
34 0 : }
35 :
36 : ulong
37 273 : fd_cstr_to_shmem_page_sz( char const * cstr ) {
38 273 : if( !cstr ) return FD_SHMEM_UNKNOWN_PAGE_SZ;
39 :
40 273 : if( !fd_cstr_casecmp( cstr, "normal" ) ) return FD_SHMEM_NORMAL_PAGE_SZ;
41 195 : if( !fd_cstr_casecmp( cstr, "huge" ) ) return FD_SHMEM_HUGE_PAGE_SZ;
42 192 : if( !fd_cstr_casecmp( cstr, "gigantic" ) ) return FD_SHMEM_GIGANTIC_PAGE_SZ;
43 :
44 27 : ulong u = fd_cstr_to_ulong( cstr );
45 27 : if( u==FD_SHMEM_NORMAL_PAGE_SZ ) return FD_SHMEM_NORMAL_PAGE_SZ;
46 27 : if( u==FD_SHMEM_HUGE_PAGE_SZ ) return FD_SHMEM_HUGE_PAGE_SZ;
47 27 : if( u==FD_SHMEM_GIGANTIC_PAGE_SZ ) return FD_SHMEM_GIGANTIC_PAGE_SZ;
48 :
49 27 : return FD_SHMEM_UNKNOWN_PAGE_SZ;
50 27 : }
51 :
52 : char const *
53 4353 : fd_shmem_page_sz_to_cstr( ulong page_sz ) {
54 4353 : switch( page_sz ) {
55 180 : case FD_SHMEM_NORMAL_PAGE_SZ: return "normal";
56 159 : case FD_SHMEM_HUGE_PAGE_SZ: return "huge";
57 4014 : case FD_SHMEM_GIGANTIC_PAGE_SZ: return "gigantic";
58 0 : default: break;
59 4353 : }
60 0 : return "unknown";
61 4353 : }
62 :
63 : #if FD_HAS_HOSTED
64 :
65 : #include <ctype.h>
66 : #include <errno.h>
67 : #include <unistd.h>
68 : #include <fcntl.h>
69 : #include <linux/mempolicy.h>
70 : #include <sys/mman.h>
71 : #include <sys/stat.h>
72 : #include <linux/mman.h>
73 :
74 : #if FD_HAS_THREADS
75 : pthread_mutex_t fd_shmem_private_lock[1];
76 : #endif
77 :
78 : char fd_shmem_private_base[ FD_SHMEM_PRIVATE_BASE_MAX ]; /* "" at thread group start, initialized at boot */
79 : ulong fd_shmem_private_base_len; /* 0UL at ", initialized at boot */
80 :
81 : /* NUMA TOPOLOGY APIS *************************************************/
82 :
83 : static ulong fd_shmem_private_numa_cnt; /* 0UL at thread group start, initialized at boot */
84 : static ulong fd_shmem_private_cpu_cnt; /* " */
85 : static ushort fd_shmem_private_numa_idx[ FD_SHMEM_CPU_MAX ]; /* " */
86 : static ushort fd_shmem_private_cpu_idx [ FD_SHMEM_NUMA_MAX ]; /* " */
87 :
/* fd_shmem_numa_cnt / fd_shmem_cpu_cnt return the number of host numa
   nodes / logical cpus cached by fd_shmem_private_boot (0UL before
   boot / after halt). */
ulong fd_shmem_numa_cnt( void ) { return fd_shmem_private_numa_cnt; }
ulong fd_shmem_cpu_cnt ( void ) { return fd_shmem_private_cpu_cnt;  }
90 :
91 : ulong
92 372 : fd_shmem_numa_idx( ulong cpu_idx ) {
93 372 : if( FD_UNLIKELY( cpu_idx>=fd_shmem_private_cpu_cnt ) ) return ULONG_MAX;
94 372 : return (ulong)fd_shmem_private_numa_idx[ cpu_idx ];
95 372 : }
96 :
97 : ulong
98 54 : fd_shmem_cpu_idx( ulong numa_idx ) {
99 54 : if( FD_UNLIKELY( numa_idx>=fd_shmem_private_numa_cnt ) ) return ULONG_MAX;
100 54 : return (ulong)fd_shmem_private_cpu_idx[ numa_idx ];
101 54 : }
102 :
/* fd_shmem_numa_validate verifies that all page_cnt page_sz pages in
   the region starting at mem are currently resident on the numa node
   that backs logical cpu cpu_idx.  Pages are queried in batches of up
   to 512 via fd_numa_move_pages with a NULL nodes argument (status
   query mode, no movement).  Returns 0 on success and a positive
   errno compatible code on failure (logs details). */

int
fd_shmem_numa_validate( void const * mem,
                        ulong        page_sz,
                        ulong        page_cnt,
                        ulong        cpu_idx ) {

  /* Check input args */

  if( FD_UNLIKELY( !mem ) ) {
    FD_LOG_WARNING(( "NULL mem" ));
    return EINVAL;
  }

  if( FD_UNLIKELY( !fd_shmem_is_page_sz( page_sz ) ) ) {
    FD_LOG_WARNING(( "bad page_sz (%lu)", page_sz ));
    return EINVAL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)mem, page_sz ) ) ) {
    FD_LOG_WARNING(( "misaligned mem" ));
    return EINVAL;
  }

  /* Upper bound keeps the total byte size representable as an off_t */

  if( FD_UNLIKELY( !((1UL<=page_cnt) & (page_cnt<=(((ulong)LONG_MAX)/page_sz))) ) ) {
    FD_LOG_WARNING(( "bad page_cnt (%lu)", page_cnt ));
    return EINVAL;
  }

  if( FD_UNLIKELY( !(cpu_idx<fd_shmem_cpu_cnt()) ) ) {
    FD_LOG_WARNING(( "bad cpu_idx (%lu)", cpu_idx ));
    return EINVAL;
  }

  /* The numa node every page is expected to be resident on */

  ulong numa_idx = fd_shmem_numa_idx( cpu_idx );

  ulong  page = (ulong)mem;
  int    batch_status[ 512 ];
  void * batch_page [ 512 ];
  ulong  batch_cnt = 0UL;
  while( page_cnt ) {
    batch_page[ batch_cnt++ ] = (void *)page;
    page += page_sz;
    page_cnt--;
    /* Flush the batch when full or when the last page has been queued */
    if( FD_UNLIKELY( ((batch_cnt==512UL) | (!page_cnt) ) ) ) {
      if( FD_UNLIKELY( fd_numa_move_pages( 0, batch_cnt, batch_page, NULL, batch_status, 0 ) ) ) {
        FD_LOG_WARNING(( "fd_numa_move_pages query failed (%i-%s)", errno, fd_io_strerror( errno ) ));
        return errno;
      }
      for( ulong batch_idx=0UL; batch_idx<batch_cnt; batch_idx++ ) {
        if( FD_UNLIKELY( batch_status[batch_idx]<0 ) ) { /* negative status is a per-page errno */
          int err = -batch_status[batch_idx];
          FD_LOG_WARNING(( "page status failed (%i-%s)", err, fd_io_strerror( err ) ));
          return err;
        }
        if( FD_UNLIKELY( batch_status[batch_idx]!=(int)numa_idx ) ) { /* page resident on the wrong node */
          FD_LOG_WARNING(( "page allocated to numa %i instead of numa %lu", batch_status[batch_idx], numa_idx ));
          return EFAULT;
        }
      }
      batch_cnt = 0UL;
    }
  }

  return 0;
}
165 :
166 : /* SHMEM REGION CREATION AND DESTRUCTION ******************************/
167 :
/* fd_shmem_create_multi_flags is the common implementation behind
   fd_shmem_create_multi (open_flags==O_RDWR|O_CREAT|O_EXCL) and
   fd_shmem_update_multi (open_flags==O_RDWR).  It opens the page_sz
   backed shmem region named name with permissions mode, sizes it to
   hold sum(_sub_page_cnt) pages, maps it and then, for each of the
   sub_cnt subregions, binds / locks / validates the subregion's pages
   on the numa node backing the corresponding _sub_cpu_idx cpu.
   Returns 0 on success and a positive errno compatible code on
   failure (logs details); on failure the region file is unlinked.
   Note the cleanup labels below deliberately fall through in order
   (unmap -> close -> restore -> done). */

static int
fd_shmem_create_multi_flags( char const *  name,
                             ulong         page_sz,
                             ulong         sub_cnt,
                             ulong const * _sub_page_cnt,
                             ulong const * _sub_cpu_idx,
                             ulong         mode,
                             int           open_flags ) {

  /* Check input args */

  if( FD_UNLIKELY( !fd_shmem_name_len( name ) ) ) { FD_LOG_WARNING(( "bad name (%s)", name ? name : "NULL" )); return EINVAL; }

  if( FD_UNLIKELY( !fd_shmem_is_page_sz( page_sz ) ) ) { FD_LOG_WARNING(( "bad page_sz (%lu)", page_sz )); return EINVAL; }

  if( FD_UNLIKELY( !sub_cnt       ) ) { FD_LOG_WARNING(( "zero sub_cnt"      )); return EINVAL; }
  if( FD_UNLIKELY( !_sub_page_cnt ) ) { FD_LOG_WARNING(( "NULL sub_page_cnt" )); return EINVAL; }
  if( FD_UNLIKELY( !_sub_cpu_idx  ) ) { FD_LOG_WARNING(( "NULL sub_cpu_idx"  )); return EINVAL; }

  ulong cpu_cnt = fd_shmem_cpu_cnt();

  /* Accumulate the total page count with overflow detection and
     validate each subregion's target cpu */

  ulong page_cnt = 0UL;
  for( ulong sub_idx=0UL; sub_idx<sub_cnt; sub_idx++ ) {
    ulong sub_page_cnt = _sub_page_cnt[ sub_idx ];
    if( FD_UNLIKELY( !sub_page_cnt ) ) continue; /* Skip over empty subregions */

    page_cnt += sub_page_cnt;
    if( FD_UNLIKELY( page_cnt<sub_page_cnt ) ) {
      FD_LOG_WARNING(( "sub[%lu] sub page_cnt overflow (page_cnt %lu, sub_page_cnt %lu)",
                       sub_idx, page_cnt-sub_page_cnt, sub_page_cnt ));
      return EINVAL;
    }

    ulong sub_cpu_idx = _sub_cpu_idx[ sub_idx ];
    if( FD_UNLIKELY( sub_cpu_idx>=cpu_cnt ) ) {
      FD_LOG_WARNING(( "sub[%lu] bad cpu_idx (%lu)", sub_idx, sub_cpu_idx ));
      return EINVAL;
    }
  }

  if( FD_UNLIKELY( !((1UL<=page_cnt) & (page_cnt<=(((ulong)LONG_MAX)/page_sz))) ) ) { /* LONG_MAX from off_t */
    FD_LOG_WARNING(( "bad total page_cnt (%lu)", page_cnt ));
    return EINVAL;
  }

  if( FD_UNLIKELY( mode!=(ulong)(mode_t)mode ) ) { FD_LOG_WARNING(( "bad mode (0%03lo)", mode )); return EINVAL; }

  /* We use the FD_SHMEM_LOCK in create just to be safe given some
     thread safety ambiguities in the documentation for some of the
     below APIs. */

  FD_SHMEM_LOCK;

  int err;

  /* ERROR latches errno into err and jumps into the cleanup chain */

# define ERROR( cleanup ) do { err = errno; goto cleanup; } while(0)

  int    orig_mempolicy;
  ulong  orig_nodemask[ (FD_SHMEM_NUMA_MAX+63UL)/64UL ];
  char   path[ FD_SHMEM_PRIVATE_PATH_BUF_MAX ];
  int    fd;
  void * shmem;

  ulong sz = page_cnt*page_sz;

  /* Save this thread's numa node mempolicy (restored at the restore
     label on all exit paths that reach it) */

  if( FD_UNLIKELY( fd_numa_get_mempolicy( &orig_mempolicy, orig_nodemask, FD_SHMEM_NUMA_MAX, NULL, 0UL ) ) ) {
    FD_LOG_WARNING(( "fd_numa_get_mempolicy failed (%i-%s)", errno, fd_io_strerror( errno ) ));
    ERROR( done );
  }

  /* Create the region */

  fd = open( fd_shmem_private_path( name, page_sz, path ), open_flags, (mode_t)mode );
  if( FD_UNLIKELY( fd==-1 ) ) {
    FD_LOG_WARNING(( "open(\"%s\",%#x,0%03lo) failed (%i-%s)", path, (uint)open_flags, mode, errno, fd_io_strerror( errno ) ));
    ERROR( restore );
  }

  /* Size the region */

  if( FD_UNLIKELY( ftruncate( fd, (off_t)sz ) ) ) {
    FD_LOG_WARNING(( "ftruncate(\"%s\",%lu KiB) failed (%i-%s)", path, sz>>10, errno, fd_io_strerror( errno ) ));
    ERROR( close );
  }

  /* Map the region into our address space. */

  shmem = mmap( NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)0);
  if( FD_UNLIKELY( shmem==MAP_FAILED ) ) {
    FD_LOG_WARNING(( "mmap(NULL,%lu KiB,PROT_READ|PROT_WRITE,MAP_SHARED,\"%s\",0) failed (%i-%s)",
                     sz>>10, path, errno, fd_io_strerror( errno ) ));
    ERROR( close );
  }

  /* Validate the mapping */

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)shmem, page_sz ) ) ) {
    FD_LOG_WARNING(( "misaligned memory mapping for \"%s\"\n\t"
                     "This thread group's hugetlbfs mount path (--shmem-path / FD_SHMEM_PATH):\n\t"
                     "\t%s\n\t"
                     "has probably been corrupted and needs to be redone.\n\t"
                     "See 'bin/fd_shmem_cfg help' for more information.",
                     path, fd_shmem_private_base ));
    errno = EFAULT; /* ENOMEM is arguable */
    ERROR( unmap );
  }

  /* For each subregion */

  uchar * sub_shmem = (uchar *)shmem;
  for( ulong sub_idx=0UL; sub_idx<sub_cnt; sub_idx++ ) {
    ulong sub_page_cnt = _sub_page_cnt[ sub_idx ];
    if( FD_UNLIKELY( !sub_page_cnt ) ) continue; /* Skip over empty sub-regions */

    ulong sub_sz       = sub_page_cnt*page_sz;
    ulong sub_cpu_idx  = _sub_cpu_idx[ sub_idx ];
    ulong sub_numa_idx = fd_shmem_numa_idx( sub_cpu_idx );

    ulong nodemask[ (FD_SHMEM_NUMA_MAX+63UL)/64UL ];

    /* Set the mempolicy to bind newly allocated memory to the numa idx
       corresponding to logical cpu cpu_idx.  This should force page
       allocation to be on the desired numa node, keeping our fingers
       crossed that even the ftruncate / mmap above did not trigger
       this; it doesn't seem too, even when the user's thread group has
       configured things like mlockall(MCL_CURRENT | MCL_FUTURE ).
       Theoretically, the fd_numa_mbind below should do it without this
       but the Linux kernel tends to view requests to move pages between
       numa nodes after allocation as for entertainment purposes only. */

    fd_memset( nodemask, 0, 8UL*((FD_SHMEM_NUMA_MAX+63UL)/64UL) );
    nodemask[ sub_numa_idx >> 6 ] = 1UL << (sub_numa_idx & 63UL);

    if( FD_UNLIKELY( fd_numa_set_mempolicy( MPOL_BIND | MPOL_F_STATIC_NODES, nodemask, FD_SHMEM_NUMA_MAX ) ) ) {
      FD_LOG_WARNING(( "fd_numa_set_mempolicy failed (%i-%s)", errno, fd_io_strerror( errno ) ));
      ERROR( unmap );
    }

    /* If a mempolicy has been set and the numa_idx node does not have
       sufficient pages to back the mapping, touching the memory will
       trigger a SIGBUS when it touches the first part of the mapping
       for which there are no pages.  Unfortunately, mmap will only
       error if there are insufficient pages across all NUMA nodes (even
       if using mlockall( MCL_FUTURE ) or passing MAP_POPULATE), so we
       need to check that the mapping can be backed without handling
       signals.

       So we mlock the subregion to force the region to be backed by
       pages now.  The subregion should be backed by page_sz pages
       (thanks to the hugetlbfs configuration) and should be on the
       correct NUMA node (thanks to the mempolicy above).  Specifically,
       mlock will error with ENOMEM if there were insufficient pages
       available.  mlock guarantees that if it succeeds, the mapping has
       been fully backed by pages and these pages will remain resident
       in DRAM at least until the mapping is closed.  We can then
       proceed as usual without the risk of meeting SIGBUS or its
       friends. */

    if( FD_UNLIKELY( fd_numa_mlock( sub_shmem, sub_sz ) ) ) {
      FD_LOG_WARNING(( "sub[%lu]: fd_numa_mlock(\"%s\",%lu KiB) failed (%i-%s)",
                       sub_idx, path, sub_sz>>10, errno, fd_io_strerror( errno ) ));
      ERROR( unmap );
    }

    /* At this point all pages in this subregion should be allocated on
       the right NUMA node and resident in DRAM.  But in the spirit of
       not trusting Linux to get this right robustly, we continue with
       touching pages from cpu_idx. */

    /* FIXME: NUMA TOUCH HERE (ALSO WOULD A LOCAL TOUCH WORK GIVEN THE
       MEMPOLICY DONE ABOVE?) */

    /* fd_numa_mbind the memory subregion to this numa node to nominally
       stay put after we unmap it.  We recompute the nodemask to be on
       the safe side in case set mempolicy above clobbered it. */

    fd_memset( nodemask, 0, 8UL*((FD_SHMEM_NUMA_MAX+63UL)/64UL) );
    nodemask[ sub_numa_idx >> 6 ] = 1UL << (sub_numa_idx & 63UL);

    if( FD_UNLIKELY( fd_numa_mbind( sub_shmem, sub_sz, MPOL_BIND, nodemask, FD_SHMEM_NUMA_MAX, MPOL_MF_MOVE|MPOL_MF_STRICT ) ) ) {
      FD_LOG_WARNING(( "sub[%lu]: fd_numa_mbind(\"%s\",%lu KiB,MPOL_BIND,1UL<<%lu,MPOL_MF_MOVE|MPOL_MF_STRICT) failed (%i-%s)",
                       sub_idx, path, sub_sz>>10, sub_numa_idx, errno, fd_io_strerror( errno ) ));
      ERROR( unmap );
    }

    /* And since the fd_numa_mbind still often will ignore requests, we
       double check that the pages are in the right place. */

    int warn = fd_shmem_numa_validate( sub_shmem, page_sz, sub_page_cnt, sub_cpu_idx ); /* logs details */
    if( FD_UNLIKELY( warn ) )
      FD_LOG_WARNING(( "sub[%lu]: mmap(NULL,%lu KiB,PROT_READ|PROT_WRITE,MAP_SHARED,\"%s\",0) numa binding failed (%i-%s)",
                       sub_idx, sub_sz>>10, path, warn, fd_io_strerror( warn ) ));

    sub_shmem += sub_sz;
  }

  err = 0;

# undef ERROR

  /* Cleanup chain: labels fall through top to bottom.  The mapping is
     always unmapped (the region lives on in the filesystem); the file
     is only unlinked on error. */

unmap:
  if( FD_UNLIKELY( munmap( shmem, sz ) ) )
    FD_LOG_ERR(( "munmap(\"%s\",%lu KiB) failed (%i-%s)",
                 path, sz>>10, errno, fd_io_strerror( errno ) ));

close:
  if( FD_UNLIKELY( err ) && FD_UNLIKELY( unlink( path ) ) )
    FD_LOG_ERR(( "unlink(\"%s\") failed (%i-%s)", path, errno, fd_io_strerror( errno ) ));
  if( FD_UNLIKELY( close( fd ) ) )
    FD_LOG_ERR(( "close(\"%s\") failed (%i-%s)", path, errno, fd_io_strerror( errno ) ));

restore:
  if( FD_UNLIKELY( fd_numa_set_mempolicy( orig_mempolicy, orig_nodemask, FD_SHMEM_NUMA_MAX ) ) )
    FD_LOG_ERR(( "fd_numa_set_mempolicy failed (%i-%s)", errno, fd_io_strerror( errno ) ));

done:
  FD_SHMEM_UNLOCK;
  return err;
}
389 :
/* fd_shmem_create_multi creates a brand new named shmem region
   (O_CREAT|O_EXCL: fails if the region already exists) and binds each
   subregion's pages to the numa node of the corresponding cpu.  See
   fd_shmem_create_multi_flags for details and return values. */

int
fd_shmem_create_multi( char const *  name,
                       ulong         page_sz,
                       ulong         sub_cnt,
                       ulong const * _sub_page_cnt,
                       ulong const * _sub_cpu_idx,
                       ulong         mode ) {
  return fd_shmem_create_multi_flags( name, page_sz, sub_cnt, _sub_page_cnt, _sub_cpu_idx, mode, O_RDWR | O_CREAT | O_EXCL );
}
399 :
/* fd_shmem_update_multi is like fd_shmem_create_multi but opens an
   already existing region (O_RDWR without O_CREAT, so it fails if the
   region does not exist), resizing and rebinding it.  See
   fd_shmem_create_multi_flags for details and return values. */

int
fd_shmem_update_multi( char const *  name,
                       ulong         page_sz,
                       ulong         sub_cnt,
                       ulong const * _sub_page_cnt,
                       ulong const * _sub_cpu_idx,
                       ulong         mode ) {
  return fd_shmem_create_multi_flags( name, page_sz, sub_cnt, _sub_page_cnt, _sub_cpu_idx, mode, O_RDWR );
}
409 :
/* fd_shmem_create_multi_unlocked creates (or recreates, given O_TRUNC)
   the named page_sz backed region sized for page_cnt pages without any
   numa binding or page locking ("unlocked": pages are not mlocked and
   no FD_SHMEM_LOCK is taken, unlike fd_shmem_create_multi_flags).
   Returns 0 on success and a positive errno compatible code on
   failure (logs details); on failure the region file is unlinked.
   NOTE(review): unlike fd_shmem_create_multi_flags, sz=page_cnt*page_sz
   is not overflow checked and mode is not checked against mode_t range
   here — confirm callers guarantee sane values. */

int
fd_shmem_create_multi_unlocked( char const * name,
                                ulong        page_sz,
                                ulong        page_cnt,
                                ulong        mode ) {

  /* Check input args */

  if( FD_UNLIKELY( !fd_shmem_name_len( name ) ) ) { FD_LOG_WARNING(( "bad name (%s)", name ? name : "NULL" )); return EINVAL; }

  if( FD_UNLIKELY( !page_cnt ) ) { FD_LOG_WARNING(( "zero page_cnt" )); return EINVAL; }

  if( FD_UNLIKELY( !fd_shmem_is_page_sz( page_sz ) ) ) { FD_LOG_WARNING(( "bad page_sz (%lu)", page_sz )); return EINVAL; }

  /* ERROR latches errno into err and jumps into the cleanup chain */

# define ERROR( cleanup ) do { err = errno; goto cleanup; } while(0)

  int err = 0;

  char   path[ FD_SHMEM_PRIVATE_PATH_BUF_MAX ];
  void * shmem;

  ulong sz = page_cnt*page_sz;

  /* Acquire the pages at a random address */

  /* Create the region */
  int open_flags = O_RDWR | O_CREAT | O_TRUNC;
  int fd = open( fd_shmem_private_path( name, page_sz, path ), open_flags, (mode_t)mode );
  if( FD_UNLIKELY( fd==-1 ) ) {
    FD_LOG_WARNING(( "open(\"%s\",%#x,0%03lo) failed (%i-%s)", path, (uint)open_flags, mode, errno, fd_io_strerror( errno ) ));
    ERROR( done );
  }

  /* Size the region */

  if( FD_UNLIKELY( ftruncate( fd, (off_t)sz ) ) ) {
    FD_LOG_WARNING(( "ftruncate(\"%s\",%lu KiB) failed (%i-%s)", path, sz>>10, errno, fd_io_strerror( errno ) ));
    ERROR( close );
  }

  /* Map the region into our address space. */

  shmem = mmap( NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)0);
  if( FD_UNLIKELY( shmem==MAP_FAILED ) ) {
    FD_LOG_WARNING(( "mmap(NULL,%lu KiB,PROT_READ|PROT_WRITE,MAP_SHARED,\"%s\",0) failed (%i-%s)",
                     sz>>10, path, errno, fd_io_strerror( errno ) ));
    ERROR( close );
  }

  /* fallocate the file, to force the kernel to allocate disk space.  This is done because otherwise,
     if the disk runs out of space accessing the mapped memory will throw SIGBUS mid-execution.
     It's better to fail early, if the disk is not large enough to back the whole memory region. */
  if( FD_UNLIKELY( fallocate( fd, FALLOC_FL_KEEP_SIZE, 0, (off_t)sz ) ) ) {
    FD_LOG_WARNING(( "fallocate(\"%s\",FALLOC_FL_KEEP_SIZE,%lu KiB) failed (%i-%s)",
                     path, sz>>10, errno, fd_io_strerror( errno ) ));
    ERROR( unmap );
  }

  /* Validate the mapping */

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)shmem, page_sz ) ) ) {
    FD_LOG_WARNING(( "misaligned memory mapping for unpinned shmem region \"%s\"", name ));
    errno = EFAULT; /* ENOMEM is arguable */
    ERROR( unmap );
  }

# undef ERROR

  /* Cleanup chain: labels fall through; the mapping is always unmapped
     (the region persists in the filesystem); the file is only unlinked
     on error. */

unmap:
  if( FD_UNLIKELY( munmap( shmem, sz ) ) )
    FD_LOG_ERR(( "munmap(\"%s\",%lu KiB) failed (%i-%s)",
                 path, sz>>10, errno, fd_io_strerror( errno ) ));

close:
  if( FD_UNLIKELY( err ) && FD_UNLIKELY( unlink( path ) ) )
    FD_LOG_ERR(( "unlink(\"%s\") failed (%i-%s)", path, errno, fd_io_strerror( errno ) ));
  if( FD_UNLIKELY( close( fd ) ) )
    FD_LOG_ERR(( "close(\"%s\") failed (%i-%s)", path, errno, fd_io_strerror( errno ) ));

done:
  return err;
}
494 :
495 : int
496 : fd_shmem_unlink( char const * name,
497 39 : ulong page_sz ) {
498 39 : char path[ FD_SHMEM_PRIVATE_PATH_BUF_MAX ];
499 :
500 : /* Check input args */
501 :
502 39 : if( FD_UNLIKELY( !fd_shmem_name_len( name ) ) ) { FD_LOG_WARNING(( "bad name (%s)", name ? name : "NULL" )); return EINVAL; }
503 :
504 39 : if( FD_UNLIKELY( !fd_shmem_is_page_sz( page_sz ) ) ) { FD_LOG_WARNING(( "bad page_sz (%lu)", page_sz )); return EINVAL; }
505 :
506 : /* Unlink the name */
507 :
508 39 : if( FD_UNLIKELY( unlink( fd_shmem_private_path( name, page_sz, path ) ) ) ) {
509 3 : FD_LOG_WARNING(( "unlink(\"%s\") failed (%i-%s)", path, errno, fd_io_strerror( errno ) ));
510 3 : return errno;
511 3 : }
512 :
513 36 : return 0;
514 39 : }
515 :
/* fd_shmem_info queries the existence and shape of the named shmem
   region.  A page_sz of 0 probes gigantic, huge then normal backing in
   that order.  On success, returns 0 and, if opt_info is non-NULL,
   fills it with the region's page_sz and page_cnt.  On failure,
   returns a positive errno compatible code (ENOENT if no such region
   under any page size); opt_info is untouched.  A failed open is not
   logged so this can double as a cheap existence check. */

int
fd_shmem_info( char const *      name,
               ulong             page_sz,
               fd_shmem_info_t * opt_info ) {

  if( FD_UNLIKELY( !fd_shmem_name_len( name ) ) ) { FD_LOG_WARNING(( "bad name (%s)", name ? name : "NULL" )); return EINVAL; }

  if( !page_sz ) { /* wildcard: probe all page sizes, largest first */
    if( !fd_shmem_info( name, FD_SHMEM_GIGANTIC_PAGE_SZ, opt_info ) ) return 0;
    if( !fd_shmem_info( name, FD_SHMEM_HUGE_PAGE_SZ,     opt_info ) ) return 0;
    if( !fd_shmem_info( name, FD_SHMEM_NORMAL_PAGE_SZ,   opt_info ) ) return 0;
    return ENOENT;
  }

  if( FD_UNLIKELY( !fd_shmem_is_page_sz( page_sz ) ) ) { FD_LOG_WARNING(( "bad page_sz (%lu)", page_sz )); return EINVAL; }

  char path[ FD_SHMEM_PRIVATE_PATH_BUF_MAX ];
  int  fd = open( fd_shmem_private_path( name, page_sz, path ), O_RDONLY, (mode_t)0 );
  if( FD_UNLIKELY( fd==-1 ) ) return errno; /* no logging here as this might be an existence check */

  struct stat stat[1];
  if( FD_UNLIKELY( fstat( fd, stat ) ) ) {
    FD_LOG_WARNING(( "fstat failed (%i-%s)", errno, fd_io_strerror( errno ) ));
    int err = errno; /* save err so the close below can't clobber it */
    if( FD_UNLIKELY( close( fd ) ) )
      FD_LOG_WARNING(( "close(\"%s\") failed (%i-%s); attempting to continue", path, errno, fd_io_strerror( errno ) ));
    return err;
  }

  /* The file size must be a page_sz multiple; anything else implies a
     corrupted hugetlbfs mount */

  ulong sz = (ulong)stat->st_size;
  if( FD_UNLIKELY( !fd_ulong_is_aligned( sz, page_sz ) ) ) {
    FD_LOG_WARNING(( "\"%s\" size (%lu) not a page size (%lu) multiple\n\t"
                     "This thread group's hugetlbfs mount path (--shmem-path / FD_SHMEM_PATH):\n\t"
                     "\t%s\n\t"
                     "has probably been corrupted and needs to be redone.\n\t"
                     "See 'bin/fd_shmem_cfg help' for more information.",
                     path, sz, page_sz, fd_shmem_private_base ));
    if( FD_UNLIKELY( close( fd ) ) )
      FD_LOG_WARNING(( "close(\"%s\") failed (%i-%s); attempting to continue", path, errno, fd_io_strerror( errno ) ));
    return EFAULT;
  }
  ulong page_cnt = sz / page_sz;

  if( FD_UNLIKELY( close( fd ) ) )
    FD_LOG_WARNING(( "close(\"%s\") failed (%i-%s); attempting to continue", path, errno, fd_io_strerror( errno ) ));

  if( opt_info ) {
    opt_info->page_sz  = page_sz;
    opt_info->page_cnt = page_cnt;
  }
  return 0;
}
568 :
569 : /* RAW PAGE ALLOCATION APIS *******************************************/
570 :
/* fd_shmem_acquire_multi maps sum(_sub_page_cnt) anonymous private
   page_sz pages (MAP_HUGETLB with the matching MAP_HUGE_* size for
   huge / gigantic pages), with the pages of subregion i bound, locked
   and numa-validated on the node backing cpu _sub_cpu_idx[i].  Returns
   the page_sz aligned mapping on success and NULL on failure (logs
   details; any partial mapping is unmapped).  The caller should
   release the region with fd_shmem_release. */

void *
fd_shmem_acquire_multi( ulong         page_sz,
                        ulong         sub_cnt,
                        ulong const * _sub_page_cnt,
                        ulong const * _sub_cpu_idx ) {

  /* Check input args */

  if( FD_UNLIKELY( !fd_shmem_is_page_sz( page_sz ) ) ) { FD_LOG_WARNING(( "bad page_sz (%lu)", page_sz )); return NULL; }

  if( FD_UNLIKELY( !sub_cnt       ) ) { FD_LOG_WARNING(( "zero sub_cnt"      )); return NULL; }
  if( FD_UNLIKELY( !_sub_page_cnt ) ) { FD_LOG_WARNING(( "NULL sub_page_cnt" )); return NULL; }
  if( FD_UNLIKELY( !_sub_cpu_idx  ) ) { FD_LOG_WARNING(( "NULL sub_cpu_idx"  )); return NULL; }

  ulong cpu_cnt = fd_shmem_cpu_cnt();

  /* Accumulate the total page count with overflow detection and
     validate each subregion's target cpu */

  ulong page_cnt = 0UL;
  for( ulong sub_idx=0UL; sub_idx<sub_cnt; sub_idx++ ) {
    ulong sub_page_cnt = _sub_page_cnt[ sub_idx ];
    if( FD_UNLIKELY( !sub_page_cnt ) ) continue; /* Skip over empty subregions */

    page_cnt += sub_page_cnt;
    if( FD_UNLIKELY( page_cnt<sub_page_cnt ) ) {
      FD_LOG_WARNING(( "sub[%lu] sub page_cnt overflow (page_cnt %lu, sub_page_cnt %lu)",
                       sub_idx, page_cnt-sub_page_cnt, sub_page_cnt ));
      return NULL;
    }

    ulong sub_cpu_idx = _sub_cpu_idx[ sub_idx ];
    if( FD_UNLIKELY( sub_cpu_idx>=cpu_cnt ) ) {
      FD_LOG_WARNING(( "sub[%lu] bad cpu_idx (%lu)", sub_idx, sub_cpu_idx ));
      return NULL;
    }
  }

  if( FD_UNLIKELY( !((1UL<=page_cnt) & (page_cnt<=(((ulong)LONG_MAX)/page_sz))) ) ) { /* LONG_MAX from off_t */
    FD_LOG_WARNING(( "bad total page_cnt (%lu)", page_cnt ));
    return NULL;
  }

  /* Select the mmap flags matching the requested page size */

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if( page_sz==FD_SHMEM_HUGE_PAGE_SZ     ) flags |= (int)MAP_HUGETLB | (int)MAP_HUGE_2MB;
  if( page_sz==FD_SHMEM_GIGANTIC_PAGE_SZ ) flags |= (int)MAP_HUGETLB | (int)MAP_HUGE_1GB;

  /* See fd_shmem_create_multi for details on the locking, mempolicy
     and what not tricks */

  FD_SHMEM_LOCK;

  int err;

  /* ERROR latches errno into err and jumps into the cleanup chain */

# define ERROR( cleanup ) do { err = errno; goto cleanup; } while(0)

  int    orig_mempolicy;
  ulong  orig_nodemask[ (FD_SHMEM_NUMA_MAX+63UL)/64UL ];
  void * mem = NULL;

  ulong sz = page_cnt*page_sz;

  /* Save this thread's mempolicy (restored at the restore label) */

  if( FD_UNLIKELY( fd_numa_get_mempolicy( &orig_mempolicy, orig_nodemask, FD_SHMEM_NUMA_MAX, NULL, 0UL ) ) ) {
    FD_LOG_WARNING(( "fd_numa_get_mempolicy failed (%i-%s)", errno, fd_io_strerror( errno ) ));
    ERROR( done );
  }

  mem = mmap( NULL, sz, PROT_READ | PROT_WRITE, flags, -1, (off_t)0);
  if( FD_UNLIKELY( mem==MAP_FAILED ) ) {
    FD_LOG_WARNING(( "mmap(NULL,%lu KiB,PROT_READ|PROT_WRITE,%x,-1,0) failed (%i-%s)",
                     sz>>10, (uint)flags, errno, fd_io_strerror( errno ) ));
    ERROR( restore );
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)mem, page_sz ) ) ) {
    FD_LOG_WARNING(( "mmap(NULL,%lu KiB,PROT_READ|PROT_WRITE,%x,-1,0) misaligned", sz>>10, (uint)flags ));
    errno = EFAULT; /* ENOMEM is arguable */
    ERROR( unmap );
  }

  /* For each subregion: bind / lock / validate, as in
     fd_shmem_create_multi_flags */

  uchar * sub_mem = (uchar *)mem;
  for( ulong sub_idx=0UL; sub_idx<sub_cnt; sub_idx++ ) {
    ulong sub_page_cnt = _sub_page_cnt[ sub_idx ];
    if( FD_UNLIKELY( !sub_page_cnt ) ) continue;

    ulong sub_sz       = sub_page_cnt*page_sz;
    ulong sub_cpu_idx  = _sub_cpu_idx[ sub_idx ];
    ulong sub_numa_idx = fd_shmem_numa_idx( sub_cpu_idx );

    ulong nodemask[ (FD_SHMEM_NUMA_MAX+63UL)/64UL ];

    fd_memset( nodemask, 0, 8UL*((FD_SHMEM_NUMA_MAX+63UL)/64UL) );
    nodemask[ sub_numa_idx >> 6 ] = 1UL << (sub_numa_idx & 63UL);

    if( FD_UNLIKELY( fd_numa_set_mempolicy( MPOL_BIND | MPOL_F_STATIC_NODES, nodemask, FD_SHMEM_NUMA_MAX ) ) ) {
      FD_LOG_WARNING(( "fd_numa_set_mempolicy failed (%i-%s)", errno, fd_io_strerror( errno ) ));
      ERROR( unmap );
    }

    /* mlock to force the pages to be backed now (ENOMEM here instead
       of SIGBUS later) */

    if( FD_UNLIKELY( fd_numa_mlock( sub_mem, sub_sz ) ) ) {
      FD_LOG_WARNING(( "sub[%lu]: fd_numa_mlock(anon,%lu KiB) failed (%i-%s)",
                       sub_idx, sub_sz>>10, errno, fd_io_strerror( errno ) ));
      ERROR( unmap );
    }

    /* FIXME: NUMA TOUCH HERE (ALSO WOULD A LOCAL TOUCH WORK GIVEN THE
       MEMPOLICY DONE ABOVE?) */

    /* Recompute the nodemask in case set_mempolicy clobbered it */

    fd_memset( nodemask, 0, 8UL*((FD_SHMEM_NUMA_MAX+63UL)/64UL) );
    nodemask[ sub_numa_idx >> 6 ] = 1UL << (sub_numa_idx & 63UL);

    if( FD_UNLIKELY( fd_numa_mbind( sub_mem, sub_sz, MPOL_BIND, nodemask, FD_SHMEM_NUMA_MAX, MPOL_MF_MOVE|MPOL_MF_STRICT ) ) ) {
      FD_LOG_WARNING(( "sub[%lu]: fd_numa_mbind(anon,%lu KiB,MPOL_BIND,1UL<<%lu,MPOL_MF_MOVE|MPOL_MF_STRICT) failed (%i-%s)",
                       sub_idx, sub_sz>>10, sub_numa_idx, errno, fd_io_strerror( errno ) ));
      ERROR( unmap );
    }

    int warn = fd_shmem_numa_validate( sub_mem, page_sz, sub_page_cnt, sub_cpu_idx ); /* logs details */
    if( FD_UNLIKELY( warn ) )
      FD_LOG_WARNING(( "sub[%lu]: mmap(NULL,%lu KiB,PROT_READ|PROT_WRITE,%x,-1,0) numa binding failed (%i-%s)",
                       sub_idx, sub_sz>>10, (uint)flags, warn, fd_io_strerror( warn ) ));

    sub_mem += sub_sz;
  }

  err = 0;

# undef ERROR

  /* Cleanup chain: labels fall through; the mapping is only unmapped
     on error (on success it is handed to the caller). */

unmap:
  if( FD_UNLIKELY( err ) && FD_UNLIKELY( munmap( mem, sz ) ) )
    FD_LOG_WARNING(( "munmap(anon,%lu KiB) failed (%i-%s); attempting to continue",
                     sz>>10, errno, fd_io_strerror( errno ) ));

restore:
  if( FD_UNLIKELY( fd_numa_set_mempolicy( orig_mempolicy, orig_nodemask, FD_SHMEM_NUMA_MAX ) ) )
    FD_LOG_WARNING(( "fd_numa_set_mempolicy failed (%i-%s); attempting to continue", errno, fd_io_strerror( errno ) ));

done:
  FD_SHMEM_UNLOCK;
  return err ? NULL : mem;
}
710 :
711 : int
712 : fd_shmem_release( void * mem,
713 : ulong page_sz,
714 138 : ulong page_cnt ) {
715 138 : if( FD_UNLIKELY( !mem ) ) {
716 0 : FD_LOG_WARNING(( "NULL mem" ));
717 0 : return -1;
718 0 : }
719 :
720 138 : if( FD_UNLIKELY( !fd_shmem_is_page_sz( page_sz ) ) ) {
721 0 : FD_LOG_WARNING(( "bad page_sz (%lu)", page_sz ));
722 0 : return -1;
723 0 : }
724 :
725 138 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)mem, page_sz ) ) ) {
726 0 : FD_LOG_WARNING(( "misaligned mem" ));
727 0 : return -1;
728 0 : }
729 :
730 138 : if( FD_UNLIKELY( !((1UL<=page_cnt) & (page_cnt<=(((ulong)LONG_MAX)/page_sz))) ) ) {
731 0 : FD_LOG_WARNING(( "bad page_cnt (%lu)", page_cnt ));
732 0 : return -1;
733 0 : }
734 :
735 138 : ulong sz = page_sz*page_cnt;
736 :
737 138 : int result = munmap( mem, sz );
738 138 : if( FD_UNLIKELY( result ) )
739 0 : FD_LOG_WARNING(( "munmap(anon,%lu KiB) failed (%i-%s); attempting to continue", sz>>10, errno, fd_io_strerror( errno ) ));
740 :
741 138 : return result;
742 138 : }
743 :
744 : /* SHMEM PARSING APIS *************************************************/
745 :
746 : ulong
747 8754 : fd_shmem_name_len( char const * name ) {
748 8754 : if( FD_UNLIKELY( !name ) ) return 0UL; /* NULL name */
749 :
750 8733 : ulong len = 0UL;
751 145089 : while( FD_LIKELY( len<FD_SHMEM_NAME_MAX ) ) {
752 145089 : char c = name[len];
753 145089 : if( FD_UNLIKELY( !c ) ) break;
754 136524 : if( FD_UNLIKELY( !( fd_isalnum( c ) | ((len>0UL) & ((c=='_') | (c=='-') | (c=='.'))) ) ) ) return 0UL; /* Bad character */
755 136356 : len++;
756 136356 : }
757 :
758 8565 : if( FD_UNLIKELY( !len ) ) return 0UL; /* Name too short (empty string) */
759 8565 : if( FD_UNLIKELY( len>=FD_SHMEM_NAME_MAX ) ) return 0UL; /* Name too long */
760 8565 : return len;
761 8565 : }
762 :
763 : /* BOOT/HALT APIs *****************************************************/
764 :
/* fd_shmem_private_boot brings the shmem services online for this
   thread group: initializes the global recursive lock, caches the host
   numa topology for fast lookup and records the hugetlbfs base path
   (from --shmem-path / FD_SHMEM_PATH, default "/mnt/.fd", stripped
   from the command line).  Logs details and FD_LOG_ERRs (aborts) on
   unrecoverable failure. */

void
fd_shmem_private_boot( int *    pargc,
                       char *** pargv ) {
  FD_LOG_INFO(( "fd_shmem: booting" ));

  /* Initialize the pthread mutex (recursive so nested FD_SHMEM_LOCKs
     by the same thread are safe) */

# if FD_HAS_THREADS
  pthread_mutexattr_t lockattr[1];

  if( FD_UNLIKELY( pthread_mutexattr_init( lockattr ) ) )
    FD_LOG_ERR(( "fd_shmem: pthread_mutexattr_init failed" ));

  if( FD_UNLIKELY( pthread_mutexattr_settype( lockattr, PTHREAD_MUTEX_RECURSIVE ) ) )
    FD_LOG_ERR(( "fd_shmem: pthread_mutexattr_settype failed" ));

  if( FD_UNLIKELY( pthread_mutex_init( fd_shmem_private_lock, lockattr ) ) )
    FD_LOG_ERR(( "fd_shmem: pthread_mutex_init failed" ));

  if( FD_UNLIKELY( pthread_mutexattr_destroy( lockattr ) ) )
    FD_LOG_WARNING(( "fd_shmem: pthread_mutexattr_destroy failed; attempting to continue" ));
# endif /* FD_HAS_THREADS */

  /* Cache the numa topology for this thread group's host for
     subsequent fast use by the application. */

  ulong numa_cnt = fd_numa_node_cnt();
  if( FD_UNLIKELY( !((1UL<=numa_cnt) & (numa_cnt<=FD_SHMEM_NUMA_MAX)) ) )
    FD_LOG_ERR(( "fd_shmem: unexpected numa_cnt %lu (expected in [1,%lu])", numa_cnt, FD_SHMEM_NUMA_MAX ));
  fd_shmem_private_numa_cnt = numa_cnt;

  ulong cpu_cnt = fd_numa_cpu_cnt();
  if( FD_UNLIKELY( !((1UL<=cpu_cnt) & (cpu_cnt<=FD_SHMEM_CPU_MAX)) ) )
    FD_LOG_ERR(( "fd_shmem: unexpected cpu_cnt %lu (expected in [1,%lu])", cpu_cnt, FD_SHMEM_CPU_MAX ));
  fd_shmem_private_cpu_cnt = cpu_cnt;

  /* Iterate cpus in descending order so the lowest indexed cpu of each
     numa node ends up as that node's representative in cpu_idx[] */

  for( ulong cpu_rem=cpu_cnt; cpu_rem; cpu_rem-- ) {
    ulong cpu_idx  = cpu_rem-1UL;
    ulong numa_idx = fd_numa_node_idx( cpu_idx );
    if( FD_UNLIKELY( numa_idx>=FD_SHMEM_NUMA_MAX) )
      FD_LOG_ERR(( "fd_shmem: unexpected numa idx (%lu) for cpu idx %lu", numa_idx, cpu_idx ));
    fd_shmem_private_numa_idx[ cpu_idx  ] = (ushort)numa_idx;
    fd_shmem_private_cpu_idx [ numa_idx ] = (ushort)cpu_idx;
  }

  /* Determine the shared memory domain for this thread group */

  char const * shmem_base = fd_env_strip_cmdline_cstr( pargc, pargv, "--shmem-path", "FD_SHMEM_PATH", "/mnt/.fd" );

  /* NOTE(review): the error messages below say --shmem-base but the
     option parsed above is --shmem-path — confirm intended wording */

  ulong len = strlen( shmem_base );
  while( (len>1UL) && (shmem_base[len-1UL]=='/') ) len--; /* lop off any trailing slashes */
  if( FD_UNLIKELY( !len ) ) FD_LOG_ERR(( "Too short --shmem-base" ));
  if( FD_UNLIKELY( len>=FD_SHMEM_PRIVATE_BASE_MAX ) ) FD_LOG_ERR(( "Too long --shmem-base" ));
  fd_memcpy( fd_shmem_private_base, shmem_base, len );
  fd_shmem_private_base[len] = '\0';
  fd_shmem_private_base_len = (ulong)len;

  /* At this point, shared memory is online */

  FD_LOG_INFO(( "fd_shmem: --shmem-path %s", fd_shmem_private_base ));
  FD_LOG_INFO(( "fd_shmem: boot success" ));
}
827 :
828 : void
829 1275 : fd_shmem_private_halt( void ) {
830 1275 : FD_LOG_INFO(( "fd_shmem: halting" ));
831 :
832 : /* At this point, shared memory is offline */
833 :
834 1275 : fd_shmem_private_numa_cnt = 0;
835 1275 : fd_shmem_private_cpu_cnt = 0;
836 1275 : fd_memset( fd_shmem_private_numa_idx, 0, FD_SHMEM_CPU_MAX );
837 :
838 1275 : fd_shmem_private_base[0] = '\0';
839 1275 : fd_shmem_private_base_len = 0UL;
840 :
841 1275 : # if FD_HAS_THREADS
842 1275 : if( FD_UNLIKELY( pthread_mutex_destroy( fd_shmem_private_lock ) ) )
843 0 : FD_LOG_WARNING(( "fd_shmem: pthread_mutex_destroy failed; attempting to continue" ));
844 1275 : # endif /* FD_HAS_THREADS */
845 :
846 1275 : FD_LOG_INFO(( "fd_shmem: halt success" ));
847 1275 : }
848 :
849 : #else /* unhosted */
850 :
/* Unhosted stub: no shared memory support is available, so boot only
   strips the shmem command line option / environment variable so that
   argument parsing behaves identically across platforms. */

void
fd_shmem_private_boot( int *    pargc,
                       char *** pargv ) {
  FD_LOG_INFO(( "fd_shmem: booting" ));

  /* Strip the command line even though ignored to make environment
     parsing identical to downstream regardless of platform. */

  (void)fd_env_strip_cmdline_cstr( pargc, pargv, "--shmem-path", "FD_SHMEM_PATH", "/mnt/.fd" );

  FD_LOG_INFO(( "fd_shmem: --shmem-path (ignored)" ));
  FD_LOG_INFO(( "fd_shmem: boot success" ));
}
864 :
/* Unhosted stub: nothing was set up at boot, so halt only logs. */

void
fd_shmem_private_halt( void ) {
  FD_LOG_INFO(( "fd_shmem: halting" ));
  FD_LOG_INFO(( "fd_shmem: halt success" ));
}
870 :
871 : #endif
|