Line data Source code
1 : #if FD_HAS_HOSTED
2 : #define _GNU_SOURCE
3 : #endif
4 :
5 : #include "fd_shmem_private.h"
6 :
7 : #if FD_HAS_HOSTED
8 :
9 : #include <errno.h>
10 : #include <unistd.h>
11 : #include <fcntl.h>
12 : #include <sys/mman.h>
13 : #include <sys/random.h>
14 :
15 : /* fd_shmem_private_key converts the cstr pointed to by name into a
16 : valid key and stores it at the location pointed to by key (assumed
17 : valid). Returns key on success and NULL on failure (i.e. name does
18 : not point to a valid shmem region name). All bytes of key will be
19 : unambiguously initialized so there is no issue with using things like
20 : memcmp to compare keys, etc. */
21 :
22 : static inline fd_shmem_private_key_t *
23 : fd_shmem_private_key( fd_shmem_private_key_t * key,
24 110796 : char const * name ) {
25 110796 : ulong len = fd_shmem_name_len( name );
26 110796 : if( FD_UNLIKELY( !len ) ) return NULL;
27 110781 : fd_memset( key->cstr, 0, FD_SHMEM_NAME_MAX );
28 110781 : fd_memcpy( key->cstr, name, len );
29 110781 : return key;
30 110796 : }
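
A minimal sketch of how the zero padding makes keys directly comparable (illustrative only; key_matches and the region names are hypothetical, and memcmp is already used this way by the map template below):

    static int
    key_matches( char const * name_a,
                 char const * name_b ) {
      fd_shmem_private_key_t ka, kb;
      /* NULL means the cstr is not a valid shmem region name */
      if( !fd_shmem_private_key( &ka, name_a ) ) return 0;
      if( !fd_shmem_private_key( &kb, name_b ) ) return 0;
      /* Well defined because every byte of a key is initialized */
      return !memcmp( ka.cstr, kb.cstr, FD_SHMEM_NAME_MAX );
    }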
31 :
32 : static fd_shmem_private_key_t const fd_shmem_private_key_null; /* Will be zeros at thread group start */
33 :
34 129052974 : #define FD_SHMEM_PRIVATE_MAP_LG_SLOT_CNT (8)
35 128939097 : #define FD_SHMEM_PRIVATE_MAP_SLOT_CNT (1UL<<FD_SHMEM_PRIVATE_MAP_LG_SLOT_CNT)
36 : FD_STATIC_ASSERT( FD_SHMEM_JOIN_MAX < FD_SHMEM_PRIVATE_MAP_SLOT_CNT, increase_lg_slot_count );
37 :
38 : #define MAP_NAME fd_shmem_private_map
39 112335 : #define MAP_T fd_shmem_join_info_t
40 113877 : #define MAP_LG_SLOT_CNT FD_SHMEM_PRIVATE_MAP_LG_SLOT_CNT
41 113877 : #define MAP_KEY_T fd_shmem_private_key_t
42 1542 : #define MAP_KEY_NULL fd_shmem_private_key_null
43 129051960 : #define MAP_KEY_INVAL(k) (!((k).cstr[0]))
44 110151 : #define MAP_KEY_EQUAL(k0,k1) (!memcmp( (k0).cstr, (k1).cstr, FD_SHMEM_NAME_MAX ))
45 : #define MAP_KEY_EQUAL_IS_SLOW (1)
46 112335 : #define MAP_KEY_HASH(k) ((uint)fd_hash( 0UL, (k).cstr, FD_SHMEM_NAME_MAX ))
47 : #include "../tmpl/fd_map.c"
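
The include above instantiates a small static hash map keyed by region name. Roughly the generated API used throughout this file (signatures inferred from the call sites below; the authoritative definitions come from ../tmpl/fd_map.c):

    fd_shmem_join_info_t * fd_shmem_private_map_query ( fd_shmem_join_info_t * map, fd_shmem_private_key_t key, fd_shmem_join_info_t * def ); /* def if key not found */
    fd_shmem_join_info_t * fd_shmem_private_map_insert( fd_shmem_join_info_t * map, fd_shmem_private_key_t key );                             /* NULL if map is full  */
    void                   fd_shmem_private_map_remove( fd_shmem_join_info_t * map, fd_shmem_join_info_t * entry );
    int                    fd_shmem_private_map_key_inval( fd_shmem_private_key_t key );                                                      /* non-zero if slot empty */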
48 :
49 : /* fd_shmem_private_map_query_by_{join,addr} are some extra
50 : fd_shmem_private_map APIs that allow looking up the join info for a
51 : region by its join handle and/or by a pointer to a byte in the region
52 : in the thread group's local address space. These aren't algorithmically
53 : efficient but aren't expected to be and are plenty fast in normal use
54 : anyway. */
55 :
56 : static inline fd_shmem_join_info_t *
57 : fd_shmem_private_map_query_by_join( fd_shmem_join_info_t * map,
58 : void const * join,
59 111246 : fd_shmem_join_info_t * def ) {
60 11924061 : for( ulong slot_idx=0UL; slot_idx<FD_SHMEM_PRIVATE_MAP_SLOT_CNT; slot_idx++ )
61 11923569 : if( ((!fd_shmem_private_map_key_inval( map[slot_idx].key )) & (map[slot_idx].join==join)) ) return &map[slot_idx];
62 492 : return def;
63 111246 : }
64 :
65 : static inline fd_shmem_join_info_t *
66 : fd_shmem_private_map_query_by_addr( fd_shmem_join_info_t * map,
67 : ulong a0,
68 : ulong a1, /* Assumes a1>=a0 */
69 1102419 : fd_shmem_join_info_t * def ) {
70 117015036 : for( ulong slot_idx=0UL; slot_idx<FD_SHMEM_PRIVATE_MAP_SLOT_CNT; slot_idx++ ) {
71 117014514 : ulong j0 = (ulong)map[slot_idx].shmem;
72 117014514 : ulong j1 = j0 + map[slot_idx].page_sz*map[slot_idx].page_cnt - 1UL;
73 117014514 : if( ((!fd_shmem_private_map_key_inval( map[slot_idx].key )) & (a1>=j0) & (a0<=j1)) ) return &map[slot_idx];
74 117014514 : }
75 522 : return def;
76 1102419 : }
77 :
78 : /*
79 : * fd_shmem_private_grab_region will attempt to map a region at the passed
80 : * address with the passed size. If the return value of `mmap` equals the
81 : * passed address this means the area of memory was unmapped previously and
82 : * we have successfully "grabbed" the region. We can then call `mmap` with
83 : * MAP_FIXED over the region and be certain no corruption occurs. If the
84 : * return value of `mmap` does not equal the passed address, the passed
85 : * region is already at least partially mapped and we cannot grab it.
86 : */
87 : static int
88 : fd_shmem_private_grab_region( ulong addr,
89 1062 : ulong size ) {
90 1062 : void *mmap_ret;
91 1062 : int err;
92 :
93 1062 : mmap_ret = mmap( (void*)addr, size, PROT_READ, MAP_ANON|MAP_PRIVATE, -1, 0 );
94 :
95 1062 : if( mmap_ret == MAP_FAILED )
96 0 : return 0;
97 :
98 : /* Only call munmap in the failure case. On success we want to keep the mapping. */
99 1062 : if( (ulong)mmap_ret != addr ) {
100 0 : err = munmap( mmap_ret, size );
101 0 : if ( err == -1 ) {
102 0 : FD_LOG_ERR(( "failed to unmap temporary mapping, munmap() failed (%i-%s)", errno, fd_io_strerror( errno ) ));
103 0 : }
104 0 : return 0;
105 0 : }
106 :
107 1062 : return 1;
108 1062 : }
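
A sketch of the grab-then-overlay pattern this helper enables (addr, sz and fd here are hypothetical; the real flow is in fd_shmem_join below):

    if( fd_shmem_private_grab_region( addr, sz ) ) {
      /* The placeholder PROT_READ mapping now owns [addr,addr+sz), so an
         overlay with MAP_FIXED cannot clobber an unrelated mapping. */
      void * p = mmap( (void *)addr, sz, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FIXED, fd, (off_t)0 );
      if( p==MAP_FAILED ) { /* handle error; the placeholder mapping still covers the range */ }
    }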
109 :
110 : static ulong
111 : fd_shmem_private_get_random_mappable_addr( ulong size,
112 1062 : ulong page_size ) {
113 1062 : ulong ret_addr = 0;
114 :
115 : /* Failure is unlikely; 1000 attempts should all but guarantee success */
116 1062 : for( ulong i = 0; i < 1000; i++ ) {
117 1062 : long n = getrandom( &ret_addr, sizeof(ret_addr), 0 );
118 1062 : if( FD_UNLIKELY( n!=sizeof(ret_addr) ) ) FD_LOG_ERR(( "could not generate random address, getrandom() failed (%i-%s)", errno, fd_io_strerror( errno ) ));
119 :
120 : /* The type of region determines the alignment we need for the region */
121 1062 : if( page_size == FD_SHMEM_GIGANTIC_PAGE_SZ )
122 1062 : ret_addr &= FD_SHMEM_PRIVATE_MMAP_GIGANTIC_MASK;
123 0 : else if( page_size == FD_SHMEM_HUGE_PAGE_SZ )
124 0 : ret_addr &= FD_SHMEM_PRIVATE_MMAP_HUGE_MASK;
125 0 : else
126 0 : ret_addr &= FD_SHMEM_PRIVATE_MMAP_NORMAL_MASK;
127 :
128 1062 : if( fd_shmem_private_grab_region( ret_addr, size ) ) {
129 1062 : return ret_addr;
130 1062 : }
131 1062 : }
132 :
133 0 : FD_LOG_ERR(( "unable to find random address for memory map after 1000 attempts" ));
134 0 : return (ulong)MAP_FAILED;
135 0 : }
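
For intuition, the masks above clamp the random candidate into a mappable portion of the address space and align it to the relevant page size. A hypothetical construction of such a mask (illustrative only; the real FD_SHMEM_PRIVATE_MMAP_*_MASK values are defined in fd_shmem_private.h) could be:

    /* Hypothetical: keep candidates below 2^46 and gigantic-page (1 GiB) aligned */
    #define EXAMPLE_MMAP_GIGANTIC_MASK ( ((1UL<<46)-1UL) & ~(FD_SHMEM_GIGANTIC_PAGE_SZ-1UL) )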
136 :
137 : static fd_shmem_join_info_t fd_shmem_private_map[ FD_SHMEM_PRIVATE_MAP_SLOT_CNT ]; /* Empty on thread group start */
138 : static ulong fd_shmem_private_map_cnt; /* 0 on thread group start */
139 :
140 : void *
141 : fd_shmem_join( char const * name,
142 : int mode,
143 : fd_shmem_joinleave_func_t join_func,
144 : void * context,
145 110304 : fd_shmem_join_info_t * opt_info ) {
146 :
147 : /* Check input args */
148 :
149 110304 : fd_shmem_private_key_t key;
150 110304 : if( FD_UNLIKELY( !fd_shmem_private_key( &key, name ) ) ) {
151 15 : FD_LOG_WARNING(( "bad name (%s)", name ? name : "NULL" ));
152 15 : return NULL;
153 15 : }
154 :
155 110289 : if( FD_UNLIKELY( !( (mode==FD_SHMEM_JOIN_MODE_READ_ONLY) | (mode==FD_SHMEM_JOIN_MODE_READ_WRITE) ) ) ) {
156 0 : FD_LOG_WARNING(( "unsupported join mode (%i) for %s", mode, name ));
157 0 : return NULL;
158 0 : }
159 :
160 110289 : FD_SHMEM_LOCK;
161 :
162 : /* Query for an existing mapping */
163 :
164 110289 : fd_shmem_join_info_t * join_info = fd_shmem_private_map_query( fd_shmem_private_map, key, NULL );
165 110289 : if( join_info ) {
166 109212 : if( FD_UNLIKELY( join_info->ref_cnt<0L ) ) {
167 0 : FD_LOG_WARNING(( "join/leave circular dependency detected for %s", name ));
168 0 : FD_SHMEM_UNLOCK;
169 0 : return NULL;
170 0 : }
171 109212 : join_info->ref_cnt++;
172 :
173 109212 : if( opt_info ) *opt_info = *join_info;
174 109212 : FD_SHMEM_UNLOCK;
175 109212 : return join_info->join;
176 109212 : }
177 :
178 : /* Not currently mapped. See if we have enough room. */
179 :
180 1077 : if( FD_UNLIKELY( fd_shmem_private_map_cnt>=FD_SHMEM_JOIN_MAX ) ) {
181 0 : FD_SHMEM_UNLOCK;
182 0 : FD_LOG_WARNING(( "too many concurrent joins for %s", name ));
183 0 : return NULL;
184 0 : }
185 :
186 : /* We have enough room for it. Try to map the memory. */
187 :
188 1077 : fd_shmem_info_t shmem_info[1];
189 1077 : if( FD_UNLIKELY( fd_shmem_info( name, 0UL, shmem_info ) ) ) {
190 15 : FD_SHMEM_UNLOCK;
191 15 : FD_LOG_WARNING(( "unable to query region \"%s\"\n\tprobably does not exist or bad permissions", name ));
192 15 : return NULL;
193 15 : }
194 1062 : ulong page_sz = shmem_info->page_sz;
195 1062 : ulong page_cnt = shmem_info->page_cnt;
196 1062 : ulong sz = page_sz*page_cnt;
197 1062 : int rw = (mode==FD_SHMEM_JOIN_MODE_READ_WRITE);
198 :
199 : /* Map the region into our address space. */
200 :
201 1062 : char path[ FD_SHMEM_PRIVATE_PATH_BUF_MAX ];
202 1062 : int fd = open( fd_shmem_private_path( name, page_sz, path ), rw ? O_RDWR : O_RDONLY, (mode_t)0 );
203 1062 : if( FD_UNLIKELY( fd==-1 ) ) {
204 0 : FD_SHMEM_UNLOCK;
205 0 : FD_LOG_WARNING(( "open(\"%s\",%s,0) failed (%i-%s)", path, rw ? "O_RDWR" : "O_RDONLY", errno, fd_io_strerror( errno ) ));
206 0 : return NULL;
207 0 : }
208 :
209 : /* Generate a random address that we are guaranteed to be able to map */
210 1062 : ulong rand_addr = fd_shmem_private_get_random_mappable_addr( sz, page_sz );
211 :
212 : /* Note that MAP_HUGETLB and MAP_HUGE_* are implied by the mount point */
213 1062 : void * shmem = mmap( (void*)rand_addr, sz, rw ? (PROT_READ|PROT_WRITE) : PROT_READ, MAP_SHARED | MAP_FIXED, fd, (off_t)0 );
214 :
215 1062 : int mmap_errno = errno;
216 1062 : if( FD_UNLIKELY( close( fd ) ) )
217 0 : FD_LOG_WARNING(( "close(\"%s\") failed (%i-%s); attempting to continue", path, errno, fd_io_strerror( errno ) ));
218 :
219 : /* Validate the mapping */
220 :
221 1062 : if( FD_UNLIKELY( shmem==MAP_FAILED ) ) {
222 0 : FD_SHMEM_UNLOCK;
223 0 : FD_LOG_WARNING(( "mmap(NULL,%lu KiB,%s,MAP_SHARED,\"%s\",0) failed (%i-%s)",
224 0 : sz>>10, rw ? "PROT_READ|PROT_WRITE" : "PROT_READ", path, mmap_errno, fd_io_strerror( mmap_errno ) ));
225 0 : return NULL;
226 0 : }
227 :
228 1062 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)shmem, page_sz ) ) ) {
229 0 : if( FD_UNLIKELY( munmap( shmem, sz ) ) )
230 0 : FD_LOG_WARNING(( "munmap(\"%s\",%lu KiB) failed (%i-%s); attempting to continue",
231 0 : path, sz>>10, errno, fd_io_strerror( errno ) ));
232 0 : FD_SHMEM_UNLOCK;
233 0 : FD_LOG_WARNING(( "misaligned memory mapping for \"%s\"\n\t"
234 0 : "This thread group's hugetlbfs mount path (--shmem-path / FD_SHMEM_PATH):\n\t"
235 0 : "\t%s\n\t"
236 0 : "has probably been corrupted and needs to be redone.\n\t"
237 0 : "See 'bin/fd_shmem_cfg help' for more information.",
238 0 : path, fd_shmem_private_base ));
239 0 : return NULL;
240 0 : }
241 :
242 : /* Lock this region in DRAM to prevent it from going to swap and (try
243 : to) keep the virtual to physical DRAM mapping fixed for the join
244 : duration. Also advise the kernel not to dump this region to avoid
245 : large shared mappings in concurrent use by multiple processes
246 : destroying the system with core files if a bunch of threads using
247 : this mapping seg fault concurrently. */
248 :
249 1062 : if( FD_UNLIKELY( fd_numa_mlock( shmem, sz ) ) )
250 0 : FD_LOG_WARNING(( "fd_numa_mlock(\"%s\",%lu KiB) failed (%i-%s); attempting to continue",
251 1062 : path, sz>>10, errno, fd_io_strerror( errno ) ));
252 :
253 1062 : if( FD_UNLIKELY( madvise( shmem, sz, MADV_DONTDUMP ) ) )
254 0 : FD_LOG_WARNING(( "madvise(\"%s\",%lu KiB) failed (%i-%s); attempting to continue",
255 1062 : path, sz>>10, errno, fd_io_strerror( errno ) ));
256 :
257 : /* We have mapped the region. Try to complete the join. Note:
258 : map_query above and map_insert could be combined to improve
259 : efficiency further here (and eliminate the paranoid if check in the
260 : process). */
261 :
262 1062 : join_info = fd_shmem_private_map_insert( fd_shmem_private_map, key );
263 1062 : if( FD_UNLIKELY( !join_info ) ) /* should be impossible */
264 0 : FD_LOG_ERR(( "unable to insert region \"%s\" (internal error)", name ));
265 1062 : fd_shmem_private_map_cnt++;
266 :
267 1062 : join_info->ref_cnt = -1L; /* Mark join/leave in progress so we can detect circular join/leave dependencies */
268 1062 : join_info->join = NULL; /* Overridden below */
269 1062 : join_info->shmem = shmem;
270 1062 : join_info->page_sz = page_sz;
271 1062 : join_info->page_cnt = page_cnt;
272 1062 : join_info->mode = mode;
273 : /* join_info->hash handled by insert */
274 : /* join_info->name " */
275 : /* join_info->key " */
276 :
277 1062 : void * join = join_func ? join_func( context, join_info ): shmem; /* Reset by the join func if provided */
278 1062 : if( FD_UNLIKELY( !join ) ) {
279 0 : fd_shmem_private_map_remove( fd_shmem_private_map, join_info );
280 0 : fd_shmem_private_map_cnt--;
281 0 : if( FD_UNLIKELY( munmap( shmem, sz ) ) )
282 0 : FD_LOG_WARNING(( "munmap(\"%s\",%lu KiB) failed (%i-%s); attempting to continue",
283 0 : name, sz>>10, errno, fd_io_strerror( errno ) ));
284 0 : FD_SHMEM_UNLOCK;
285 0 : FD_LOG_WARNING(( "unable to join region \"%s\"", name ));
286 0 : return NULL;
287 0 : }
288 1062 : join_info->ref_cnt = 1UL;
289 1062 : join_info->join = join;
290 :
291 1062 : if( opt_info ) *opt_info = *join_info;
292 1062 : FD_SHMEM_UNLOCK;
293 1062 : return join;
294 1062 : }
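
A minimal usage sketch of the join API above (assumes a shared memory region named "demo_wksp" already exists under the configured hugetlbfs mounts):

    fd_shmem_join_info_t info[1];
    void * shmem = fd_shmem_join( "demo_wksp", FD_SHMEM_JOIN_MODE_READ_WRITE, NULL, NULL, info );
    if( !shmem ) { /* join failed; details were already logged as a warning */ }
    else {
      /* With no join_func, the join handle is the mapping itself:
         shmem==info->join==info->shmem and the region spans
         info->page_sz*info->page_cnt bytes. */
    }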
295 :
296 : int
297 : fd_shmem_leave( void * join,
298 : fd_shmem_joinleave_func_t leave_func,
299 110577 : void * context ) {
300 110577 : if( FD_UNLIKELY( !join ) ) { FD_LOG_WARNING(( "NULL join" )); return 1; }
301 :
302 110577 : FD_SHMEM_LOCK;
303 :
304 110577 : if( FD_UNLIKELY( !fd_shmem_private_map_cnt ) ) {
305 3 : FD_SHMEM_UNLOCK;
306 3 : FD_LOG_WARNING(( "join is not a current join" ));
307 3 : return 1;
308 3 : }
309 110574 : fd_shmem_join_info_t * join_info = fd_shmem_private_map_query_by_join( fd_shmem_private_map, join, NULL );
310 110574 : if( FD_UNLIKELY( !join_info ) ) {
311 0 : FD_SHMEM_UNLOCK;
312 0 : FD_LOG_WARNING(( "join is not a current join" ));
313 0 : return 1;
314 0 : }
315 :
316 110574 : long ref_cnt = join_info->ref_cnt;
317 110574 : if( join_info->ref_cnt>1L ) {
318 109212 : join_info->ref_cnt = ref_cnt-1L;
319 109212 : FD_SHMEM_UNLOCK;
320 109212 : return 0;
321 109212 : }
322 :
323 1362 : if( join_info->ref_cnt==-1L ) {
324 0 : FD_SHMEM_UNLOCK;
325 0 : FD_LOG_WARNING(( "join/leave circular dependency detected for %s", join_info->name ));
326 0 : return 1;
327 0 : }
328 :
329 1362 : if( FD_UNLIKELY( join_info->ref_cnt!=1L ) ) /* Should be impossible */
330 0 : FD_LOG_WARNING(( "unexpected ref count for %s; attempting to continue", join_info->name ));
331 :
332 1362 : char const * name = join_info->name; /* Just in case leave_func clobbers */
333 1362 : void * shmem = join_info->shmem; /* " */
334 1362 : ulong page_sz = join_info->page_sz; /* " */
335 1362 : ulong page_cnt = join_info->page_cnt; /* " */
336 :
337 1362 : if( leave_func ) {
338 1320 : join_info->ref_cnt = -1L; /* Mark join/leave in progress so we can detect circular join/leave dependencies */
339 1320 : leave_func( context, join_info );
340 1320 : }
341 :
342 1362 : int error = 0;
343 1362 : ulong sz = page_sz*page_cnt;
344 1362 : if( FD_UNLIKELY( munmap( shmem, sz ) ) ) {
345 0 : FD_LOG_WARNING(( "munmap(\"%s\",%lu KiB) failed (%i-%s); attempting to continue",
346 0 : name, sz>>10, errno, fd_io_strerror( errno ) ));
347 0 : error = 1;
348 0 : }
349 :
350 1362 : fd_shmem_private_map_remove( fd_shmem_private_map, join_info );
351 1362 : fd_shmem_private_map_cnt--;
352 1362 : FD_SHMEM_UNLOCK;
353 1362 : return error;
354 1362 : }
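
And the matching leave for the sketch above; only the last leave of a region actually unmaps it:

    if( fd_shmem_leave( shmem, NULL, NULL ) ) { /* not a current join or the munmap failed; a warning was logged */ }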
355 :
356 : int
357 : fd_shmem_join_query_by_name( char const * name,
358 0 : fd_shmem_join_info_t * opt_info ) {
359 0 : fd_shmem_private_key_t key;
360 0 : if( FD_UNLIKELY( !fd_shmem_private_key( &key, name ) ) ) return EINVAL;
361 :
362 0 : FD_SHMEM_LOCK;
363 :
364 0 : if( !fd_shmem_private_map_cnt ) { FD_SHMEM_UNLOCK; return ENOENT; }
365 0 : fd_shmem_join_info_t * join_info = fd_shmem_private_map_query( fd_shmem_private_map, key, NULL );
366 0 : if( !join_info ) { FD_SHMEM_UNLOCK; return ENOENT; }
367 0 : if( opt_info ) *opt_info = *join_info;
368 :
369 0 : FD_SHMEM_UNLOCK;
370 0 : return 0;
371 0 : }
372 :
373 : int
374 : fd_shmem_join_query_by_join( void const * join,
375 0 : fd_shmem_join_info_t * opt_info ) {
376 0 : if( FD_UNLIKELY( !join ) ) return EINVAL;
377 :
378 0 : FD_SHMEM_LOCK;
379 :
380 0 : if( !fd_shmem_private_map_cnt ) { FD_SHMEM_UNLOCK; return ENOENT; }
381 0 : fd_shmem_join_info_t * join_info = fd_shmem_private_map_query_by_join( fd_shmem_private_map, join, NULL );
382 0 : if( FD_UNLIKELY( !join_info ) ) { FD_SHMEM_UNLOCK; return ENOENT; }
383 0 : if( opt_info ) *opt_info = *join_info;
384 :
385 0 : FD_SHMEM_UNLOCK;
386 0 : return 0;
387 0 : }
388 :
389 : int
390 : fd_shmem_join_query_by_addr( void const * addr,
391 : ulong sz,
392 1101939 : fd_shmem_join_info_t * opt_info ) {
393 1101939 : if( FD_UNLIKELY( !sz ) ) return ENOENT; /* empty range */
394 1101939 : ulong a0 = (ulong)addr;
395 1101939 : ulong a1 = a0+sz-1UL;
396 1101939 : if( FD_UNLIKELY( a1<a0 ) ) return EINVAL; /* cyclic wrap range */
397 :
398 1101939 : FD_SHMEM_LOCK;
399 :
400 1101939 : if( !fd_shmem_private_map_cnt ) { FD_SHMEM_UNLOCK; return ENOENT; }
401 1101927 : fd_shmem_join_info_t * join_info = fd_shmem_private_map_query_by_addr( fd_shmem_private_map, a0, a1, NULL );
402 1101927 : if( FD_UNLIKELY( !join_info ) ) { FD_SHMEM_UNLOCK; return ENOENT; }
403 1101897 : if( opt_info ) *opt_info = *join_info;
404 :
405 1101897 : FD_SHMEM_UNLOCK;
406 1101897 : return 0;
407 1101927 : }
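
A sketch of resolving an arbitrary pointer back to its region with the address query above (ptr is a hypothetical pointer into a joined region):

    fd_shmem_join_info_t info[1];
    int err = fd_shmem_join_query_by_addr( ptr, 1UL, info ); /* query the single byte at ptr */
    if( !err ) { /* ptr is backed by region info->name, mapped at info->shmem */ }
    else       { /* ENOENT: ptr is not covered by any current join */ }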
408 :
409 : int
410 : fd_shmem_join_anonymous( char const * name,
411 : int mode,
412 : void * join,
413 : void * mem,
414 : ulong page_sz,
415 492 : ulong page_cnt ) {
416 :
417 : /* Check input args */
418 :
419 492 : fd_shmem_private_key_t key;
420 492 : if( FD_UNLIKELY( !fd_shmem_private_key( &key, name ) ) ) {
421 0 : FD_LOG_WARNING(( "bad name (%s)", name ? name : "NULL" ));
422 0 : return EINVAL;
423 0 : }
424 :
425 492 : if( FD_UNLIKELY( !( (mode==FD_SHMEM_JOIN_MODE_READ_ONLY) | (mode==FD_SHMEM_JOIN_MODE_READ_WRITE) ) ) ) {
426 0 : FD_LOG_WARNING(( "unsupported join mode (%i) for %s", mode, name ));
427 0 : return EINVAL;
428 0 : }
429 :
430 492 : if( FD_UNLIKELY( !join ) ) {
431 0 : FD_LOG_WARNING(( "NULL join" ));
432 0 : return EINVAL;
433 0 : }
434 :
435 492 : if( FD_UNLIKELY( !mem ) ) {
436 0 : FD_LOG_WARNING(( "NULL mem" ));
437 0 : return EINVAL;
438 0 : }
439 :
440 492 : if( FD_UNLIKELY( !fd_shmem_is_page_sz( page_sz ) ) ) {
441 0 : FD_LOG_WARNING(( "unsupported page_sz (%lu)", page_sz ));
442 0 : return EINVAL;
443 0 : }
444 :
445 492 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)mem, page_sz ) ) ) {
446 0 : FD_LOG_WARNING(( "misaligned mem" ));
447 0 : return EINVAL;
448 0 : }
449 :
450 492 : if( FD_UNLIKELY( !page_cnt ) ) {
451 0 : FD_LOG_WARNING(( "unsupported page_sz (%lu)", page_sz ));
452 0 : return EINVAL;
453 0 : }
454 :
455 492 : if( FD_UNLIKELY( page_cnt > (ULONG_MAX/page_sz) ) ) {
456 0 : FD_LOG_WARNING(( "too large page cnt (%lu)", page_cnt ));
457 0 : return EINVAL;
458 0 : }
459 :
460 492 : ulong sz = page_cnt*page_sz;
461 492 : ulong a0 = (ulong)mem;
462 492 : ulong a1 = a0 + sz-1UL;
463 492 : if( FD_UNLIKELY( a1<a0 ) ) {
464 0 : FD_LOG_WARNING(( "bad mem range" ));
465 0 : return EINVAL;
466 0 : }
467 :
468 492 : FD_SHMEM_LOCK;
469 :
470 : /* Query for an existing mapping */
471 :
472 492 : fd_shmem_join_info_t * join_info;
473 :
474 492 : join_info = fd_shmem_private_map_query( fd_shmem_private_map, key, NULL );
475 492 : if( FD_UNLIKELY( join_info ) ) {
476 0 : FD_SHMEM_UNLOCK;
477 0 : FD_LOG_WARNING(( "%s already joined", name ));
478 0 : return EINVAL;
479 0 : }
480 :
481 492 : join_info = fd_shmem_private_map_query_by_join( fd_shmem_private_map, join, NULL );
482 492 : if( FD_UNLIKELY( join_info ) ) {
483 0 : FD_SHMEM_UNLOCK;
484 0 : FD_LOG_WARNING(( "%s join handle already in use", name ));
485 0 : return EINVAL;
486 0 : }
487 :
488 492 : join_info = fd_shmem_private_map_query_by_addr( fd_shmem_private_map, a0, a1, NULL );
489 492 : if( FD_UNLIKELY( join_info ) ) {
490 0 : FD_SHMEM_UNLOCK;
491 0 : FD_LOG_WARNING(( "%s join memory already mapped", name ));
492 0 : return EINVAL;
493 0 : }
494 :
495 : /* Not currently mapped. See if we have enough room. */
496 :
497 492 : if( FD_UNLIKELY( fd_shmem_private_map_cnt>=FD_SHMEM_JOIN_MAX ) ) {
498 0 : FD_SHMEM_UNLOCK;
499 0 : FD_LOG_WARNING(( "too many concurrent joins for %s", name ));
500 0 : return EINVAL;
501 0 : }
502 :
503 : /* We have enough room for it. Try to "map" the memory. */
504 :
505 492 : fd_shmem_info_t shmem_info[1];
506 492 : if( FD_UNLIKELY( !fd_shmem_info( name, 0UL, shmem_info ) ) )
507 0 : FD_LOG_WARNING(( "anonymous join to %s will shadow an existing shared memory region in this thread group; "
508 492 : "attempting to continue", name ));
509 :
510 492 : join_info = fd_shmem_private_map_insert( fd_shmem_private_map, key );
511 492 : if( FD_UNLIKELY( !join_info ) ) /* should be impossible */
512 0 : FD_LOG_ERR(( "unable to insert region \"%s\" (internal error)", name ));
513 492 : fd_shmem_private_map_cnt++;
514 :
515 492 : join_info->ref_cnt = 1L;
516 492 : join_info->join = join;
517 492 : join_info->shmem = mem;
518 492 : join_info->page_sz = page_sz;
519 492 : join_info->page_cnt = page_cnt;
520 492 : join_info->mode = mode;
521 : /* join_info->hash handled by insert */
522 : /* join_info->name " */
523 : /* join_info->key " */
524 :
525 492 : FD_SHMEM_UNLOCK;
526 492 : return 0;
527 492 : }
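
A sketch of registering caller-managed memory so the address queries treat it like a named region (mem and page_cnt are hypothetical; mem must be aligned to the page size and span page_cnt pages):

    int err = fd_shmem_join_anonymous( "demo_anon", FD_SHMEM_JOIN_MODE_READ_WRITE,
                                       mem /* join handle */, mem, FD_SHMEM_NORMAL_PAGE_SZ, page_cnt );
    if( err ) { /* EINVAL: bad args, name/handle/range already in use, or too many joins */ }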
528 :
529 : int
530 : fd_shmem_leave_anonymous( void * join,
531 186 : fd_shmem_join_info_t * opt_info ) {
532 :
533 186 : if( FD_UNLIKELY( !join ) ) {
534 3 : FD_LOG_WARNING(( "NULL join" ));
535 3 : return EINVAL;
536 3 : }
537 :
538 183 : FD_SHMEM_LOCK;
539 :
540 183 : if( FD_UNLIKELY( !fd_shmem_private_map_cnt ) ) {
541 3 : FD_SHMEM_UNLOCK;
542 3 : FD_LOG_WARNING(( "join is not a current join" ));
543 3 : return EINVAL;
544 3 : }
545 :
546 180 : fd_shmem_join_info_t * join_info = fd_shmem_private_map_query_by_join( fd_shmem_private_map, join, NULL );
547 180 : if( FD_UNLIKELY( !join_info ) ) {
548 0 : FD_SHMEM_UNLOCK;
549 0 : FD_LOG_WARNING(( "join is not a current join" ));
550 0 : return EINVAL;
551 0 : }
552 :
553 180 : if( FD_UNLIKELY( join_info->ref_cnt!=1L ) ) {
554 0 : FD_SHMEM_UNLOCK;
555 0 : FD_LOG_WARNING(( "join ref_cnt is not 1" ));
556 0 : return EINVAL;
557 0 : }
558 :
559 180 : if( opt_info ) {
560 180 : *opt_info = *join_info;
561 180 : opt_info->ref_cnt = 0L;
562 180 : }
563 :
564 180 : fd_shmem_private_map_remove( fd_shmem_private_map, join_info );
565 180 : fd_shmem_private_map_cnt--;
566 180 : FD_SHMEM_UNLOCK;
567 180 : return 0;
568 180 : }
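
And the matching teardown; unlike fd_shmem_leave, nothing is unmapped and the caller retains ownership of the memory:

    fd_shmem_join_info_t info[1];
    int err = fd_shmem_leave_anonymous( mem /* join handle used above */, info );
    if( !err ) { /* info holds the removed entry with ref_cnt reported as 0 */ }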
569 :
570 : #endif
|