Line data Source code
   1        : #ifndef HEADER_fd_src_util_shmem_fd_shmem_private_h
   2        : #define HEADER_fd_src_util_shmem_fd_shmem_private_h
   3        :
   4        : #include "fd_shmem.h"
   5        :
   6        : #if FD_HAS_THREADS
   7        : #include <pthread.h>
   8        : #endif
   9        :
  10        : /* Want strlen(base)+strlen("/.")+strlen(page)+strlen("/")+strlen(name)+1 <= BUF_MAX
  11        :    -> BASE_MAX-1 +2 +PAGE_MAX-1 +1 +NAME_MAX-1 +1 == BUF_MAX
  12        :    -> BASE_MAX == BUF_MAX - NAME_MAX - PAGE_MAX - 1 */
  13        :
  14   4326 : #define FD_SHMEM_PRIVATE_PATH_BUF_MAX (256UL)
  15        : #define FD_SHMEM_PRIVATE_BASE_MAX (FD_SHMEM_PRIVATE_PATH_BUF_MAX-FD_SHMEM_NAME_MAX-FD_SHMEM_PAGE_SZ_CSTR_MAX-1UL)
  16        :
  17        : #if FD_HAS_THREADS
  18   6330 : #define FD_SHMEM_LOCK   pthread_mutex_lock(   fd_shmem_private_lock )
  19   6330 : #define FD_SHMEM_UNLOCK pthread_mutex_unlock( fd_shmem_private_lock )
  20        : #else
  21        : #define FD_SHMEM_LOCK   ((void)0)
  22        : #define FD_SHMEM_UNLOCK ((void)0)
  23        : #endif
  24        :
  25        : FD_PROTOTYPES_BEGIN
  26        :
  27        : /* NUMA backend ******************************************************/
  28        :
  29        : /* fd_numa_node_cnt / fd_numa_cpu_cnt determine the current number of
  30        :    configured numa nodes / cpus (roughly equivalent to libnuma's
  31        :    numa_num_configured_nodes / numa_num_configured_cpus).  Returns 0 if
  32        :    this could not be determined (logs details on failure).  These
  33        :    functions are only used during shmem initialization as part of
  34        :    topology discovery so should not do any fancy caching under the hood. */
  35        :
  36        : ulong
  37        : fd_numa_node_cnt( void );
  38        :
  39        : ulong
  40        : fd_numa_cpu_cnt( void );
  41        :
  42        : /* fd_numa_node_idx determines the numa node closest to the given
  43        :    cpu_idx (roughly equivalent to libnuma's numa_node_of_cpu).  Returns
  44        :    ULONG_MAX if this could not be determined (logs details on failure).
  45        :    This function is only used during shmem initialization as part of
  46        :    topology discovery so should not do any fancy caching under the hood. */
  47        :
  48        : ulong
  49        : fd_numa_node_idx( ulong cpu_idx );
  50        :
  51        : /* FIXME: probably should clean up the below APIs to get something
  52        :    that allows for cleaner integration with fd_shmem_admin.c (e.g. if we
  53        :    are going to replace libnuma calls with our own, no reason to use the
  54        :    historical clunky APIs). */
  55        :
  56        : /* fd_numa_mlock locks the memory region to reside at a stable position
  57        :    in physical DRAM.  Wraps the `mlock(2)` Linux syscall.  See:
  58        :
  59        :    https://man7.org/linux/man-pages/man2/mlock.2.html */
  60        :
  61        : int
  62        : fd_numa_mlock( void const * addr,
  63        :                ulong        len );
  64        :
  65        : /* fd_numa_munlock unlocks the memory region.  Wraps the `munlock(2)`
  66        :    Linux syscall.  See:
  67        :
  68        :    https://man7.org/linux/man-pages/man2/munlock.2.html */
  69        :
  70        : int
  71        : fd_numa_munlock( void const * addr,
  72        :                  ulong        len );
  73        :
  74        : /* fd_numa_get_mempolicy retrieves the NUMA memory policy of the
  75        :    current thread.  Wraps the `get_mempolicy(2)` Linux syscall.  See:
  76        :
  77        :    https://man7.org/linux/man-pages/man2/get_mempolicy.2.html */
  78        :
  79        : long
  80        : fd_numa_get_mempolicy( int *   mode,
  81        :                        ulong * nodemask,
  82        :                        ulong   maxnode,
  83        :                        void *  addr,
  84        :                        uint    flags );
  85        :
  86        : /* fd_numa_set_mempolicy sets the default NUMA memory policy of the
  87        :    current thread and its children.  Wraps the `set_mempolicy(2)` Linux
  88        :    syscall.  See:
  89        :
  90        :    https://man7.org/linux/man-pages/man2/set_mempolicy.2.html */
  91        :
  92        : long
  93        : fd_numa_set_mempolicy( int           mode,
  94        :                        ulong const * nodemask,
  95        :                        ulong         maxnode );
  96        :
  97        : /* fd_numa_mbind sets the NUMA memory policy for a range of memory.
  98        :    Wraps the `mbind(2)` Linux syscall.  See:
  99        :
 100        :    https://man7.org/linux/man-pages/man2/mbind.2.html */
 101        :
 102        : long
 103        : fd_numa_mbind( void *        addr,
 104        :                ulong         len,
 105        :                int           mode,
 106        :                ulong const * nodemask,
 107        :                ulong         maxnode,
 108        :                uint          flags );
 109        :
 110        : /* fd_numa_move_pages moves pages of a process to another node.  Wraps
 111        :    the `move_pages(2)` Linux syscall.  See:
 112        :
 113        :    https://man7.org/linux/man-pages/man2/move_pages.2.html
 114        :
 115        :    Also useful to detect the true NUMA node ownership of pages of memory
 116        :    after calls to `mlock(2)` and `mbind(2)`. */
 117        :
 118        : long
 119        : fd_numa_move_pages( int         pid,
 120        :                     ulong       count,
 121        :                     void **     pages,
 122        :                     int const * nodes,
 123        :                     int *       status,
 124        :                     int         flags );
 125        :
 126        : /**********************************************************************/
 127        :
 128        : #if FD_HAS_THREADS
 129        : extern pthread_mutex_t fd_shmem_private_lock[1];
 130        : #endif
 131        :
 132        : extern char  fd_shmem_private_base[ FD_SHMEM_PRIVATE_BASE_MAX ]; /* "" at thread group start, initialized at boot */
 133        : extern ulong fd_shmem_private_base_len;                          /* 0UL at thread group start, initialized at boot */
 134        :
 135        : static inline char *                                    /* ==buf always */
 136        : fd_shmem_private_path( char const * name,               /* Valid name */
 137        :                        ulong        page_sz,            /* Valid page size (normal, huge, gigantic) */
 138   4326 :                        char *       buf ) {             /* Non-NULL with FD_SHMEM_PRIVATE_PATH_BUF_MAX bytes */
 139   4326 :   return fd_cstr_printf( buf, FD_SHMEM_PRIVATE_PATH_BUF_MAX, NULL, "%s/.%s/%s",
 140   4326 :                          fd_shmem_private_base, fd_shmem_page_sz_to_cstr( page_sz ), name );
 141   4326 : }
 142        :
 143        : /* fd_shmem_private_map_rand maps private+anonymous pages of the
 144        :    default page size at a random virtual address.  align specifies the
 145        :    minimum alignment of the first byte to map.  size is the minimum
 146        :    number of bytes to map.  Returns a virtual address on success and
 147        :    MAP_FAILED on failure. */
 148        :
 149        : void *
 150        : fd_shmem_private_map_rand( ulong size,
 151        :                            ulong align );
 152        :
 153        : FD_PROTOTYPES_END
 154        :
 155        : #endif /* HEADER_fd_src_util_shmem_fd_shmem_private_h */
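
As a quick illustration of how the two main pieces of this header are meant to be used, here is a minimal sketch. It is not part of the file above: the workspace name "my_wksp" and the helpers example_path / example_page_node are hypothetical, FD_SHMEM_GIGANTIC_PAGE_SZ is assumed to be the gigantic page size constant from fd_shmem.h, and the fd_numa wrappers are assumed to mirror the raw Linux syscall semantics.

#include "fd_shmem_private.h"

/* Compose "<base>/.<page sz cstr>/<name>" into a caller provided buffer.
   Worst case length is (BASE_MAX-1) + 2 + (PAGE_SZ_CSTR_MAX-1) + 1 +
   (NAME_MAX-1) + 1 == PATH_BUF_MAX, matching the derivation at the top
   of the header, so the buffer cannot overflow. */

static char *
example_path( char * buf ) {   /* buf has FD_SHMEM_PRIVATE_PATH_BUF_MAX bytes */
  return fd_shmem_private_path( "my_wksp", FD_SHMEM_GIGANTIC_PAGE_SZ, buf );
  /* e.g. "<base>/.gigantic/my_wksp" */
}

/* Report which NUMA node currently backs the page containing addr.
   Passing nodes==NULL to move_pages(2) moves nothing and instead writes
   each page's current node index into status.  Returns the node index
   on success and -1 if the query itself failed. */

static int
example_page_node( void * addr ) {
  void * pages [1] = { addr };
  int    status[1] = { -1 };
  if( fd_numa_move_pages( 0 /* this process */, 1UL, pages, NULL, status, 0 ) ) return -1;
  return status[0];            /* a negative value here is a per-page -errno */
}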