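/* The hugetlbfs configure stage reserves huge (2MiB) and gigantic (1GiB)
   pages on every NUMA node via sysfs and mounts one hugetlbfs filesystem
   per page size so that the pages required by the Firedancer topology
   are available.  The fini step unmounts and removes those mounts, and
   the check step verifies that they exist with the expected options. */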
#include "configure.h"

#include "../../../platform/fd_file_util.h"
#include "../../../platform/fd_sys_util.h"

#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h> /* strtoul */
#include <dirent.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <linux/capability.h>

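/* init_perm and fini_perm report the privileges this stage needs: root
   (to write the nr_hugepages sysfs controls and manage directories under
   `/mnt`) and CAP_SYS_ADMIN (to mount and unmount hugetlbfs
   filesystems). */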
static void
init_perm( fd_cap_chk_t *   chk,
           config_t const * config FD_PARAM_UNUSED ) {
  fd_cap_chk_root( chk, "hugetlbfs", "increase `/proc/sys/vm/nr_hugepages`" );
  fd_cap_chk_cap( chk, "hugetlbfs", CAP_SYS_ADMIN, "mount hugetlbfs filesystems" );
}

static void
fini_perm( fd_cap_chk_t *   chk,
           config_t const * config FD_PARAM_UNUSED ) {
  fd_cap_chk_root( chk, "hugetlbfs", "remove directories from `/mnt`" );
  fd_cap_chk_cap( chk, "hugetlbfs", CAP_SYS_ADMIN, "unmount hugetlbfs filesystems" );
}

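/* sysfs control files and sizes for the two hugetlbfs page sizes this
   stage manages.  Index 0 is the 2MiB ("huge") page size and index 1 is
   the 1GiB ("gigantic") page size; the %lu in the paths is the NUMA node
   index. */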
static char const * TOTAL_HUGE_PAGE_PATH[ 2 ] = {
  "/sys/devices/system/node/node%lu/hugepages/hugepages-2048kB/nr_hugepages",
  "/sys/devices/system/node/node%lu/hugepages/hugepages-1048576kB/nr_hugepages",
};

static char const * FREE_HUGE_PAGE_PATH[ 2 ] = {
  "/sys/devices/system/node/node%lu/hugepages/hugepages-2048kB/free_hugepages",
  "/sys/devices/system/node/node%lu/hugepages/hugepages-1048576kB/free_hugepages",
};

static ulong PAGE_SIZE[ 2 ] = {
  2097152,
  1073741824,
};

static char const * PAGE_NAMES[ 2 ] = {
  "huge",
  "gigantic"
};

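/* init raises the kernel's per-NUMA-node huge and gigantic page
   reservations until the topology's requirements are met, compacting
   memory and dropping caches if the first attempt comes up short, and
   then mounts one hugetlbfs filesystem per page size owned by the
   target user. */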
static void
init( config_t const * config ) {
  char const * mount_path[ 2 ] = {
    config->hugetlbfs.huge_page_mount_path,
    config->hugetlbfs.gigantic_page_mount_path,
  };

  ulong numa_node_cnt = fd_shmem_numa_cnt();
  for( ulong i=0UL; i<numa_node_cnt; i++ ) {
    ulong required_pages[ 2 ] = {
      fd_topo_huge_page_cnt( &config->topo, i, 0 ),
      fd_topo_gigantic_page_cnt( &config->topo, i ),
    };

    for( ulong j=0UL; j<2UL; j++ ) {
      char free_page_path[ PATH_MAX ];
      FD_TEST( fd_cstr_printf_check( free_page_path, PATH_MAX, NULL, FREE_HUGE_PAGE_PATH[ j ], i ) );
      uint free_pages;
      if( FD_UNLIKELY( -1==fd_file_util_read_uint( free_page_path, &free_pages ) ) )
        FD_LOG_ERR(( "could not read `%s`, please confirm your host is configured for %s pages (%i-%s)", free_page_path, PAGE_NAMES[ j ], errno, fd_io_strerror( errno ) ));

      /* There is a TOCTOU race condition here, but it's not avoidable.
         There's no way to atomically increment the page count. */
      FD_TEST( required_pages[ j ]<=UINT_MAX );
      if( FD_UNLIKELY( free_pages<required_pages[ j ] ) ) {
        char total_page_path[ PATH_MAX ];
        FD_TEST( fd_cstr_printf_check( total_page_path, PATH_MAX, NULL, TOTAL_HUGE_PAGE_PATH[ j ], i ) );
        uint total_pages;
        if( FD_UNLIKELY( -1==fd_file_util_read_uint( total_page_path, &total_pages ) ) )
          FD_LOG_ERR(( "could not read `%s`, please confirm your host is configured for %s pages (%i-%s)", total_page_path, PAGE_NAMES[ j ], errno, fd_io_strerror( errno ) ));

        ulong additional_pages_needed = required_pages[ j ]-free_pages;

        if( FD_UNLIKELY( !config->hugetlbfs.allow_hugepage_increase && additional_pages_needed>0 ) ) {
          FD_LOG_ERR(( "trying to increase the number of %s pages on NUMA node %lu by %lu to %lu, but increasing hugepage reservations is not allowed when hugetlbfs.allow_hugepage_increase is false",
                       PAGE_NAMES[ j ], i, additional_pages_needed, required_pages[ j ] ));
        }

        FD_LOG_NOTICE(( "RUN: `echo \"%u\" > %s`", (uint)(total_pages+additional_pages_needed), total_page_path ));
        if( FD_UNLIKELY( -1==fd_file_util_write_uint( total_page_path, (uint)(total_pages+additional_pages_needed) ) ) )
          FD_LOG_ERR(( "could not increase the number of %s pages on NUMA node %lu (%i-%s)", PAGE_NAMES[ j ], i, errno, fd_io_strerror( errno ) ));

        uint raised_free_pages;
        if( FD_UNLIKELY( -1==fd_file_util_read_uint( free_page_path, &raised_free_pages ) ) )
          FD_LOG_ERR(( "could not read `%s`, please confirm your host is configured for %s pages (%i-%s)", free_page_path, PAGE_NAMES[ j ], errno, fd_io_strerror( errno ) ));

        if( FD_UNLIKELY( raised_free_pages<required_pages[ j ] ) ) {
          /* Usually this is due to memory being fragmented rather than
             there not being enough memory.  See something like
             https://tatref.github.io/blog/2023-visual-linux-memory-compact/
             for the sequence we do here. */
          FD_LOG_WARNING(( "ENOMEM-Out of memory when trying to reserve %s pages for Firedancer on NUMA node %lu. Compacting memory before trying again.",
                           PAGE_NAMES[ j ],
                           i ));
          FD_LOG_NOTICE(( "RUN: `echo \"1\" > /proc/sys/vm/compact_memory`" ));
          if( FD_UNLIKELY( -1==fd_file_util_write_uint( "/proc/sys/vm/compact_memory", 1 ) ) )
            FD_LOG_ERR(( "could not write to `%s` (%i-%s)", "/proc/sys/vm/compact_memory", errno, fd_io_strerror( errno ) ));
          /* Sleep a little to give the OS some time to perform the
             compaction. */
          FD_TEST( -1!=fd_sys_util_nanosleep( 0, 500000000 /* 500 millis */ ) );
          FD_LOG_NOTICE(( "RUN: `echo \"3\" > /proc/sys/vm/drop_caches`" ));
          if( FD_UNLIKELY( -1==fd_file_util_write_uint( "/proc/sys/vm/drop_caches", 3 ) ) )
            FD_LOG_ERR(( "could not write to `%s` (%i-%s)", "/proc/sys/vm/drop_caches", errno, fd_io_strerror( errno ) ));
          FD_TEST( -1!=fd_sys_util_nanosleep( 0, 500000000 /* 500 millis */ ) );
          FD_LOG_NOTICE(( "RUN: `echo \"1\" > /proc/sys/vm/compact_memory`" ));
          if( FD_UNLIKELY( -1==fd_file_util_write_uint( "/proc/sys/vm/compact_memory", 1 ) ) )
            FD_LOG_ERR(( "could not write to `%s` (%i-%s)", "/proc/sys/vm/compact_memory", errno, fd_io_strerror( errno ) ));
          FD_TEST( -1!=fd_sys_util_nanosleep( 0, 500000000 /* 500 millis */ ) );
        }

        FD_LOG_NOTICE(( "RUN: `echo \"%u\" > %s`", (uint)(total_pages+additional_pages_needed), total_page_path ));
        if( FD_UNLIKELY( -1==fd_file_util_write_uint( total_page_path, (uint)(total_pages+additional_pages_needed) ) ) )
          FD_LOG_ERR(( "could not increase the number of %s pages on NUMA node %lu (%i-%s)", PAGE_NAMES[ j ], i, errno, fd_io_strerror( errno ) ));
        if( FD_UNLIKELY( -1==fd_file_util_read_uint( free_page_path, &raised_free_pages ) ) )
          FD_LOG_ERR(( "could not read `%s`, please confirm your host is configured for %s pages (%i-%s)", free_page_path, PAGE_NAMES[ j ], errno, fd_io_strerror( errno ) ));
        if( FD_UNLIKELY( raised_free_pages<required_pages[ j ] ) ) {
          FD_LOG_ERR(( "ENOMEM-Out of memory when trying to reserve %s pages for Firedancer on NUMA node %lu. Your Firedancer "
                       "configuration requires %lu GiB of memory total consisting of %lu gigantic (1GiB) pages and %lu huge (2MiB) "
                       "pages on this NUMA node but only %u %s pages were available according to `%s` (raised from %u). If your "
                       "system has the required amount of memory, this can be because it is not configured with %s page support, or "
                       "Firedancer cannot increase the value of `%s` at runtime. You might need to enable huge pages in grub at boot "
                       "time. This error can also happen because system uptime is high and memory is fragmented. You can fix this by "
                       "rebooting the machine and running the `hugetlbfs` stage immediately on boot.",
                       PAGE_NAMES[ j ],
                       i,
                       required_pages[ 1 ] + (required_pages[ 0 ] / 512),
                       required_pages[ 1 ],
                       required_pages[ 0 ],
                       raised_free_pages,
                       PAGE_NAMES[ j ],
                       free_page_path,
                       free_pages,
                       PAGE_NAMES[ j ],
                       total_page_path ));
        }
      }
    }
  }

  /* Do NOT include anonymous huge pages in the min_size count that we
     reserve here, because they do not come from the hugetlbfs.  Counting
     them towards that reservation would prevent the anonymous mmap which
     maps them in from succeeding.

     The kernel min_size option for the hugetlbfs does not include an
     option to reserve pages from a specific NUMA node, so we simply take
     the sum here and hope they are distributed correctly.  If they are
     not, creating files in the mount on a specific node may fail later
     with ENOMEM. */
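  /* For example (illustrative numbers only): a topology needing two 1GiB
     gigantic pages and 512 2MiB huge pages on each of two NUMA nodes
     would yield min_size = 2*2*1GiB = 4GiB on the gigantic mount and
     2*512*2MiB = 2GiB on the huge mount. */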
  ulong min_size[ 2 ] = {0};
  for( ulong i=0UL; i<numa_node_cnt; i++ ) {
    min_size[ 0 ] += PAGE_SIZE[ 0 ] * fd_topo_huge_page_cnt( &config->topo, i, 0 );
    min_size[ 1 ] += PAGE_SIZE[ 1 ] * fd_topo_gigantic_page_cnt( &config->topo, i );
  }

  for( ulong i=0UL; i<2UL; i++ ) {
    FD_LOG_NOTICE(( "RUN: `mkdir -p %s`", mount_path[ i ] ));
    if( FD_UNLIKELY( -1==fd_file_util_mkdir_all( mount_path[ i ], config->uid, config->gid ) ) ) {
      FD_LOG_ERR(( "could not create hugetlbfs mount directory `%s` (%i-%s)", mount_path[ i ], errno, fd_io_strerror( errno ) ));
    }

    char options[ 256 ];
    FD_TEST( fd_cstr_printf_check( options, sizeof(options), NULL, "pagesize=%lu,min_size=%lu", PAGE_SIZE[ i ], min_size[ i ] ) );
    FD_LOG_NOTICE(( "RUN: `mount -t hugetlbfs none %s -o %s`", mount_path[ i ], options ));
    if( FD_UNLIKELY( mount( "none", mount_path[ i ], "hugetlbfs", 0, options ) ) )
      FD_LOG_ERR(( "mount of hugetlbfs at `%s` failed (%i-%s)", mount_path[ i ], errno, fd_io_strerror( errno ) ));
    if( FD_UNLIKELY( chown( mount_path[ i ], config->uid, config->gid ) ) )
      FD_LOG_ERR(( "chown of hugetlbfs at `%s` failed (%i-%s)", mount_path[ i ], errno, fd_io_strerror( errno ) ));
    if( FD_UNLIKELY( chmod( mount_path[ i ], S_IRUSR | S_IWUSR | S_IXUSR ) ) )
      FD_LOG_ERR(( "chmod of hugetlbfs at `%s` failed (%i-%s)", mount_path[ i ], errno, fd_io_strerror( errno ) ));
  }
}

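/* cmdline reads the command line of process pid from /proc/<pid>/cmdline
   into buf.  Note that the kernel separates arguments with NUL bytes, so
   printing the buffer with %s shows only the executable path. */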
static void
cmdline( char * buf,
         ulong  buf_sz,
         ulong  pid ) {
  buf[ 0 ] = '\0';

  char path[ PATH_MAX ];
  FD_TEST( fd_cstr_printf_check( path, PATH_MAX, NULL, "/proc/%lu/cmdline", pid ) );

  FILE * fp = fopen( path, "r" );
  if( FD_UNLIKELY( !fp ) ) return; /* process may have exited already */

  ulong read = fread( buf, 1UL, buf_sz - 1UL, fp );
  if( FD_UNLIKELY( ferror( fp ) ) ) FD_LOG_ERR(( "error reading `%s` (%i-%s)", path, errno, fd_io_strerror( errno ) ));
  if( FD_UNLIKELY( fclose( fp ) ) ) FD_LOG_ERR(( "error closing `%s` (%i-%s)", path, errno, fd_io_strerror( errno ) ));

  buf[ read ] = '\0';
}

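/* warn_mount_users scans /proc/<pid>/maps for every running process and
   logs a warning for each process that still has a file from mount_path
   mapped.  It is used to explain why an unmount failed with EBUSY. */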
static void
warn_mount_users( char const * mount_path ) {
  DIR * dir = opendir( "/proc" );
  if( FD_UNLIKELY( !dir ) ) FD_LOG_ERR(( "error opening `/proc` (%i-%s)", errno, fd_io_strerror( errno ) ));

  struct dirent * entry;
  while(( FD_LIKELY( entry = readdir( dir ) ) )) {
    if( FD_UNLIKELY( !strcmp( entry->d_name, "." ) || !strcmp( entry->d_name, ".." ) ) ) continue;
    char * endptr;
    ulong pid = strtoul( entry->d_name, &endptr, 10 );
    if( FD_UNLIKELY( *endptr ) ) continue;

    char path[ PATH_MAX ];
    FD_TEST( fd_cstr_printf_check( path, PATH_MAX, NULL, "/proc/%lu/maps", pid ) );
    FILE * fp = fopen( path, "r" );
    if( FD_UNLIKELY( !fp && errno!=ENOENT ) ) FD_LOG_ERR(( "error opening `%s` (%i-%s)", path, errno, fd_io_strerror( errno ) ));
    if( FD_UNLIKELY( !fp ) ) continue; /* process exited between readdir and fopen */

    char proc_cmdline[ PATH_MAX ];
    cmdline( proc_cmdline, PATH_MAX, pid );

    char line[ 4096 ];
    while( FD_LIKELY( fgets( line, 4096, fp ) ) ) {
      if( FD_UNLIKELY( strlen( line )==4095 ) ) FD_LOG_ERR(( "line too long in `%s`", path ));
      if( FD_UNLIKELY( strstr( line, mount_path ) ) ) {
        FD_LOG_WARNING(( "process `%lu`:`%s` has a file descriptor open in `%s`", pid, proc_cmdline, mount_path ));
        break;
      }
    }
    if( FD_UNLIKELY( ferror( fp ) ) )
      FD_LOG_ERR(( "error reading `%s` (%i-%s)", path, errno, fd_io_strerror( errno ) ));
    if( FD_UNLIKELY( fclose( fp ) ) )
      FD_LOG_ERR(( "error closing `%s` (%i-%s)", path, errno, fd_io_strerror( errno ) ));
  }

  if( FD_UNLIKELY( -1==closedir( dir ) ) ) FD_LOG_ERR(( "closedir (%i-%s)", errno, fd_io_strerror( errno ) ));
}

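/* fini unmounts the huge, gigantic, and (if present) normal page mounts
   under the configured hugetlbfs mount path and removes the mount
   directories. */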
static void
fini( config_t const * config,
      int              pre_init ) {
  (void)pre_init;

  /* The .normal mount is not used by fdctl but might be created by other
     debugging tools on the system. */

  char normal_page_mount_path[ PATH_MAX ];
  FD_TEST( fd_cstr_printf_check( normal_page_mount_path, PATH_MAX, NULL, "%s/.normal", config->hugetlbfs.mount_path ) );

  char const * mount_path[ 3 ] = {
    config->hugetlbfs.huge_page_mount_path,
    config->hugetlbfs.gigantic_page_mount_path,
    normal_page_mount_path,
  };

  for( ulong i=0UL; i<3UL; i++ ) {
    FILE * fp = fopen( "/proc/self/mounts", "r" );
    if( FD_UNLIKELY( !fp ) ) FD_LOG_ERR(( "failed to open `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));

    char line[ 4096 ];
    while( FD_LIKELY( fgets( line, 4096UL, fp ) ) ) {
      if( FD_UNLIKELY( strlen( line )==4095UL ) ) FD_LOG_ERR(( "line too long in `/proc/self/mounts`" ));
      if( FD_UNLIKELY( strstr( line, mount_path[ i ] ) ) ) {
        FD_LOG_NOTICE(( "RUN: `umount %s`", mount_path[ i ] ));
        if( FD_UNLIKELY( umount( mount_path[ i ] ) ) ) {
          if( FD_LIKELY( errno==EBUSY ) ) {
            warn_mount_users( mount_path[ i ] );

            FD_LOG_ERR(( "Unmount of hugetlbfs at `%s` failed because the mount is still in use. "
                         "You can unmount it by killing all processes that are actively using files in "
                         "the mount and running `fdctl configure fini hugetlbfs` again, or unmount it "
                         "manually with `umount %s`", mount_path[ i ], mount_path[ i ] ));
          } else {
            FD_LOG_ERR(( "umount of hugetlbfs at `%s` failed (%i-%s)", mount_path[ i ], errno, fd_io_strerror( errno ) ));
          }
        }
      }
    }

    if( FD_UNLIKELY( ferror( fp ) ) )
      FD_LOG_ERR(( "error reading `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
    if( FD_UNLIKELY( fclose( fp ) ) )
      FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));

    FD_LOG_NOTICE(( "RUN: `rmdir %s`", mount_path[ i ] ));
    if( FD_UNLIKELY( rmdir( mount_path[ i ] ) && errno!=ENOENT ) )
      FD_LOG_ERR(( "error removing hugetlbfs mount at `%s` (%i-%s)", mount_path[ i ], errno, fd_io_strerror( errno ) ));
  }

  FD_LOG_NOTICE(( "RUN: `rmdir %s`", config->hugetlbfs.mount_path ));
  if( FD_UNLIKELY( rmdir( config->hugetlbfs.mount_path ) && errno!=ENOENT ) )
    FD_LOG_ERR(( "error removing hugetlbfs directory at `%s` (%i-%s)", config->hugetlbfs.mount_path, errno, fd_io_strerror( errno ) ));
}

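/* check verifies that both hugetlbfs mounts exist, are owned by the
   target user with 0700 permissions, and are mounted read/write with the
   expected page size and a min_size at least as large as the topology
   requires, by parsing the corresponding /proc/self/mounts entries. */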
static configure_result_t
check( config_t const * config ) {
  char const * mount_path[ 2 ] = {
    config->hugetlbfs.huge_page_mount_path,
    config->hugetlbfs.gigantic_page_mount_path,
  };

  static char const * MOUNT_PAGE_SIZE[ 2 ] = {
    "pagesize=2M",
    "pagesize=1024M",
  };
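  /* A matching /proc/self/mounts entry is expected to look roughly like
     (illustrative only, field values depend on the host):

       none /mnt/.fd/.gigantic hugetlbfs rw,relatime,pagesize=1024M,min_size=17179869184 0 0

     where seclabel and gid= options may or may not be present.  The
     parsing loop below tokenizes these fields in order. */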
  ulong numa_node_cnt = fd_shmem_numa_cnt();
  ulong required_min_size[ 2 ] = {0};
  for( ulong i=0UL; i<numa_node_cnt; i++ ) {
    required_min_size[ 0 ] += PAGE_SIZE[ 0 ] * fd_topo_huge_page_cnt( &config->topo, i, 0 );
    required_min_size[ 1 ] += PAGE_SIZE[ 1 ] * fd_topo_gigantic_page_cnt( &config->topo, i );
  }

  struct stat st;
  int result1 = stat( mount_path[ 0 ], &st );
  if( FD_UNLIKELY( result1 && errno!=ENOENT ) )
    PARTIALLY_CONFIGURED( "failed to stat `%s` (%i-%s)", mount_path[ 0 ], errno, fd_io_strerror( errno ) );
  int result2 = stat( mount_path[ 1 ], &st );
  if( FD_UNLIKELY( result2 && errno!=ENOENT ) )
    PARTIALLY_CONFIGURED( "failed to stat `%s` (%i-%s)", mount_path[ 1 ], errno, fd_io_strerror( errno ) );

  if( FD_UNLIKELY( result1 && result2 ) )
    NOT_CONFIGURED( "mounts `%s` and `%s` do not exist", mount_path[ 0 ], mount_path[ 1 ] );
  else if( FD_UNLIKELY( result1 ) )
    PARTIALLY_CONFIGURED( "mount `%s` does not exist", mount_path[ 0 ] );
  else if( FD_UNLIKELY( result2 ) )
    PARTIALLY_CONFIGURED( "mount `%s` does not exist", mount_path[ 1 ] );

  CHECK( check_dir( config->hugetlbfs.mount_path, config->uid, config->gid, S_IFDIR | S_IRUSR | S_IWUSR | S_IXUSR ) );
  for( ulong i=0UL; i<2UL; i++ ) {
    CHECK( check_dir( mount_path[ i ], config->uid, config->gid, S_IFDIR | S_IRUSR | S_IWUSR | S_IXUSR ) );

    FILE * fp = fopen( "/proc/self/mounts", "r" );
    if( FD_UNLIKELY( !fp ) ) FD_LOG_ERR(( "failed to open `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));

    char line[ 4096 ];
    int found = 0;
    while( FD_LIKELY( fgets( line, 4096UL, fp ) ) ) {
      if( FD_UNLIKELY( strlen( line )==4095UL ) ) FD_LOG_ERR(( "line too long in `/proc/self/mounts`" ));
      if( FD_UNLIKELY( strstr( line, mount_path[ i ] ) ) ) {
        found = 1;

        char * saveptr;
        char * device = strtok_r( line, " ", &saveptr );
        if( FD_UNLIKELY( !device ) ) FD_LOG_ERR(( "error parsing `/proc/self/mounts`, line `%s`", line ));
        if( FD_UNLIKELY( strcmp( device, "none" ) ) ) {
          if( FD_UNLIKELY( fclose( fp ) ) )
            FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
          PARTIALLY_CONFIGURED( "mount `%s` is on unrecognized device, expected `none`", mount_path[ i ] );
        }

        char * path1 = strtok_r( NULL, " ", &saveptr );
        if( FD_UNLIKELY( !path1 ) ) FD_LOG_ERR(( "error parsing `/proc/self/mounts`, line `%s`", line ));
        if( FD_UNLIKELY( strcmp( path1, mount_path[ i ] ) ) ) {
          if( FD_UNLIKELY( fclose( fp ) ) )
            FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
          PARTIALLY_CONFIGURED( "mount `%s` is on unrecognized path, expected `%s`", path1, mount_path[ i ] );
        }

        char * type = strtok_r( NULL, " ", &saveptr );
        if( FD_UNLIKELY( !type ) ) FD_LOG_ERR(( "error parsing `/proc/self/mounts`, line `%s`", line ));
        if( FD_UNLIKELY( strcmp( type, "hugetlbfs" ) ) ) {
          if( FD_UNLIKELY( fclose( fp ) ) )
            FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
          PARTIALLY_CONFIGURED( "mount `%s` has unrecognized type, expected `hugetlbfs`", mount_path[ i ] );
        }

        char * options = strtok_r( NULL, " ", &saveptr );
        if( FD_UNLIKELY( !options ) ) FD_LOG_ERR(( "error parsing `/proc/self/mounts`, line `%s`", line ));

        char * saveptr2;
        char * rw = strtok_r( options, ",", &saveptr2 );
        if( FD_UNLIKELY( !rw ) ) FD_LOG_ERR(( "error parsing `/proc/self/mounts`, line `%s`", line ));
        if( FD_UNLIKELY( strcmp( rw, "rw" ) ) ) {
          if( FD_UNLIKELY( fclose( fp ) ) )
            FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
          PARTIALLY_CONFIGURED( "mount `%s` is not mounted read/write, expected `rw`", mount_path[ i ] );
        }

        char * seclabel = strtok_r( NULL, ",", &saveptr2 );
        if( FD_UNLIKELY( !seclabel ) ) FD_LOG_ERR(( "error parsing `/proc/self/mounts`, line `%s`", line ));

        char * relatime;
        if( FD_LIKELY( !strcmp( seclabel, "seclabel" ) ) ) {
          relatime = strtok_r( NULL, ",", &saveptr2 );
          if( FD_UNLIKELY( !relatime ) ) FD_LOG_ERR(( "error parsing `/proc/self/mounts`, line `%s`", line ));
        } else {
          relatime = seclabel;
        }

        if( FD_UNLIKELY( strcmp( relatime, "relatime" ) ) ) {
          if( FD_UNLIKELY( fclose( fp ) ) )
            FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
          PARTIALLY_CONFIGURED( "mount `%s` is not mounted with `relatime`, expected `relatime`", mount_path[ i ] );
        }

        char * gid = strtok_r( NULL, ",", &saveptr2 );
        if( FD_UNLIKELY( !gid ) ) FD_LOG_ERR(( "error parsing `/proc/self/mounts`, line `%s`", line ));

        char * pagesize;
        if( FD_UNLIKELY( !strncmp( "gid=", gid, 4UL ) ) ) {
          pagesize = strtok_r( NULL, ",", &saveptr2 );
          if( FD_UNLIKELY( !pagesize ) ) FD_LOG_ERR(( "error parsing `/proc/self/mounts`, line `%s`", line ));
        } else {
          pagesize = gid;
        }

        if( FD_UNLIKELY( strcmp( pagesize, MOUNT_PAGE_SIZE[ i ] ) ) ) {
          if( FD_UNLIKELY( fclose( fp ) ) )
            FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
          PARTIALLY_CONFIGURED( "mount `%s` has unrecognized pagesize `%s`, expected `%s`", mount_path[ i ], pagesize, MOUNT_PAGE_SIZE[ i ] );
        }

        char * _min_size = strtok_r( NULL, ",", &saveptr2 );
        if( FD_UNLIKELY( !_min_size ) ) FD_LOG_ERR(( "error parsing `/proc/self/mounts`, line `%s`", line ));
        if( FD_UNLIKELY( strncmp( "min_size=", _min_size, 9UL ) ) ) {
          if( FD_UNLIKELY( fclose( fp ) ) )
            FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
          PARTIALLY_CONFIGURED( "mount `%s` has unrecognized min_size, expected at least `min_size=%lu`", mount_path[ i ], required_min_size[ i ] );
        }

        char * endptr;
        ulong min_size = strtoul( _min_size+9, &endptr, 10 );
        if( FD_UNLIKELY( *endptr ) ) {
          if( FD_UNLIKELY( fclose( fp ) ) )
            FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
          PARTIALLY_CONFIGURED( "mount `%s` has malformed min_size, expected `min_size=%lu`", mount_path[ i ], required_min_size[ i ] );
        }

        if( FD_UNLIKELY( min_size<required_min_size[ i ] ) ) {
          if( FD_UNLIKELY( fclose( fp ) ) )
            FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
          PARTIALLY_CONFIGURED( "mount `%s` has min_size `%lu`, expected at least `min_size=%lu`", mount_path[ i ], min_size, required_min_size[ i ] );
        }

        break;
      }
    }

    if( FD_UNLIKELY( ferror( fp ) ) )
      FD_LOG_ERR(( "error reading `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));
    if( FD_UNLIKELY( fclose( fp ) ) )
      FD_LOG_ERR(( "error closing `/proc/self/mounts` (%i-%s)", errno, fd_io_strerror( errno ) ));

    if( FD_UNLIKELY( !found ) )
      PARTIALLY_CONFIGURED( "mount `%s` not found in `/proc/self/mounts`", mount_path[ i ] );
  }

  CONFIGURE_OK();
}

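/* Stage descriptor wiring this file's callbacks into the `hugetlbfs`
   configure stage. */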
configure_stage_t fd_cfg_stage_hugetlbfs = {
  .name            = "hugetlbfs",
  .always_recreate = 0,
  .enabled         = NULL,
  .init_perm       = init_perm,
  .fini_perm       = fini_perm,
  .init            = init,
  .fini            = fini,
  .check           = check,
};