Line data Source code
1 : #ifndef HEADER_fd_src_disco_topo_fd_topo_h
2 : #define HEADER_fd_src_disco_topo_fd_topo_h
3 :
4 : #include "../stem/fd_stem.h"
5 : #include "../../tango/fd_tango.h"
6 : #include "../../waltz/xdp/fd_xdp1.h"
7 : #include "../../ballet/base58/fd_base58.h"
8 : #include "../../util/net/fd_net_headers.h"
9 :
/* Maximum number of workspaces that may be present in a topology. */
#define FD_TOPO_MAX_WKSPS (256UL)
/* Maximum number of links that may be present in a topology. */
#define FD_TOPO_MAX_LINKS (256UL)
/* Maximum number of tiles that may be present in a topology. */
#define FD_TOPO_MAX_TILES (256UL)
/* Maximum number of objects that may be present in a topology. */
#define FD_TOPO_MAX_OBJS (4096UL)
/* Maximum number of links that may go into any one tile in the
   topology. */
#define FD_TOPO_MAX_TILE_IN_LINKS ( 128UL)
/* Maximum number of links that a tile may write to. */
#define FD_TOPO_MAX_TILE_OUT_LINKS ( 32UL)
/* Maximum number of objects that a tile can use. */
#define FD_TOPO_MAX_TILE_OBJS ( 256UL)

/* Maximum number of additional ip addresses */
#define FD_NET_MAX_SRC_ADDR 4

/* Maximum number of additional destinations for leader shreds and for retransmitted shreds */
#define FD_TOPO_ADTL_DESTS_MAX ( 32UL)

/* Core dump inclusion levels, compared against the application
   configuration to decide which workspaces appear in core dumps (see
   fd_topo_wksp_t core_dump_level below).  Values are ordered from
   least (DISABLED) to most restrictive-to-dump (NEVER). */
#define FD_TOPO_CORE_DUMP_LEVEL_DISABLED (0)
#define FD_TOPO_CORE_DUMP_LEVEL_MINIMAL  (1)
#define FD_TOPO_CORE_DUMP_LEVEL_REGULAR  (2)
#define FD_TOPO_CORE_DUMP_LEVEL_FULL     (3)
#define FD_TOPO_CORE_DUMP_LEVEL_NEVER    (4)
37 :
/* A workspace is a Firedancer specific memory management structure that
   sits on top of 1 or more memory mapped gigantic or huge pages mounted
   to the hugetlbfs. */
typedef struct {
  ulong id;           /* The ID of this workspace. Indexed from [0, wksp_cnt). When placed in a topology, the ID must be the index of the workspace in the workspaces list. */
  char  name[ 14UL ]; /* The name of this workspace, like "pack". There can be at most one of each workspace name in a topology. */

  ulong numa_idx;     /* The index of the NUMA node on the system that this workspace should be allocated from. */

  ulong min_part_max; /* Artificially raise part_max */
  ulong min_loose_sz; /* Artificially raise loose footprint */

  /* Computed fields. These are not supplied as configuration but calculated as needed. */
  struct {
    ulong page_sz;  /* The size of the pages that this workspace is backed by. One of FD_PAGE_SIZE_*. */
    ulong page_cnt; /* The number of pages that must be mapped to this workspace to store all the data needed by consumers. */
    ulong part_max; /* The maximum number of partitions in the underlying workspace. There can only be this many allocations made at any one time. */

    int core_dump_level; /* The core dump level required to be set in the application configuration to have this workspace appear in core dumps. */

    fd_wksp_t * wksp;            /* The workspace memory in the local process. */
    ulong       known_footprint; /* Total size in bytes of all data in Firedancer that will be stored in this workspace at startup. */
    ulong       total_footprint; /* Total size in bytes of all data in Firedancer that could be stored in this workspace, includes known data and loose data. */
  };
} fd_topo_wksp_t;
63 :
/* A link is an mcache in a workspace that has one producer and one or
   more consumers. A link may optionally also have a dcache, that holds
   fragments referred to by the mcache entries.

   A link belongs to exactly one workspace. A link has exactly one
   producer, and 1 or more consumers. Each consumer is either reliable
   or not reliable. A link has a depth and a MTU, which correspond to
   the depth and MTU of the mcache and dcache respectively. A MTU of
   zero means no dcache is needed, as there is no data. */
typedef struct {
  ulong id;           /* The ID of this link. Indexed from [0, link_cnt). When placed in a topology, the ID must be the index of the link in the links list. */
  char  name[ 14UL ]; /* The name of this link, like "pack_execle". There can be multiple of each link name in a topology. */
  ulong kind_id;      /* The ID of this link within its name. If there are N links of a particular name, they have IDs [0, N). The pair (name, kind_id) uniquely identifies a link, as does "id" on its own. */

  ulong depth;        /* The depth of the mcache representing the link. */
  ulong mtu;          /* The MTU of data fragments in the mcache. A value of 0 means there is no dcache. */
  ulong burst;        /* The max amount of MTU sized data fragments that might be bursted to the dcache. */

  ulong mcache_obj_id; /* Topology object id of the mcache backing this link. */
  ulong dcache_obj_id; /* Topology object id of the dcache backing this link, if any. */

  /* Computed fields. These are not supplied as configuration but calculated as needed. */
  struct {
    fd_frag_meta_t * mcache; /* The mcache of this link. */
    void *           dcache; /* The dcache of this link, if it has one. */
  };

  uint permit_no_consumers : 1; /* Permit a topology where this link has no consumers */
  uint permit_no_producers : 1; /* Permit a topology where this link has no producers */
} fd_topo_link_t;
94 :
/* An (address, port) pair.  Be careful: ip and port are in different
   byte order. */
typedef struct {
  uint   ip;   /* in network byte order */
  ushort port; /* in host byte order */
} fd_topo_ip_port_t;
100 :
/* Configuration shared by the network-driver tile variants (see the
   net/xdp/sock members of the fd_topo_tile config union below): the
   local bind address and the set of UDP ports the validator listens
   on, one per protocol. */
struct fd_topo_net_tile {
  ulong umem_dcache_obj_id; /* dcache for XDP UMEM frames */
  uint  bind_address;

  ushort shred_listen_port;
  ushort quic_transaction_listen_port;
  ushort legacy_transaction_listen_port;
  ushort gossip_listen_port;
  ushort repair_intake_listen_port;
  ushort repair_serve_listen_port;
  ushort txsend_src_port;
};
typedef struct fd_topo_net_tile fd_topo_net_tile_t;
114 :
/* A tile is a unique process that is spawned by Firedancer to represent
   one thread of execution. Firedancer sandboxes all tiles to their own
   process for security reasons.

   A tile belongs to exactly one workspace. A tile is a consumer of 0
   or more links, its inputs. A tile is a producer of 0 or more output
   links.

   All input links will be automatically polled by the tile
   infrastructure, and output links will automatically source and manage
   credits from consumers. */
struct fd_topo_tile {
  ulong id;             /* The ID of this tile. Indexed from [0, tile_cnt). When placed in a topology, the ID must be the index of the tile in the tiles list. */
  char  name[ 7UL ];    /* The name of this tile. There can be multiple of each tile name in a topology. */
  ulong kind_id;        /* The ID of this tile within its name. If there are N tiles of a particular name, they have IDs [0, N). The pair (name, kind_id) uniquely identifies a tile, as does "id" on its own. */
  int   is_agave;       /* If the tile needs to run in the Agave (Anza) address space or not. */
  int   allow_shutdown; /* If the tile is allowed to shutdown gracefully. If false, when the tile exits it will tear down the entire application. */

  ulong cpu_idx;        /* The CPU index to pin the tile on. A value of ULONG_MAX or more indicates the tile should be floating and not pinned to a core. */

  ulong in_cnt;                                        /* The number of links that this tile reads from. */
  ulong in_link_id[ FD_TOPO_MAX_TILE_IN_LINKS ];       /* The link_id of each link that this tile reads from, indexed in [0, in_cnt). */
  int   in_link_reliable[ FD_TOPO_MAX_TILE_IN_LINKS ]; /* If each link that this tile reads from is a reliable or unreliable consumer, indexed in [0, in_cnt). */
  int   in_link_poll[ FD_TOPO_MAX_TILE_IN_LINKS ];     /* If each link that this tile reads from should be polled by the tile infrastructure, indexed in [0, in_cnt).
                                                          If the link is not polled, the tile will not receive frags for it and the tile writer is responsible for
                                                          reading from the link. The link must be marked as unreliable as it is not flow controlled. */

  ulong out_cnt;                                   /* The number of links that this tile writes to. */
  ulong out_link_id[ FD_TOPO_MAX_TILE_OUT_LINKS ]; /* The link_id of each link that this tile writes to, indexed in [0, out_cnt). */

  ulong tile_obj_id;         /* Topology object id backing this tile.  NOTE(review): exact object contents not visible here. */
  ulong metrics_obj_id;      /* Topology object id of the shared memory behind the computed metrics pointer below. */
  ulong id_keyswitch_obj_id; /* keyswitch object id for identity key updates */
  ulong av_keyswitch_obj_id; /* keyswitch object id for authority key updates */
  ulong in_link_fseq_obj_id[ FD_TOPO_MAX_TILE_IN_LINKS ]; /* Topology object id of the fseq for each in link, indexed in [0, in_cnt). */

  ulong uses_obj_cnt;                           /* The number of topology objects this tile uses. */
  ulong uses_obj_id[ FD_TOPO_MAX_TILE_OBJS ];   /* The object id of each object this tile uses, indexed in [0, uses_obj_cnt). */
  int   uses_obj_mode[ FD_TOPO_MAX_TILE_OBJS ]; /* Access mode for each used object, indexed in [0, uses_obj_cnt).  Presumably a read-only vs. read-write mapping flag -- confirm against the mapping code. */

  /* Computed fields. These are not supplied as configuration but calculated as needed. */
  struct {
    ulong * metrics; /* The shared memory for metrics that this tile should write. Consumed by monitoring and metrics writing tiles. */

    /* The fseq of each link that this tile reads from. Multiple fseqs
       may point to the link, if there are multiple consumers. An fseq
       can be uniquely identified via (link_id, tile_id), or (link_kind,
       link_kind_id, tile_kind, tile_kind_id) */
    ulong * in_link_fseq[ FD_TOPO_MAX_TILE_IN_LINKS ];
  };

  /* Configuration fields. These are required to be known by the topology so it can determine the
     total size of Firedancer in memory.  Exactly one union member is
     meaningful for a given tile; presumably selected by the tile's
     name -- confirm against the tile initialization code. */
  union {
    fd_topo_net_tile_t net;

    struct {
      fd_topo_net_tile_t net;

      char if_virt[ 16 ]; /* device name (virtual, for routing) */
      char if_phys[ 16 ]; /* device name (physical, for RX/TX) */
      uint if_queue;      /* device queue index */

      /* xdp specific options */
      ulong xdp_rx_queue_size;
      ulong xdp_tx_queue_size;
      ulong free_ring_depth;
      long  tx_flush_timeout_ns;
      char  xdp_mode[8];
      int   zero_copy;

      ulong netdev_dbl_buf_obj_id; /* dbl_buf containing netdev_tbl */
      ulong fib4_main_obj_id;      /* fib4 containing main route table */
      ulong fib4_local_obj_id;     /* fib4 containing local route table */
      ulong neigh4_obj_id;         /* neigh4 hash map */

      int xsk_core_dump;
    } xdp;

    struct {
      fd_topo_net_tile_t net;
      /* sock specific options */
      int so_sndbuf;
      int so_rcvbuf;
    } sock;

    struct {
      ulong netdev_dbl_buf_obj_id; /* dbl_buf containing netdev_tbl */
      ulong fib4_main_obj_id;      /* fib4 containing main route table */
      ulong fib4_local_obj_id;     /* fib4 containing local route table */
      char  neigh_if[ 16 ];        /* neigh4 interface name */
      ulong neigh4_obj_id;         /* neigh4 hash map */
    } netlink;

/* Maximum number of gossip entrypoints a tile config can carry. */
#define FD_TOPO_GOSSIP_ENTRYPOINTS_MAX 16UL

    struct {
      char identity_key_path[ PATH_MAX ];

      ulong         entrypoints_cnt;
      fd_ip4_port_t entrypoints[ FD_TOPO_GOSSIP_ENTRYPOINTS_MAX ];

      long boot_timestamp_nanos;

      ulong tcache_depth;

      ushort shred_version;
      int    allow_private_address;
    } gossvf;

    struct {
      char identity_key_path[ PATH_MAX ];

      ulong         entrypoints_cnt;
      fd_ip4_port_t entrypoints[ FD_TOPO_GOSSIP_ENTRYPOINTS_MAX ];

      long boot_timestamp_nanos;

      uint   ip_addr;
      ushort shred_version;

      ulong max_entries;
      ulong max_purged;
      ulong max_failed;

      fd_hash_t wait_for_supermajority_with_bank_hash;

      struct {
        ushort gossip;
        ushort tvu;
        ushort tvu_quic;
        ushort tpu;
        ushort tpu_quic;
        ushort repair;
      } ports;
    } gossip;

    struct {
      uint   out_depth;
      uint   reasm_cnt;
      ulong  max_concurrent_connections;
      ulong  max_concurrent_handshakes;
      ushort quic_transaction_listen_port;
      long   idle_timeout_millis;
      uint   ack_delay_millis;
      int    retry;
      char   key_log_path[ PATH_MAX ];
    } quic;

    struct {
      ulong tcache_depth;
    } verify;

    struct {
      ulong tcache_depth;
    } dedup;

    struct {
      char  url[ 256 ];
      ulong url_len;
      char  sni[ 256 ];
      ulong sni_len;
      char  identity_key_path[ PATH_MAX ];
      char  key_log_path[ PATH_MAX ];
      ulong buf_sz;
      ulong out_depth;
      ulong ssl_heap_sz;
      ulong keepalive_interval_nanos;
      uchar tls_cert_verify : 1;
    } bundle;

    struct {
      char url[ 256 ];
      char identity_key_path[ PATH_MAX ];
    } event;

    struct {
      ulong max_pending_transactions;
      ulong execle_tile_count;
      int   larger_max_cost_per_block;
      int   larger_shred_limits_per_block;
      int   use_consumed_cus;
      int   schedule_strategy;
      struct {
        int   enabled;
        uchar tip_distribution_program_addr[ 32 ];
        uchar tip_payment_program_addr[ 32 ];
        uchar tip_distribution_authority[ 32 ];
        ulong commission_bps;
        char  identity_key_path[ PATH_MAX ];
        char  vote_account_path[ PATH_MAX ]; /* or pubkey is okay */
      } bundle;
    } pack;

    struct {
      int   lagged_consecutive_leader_start;
      int   plugins_enabled;
      ulong execle_cnt;
      char  identity_key_path[ PATH_MAX ];
      struct {
        int   enabled;
        uchar tip_payment_program_addr[ 32 ];
        uchar tip_distribution_program_addr[ 32 ];
        char  vote_account_path[ PATH_MAX ];
      } bundle;
    } pohh;

    struct {
      ulong execle_cnt;
      char  identity_key_path[ PATH_MAX ];
    } poh;

    struct {
      ulong  depth;
      ulong  fec_resolver_depth;
      char   identity_key_path[ PATH_MAX ];
      ushort shred_listen_port;
      int    larger_shred_limits_per_block;
      ushort expected_shred_version;
      ulong             adtl_dests_retransmit_cnt;
      fd_topo_ip_port_t adtl_dests_retransmit[ FD_TOPO_ADTL_DESTS_MAX ];
      ulong             adtl_dests_leader_cnt;
      fd_topo_ip_port_t adtl_dests_leader[ FD_TOPO_ADTL_DESTS_MAX ];
    } shred;

    struct {
      ulong disable_blockstore_from_slot;
    } store;

    struct {
      char  identity_key_path[ PATH_MAX ];
      ulong authorized_voter_paths_cnt;
      char  authorized_voter_paths[ 16 ][ PATH_MAX ];
    } sign;

    struct {
      uint   listen_addr;
      ushort listen_port;

      int is_voting;

      char cluster[ 32 ];
      char identity_key_path[ PATH_MAX ];
      char vote_key_path[ PATH_MAX ];

      ulong max_http_connections;
      ulong max_websocket_connections;
      ulong max_http_request_length;
      ulong send_buffer_size_mb;
      int   schedule_strategy;

      int   websocket_compression;
      int   frontend_release_channel;
      ulong tile_cnt;

      char   wfs_bank_hash[ FD_BASE58_ENCODED_32_SZ ];
      ushort expected_shred_version;
    } gui;

    struct {
      uint   listen_addr;
      ushort listen_port;

      ulong max_http_connections;
      ulong send_buffer_size_mb;
      ulong max_http_request_length;

      ulong max_live_slots;
      ulong accdb_max_depth;

      char identity_key_path[ PATH_MAX ];
      int  delay_startup;
    } rpc;

    struct {
      uint   prometheus_listen_addr;
      ushort prometheus_listen_port;
    } metric;

    struct {
      int is_voting;
    } diag;

    struct {
      ulong fec_max;

      ulong txncache_obj_id;

      char shred_cap[ PATH_MAX ];

      char identity_key_path[ PATH_MAX ];
      uint ip_addr;
      char vote_account_path[ PATH_MAX ];

      fd_hash_t wait_for_supermajority_with_bank_hash;
      ushort    expected_shred_version;
      int       wait_for_vote_to_start_leader;

      ulong heap_size_gib;
      ulong sched_depth;
      ulong max_live_slots;
      ulong write_delay_slots;

      /* not specified in TOML */

      ulong enable_features_cnt;
      char  enable_features[ 16 ][ FD_BASE58_ENCODED_32_SZ ];

      char genesis_path[ PATH_MAX ];

      int larger_max_cost_per_block;

      ulong capture_start_slot;
      char  solcap_capture[ PATH_MAX ];
      char  dump_proto_dir[ PATH_MAX ];
      int   dump_block_to_pb;

      struct {
        int   enabled;
        uchar tip_payment_program_addr[ 32 ];
        uchar tip_distribution_program_addr[ 32 ];
        char  vote_account_path[ PATH_MAX ];
      } bundle;

    } replay;

    struct {
      ulong txncache_obj_id;
      ulong acc_pool_obj_id;

      ulong max_live_slots;
      ulong accdb_max_depth;

      ulong capture_start_slot;
      char  solcap_capture[ PATH_MAX ];
      char  dump_proto_dir[ PATH_MAX ];
      char  dump_syscall_name_filter[ PATH_MAX ];
      char  dump_instr_program_id_filter[ FD_BASE58_ENCODED_32_SZ ];
      int   dump_instr_to_pb;
      int   dump_txn_to_pb;
      int   dump_txn_as_fixture;
      int   dump_syscall_to_pb;
    } execrp;

    struct {
      ushort send_to_port;
      uint   send_to_ip_addr;
      ulong  conn_cnt;
      int    no_quic;
    } benchs;

    struct {
      ushort rpc_port;
      uint   rpc_ip_addr;
    } bencho;

    struct {
      ulong accounts_cnt;
      int   mode;
      float contending_fraction;
      float cu_price_spread;
    } benchg;

    struct {
      ushort repair_intake_listen_port;
      ushort repair_serve_listen_port;
      char   identity_key_path[ PATH_MAX ];
      ulong  max_pending_shred_sets;
      ulong  slot_max;

      /* non-config */

      ulong repair_sign_depth;
      ulong repair_sign_cnt;
    } repair;

    struct {
      ushort txsend_src_port;

      /* non-config */

      uint ip_addr;
      char identity_key_path[ PATH_MAX ];
    } txsend;

    struct {
      uint fake_dst_ip;
    } pktgen;

    struct {
      ulong end_slot;
      char  rocksdb_path[ PATH_MAX ];
      char  ingest_mode[ 32 ];

      /* Set internally by the archiver tile */
      int archive_fd;
    } archiver;

    struct {
      int   ingest_dead_slots;
      ulong root_distance;
      ulong end_slot;
      char  rocksdb_path[ PATH_MAX ];
      char  shredcap_path[ PATH_MAX ];
    } backtest;

    struct {
      ulong authorized_voter_paths_cnt;
      char  authorized_voter_paths[ 16 ][ PATH_MAX ];
      int   hard_fork_fatal;
      ulong max_live_slots;
      ulong accdb_max_depth;
      char  identity_key[ PATH_MAX ];
      char  vote_account[ PATH_MAX ];
      char  base_path[PATH_MAX];
    } tower;

    struct {
      char   folder_path[ PATH_MAX ];
      ushort repair_intake_listen_port;
      ulong  write_buffer_size; /* Size of the write buffer for the capture tile */
      int    enable_publish_stake_weights;
      char   manifest_path[ PATH_MAX ];

      /* Set internally by the capture tile */
      int shreds_fd;
      int requests_fd;
      int fecs_fd;
      int peers_fd;
      int bank_hashes_fd;
      int slices_fd;
    } shredcap;

/* Limits for snapshot source configuration (snapct below). */
#define FD_TOPO_SNAPSHOTS_GOSSIP_LIST_MAX (32UL)
#define FD_TOPO_SNAPSHOTS_SERVERS_MAX (16UL)
#define FD_TOPO_MAX_RESOLVED_ADDRS ( 4UL)
#define FD_TOPO_SNAPSHOTS_SERVERS_MAX_RESOLVED (FD_TOPO_MAX_RESOLVED_ADDRS*FD_TOPO_SNAPSHOTS_SERVERS_MAX)

    struct fd_topo_tile_snapct {
      char snapshots_path[ PATH_MAX ];

      struct {
        uint max_local_full_effective_age;
        uint max_local_incremental_age;

        struct {
          int         allow_any;
          ulong       allow_list_cnt;
          fd_pubkey_t allow_list[ FD_TOPO_SNAPSHOTS_GOSSIP_LIST_MAX ];
          ulong       block_list_cnt;
          fd_pubkey_t block_list[ FD_TOPO_SNAPSHOTS_GOSSIP_LIST_MAX ];
        } gossip;

        ulong servers_cnt;
        struct {
          fd_ip4_port_t addr;
          char          hostname[ 256UL ];
          int           is_https;
        } servers[ FD_TOPO_SNAPSHOTS_SERVERS_MAX_RESOLVED ];
      } sources;

      int  incremental_snapshots;
      uint max_full_snapshots_to_keep;
      uint max_incremental_snapshots_to_keep;
      uint max_retry_abort;
    } snapct;

    struct {
      char snapshots_path[ PATH_MAX ];
      int  incremental_snapshots;
      uint min_download_speed_mibs;
    } snapld;

    struct {
      ulong max_live_slots;
      ulong accdb_max_depth;
      ulong funk_obj_id;
      ulong funk_locks_obj_id;
      ulong txncache_obj_id;

      uint lthash_disabled : 1;
      uint use_vinyl : 1;
    } snapin;

    struct {
      ulong vinyl_meta_map_obj_id;
      ulong vinyl_meta_pool_obj_id;
      ulong snapwr_depth;
      char  vinyl_path[ PATH_MAX ];
      uint  lthash_disabled : 1;
      ulong max_accounts;
    } snapwm;

    struct {
      ulong dcache_obj_id;
      char  vinyl_path[ PATH_MAX ];
      uint  lthash_disabled : 1;
    } snapwr;

    struct {
      ulong dcache_obj_id;
      int   io_uring_enabled;
      char  vinyl_path[ PATH_MAX ];
    } snaplh;

    struct {

      uint   bind_address;
      ushort bind_port;

      ushort        expected_shred_version;
      ulong         entrypoints_cnt;
      fd_ip4_port_t entrypoints[ FD_TOPO_GOSSIP_ENTRYPOINTS_MAX ];
    } ipecho;

    struct {
      ulong max_live_slots;
      ulong accdb_max_depth;
      ulong txncache_obj_id;
      ulong acc_pool_obj_id;
    } execle;

    struct {
      int validate_genesis_hash;
      int allow_download;

      ushort        expected_shred_version;
      ulong         entrypoints_cnt;
      fd_ip4_port_t entrypoints[ FD_TOPO_GOSSIP_ENTRYPOINTS_MAX ];

      int   has_expected_genesis_hash;
      uchar expected_genesis_hash[ 32UL ];

      char genesis_path[ PATH_MAX ];

      uint target_gid;
      uint target_uid;

      ulong accdb_max_depth;
    } genesi;

    struct {
      ulong meta_map_obj_id;
      ulong meta_pool_obj_id;
      ulong line_max;
      ulong data_obj_id;
      char  bstream_path[ PATH_MAX ];
      ulong pair_cnt_limit;

      int  io_type; /* FD_VINYL_IO_TYPE_* */
      uint uring_depth;
    } accdb;

    struct {
      ulong capture_start_slot;
      char  solcap_capture[ PATH_MAX ];
      int   recent_only;
      ulong recent_slots_per_file;
    } solcap;

    struct {
      ulong accdb_max_depth;
    } resolv;
  };
};

typedef struct fd_topo_tile fd_topo_tile_t;
682 :
/* An object is a named, typed allocation placed in one of the
   topology's workspaces.  Tiles reference objects by id (see
   uses_obj_id in fd_topo_tile). */
typedef struct {
  ulong id;          /* The ID of this object.  Indexed from [0, obj_cnt). */
  char  name[ 13UL ]; /* object type */
  ulong wksp_id;     /* ID of the workspace this object lives in. */

  /* Optional label for object */
  char  label[ 13UL ]; /* object label */
  ulong label_idx;     /* index of object for this label (ULONG_MAX if not labelled) */

  ulong offset;    /* Offset of the object within its workspace -- presumably in bytes; confirm against fd_topo_obj_laddr. */
  ulong footprint; /* Size of the object -- presumably in bytes. */
} fd_topo_obj_t;
695 :
/* An fd_topo_t represents the overall structure of a Firedancer
   configuration, describing all the workspaces, tiles, and links
   between them. */
struct fd_topo {
  char  app_name[ 256UL ];
  uchar props[ 16384UL ]; /* Opaque property blob -- semantics not visible in this header. */

  ulong wksp_cnt; /* Number of valid entries in workspaces. */
  ulong link_cnt; /* Number of valid entries in links. */
  ulong tile_cnt; /* Number of valid entries in tiles. */
  ulong obj_cnt;  /* Number of valid entries in objs. */

  fd_topo_wksp_t workspaces[ FD_TOPO_MAX_WKSPS ];
  fd_topo_link_t links[ FD_TOPO_MAX_LINKS ];
  fd_topo_tile_t tiles[ FD_TOPO_MAX_TILES ];
  fd_topo_obj_t  objs[ FD_TOPO_MAX_OBJS ];

  ulong agave_affinity_cnt;
  ulong agave_affinity_cpu_idx[ FD_TILE_MAX ];
  ulong blocklist_cores_cnt;
  ulong blocklist_cores_cpu_idx[ FD_TILE_MAX ];

  ulong max_page_size;          /* 2^21 or 2^30 */
  ulong gigantic_page_threshold; /* see [hugetlbfs.gigantic_page_threshold_mib]*/
};
typedef struct fd_topo fd_topo_t;
722 :
/* Describes how to run one tile type: its sandbox/rlimit requirements
   and the callbacks invoked through the tile's lifecycle. */
typedef struct {
  char const * name; /* Tile name this runner handles. */

  int   keep_host_networking;
  int   allow_connect;
  int   allow_renameat;
  ulong rlimit_file_cnt;
  ulong rlimit_address_space;
  ulong rlimit_data;
  ulong rlimit_nproc;
  int   for_tpool;

  /* Fill out with allowed seccomp filter instructions / file
     descriptors for the sandbox; return the number written.
     NOTE(review): exact contract inferred from naming -- confirm
     against the implementations. */
  ulong (*populate_allowed_seccomp)( fd_topo_t const * topo, fd_topo_tile_t const * tile, ulong out_cnt, struct sock_filter * out );
  ulong (*populate_allowed_fds    )( fd_topo_t const * topo, fd_topo_tile_t const * tile, ulong out_fds_sz, int * out_fds );
  ulong (*scratch_align           )( void );                          /* Alignment of the tile's scratch region. */
  ulong (*scratch_footprint       )( fd_topo_tile_t const * tile );   /* Size of the tile's scratch region. */
  ulong (*loose_footprint         )( fd_topo_tile_t const * tile );
  void  (*privileged_init         )( fd_topo_t * topo, fd_topo_tile_t * tile ); /* Init run before privileges are dropped. */
  void  (*unprivileged_init       )( fd_topo_t * topo, fd_topo_tile_t * tile ); /* Init run after sandboxing. */
  void  (*run                     )( fd_topo_t * topo, fd_topo_tile_t * tile ); /* Tile main loop. */
  ulong (*rlimit_file_cnt_fn      )( fd_topo_t const * topo, fd_topo_tile_t const * tile );
} fd_topo_run_tile_t;
745 :
/* Callbacks describing how to size, align, and construct one topology
   object type (matched by name against fd_topo_obj_t.name). */
struct fd_topo_obj_callbacks {
  char const * name; /* Object type this set of callbacks handles. */
  ulong (* footprint )( fd_topo_t const * topo, fd_topo_obj_t const * obj ); /* Bytes required for the object. */
  ulong (* align     )( fd_topo_t const * topo, fd_topo_obj_t const * obj ); /* Required alignment of the object. */
  ulong (* loose     )( fd_topo_t const * topo, fd_topo_obj_t const * obj ); /* Additional loose (heap-like) space the object needs. */
  void  (* new       )( fd_topo_t const * topo, fd_topo_obj_t const * obj ); /* Format the object's memory. */
};

typedef struct fd_topo_obj_callbacks fd_topo_obj_callbacks_t;
755 :
756 : FD_PROTOTYPES_BEGIN
757 :
/* Alignment, in bytes, used when laying objects out in a workspace. */
FD_FN_CONST static inline ulong
fd_topo_workspace_align( void ) {
  /* This needs to be the max( align ) of all the child members that
     could be aligned into this workspace, otherwise our footprint
     calculation will not be correct. For now just set to 4096 but this
     should probably be calculated dynamically, or we should reduce
     those child aligns if we can. */
  return 4096UL;
}
767 :
/* fd_topo_obj_laddr: Returns a pointer in the local address space to
   the object with the given id.  (Defined out of line.) */
void *
fd_topo_obj_laddr( fd_topo_t const * topo,
                   ulong             obj_id );

/* Returns a pointer in the local address space to the base address of
   the workspace out of which the given object was allocated. */
775 : static inline void *
776 : fd_topo_obj_wksp_base( fd_topo_t const * topo,
777 0 : ulong obj_id ) {
778 0 : FD_TEST( obj_id<FD_TOPO_MAX_OBJS );
779 0 : fd_topo_obj_t const * obj = &topo->objs[ obj_id ];
780 0 : FD_TEST( obj->id == obj_id );
781 0 : ulong const wksp_id = obj->wksp_id;
782 :
783 0 : FD_TEST( wksp_id<FD_TOPO_MAX_WKSPS );
784 0 : fd_topo_wksp_t const * wksp = &topo->workspaces[ wksp_id ];
785 0 : FD_TEST( wksp->id == wksp_id );
786 0 : return wksp->wksp;
787 0 : }
788 :
789 : FD_FN_PURE static inline ulong
790 : fd_topo_tile_name_cnt( fd_topo_t const * topo,
791 3 : char const * name ) {
792 3 : ulong cnt = 0;
793 6 : for( ulong i=0; i<topo->tile_cnt; i++ ) {
794 3 : if( FD_UNLIKELY( !strcmp( topo->tiles[ i ].name, name ) ) ) cnt++;
795 3 : }
796 3 : return cnt;
797 3 : }
798 :
799 : /* Finds the workspace of a given name in the topology. Returns
800 : ULONG_MAX if there is no such workspace. There can be at most one
801 : workspace of a given name. */
802 :
803 : FD_FN_PURE static inline ulong
804 : fd_topo_find_wksp( fd_topo_t const * topo,
805 66 : char const * name ) {
806 66 : for( ulong i=0; i<topo->wksp_cnt; i++ ) {
807 66 : if( FD_UNLIKELY( !strcmp( topo->workspaces[ i ].name, name ) ) ) return i;
808 66 : }
809 0 : return ULONG_MAX;
810 66 : }
811 :
812 : /* Find the tile of a given name and kind_id in the topology, there will
813 : be at most one such tile, since kind_id is unique among the name.
814 : Returns ULONG_MAX if there is no such tile. */
815 :
816 : FD_FN_PURE static inline ulong
817 : fd_topo_find_tile( fd_topo_t const * topo,
818 : char const * name,
819 21 : ulong kind_id ) {
820 21 : for( ulong i=0; i<topo->tile_cnt; i++ ) {
821 21 : if( FD_UNLIKELY( !strcmp( topo->tiles[ i ].name, name ) ) && topo->tiles[ i ].kind_id == kind_id ) return i;
822 21 : }
823 0 : return ULONG_MAX;
824 21 : }
825 :
826 : /* Find the link of a given name and kind_id in the topology, there will
827 : be at most one such link, since kind_id is unique among the name.
828 : Returns ULONG_MAX if there is no such link. */
829 :
830 : FD_FN_PURE static inline ulong
831 : fd_topo_find_link( fd_topo_t const * topo,
832 : char const * name,
833 18 : ulong kind_id ) {
834 39 : for( ulong i=0; i<topo->link_cnt; i++ ) {
835 39 : if( FD_UNLIKELY( !strcmp( topo->links[ i ].name, name ) ) && topo->links[ i ].kind_id == kind_id ) return i;
836 39 : }
837 0 : return ULONG_MAX;
838 18 : }
839 :
840 : FD_FN_PURE static inline ulong
841 : fd_topo_find_tile_in_link( fd_topo_t const * topo,
842 : fd_topo_tile_t const * tile,
843 : char const * name,
844 0 : ulong kind_id ) {
845 0 : for( ulong i=0; i<tile->in_cnt; i++ ) {
846 0 : if( FD_UNLIKELY( !strcmp( topo->links[ tile->in_link_id[ i ] ].name, name ) )
847 0 : && topo->links[ tile->in_link_id[ i ] ].kind_id == kind_id ) return i;
848 0 : }
849 0 : return ULONG_MAX;
850 0 : }
851 :
852 : FD_FN_PURE static inline ulong
853 : fd_topo_find_tile_out_link( fd_topo_t const * topo,
854 : fd_topo_tile_t const * tile,
855 : char const * name,
856 0 : ulong kind_id ) {
857 0 : for( ulong i=0; i<tile->out_cnt; i++ ) {
858 0 : if( FD_UNLIKELY( !strcmp( topo->links[ tile->out_link_id[ i ] ].name, name ) )
859 0 : && topo->links[ tile->out_link_id[ i ] ].kind_id == kind_id ) return i;
860 0 : }
861 0 : return ULONG_MAX;
862 0 : }
863 :
864 : /* Find the id of the tile which is a producer for the given link. If
865 : no tile is a producer for the link, returns ULONG_MAX. This should
866 : not be possible for a well formed and validated topology. */
867 : FD_FN_PURE static inline ulong
868 : fd_topo_find_link_producer( fd_topo_t const * topo,
869 0 : fd_topo_link_t const * link ) {
870 0 : for( ulong i=0; i<topo->tile_cnt; i++ ) {
871 0 : fd_topo_tile_t const * tile = &topo->tiles[ i ];
872 :
873 0 : for( ulong j=0; j<tile->out_cnt; j++ ) {
874 0 : if( FD_UNLIKELY( tile->out_link_id[ j ] == link->id ) ) return i;
875 0 : }
876 0 : }
877 0 : return ULONG_MAX;
878 0 : }
879 :
880 : /* Given a link, count the number of consumers of that link among all
881 : the tiles in the topology. */
882 : FD_FN_PURE static inline ulong
883 : fd_topo_link_consumer_cnt( fd_topo_t const * topo,
884 0 : fd_topo_link_t const * link ) {
885 0 : ulong cnt = 0;
886 0 : for( ulong i=0; i<topo->tile_cnt; i++ ) {
887 0 : fd_topo_tile_t const * tile = &topo->tiles[ i ];
888 0 : for( ulong j=0; j<tile->in_cnt; j++ ) {
889 0 : if( FD_UNLIKELY( tile->in_link_id[ j ] == link->id ) ) cnt++;
890 0 : }
891 0 : }
892 :
893 0 : return cnt;
894 0 : }
895 :
896 : /* Given a link, count the number of reliable consumers of that link
897 : among all the tiles in the topology. */
898 : FD_FN_PURE static inline ulong
899 : fd_topo_link_reliable_consumer_cnt( fd_topo_t const * topo,
900 0 : fd_topo_link_t const * link ) {
901 0 : ulong cnt = 0;
902 0 : for( ulong i=0; i<topo->tile_cnt; i++ ) {
903 0 : fd_topo_tile_t const * tile = &topo->tiles[ i ];
904 0 : for( ulong j=0; j<tile->in_cnt; j++ ) {
905 0 : if( FD_UNLIKELY( tile->in_link_id[ j ] == link->id && tile->in_link_reliable[ j ] ) ) cnt++;
906 0 : }
907 0 : }
908 :
909 0 : return cnt;
910 0 : }
911 :
/* Number of out links this tile produces to.  topo is unused but kept
   for signature symmetry with the other counting helpers. */
FD_FN_PURE static inline ulong
fd_topo_tile_consumer_cnt( fd_topo_t const *      topo,
                           fd_topo_tile_t const * tile ) {
  (void)topo;
  return tile->out_cnt;
}
918 :
919 : FD_FN_PURE static inline ulong
920 : fd_topo_tile_reliable_consumer_cnt( fd_topo_t const * topo,
921 0 : fd_topo_tile_t const * tile ) {
922 0 : ulong reliable_cons_cnt = 0UL;
923 0 : for( ulong i=0UL; i<topo->tile_cnt; i++ ) {
924 0 : fd_topo_tile_t const * consumer_tile = &topo->tiles[ i ];
925 0 : for( ulong j=0UL; j<consumer_tile->in_cnt; j++ ) {
926 0 : for( ulong k=0UL; k<tile->out_cnt; k++ ) {
927 0 : if( FD_UNLIKELY( consumer_tile->in_link_id[ j ]==tile->out_link_id[ k ] && consumer_tile->in_link_reliable[ j ] ) ) {
928 0 : reliable_cons_cnt++;
929 0 : }
930 0 : }
931 0 : }
932 0 : }
933 0 : return reliable_cons_cnt;
934 0 : }
935 :
936 : FD_FN_PURE static inline ulong
937 : fd_topo_tile_producer_cnt( fd_topo_t const * topo,
938 0 : fd_topo_tile_t const * tile ) {
939 0 : (void)topo;
940 0 : ulong in_cnt = 0UL;
941 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
942 0 : if( FD_UNLIKELY( !tile->in_link_poll[ i ] ) ) continue;
943 0 : in_cnt++;
944 0 : }
945 0 : return in_cnt;
946 0 : }
947 :
948 : FD_FN_PURE FD_FN_UNUSED static ulong
949 : fd_topo_obj_cnt( fd_topo_t const * topo,
950 : char const * obj_type,
951 0 : char const * label ) {
952 0 : ulong cnt = 0UL;
953 0 : for( ulong i=0UL; i<topo->obj_cnt; i++ ) {
954 0 : fd_topo_obj_t const * obj = &topo->objs[ i ];
955 0 : if( strncmp( obj->name, obj_type, sizeof(obj->name) ) ) continue;
956 0 : if( label &&
957 0 : strncmp( obj->label, label, sizeof(obj->label) ) ) continue;
958 0 : cnt++;
959 0 : }
960 0 : return cnt;
961 0 : }
962 :
963 : FD_FN_PURE FD_FN_UNUSED static fd_topo_obj_t const *
964 : fd_topo_find_obj( fd_topo_t const * topo,
965 : char const * obj_type,
966 : char const * label,
967 0 : ulong label_idx ) {
968 0 : for( ulong i=0UL; i<topo->obj_cnt; i++ ) {
969 0 : fd_topo_obj_t const * obj = &topo->objs[ i ];
970 0 : if( strncmp( obj->name, obj_type, sizeof(obj->name) ) ) continue;
971 0 : if( label &&
972 0 : strncmp( obj->label, label, sizeof(obj->label) ) ) continue;
973 0 : if( label_idx != ULONG_MAX && obj->label_idx != label_idx ) continue;
974 0 : return obj;
975 0 : }
976 0 : return NULL;
977 0 : }
978 :
979 : FD_FN_PURE FD_FN_UNUSED static fd_topo_obj_t const *
980 : fd_topo_find_tile_obj( fd_topo_t const * topo,
981 : fd_topo_tile_t const * tile,
982 0 : char const * obj_type ) {
983 0 : for( ulong i=0UL; i<(tile->uses_obj_cnt); i++ ) {
984 0 : fd_topo_obj_t const * obj = &topo->objs[ tile->uses_obj_id[ i ] ];
985 0 : if( strncmp( obj->name, obj_type, sizeof(obj->name) ) ) continue;
986 0 : return obj;
987 0 : }
988 0 : return NULL;
989 0 : }
990 :
991 : /* Join (map into the process) all shared memory (huge/gigantic pages)
992 : needed by the tile, in the given topology. All memory associated
993 : with the tile (aka. used by links that the tile either produces to or
994 : consumes from, or used by the tile itself for its cnc) will be
995 : attached (mapped into the process).
996 :
997 : This is needed to play nicely with the sandbox. Once a process is
998 : sandboxed we can no longer map any memory. */
999 : void
1000 : fd_topo_join_tile_workspaces( fd_topo_t * topo,
1001 : fd_topo_tile_t * tile,
1002 : int core_dump_level );
1003 :
1004 : /* Join (map into the process) the shared memory (huge/gigantic pages)
1005 : for the given workspace. Mode is one of
1006 : FD_SHMEM_JOIN_MODE_READ_WRITE or FD_SHMEM_JOIN_MODE_READ_ONLY and
1007 : determines the prot argument that will be passed to mmap when mapping
1008 : the pages in (PROT_WRITE or PROT_READ respectively).
1009 :
1010 : Dump should be set to 1 if the workspace memory should be dumpable
1011 : when the process crashes, or 0 if not. */
1012 : void
1013 : fd_topo_join_workspace( fd_topo_t * topo,
1014 : fd_topo_wksp_t * wksp,
1015 : int mode,
1016 : int dump );
1017 :
1018 : /* Join (map into the process) all shared memory (huge/gigantic pages)
1019 : needed by all tiles in the topology. Mode is one of
1020 : FD_SHMEM_JOIN_MODE_READ_WRITE or FD_SHMEM_JOIN_MODE_READ_ONLY and
1021 : determines the prot argument that will be passed to mmap when
1022 : mapping the pages in (PROT_WRITE or PROT_READ respectively). */
1023 : void
1024 : fd_topo_join_workspaces( fd_topo_t * topo,
1025 : int mode,
1026 : int core_dump_level );
1027 :
1028 : /* Leave (unmap from the process) the shared memory needed for the
1029 : given workspace in the topology, if it was previously mapped.
1030 :
1031 : topo and wksp are assumed non-NULL. It is OK if the workspace
1032 : has not been previously joined, in which case this is a no-op. */
1033 :
1034 : void
1035 : fd_topo_leave_workspace( fd_topo_t * topo,
1036 : fd_topo_wksp_t * wksp );
1037 :
1038 : /* Leave (unmap from the process) all shared memory needed by all
1039 : tiles in the topology, if each of them was mapped.
1040 :
1041 : topo is assumed non-NULL. Only workspaces which were previously
1042 : joined are unmapped. */
1043 :
1044 : void
1045 : fd_topo_leave_workspaces( fd_topo_t * topo );
1046 :
1047 : /* Create the given workspace needed by the topology on the system.
1048 : This does not "join" the workspaces (map their memory into the
1049 : process), but only creates the .wksp file and formats it correctly
1050 : as a workspace.
1051 :
1052 : Returns 0 on success and -1 on failure, with errno set to the error.
1053 : The only reason for failure currently that will be returned is
1054 : ENOMEM, as other unexpected errors will cause the program to exit.
1055 :
1056 : If update_existing is 1, the workspace will not be created from
1057 : scratch but it will be assumed that it already exists from a prior
1058 : run and needs to be maybe resized and then have the header
1059 : structures reinitialized. This can save a very expensive operation
1060 : of zeroing all of the workspace pages. This is dangerous in
1061 : production because it can leave stray memory from prior runs around,
1062 : and should only be used in development environments. */
1063 :
1064 : int
1065 : fd_topo_create_workspace( fd_topo_t * topo,
1066 : fd_topo_wksp_t * wksp,
1067 : int update_existing );
1068 :
1069 : /* Join the standard IPC objects needed by the topology of this particular
1070 : tile */
1071 :
1072 : void
1073 : fd_topo_fill_tile( fd_topo_t * topo,
1074 : fd_topo_tile_t * tile );
1075 :
1076 : /* Same as fd_topo_fill_tile but fills in all the objects for a
1077 : particular workspace with the given mode. */
1078 : void
1079 : fd_topo_workspace_fill( fd_topo_t * topo,
1080 : fd_topo_wksp_t * wksp );
1081 :
1082 : /* Apply a new function to every object that is resident in the given
1083 : workspace in the topology. */
1084 :
1085 : void
1086 : fd_topo_wksp_new( fd_topo_t const * topo,
1087 : fd_topo_wksp_t const * wksp,
1088 : fd_topo_obj_callbacks_t ** callbacks );
1089 :
1090 : /* Same as fd_topo_fill_tile but fills in all tiles in the topology. */
1091 :
1092 : void
1093 : fd_topo_fill( fd_topo_t * topo );
1094 :
1095 : /* fd_topo_tile_stack_join joins a huge page optimized stack for the
1096 : provided tile. The stack is assumed to already exist at a known
1097 : path in the hugetlbfs mount. */
1098 :
1099 : void *
1100 : fd_topo_tile_stack_join( char const * app_name,
1101 : char const * tile_name,
1102 : ulong tile_kind_id );
1103 :
1104 : /* fd_topo_run_single_process runs all the tiles in a single process
1105 : (the calling process). This spawns a thread for each tile, switches
1106 : that thread to the given UID and GID and then runs the tile in it.
1107 : Each thread will never exit, as tiles are expected to run forever.
1108 : An error is logged and the application will exit if a tile exits.
1109 : The function itself does return after spawning all the threads.
1110 :
1111 : The threads will not be sandboxed in any way, except switching to the
1112 : provided UID and GID, so they will share the same address space, and
1113 : not have any seccomp restrictions or use any Linux namespaces. The
1114 : calling thread will also switch to the provided UID and GID before
1115 : it returns.
1116 :
1117 : In production, when running with an Agave child process this is
1118 : used for spawning certain tiles inside the Agave address space.
1119 : It's also useful for tooling and debugging, but is not how the main
1120 : production Firedancer process runs. For production, each tile is run
1121 : in its own address space with a separate process and full security
1122 : sandbox.
1123 :
1124 : The agave argument determines which tiles are started. If the
1125 : argument is 0 or 1, only non-agave (or only agave) tiles are started.
1126 : If the argument is any other value, all tiles in the topology are
1127 : started regardless of if they are Agave tiles or not. */
1128 :
1129 : void
1130 : fd_topo_run_single_process( fd_topo_t * topo,
1131 : int agave,
1132 : uint uid,
1133 : uint gid,
1134 : fd_topo_run_tile_t (* tile_run )( fd_topo_tile_t const * tile ) );
1135 :
1136 : /* fd_topo_run_tile runs the given tile directly within the current
1137 : process (and thread). The function will never return, as tiles are
1138 : expected to run forever. An error is logged and the application will
1139 : exit if the tile exits.
1140 :
1141 : The sandbox argument determines if the current process will be
1142 : sandboxed fully before starting the tile. The thread will switch to
1143 : the UID and GID provided before starting the tile, even if the thread
1144 : is not being sandboxed. Although POSIX specifies that all threads in
1145 : a process must share a UID and GID, this is not the case on Linux.
1146 : The thread will switch to the provided UID and GID without switching
1147 : the other threads in the process.
1148 :
1149 : If keep_controlling_terminal is set to 0, and the sandbox is enabled
1150 : the controlling terminal will be detached as an additional sandbox
1151 : measure, but you will not be able to send Ctrl+C or other signals
1152 : from the terminal. See fd_sandbox.h for more information.
1153 :
1154 : The allow_fd argument is only used if sandbox is true, and is a file
1155 : descriptor which will be allowed to exist in the process. Normally
1156 : the sandbox code rejects and aborts if there is an unexpected file
1157 : descriptor present on boot. This is helpful to allow a parent
1158 : process to be notified on termination of the tile by waiting for a
1159 : pipe file descriptor to get closed.
1160 :
1161 : wait and debugger are both used in debugging. If wait is non-NULL,
1162 : the runner will wait until the value pointed to by wait is non-zero
1163 : before launching the tile. Likewise, if debugger is non-NULL, the
1164 : runner will wait until a debugger is attached before setting the
1165 : value pointed to by debugger to non-zero. These are intended to be
1166 : used as a pair, where many tiles share a waiting reference, and then
1167 : one of the tiles (a tile you want to attach the debugger to) has the
1168 : same reference provided as the debugger, so all tiles will stop and
1169 : wait for the debugger to attach to it before proceeding. */
1170 :
1171 : void
1172 : fd_topo_run_tile( fd_topo_t * topo,
1173 : fd_topo_tile_t * tile,
1174 : int sandbox,
1175 : int keep_controlling_terminal,
1176 : int dumpable,
1177 : uint uid,
1178 : uint gid,
1179 : int allow_fd,
1180 : volatile int * wait,
1181 : volatile int * debugger,
1182 : fd_topo_run_tile_t * tile_run );
1183 :
1184 : /* This is for determining the value of RLIMIT_MLOCK that we need to
1185 : successfully run all tiles in separate processes. The value returned
1186 : is the maximum amount of memory that will be locked with mlock() by
1187 : any individual process in the tree. Specifically, if we have three
1188 : tile processes, and they each need to lock 5, 9, and 2 MiB of memory
1189 : respectively, RLIMIT_MLOCK needs to be 9 MiB to allow all three
1190 : process mlock() calls to succeed.
1191 :
1192 : Tiles lock memory in three ways. Any workspace they are using, they
1193 : lock the entire workspace. Then each tile uses huge pages for the
1194 : stack which are also locked, and finally some tiles use private
1195 : locked mmaps outside the workspace for storing key material. The
1196 : results here include all of this memory together.
1197 :
1198 : The result is not necessarily the amount of memory used by the tile
1199 : process, although it will be quite close. Tiles could potentially
1200 : allocate memory (eg, with brk) without needing to lock it, which
   would not need to be included, and some kernel memory that tiles cause
1202 : to be allocated (for example XSK buffers) is also not included. The
1203 : actual amount of memory used will not be less than this value. */
1204 : FD_FN_PURE ulong
1205 : fd_topo_mlock_max_tile( fd_topo_t const * topo );
1206 :
1207 : /* Same as fd_topo_mlock_max_tile, but for loading the entire topology
1208 : into one process, rather than a separate process per tile. This is
1209 : used, for example, by the configuration code when it creates all the
1210 : workspaces, or the monitor that maps the entire system into one
1211 : address space. */
1212 : FD_FN_PURE ulong
1213 : fd_topo_mlock( fd_topo_t const * topo );
1214 :
1215 : /* This returns the number of gigantic pages needed by the topology on
1216 : the provided numa node. It includes pages needed by the workspaces,
1217 : as well as additional allocations like huge pages for process stacks
1218 : and private key storage. */
1219 :
1220 : FD_FN_PURE ulong
1221 : fd_topo_gigantic_page_cnt( fd_topo_t const * topo,
1222 : ulong numa_idx );
1223 :
1224 : /* This returns the number of huge pages in the application needed by
1225 : the topology on the provided numa node. It includes pages needed by
1226 : things placed in the hugetlbfs (workspaces, process stacks). If
1227 : include_anonymous is true, it also includes anonymous hugepages which
1228 : are needed but are not placed in the hugetlbfs. */
1229 :
1230 : FD_FN_PURE ulong
1231 : fd_topo_huge_page_cnt( fd_topo_t const * topo,
1232 : ulong numa_idx,
1233 : int include_anonymous );
1234 :
1235 : /* Returns the number of normal (4 KiB) pages needed by the topology
1236 : for extra allocations like private key storage and XSK rings. */
1237 :
1238 : FD_FN_PURE ulong
1239 : fd_topo_normal_page_cnt( fd_topo_t const * topo );
1240 :
1241 : /* Prints a message describing the topology to an output stream. If
1242 : stdout is true, will be written to stdout, otherwise will be written
1243 : as a NOTICE log message to the log file. */
1244 : void
1245 : fd_topo_print_log( int stdout,
1246 : fd_topo_t * topo );
1247 :
1248 : FD_PROTOTYPES_END
1249 :
1250 : #endif /* HEADER_fd_src_disco_topo_fd_topo_h */
|