LCOV - code coverage report
Current view: top level - app/firedancer - topology.c (source / functions)
Test: cov.lcov
Date: 2025-08-05 05:04:49
             Hit   Total   Coverage
Lines:         0     767      0.0 %
Functions:     0      18      0.0 %

          Line data    Source code
       1             : #include "topology.h"
       2             : 
       3             : #include "../../discof/repair/fd_fec_chainer.h"
       4             : #include "../../discof/replay/fd_replay_notif.h"
       5             : #include "../../disco/net/fd_net_tile.h"
       6             : #include "../../disco/quic/fd_tpu.h"
       7             : #include "../../disco/tiles.h"
       8             : #include "../../disco/topo/fd_topob.h"
       9             : #include "../../disco/topo/fd_cpu_topo.h"
      10             : #include "../../util/pod/fd_pod_format.h"
      11             : #include "../../util/tile/fd_tile_private.h"
      12             : #include "../../discof/restore/utils/fd_ssmsg.h"
      13             : #include "../../flamenco/runtime/fd_runtime.h"
      14             : 
      15             : #include <sys/random.h>
      16             : #include <sys/types.h>
      17             : #include <sys/socket.h>
      18             : #include <netdb.h>
      19             : 
      20             : extern fd_topo_obj_callbacks_t * CALLBACKS[];
      21             : 
      22             : fd_topo_obj_t *
      23           0 : setup_topo_bank_hash_cmp( fd_topo_t * topo, char const * wksp_name ) {
      24           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "bh_cmp", wksp_name );
      25           0 :   return obj;
      26           0 : }
      27             : 
      28             : fd_topo_obj_t *
      29             : setup_topo_banks( fd_topo_t *  topo,
      30             :                   char const * wksp_name,
      31             :                   ulong        max_total_banks,
      32           0 :                   ulong        max_fork_width ) {
      33           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "banks", wksp_name );
      34           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_total_banks, "obj.%lu.max_total_banks", obj->id ) );
      35           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_fork_width, "obj.%lu.max_fork_width", obj->id ) );
      36           0 :   return obj;
      37           0 : }
      38             : 
      39             : static fd_topo_obj_t *
      40           0 : setup_topo_fec_sets( fd_topo_t * topo, char const * wksp_name, ulong sz ) {
      41           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "fec_sets", wksp_name );
      42           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, sz, "obj.%lu.sz",   obj->id ) );
      43           0 :   return obj;
      44           0 : }
      45             : 
      46             : fd_topo_obj_t *
      47             : setup_topo_funk( fd_topo_t *  topo,
      48             :                  char const * wksp_name,
      49             :                  ulong        max_account_records,
      50             :                  ulong        max_database_transactions,
      51             :                  ulong        heap_size_gib,
      52           0 :                  int          lock_pages ) {
      53           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "funk", wksp_name );
      54           0 :   FD_TEST( fd_pod_insert_ulong(  topo->props, "funk", obj->id ) );
      55           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_account_records,       "obj.%lu.rec_max",  obj->id ) );
      56           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_database_transactions, "obj.%lu.txn_max",  obj->id ) );
      57           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, heap_size_gib*(1UL<<30),   "obj.%lu.heap_max", obj->id ) );
      58           0 :   ulong funk_footprint = fd_funk_footprint( max_database_transactions, max_account_records );
      59           0 :   if( FD_UNLIKELY( !funk_footprint ) ) FD_LOG_ERR(( "Invalid [funk] parameters" ));
      60             : 
      61             :   /* Increase workspace partition count */
      62           0 :   ulong wksp_idx = fd_topo_find_wksp( topo, wksp_name );
      63           0 :   FD_TEST( wksp_idx!=ULONG_MAX );
      64           0 :   fd_topo_wksp_t * wksp = &topo->workspaces[ wksp_idx ];
      65           0 :   ulong part_max = fd_wksp_part_max_est( funk_footprint+(heap_size_gib*(1UL<<30)), 1U<<14U );
      66           0 :   if( FD_UNLIKELY( !part_max ) ) FD_LOG_ERR(( "fd_wksp_part_max_est(%lu,16KiB) failed", funk_footprint ));
      67           0 :   wksp->part_max += part_max;
      68           0 :   wksp->is_locked = lock_pages;
      69             : 
      70           0 :   return obj;
      71           0 : }
      72             : 
      73             : fd_topo_obj_t *
      74             : setup_topo_runtime_pub( fd_topo_t *  topo,
      75             :                         char const * wksp_name,
      76           0 :                         ulong        mem_max ) {
      77           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "runtime_pub", wksp_name );
      78           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, mem_max, "obj.%lu.mem_max",  obj->id ) );
      79           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, 12UL,    "obj.%lu.wksp_tag", obj->id ) );
      80           0 :   return obj;
      81           0 : }
      82             : 
      83             : fd_topo_obj_t *
      84             : setup_topo_store( fd_topo_t *  topo,
      85             :                   char const * wksp_name,
      86             :                   ulong        fec_max,
      87           0 :                   uint         part_cnt ) {
      88           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "store", wksp_name );
      89           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, fec_max,  "obj.%lu.fec_max",  obj->id ) );
      90           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, part_cnt, "obj.%lu.part_cnt", obj->id ) );
      91           0 :   return obj;
      92           0 : }
      93             : 
      94             : fd_topo_obj_t *
      95             : setup_topo_txncache( fd_topo_t *  topo,
      96             :                      char const * wksp_name,
      97             :                      ulong        max_rooted_slots,
      98             :                      ulong        max_live_slots,
      99           0 :                      ulong        max_txn_per_slot ) {
     100           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "txncache", wksp_name );
     101             : 
     102           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_rooted_slots, "obj.%lu.max_rooted_slots", obj->id ) );
     103           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_live_slots,   "obj.%lu.max_live_slots",   obj->id ) );
     104           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_txn_per_slot, "obj.%lu.max_txn_per_slot", obj->id ) );
     105             : 
     106           0 :   return obj;
     107           0 : }
     108             : 
     109             : static int
     110             : resolve_address( char const * address,
     111           0 :                  uint       * ip_addr ) {
     112           0 :   struct addrinfo hints = { .ai_family = AF_INET };
     113           0 :   struct addrinfo * res;
     114           0 :   int err = getaddrinfo( address, NULL, &hints, &res );
     115           0 :   if( FD_UNLIKELY( err ) ) {
     116           0 :     FD_LOG_WARNING(( "cannot resolve address \"%s\": %i-%s", address, err, gai_strerror( err ) ));
     117           0 :     return 0;
     118           0 :   }
     119             : 
     120           0 :   int resolved = 0;
     121           0 :   for( struct addrinfo * cur=res; cur; cur=cur->ai_next ) {
     122           0 :     if( FD_UNLIKELY( cur->ai_addr->sa_family!=AF_INET ) ) continue;
     123           0 :     struct sockaddr_in const * addr = (struct sockaddr_in const *)cur->ai_addr;
     124           0 :     *ip_addr = addr->sin_addr.s_addr;
     125           0 :     resolved = 1;
     126           0 :     break;
     127           0 :   }
     128             : 
     129           0 :   freeaddrinfo( res );
     130           0 :   return resolved;
     131           0 : }
     132             : 
     133             : static int
     134             : resolve_gossip_entrypoint( char const *    host_port,
     135           0 :                            fd_ip4_port_t * ip4_port ) {
     136             : 
     137             :   /* Split host:port */
     138             : 
     139           0 :   char const * colon = strrchr( host_port, ':' );
     140           0 :   if( FD_UNLIKELY( !colon ) ) {
     141           0 :     FD_LOG_ERR(( "invalid [gossip.entrypoints] entry \"%s\": no port number", host_port ));
     142           0 :   }
     143             : 
     144           0 :   char fqdn[ 255 ];
     145           0 :   ulong fqdn_len = (ulong)( colon-host_port );
     146           0 :   if( FD_UNLIKELY( fqdn_len>254 ) ) {
     147           0 :     FD_LOG_ERR(( "invalid [gossip.entrypoints] entry \"%s\": hostname too long", host_port ));
     148           0 :   }
     149           0 :   fd_memcpy( fqdn, host_port, fqdn_len );
     150           0 :   fqdn[ fqdn_len ] = '\0';
     151             : 
     152             :   /* Parse port number */
     153             : 
     154           0 :   char const * port_str = colon+1;
     155           0 :   char const * endptr   = NULL;
     156           0 :   ulong port = strtoul( port_str, (char **)&endptr, 10 );
     157           0 :   if( FD_UNLIKELY( !endptr || !port || port>USHORT_MAX || *endptr!='\0' ) ) {
     158           0 :     FD_LOG_ERR(( "invalid [gossip.entrypoints] entry \"%s\": invalid port number", host_port ));
     159           0 :   }
     160           0 :   ip4_port->port = (ushort)fd_ushort_bswap( (ushort)port );
     161             : 
     162             :   /* Resolve hostname */
     163           0 :   int resolved = resolve_address( fqdn, &ip4_port->addr );
     164           0 :   return resolved;
     165           0 : }
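/* [Illustrative sketch, not part of topology.c.]  A minimal example of how
   the helper above might be exercised.  The hostname and the example_*
   function below are hypothetical, and the FD_IP4_ADDR_FMT /
   FD_IP4_ADDR_FMT_ARGS logging helpers are assumed to come from fd_ip4.h.
   Both the resolved address and the port are stored in network byte order,
   hence the bswap when printing the port. */

static void
example_resolve_entrypoint( void ) {
  fd_ip4_port_t ep = {0};   /* illustration only */
  if( resolve_gossip_entrypoint( "entrypoint.example.org:8001", &ep ) ) {
    FD_LOG_NOTICE(( "resolved " FD_IP4_ADDR_FMT ":%hu",
                    FD_IP4_ADDR_FMT_ARGS( ep.addr ),
                    fd_ushort_bswap( ep.port ) ));
  }
}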
     166             : 
     167             : static void
     168           0 : resolve_gossip_entrypoints( config_t * config ) {
     169           0 :   ulong entrypoint_cnt = config->gossip.entrypoints_cnt;
     170           0 :   ulong resolved_entrypoints = 0UL;
     171           0 :   for( ulong j=0UL; j<entrypoint_cnt; j++ ) {
     172           0 :     if( resolve_gossip_entrypoint( config->gossip.entrypoints[j], &config->gossip.resolved_entrypoints[resolved_entrypoints] ) ) {
     173           0 :       resolved_entrypoints++;
     174           0 :     }
     175           0 :   }
     176           0 :   config->gossip.resolved_entrypoints_cnt = resolved_entrypoints;
     177           0 : }
     178             : 
     179             : static void
     180             : setup_snapshots( config_t *       config,
     181           0 :                  fd_topo_tile_t * tile ) {
     182           0 :   fd_memcpy( tile->snaprd.snapshots_path, config->paths.snapshots, PATH_MAX );
     183           0 :   fd_memcpy( tile->snaprd.cluster, config->firedancer.snapshots.cluster, sizeof(tile->snaprd.cluster) );
     184           0 :   tile->snaprd.incremental_snapshot_fetch   = config->firedancer.snapshots.incremental_snapshots;
     185           0 :   tile->snaprd.do_download                  = config->firedancer.snapshots.download;
     186           0 :   tile->snaprd.maximum_local_snapshot_age   = config->firedancer.snapshots.maximum_local_snapshot_age;
     187           0 :   tile->snaprd.minimum_download_speed_mib   = config->firedancer.snapshots.minimum_download_speed_mib;
     188           0 :   tile->snaprd.maximum_download_retry_abort = config->firedancer.snapshots.maximum_download_retry_abort;
     189             :   /* TODO: set up known validators and known validators cnt */
     190           0 : }
     191             : 
     192             : void
     193           0 : fd_topo_initialize( config_t * config ) {
     194           0 :   resolve_gossip_entrypoints( config );
     195             : 
     196           0 :   ulong net_tile_cnt    = config->layout.net_tile_count;
     197           0 :   ulong shred_tile_cnt  = config->layout.shred_tile_count;
     198           0 :   ulong quic_tile_cnt   = config->layout.quic_tile_count;
     199           0 :   ulong verify_tile_cnt = config->layout.verify_tile_count;
     200           0 :   ulong bank_tile_cnt   = config->layout.bank_tile_count;
     201           0 :   ulong exec_tile_cnt   = config->firedancer.layout.exec_tile_count;
     202           0 :   ulong writer_tile_cnt = config->firedancer.layout.writer_tile_count;
     203           0 :   ulong resolv_tile_cnt = config->layout.resolv_tile_count;
     204             : 
     205           0 :   int enable_rpc = ( config->rpc.port != 0 );
     206             : 
     207           0 :   fd_topo_t * topo = { fd_topob_new( &config->topo, config->name ) };
     208           0 :   topo->max_page_size = fd_cstr_to_shmem_page_sz( config->hugetlbfs.max_page_size );
     209           0 :   topo->gigantic_page_threshold = config->hugetlbfs.gigantic_page_threshold_mib << 20;
     210             : 
     211             :   /*             topo, name */
     212           0 :   fd_topob_wksp( topo, "metric_in"  );
     213           0 :   fd_topob_wksp( topo, "net_shred"  );
     214           0 :   fd_topob_wksp( topo, "net_gossip" );
     215           0 :   fd_topob_wksp( topo, "net_repair" );
     216           0 :   fd_topob_wksp( topo, "net_quic"   );
     217           0 :   fd_topob_wksp( topo, "net_send"   );
     218             : 
     219           0 :   fd_topob_wksp( topo, "quic_verify"  );
     220           0 :   fd_topob_wksp( topo, "verify_dedup" );
     221           0 :   fd_topob_wksp( topo, "dedup_pack"   );
     222             : 
     223             : //  fd_topob_wksp( topo, "dedup_resolv" );
     224           0 :   fd_topob_wksp( topo, "resolv_pack"  );
     225             : 
     226           0 :   fd_topob_wksp( topo, "shred_repair" );
     227           0 :   fd_topob_wksp( topo, "stake_out"    );
     228             : 
     229           0 :   fd_topob_wksp( topo, "poh_shred"    );
     230             : 
     231           0 :   fd_topob_wksp( topo, "shred_sign"   );
     232           0 :   fd_topob_wksp( topo, "sign_shred"   );
     233             : 
     234           0 :   fd_topob_wksp( topo, "gossip_sign"  );
     235           0 :   fd_topob_wksp( topo, "sign_gossip"  );
     236             : 
     237           0 :   fd_topob_wksp( topo, "replay_exec"  );
     238           0 :   fd_topob_wksp( topo, "exec_writer"  );
     239             : 
     240           0 :   fd_topob_wksp( topo, "send_sign"    );
     241           0 :   fd_topob_wksp( topo, "sign_send"    );
     242             : 
     243           0 :   fd_topob_wksp( topo, "crds_shred"   );
     244           0 :   fd_topob_wksp( topo, "gossip_repai" );
     245           0 :   fd_topob_wksp( topo, "gossip_verif" );
     246           0 :   fd_topob_wksp( topo, "gossip_tower" );
     247           0 :   fd_topob_wksp( topo, "replay_tower" );
     248             : 
     249           0 :   fd_topob_wksp( topo, "repair_sign"  );
     250           0 :   fd_topob_wksp( topo, "sign_repair"  );
     251             : 
     252           0 :   fd_topob_wksp( topo, "repair_repla" );
     253           0 :   fd_topob_wksp( topo, "replay_poh"   );
     254           0 :   fd_topob_wksp( topo, "bank_busy"    );
     255           0 :   fd_topob_wksp( topo, "tower_send"  );
     256           0 :   fd_topob_wksp( topo, "gossip_send"  );
     257           0 :   fd_topob_wksp( topo, "send_txns"    );
     258             : 
     259           0 :   fd_topob_wksp( topo, "quic"        );
     260           0 :   fd_topob_wksp( topo, "verify"      );
     261           0 :   fd_topob_wksp( topo, "dedup"       );
     262           0 :   fd_topob_wksp( topo, "shred"       );
     263           0 :   fd_topob_wksp( topo, "pack"        );
     264           0 :   fd_topob_wksp( topo, "resolv"      );
     265           0 :   fd_topob_wksp( topo, "sign"        );
     266           0 :   fd_topob_wksp( topo, "repair"      );
     267           0 :   fd_topob_wksp( topo, "gossip"      );
     268           0 :   fd_topob_wksp( topo, "metric"      );
     269           0 :   fd_topob_wksp( topo, "replay"      );
     270           0 :   fd_topob_wksp( topo, "runtime_pub" );
     271           0 :   fd_topob_wksp( topo, "banks"       );
     272           0 :   fd_topob_wksp( topo, "bh_cmp" );
     273           0 :   fd_topob_wksp( topo, "exec"        );
     274           0 :   fd_topob_wksp( topo, "writer"      );
     275           0 :   fd_topob_wksp( topo, "store"       );
     276           0 :   fd_topob_wksp( topo, "fec_sets"    );
     277           0 :   fd_topob_wksp( topo, "tcache"      );
     278           0 :   fd_topob_wksp( topo, "poh"         );
     279           0 :   fd_topob_wksp( topo, "send"        );
     280           0 :   fd_topob_wksp( topo, "tower"       );
     281           0 :   fd_topob_wksp( topo, "exec_spad"   );
     282           0 :   fd_topob_wksp( topo, "exec_fseq"   );
     283           0 :   fd_topob_wksp( topo, "writer_fseq" );
     284           0 :   fd_topob_wksp( topo, "funk" );
     285             : 
     286           0 :   fd_topob_wksp( topo, "snapdc" );
     287           0 :   fd_topob_wksp( topo, "snaprd" );
     288           0 :   fd_topob_wksp( topo, "snapin" );
     289           0 :   fd_topob_wksp( topo, "snapdc_rd" );
     290           0 :   fd_topob_wksp( topo, "snapin_rd" );
     291           0 :   fd_topob_wksp( topo, "snap_stream" );
     292           0 :   fd_topob_wksp( topo, "snap_zstd" );
     293           0 :   fd_topob_wksp( topo, "snap_out" );
     294           0 :   fd_topob_wksp( topo, "replay_manif" );
     295             : 
      296           0 :   fd_topob_wksp( topo, "slot_fseqs"  ); /* fseqs for marked slots, e.g. turbine slot */
     297           0 :   if( enable_rpc ) fd_topob_wksp( topo, "rpcsrv" );
     298             : 
     299           0 :   #define FOR(cnt) for( ulong i=0UL; i<cnt; i++ )
     300             : 
     301           0 :   ulong pending_fec_shreds_depth = fd_ulong_min( fd_ulong_pow2_up( config->tiles.shred.max_pending_shred_sets * FD_REEDSOL_DATA_SHREDS_MAX ), USHORT_MAX + 1 /* dcache max */ );
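/* [Illustrative worked example, not from the original source.  Assuming, for
   instance, config->tiles.shred.max_pending_shred_sets = 512 and
   FD_REEDSOL_DATA_SHREDS_MAX = 67 (both values are assumptions made only for
   this example), the product is 512*67 = 34304; fd_ulong_pow2_up rounds that
   up to 65536, and the USHORT_MAX+1 dcache cap also evaluates to 65536, so
   the resulting link depth would be 65536.] */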
     302             : 
     303             :   /*                                  topo, link_name,      wksp_name,      depth,                                    mtu,                           burst */
     304           0 :   FOR(quic_tile_cnt)   fd_topob_link( topo, "quic_net",     "net_quic",     config->net.ingress_buffer_size,          FD_NET_MTU,                    1UL );
     305           0 :   FOR(shred_tile_cnt)  fd_topob_link( topo, "shred_net",    "net_shred",    config->net.ingress_buffer_size,          FD_NET_MTU,                    1UL );
     306           0 :   FOR(quic_tile_cnt)   fd_topob_link( topo, "quic_verify",  "quic_verify",  config->tiles.verify.receive_buffer_size, FD_TPU_REASM_MTU,              config->tiles.quic.txn_reassembly_count );
     307           0 :   FOR(verify_tile_cnt) fd_topob_link( topo, "verify_dedup", "verify_dedup", config->tiles.verify.receive_buffer_size, FD_TPU_PARSED_MTU,             1UL );
     308           0 :   /**/                 fd_topob_link( topo, "dedup_pack",   "dedup_pack",   config->tiles.verify.receive_buffer_size, FD_TPU_PARSED_MTU,             1UL );
     309             : 
     310           0 :   /**/                 fd_topob_link( topo, "stake_out",    "stake_out",    128UL,                                    FD_STAKE_OUT_MTU,              1UL );
     311             : 
     312           0 :   FOR(shred_tile_cnt)  fd_topob_link( topo, "shred_sign",   "shred_sign",   128UL,                                    32UL,                          1UL );
     313           0 :   FOR(shred_tile_cnt)  fd_topob_link( topo, "sign_shred",   "sign_shred",   128UL,                                    64UL,                          1UL );
     314             : 
     315             :   /**/                 fd_topob_link( topo, "gossip_sign",  "gossip_sign",  128UL,                                    2048UL,                        1UL );
     316           0 :   /**/                 fd_topob_link( topo, "sign_gossip",  "sign_gossip",  128UL,                                    64UL,                          1UL );
     317             : 
     318             : //  /**/                 fd_topob_link( topo, "dedup_resolv", "dedup_resolv", 65536UL,                                  FD_TPU_PARSED_MTU,             1UL );
     319           0 :   FOR(resolv_tile_cnt) fd_topob_link( topo, "resolv_pack",  "resolv_pack",  65536UL,                                  FD_TPU_RESOLVED_MTU,           1UL );
     320             : 
      321             :   /* TODO: The MTU is currently somewhat arbitrary and needs to be set to the size of the largest
      322             :      message that is outbound from replay to exec. */
     323           0 :   FOR(exec_tile_cnt)   fd_topob_link( topo, "replay_exec",  "replay_exec",  128UL,                                    10240UL,                       exec_tile_cnt );
     324             :   /* Assuming the number of writer tiles is sufficient to keep up with
     325             :      the number of exec tiles, under equilibrium, we should have at least
     326             :      enough link space to buffer worst case input shuffling done by the
      327             :      stem.  That is, when a link is so unlucky that the stem RNG decides
      328             :      to process every other link except this one, for all writer tiles.
     329             :      This would be fd_ulong_pow2_up( exec_tile_cnt*writer_tile_cnt+1UL ).
     330             : 
     331             :      This is all assuming we have true pipelining between exec and writer
     332             :      tiles.  Right now, we don't.  So in reality there can be at most 1
     333             :      in-flight transaction per exec tile, and hence a depth of 1 is in
     334             :      theory sufficient for each exec_writer link. */
     335             : 
     336           0 :   FOR(exec_tile_cnt)   fd_topob_link( topo, "exec_writer",  "exec_writer",  128UL,                                    FD_EXEC_WRITER_MTU,            1UL );
     337             : 
     338           0 :   /**/                 fd_topob_link( topo, "gossip_verif", "gossip_verif", config->tiles.verify.receive_buffer_size, FD_TPU_MTU,                    1UL );
     339           0 :   /**/                 fd_topob_link( topo, "gossip_tower", "gossip_tower", 128UL,                                    FD_TPU_MTU,                    1UL );
     340           0 :   /**/                 fd_topob_link( topo, "replay_tower", "replay_tower", 128UL,                                    65536UL,                       1UL );
     341           0 :   /**/                 fd_topob_link( topo, "tower_replay", "replay_tower", 128UL,                                    0,                             1UL );
     342             : 
     343             :   /**/                 fd_topob_link( topo, "crds_shred",   "crds_shred",   128UL,                                    8UL  + 40200UL * 38UL,         1UL );
     344           0 :   /**/                 fd_topob_link( topo, "gossip_repai", "gossip_repai", 128UL,                                    40200UL * 38UL, 1UL );
     345           0 :   /**/                 fd_topob_link( topo, "gossip_send",  "gossip_send",  128UL,                                    40200UL * 38UL, 1UL );
     346             : 
     347           0 :   /**/                 fd_topob_link( topo, "gossip_net",   "net_gossip",   config->net.ingress_buffer_size,          FD_NET_MTU,                    1UL );
     348           0 :   /**/                 fd_topob_link( topo, "send_net",     "net_send",     config->net.ingress_buffer_size,          FD_NET_MTU,                    2UL );
     349             : 
     350           0 :   /**/                 fd_topob_link( topo, "repair_net",   "net_repair",   config->net.ingress_buffer_size,          FD_NET_MTU,                    1UL );
     351           0 :   /**/                 fd_topob_link( topo, "repair_sign",  "repair_sign",  128UL,                                    2048UL,                        1UL );
     352           0 :   FOR(shred_tile_cnt)  fd_topob_link( topo, "shred_repair", "shred_repair", pending_fec_shreds_depth,                 FD_SHRED_REPAIR_MTU,           2UL /* at most 2 msgs per after_frag */ );
     353             : 
     354           0 :   FOR(shred_tile_cnt)  fd_topob_link( topo, "repair_shred", "shred_repair", pending_fec_shreds_depth,                 sizeof(fd_ed25519_sig_t),                                    1UL );
     355           0 :   /**/                 fd_topob_link( topo, "sign_repair",  "sign_repair",  128UL,                                    64UL,                                                        1UL );
     356           0 :   /**/                 fd_topob_link( topo, "repair_repla", "repair_repla", 65536UL,                                  sizeof(fd_fec_out_t),                                        1UL );
     357           0 :   /**/                 fd_topob_link( topo, "poh_shred",    "poh_shred",    16384UL,                                  USHORT_MAX,                                                  1UL );
     358           0 :   /**/                 fd_topob_link( topo, "poh_pack",     "replay_poh",   128UL,                                    sizeof(fd_became_leader_t) ,                                 1UL );
     359           0 :   FOR(bank_tile_cnt)   fd_topob_link( topo, "replay_poh",   "replay_poh",   128UL,                                    (4096UL*sizeof(fd_txn_p_t))+sizeof(fd_microblock_trailer_t), 1UL );
     360             : 
     361             :   /**/                 fd_topob_link( topo, "tower_send",   "tower_send",   65536UL,                                  sizeof(fd_txn_p_t),            1UL   );
     362           0 :   /**/                 fd_topob_link( topo, "send_txns",    "send_txns",    128UL,                                    FD_TXN_MTU,                    1UL   );
     363           0 :   /**/                 fd_topob_link( topo, "send_sign",    "send_sign",    128UL,                                    FD_TXN_MTU,                    1UL   );
     364           0 :   /**/                 fd_topob_link( topo, "sign_send",    "sign_send",    128UL,                                    64UL,                          1UL   );
     365             : 
     366           0 :   FD_TEST( sizeof(fd_snapshot_manifest_t)<=(5UL*(1UL<<30UL)) );
     367           0 :   /**/                 fd_topob_link( topo, "snap_zstd",    "snap_zstd",    8192UL,                                   16384UL,                       1UL );
     368           0 :   /**/                 fd_topob_link( topo, "snap_stream",  "snap_stream",  2048UL,                                   USHORT_MAX,                    1UL );
     369           0 :   /**/                 fd_topob_link( topo, "snap_out",     "snap_out",     2UL,                                      5UL*(1UL<<30UL),               1UL );
     370           0 :   /**/                 fd_topob_link( topo, "snapdc_rd",    "snapdc_rd",    128UL,                                    0UL,                           1UL );
     371           0 :   /**/                 fd_topob_link( topo, "snapin_rd",    "snapin_rd",    128UL,                                    0UL,                           1UL );
     372             : 
     373             :   /* Replay decoded manifest dcache topo obj */
     374           0 :   fd_topo_obj_t * replay_manifest_dcache = fd_topob_obj( topo, "dcache", "replay_manif" );
     375           0 :   fd_pod_insertf_ulong( topo->props, 2UL << 30UL, "obj.%lu.data_sz", replay_manifest_dcache->id );
     376           0 :   fd_pod_insert_ulong(  topo->props, "manifest_dcache", replay_manifest_dcache->id );
     377             : 
     378           0 :   ushort parsed_tile_to_cpu[ FD_TILE_MAX ];
     379             :   /* Unassigned tiles will be floating, unless auto topology is enabled. */
     380           0 :   for( ulong i=0UL; i<FD_TILE_MAX; i++ ) parsed_tile_to_cpu[ i ] = USHORT_MAX;
     381             : 
     382           0 :   int is_auto_affinity = !strcmp( config->layout.affinity, "auto" );
     383           0 :   int is_bench_auto_affinity = !strcmp( config->development.bench.affinity, "auto" );
     384             : 
     385           0 :   if( FD_UNLIKELY( is_auto_affinity != is_bench_auto_affinity ) ) {
     386           0 :     FD_LOG_ERR(( "The CPU affinity string in the configuration file under [layout.affinity] and [development.bench.affinity] must all be set to 'auto' or all be set to a specific CPU affinity string." ));
     387           0 :   }
     388             : 
     389           0 :   fd_topo_cpus_t cpus[1];
     390           0 :   fd_topo_cpus_init( cpus );
     391             : 
     392           0 :   ulong affinity_tile_cnt = 0UL;
     393           0 :   if( FD_LIKELY( !is_auto_affinity ) ) affinity_tile_cnt = fd_tile_private_cpus_parse( config->layout.affinity, parsed_tile_to_cpu );
     394             : 
     395           0 :   ulong tile_to_cpu[ FD_TILE_MAX ] = {0};
     396           0 :   for( ulong i=0UL; i<affinity_tile_cnt; i++ ) {
     397           0 :     if( FD_UNLIKELY( parsed_tile_to_cpu[ i ]!=USHORT_MAX && parsed_tile_to_cpu[ i ]>=cpus->cpu_cnt ) )
     398           0 :       FD_LOG_ERR(( "The CPU affinity string in the configuration file under [layout.affinity] specifies a CPU index of %hu, but the system "
     399           0 :                   "only has %lu CPUs. You should either change the CPU allocations in the affinity string, or increase the number of CPUs "
     400           0 :                   "in the system.",
     401           0 :                   parsed_tile_to_cpu[ i ], cpus->cpu_cnt ));
     402           0 :     tile_to_cpu[ i ] = fd_ulong_if( parsed_tile_to_cpu[ i ]==USHORT_MAX, ULONG_MAX, (ulong)parsed_tile_to_cpu[ i ] );
     403           0 :   }
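/* [Illustrative example, not from the original source.  With an assumed
   affinity string such as "0,1,2,7", fd_tile_private_cpus_parse would yield
   affinity_tile_cnt = 4 and parsed_tile_to_cpu = {0,1,2,7}, so the loop above
   pins tiles 0-2 to CPUs 0-2 and tile 3 to CPU 7; an entry of USHORT_MAX
   would instead leave that tile floating (ULONG_MAX).] */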
     404             : 
     405           0 :   fd_topos_net_tiles( topo, config->layout.net_tile_count, &config->net, config->tiles.netlink.max_routes, config->tiles.netlink.max_peer_routes, config->tiles.netlink.max_neighbors, tile_to_cpu );
     406             : 
     407           0 :   FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_gossip", i, config->net.ingress_buffer_size );
     408           0 :   FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_repair", i, config->net.ingress_buffer_size );
     409           0 :   FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_quic",   i, config->net.ingress_buffer_size );
     410           0 :   FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_shred",  i, config->net.ingress_buffer_size );
     411           0 :   FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_send",   i, config->net.ingress_buffer_size );
     412             : 
     413             :   /*                                              topo, tile_name, tile_wksp, metrics_wksp, cpu_idx,                       is_agave, uses_keyswitch */
     414           0 :   FOR(quic_tile_cnt)               fd_topob_tile( topo, "quic",    "quic",    "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     415           0 :   FOR(verify_tile_cnt)             fd_topob_tile( topo, "verify",  "verify",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     416           0 :   /**/                             fd_topob_tile( topo, "dedup",   "dedup",   "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     417           0 :   FOR(resolv_tile_cnt)             fd_topob_tile( topo, "resolv",  "resolv",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 1,        0 );
     418           0 :   FOR(shred_tile_cnt)              fd_topob_tile( topo, "shred",   "shred",   "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        1 );
     419           0 :   /**/                             fd_topob_tile( topo, "sign",    "sign",    "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        1 );
     420           0 :   /**/                             fd_topob_tile( topo, "metric",  "metric",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     421           0 :   fd_topo_tile_t * pack_tile =     fd_topob_tile( topo, "pack",    "pack",    "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
      422           0 :   /**/                             fd_topob_tile( topo, "poh",     "poh",     "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        1 );
     423           0 :   /**/                             fd_topob_tile( topo, "gossip",  "gossip",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     424           0 :   fd_topo_tile_t * repair_tile =   fd_topob_tile( topo, "repair",  "repair",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     425           0 :   /**/                             fd_topob_tile( topo, "send",    "send",    "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     426             : 
     427           0 :   fd_topo_tile_t * replay_tile =   fd_topob_tile( topo, "replay",  "replay",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     428           0 :   FOR(exec_tile_cnt)               fd_topob_tile( topo, "exec",    "exec",    "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     429           0 :   /**/                             fd_topob_tile( topo, "tower",   "tower",   "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     430           0 :   FOR(writer_tile_cnt)             fd_topob_tile( topo, "writer",  "writer",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     431             : 
     432           0 :   fd_topo_tile_t * rpcserv_tile = NULL;
     433           0 :   if( enable_rpc ) rpcserv_tile =  fd_topob_tile( topo, "rpcsrv",  "rpcsrv",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        1 );
     434             : 
     435           0 :   fd_topo_tile_t * snaprd_tile = fd_topob_tile( topo, "snaprd", "snaprd", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     436           0 :   snaprd_tile->allow_shutdown = 1;
     437           0 :   fd_topo_tile_t * snapdc_tile = fd_topob_tile( topo, "snapdc", "snapdc", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     438           0 :   snapdc_tile->allow_shutdown = 1;
     439           0 :   fd_topo_tile_t * snapin_tile = fd_topob_tile( topo, "snapin", "snapin", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     440           0 :   snapin_tile->allow_shutdown = 1;
     441             : 
     442             :   /* Database cache */
     443             : 
     444           0 :   fd_topo_obj_t * funk_obj = setup_topo_funk( topo, "funk",
     445           0 :       config->firedancer.funk.max_account_records,
     446           0 :       config->firedancer.funk.max_database_transactions,
     447           0 :       config->firedancer.funk.heap_size_gib,
     448           0 :       config->firedancer.funk.lock_pages );
     449             : 
     450           0 :   FOR(exec_tile_cnt)   fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], funk_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     451           0 :   /*                */ fd_topob_tile_uses( topo, replay_tile,  funk_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     452           0 :   if(rpcserv_tile)     fd_topob_tile_uses( topo, rpcserv_tile, funk_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     453           0 :   FOR(writer_tile_cnt) fd_topob_tile_uses( topo,  &topo->tiles[ fd_topo_find_tile( topo, "writer", i ) ], funk_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     454             : 
      455             :   /* Set up a shared wksp object for banks. */
     456             : 
     457           0 :   fd_topo_obj_t * banks_obj = setup_topo_banks( topo, "banks", config->firedancer.runtime.limits.max_total_banks, config->firedancer.runtime.limits.max_fork_width );
     458           0 :   fd_topob_tile_uses( topo, replay_tile, banks_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     459           0 :   FOR(exec_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], banks_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     460           0 :   FOR(writer_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "writer", i ) ], banks_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     461           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, banks_obj->id, "banks" ) );
     462             : 
      463             :   /* Set up a shared wksp object for bank hash cmp. */
     464             : 
     465           0 :   fd_topo_obj_t * bank_hash_cmp_obj = setup_topo_bank_hash_cmp( topo, "bh_cmp" );
     466           0 :   fd_topob_tile_uses( topo, replay_tile, bank_hash_cmp_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     467           0 :   FOR(exec_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], bank_hash_cmp_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     468           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, bank_hash_cmp_obj->id, "bh_cmp" ) );
     469             : 
      470             :   /* Set up a shared wksp object for fec sets. */
     471             : 
     472           0 :   ulong shred_depth = 65536UL; /* from fdctl/topology.c shred_store link. MAKE SURE TO KEEP IN SYNC. */
     473           0 :   ulong fec_set_cnt = shred_depth + config->tiles.shred.max_pending_shred_sets + 4UL;
      474           0 :   ulong fec_sets_sz = fec_set_cnt*sizeof(fd_shred34_t)*4; /* mirrors # of dcache entries in frankendancer */
     475           0 :   fd_topo_obj_t * fec_sets_obj = setup_topo_fec_sets( topo, "fec_sets", shred_tile_cnt*fec_sets_sz );
     476           0 :   for( ulong i=0UL; i<shred_tile_cnt; i++ ) {
     477           0 :     fd_topo_tile_t * shred_tile = &topo->tiles[ fd_topo_find_tile( topo, "shred", i ) ];
     478           0 :     fd_topob_tile_uses( topo, shred_tile,  fec_sets_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     479           0 :   }
     480           0 :   fd_topob_tile_uses( topo, repair_tile, fec_sets_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     481           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, fec_sets_obj->id, "fec_sets" ) );
     482             : 
      483             :   /* Set up a shared wksp object for runtime pub. */
     484             : 
     485           0 :   fd_topo_obj_t * runtime_pub_obj = setup_topo_runtime_pub( topo, "runtime_pub", config->firedancer.runtime.heap_size_gib<<30 );
     486             : 
     487           0 :   fd_topob_tile_uses( topo, replay_tile, runtime_pub_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     488           0 :   fd_topob_tile_uses( topo, pack_tile,   runtime_pub_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     489           0 :   FOR(exec_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], runtime_pub_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     490           0 :   FOR(writer_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "writer", i ) ], runtime_pub_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     491           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, runtime_pub_obj->id, "runtime_pub" ) );
     492             : 
      493             :   /* Set up a shared wksp object for store. */
     494             : 
     495           0 :   fd_topo_obj_t * store_obj = setup_topo_store( topo, "store", config->firedancer.store.max_completed_shred_sets, (uint)shred_tile_cnt );
     496           0 :   FOR(shred_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "shred", i ) ], store_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     497           0 :   fd_topob_tile_uses( topo, replay_tile, store_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     498           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, store_obj->id, "store" ) );
     499             : 
     500             :   /* Create a txncache to be used by replay. */
     501           0 :   fd_topo_obj_t * txncache_obj = setup_topo_txncache( topo, "tcache",
     502           0 :       config->firedancer.runtime.limits.max_rooted_slots,
     503           0 :       config->firedancer.runtime.limits.max_live_slots,
     504           0 :       config->firedancer.runtime.limits.max_transactions_per_slot );
     505           0 :   fd_topob_tile_uses( topo, replay_tile, txncache_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     506           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, txncache_obj->id, "txncache" ) );
     507             : 
     508           0 :   for( ulong i=0UL; i<bank_tile_cnt; i++ ) {
     509           0 :     fd_topo_obj_t * busy_obj = fd_topob_obj( topo, "fseq", "bank_busy" );
     510           0 :     fd_topob_tile_uses( topo, replay_tile, busy_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     511           0 :     fd_topob_tile_uses( topo, pack_tile, busy_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     512           0 :     FD_TEST( fd_pod_insertf_ulong( topo->props, busy_obj->id, "bank_busy.%lu", i ) );
     513           0 :   }
     514             : 
     515           0 :   for( ulong i=0UL; i<exec_tile_cnt; i++ ) {
     516           0 :     fd_topo_obj_t * exec_spad_obj = fd_topob_obj( topo, "exec_spad", "exec_spad" );
     517           0 :     fd_topob_tile_uses( topo, replay_tile, exec_spad_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     518           0 :     fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], exec_spad_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     519           0 :     for( ulong j=0UL; j<writer_tile_cnt; j++ ) {
     520             :       /* For txn_ctx. */
     521           0 :       fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "writer", j ) ], exec_spad_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     522           0 :     }
     523           0 :     FD_TEST( fd_pod_insertf_ulong( topo->props, exec_spad_obj->id, "exec_spad.%lu", i ) );
     524           0 :   }
     525             : 
     526           0 :   for( ulong i=0UL; i<exec_tile_cnt; i++ ) {
     527           0 :     fd_topo_obj_t * exec_fseq_obj = fd_topob_obj( topo, "fseq", "exec_fseq" );
     528           0 :     fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], exec_fseq_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     529           0 :     fd_topob_tile_uses( topo, replay_tile, exec_fseq_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     530           0 :     FD_TEST( fd_pod_insertf_ulong( topo->props, exec_fseq_obj->id, "exec_fseq.%lu", i ) );
     531           0 :   }
     532             : 
     533           0 :   for( ulong i=0UL; i<writer_tile_cnt; i++ ) {
     534           0 :     fd_topo_obj_t * writer_fseq_obj = fd_topob_obj( topo, "fseq", "writer_fseq" );
     535           0 :     fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "writer", i ) ], writer_fseq_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     536           0 :     fd_topob_tile_uses( topo, replay_tile, writer_fseq_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     537           0 :     FD_TEST( fd_pod_insertf_ulong( topo->props, writer_fseq_obj->id, "writer_fseq.%lu", i ) );
     538           0 :   }
     539             : 
     540           0 :   fd_topob_tile_uses( topo, snapin_tile, funk_obj,               FD_SHMEM_JOIN_MODE_READ_WRITE );
     541           0 :   fd_topob_tile_uses( topo, snapin_tile, runtime_pub_obj,        FD_SHMEM_JOIN_MODE_READ_WRITE );
     542           0 :   fd_topob_tile_uses( topo, snapin_tile, replay_manifest_dcache, FD_SHMEM_JOIN_MODE_READ_WRITE );
     543           0 :   fd_topob_tile_uses( topo, replay_tile, replay_manifest_dcache, FD_SHMEM_JOIN_MODE_READ_ONLY );
     544             : 
     545             :   /* There's another special fseq that's used to communicate the shred
     546             :     version from the Agave boot path to the shred tile. */
     547           0 :   fd_topo_obj_t * poh_shred_obj = fd_topob_obj( topo, "fseq", "poh_shred" );
     548           0 :   fd_topo_tile_t * poh_tile = &topo->tiles[ fd_topo_find_tile( topo, "gossip", 0UL ) ];
     549           0 :   fd_topob_tile_uses( topo, poh_tile, poh_shred_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     550             : 
     551             :   /* root_slot is an fseq marking the validator's current Tower root. */
     552             : 
     553           0 :   fd_topo_obj_t * root_slot_obj = fd_topob_obj( topo, "fseq", "slot_fseqs" );
     554           0 :   fd_topob_tile_uses( topo, replay_tile, root_slot_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     555           0 :   fd_topob_tile_uses( topo, repair_tile, root_slot_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     556           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, root_slot_obj->id, "root_slot" ) );
     557             : 
     558             :   /* turbine_slot0 is an fseq marking the slot number of the first shred
     559             :      we observed from Turbine.  This is a useful heuristic for
     560             :      determining when replay has progressed past the slot in which we
      561             :      last voted.  The idea is that once replay has proceeded past the
      562             :      slot at which the validator stopped replaying, and therefore also
      563             :      stopped voting (crashed, shut down, etc.), it will have read back
      564             :      its latest tower from the ledger.  Note this logic does not hold
      565             :      if our latest tower vote was for a minority fork. */
     566             : 
     567           0 :   fd_topo_obj_t * turbine_slot0_obj = fd_topob_obj( topo, "fseq", "slot_fseqs" );
     568           0 :   fd_topob_tile_uses( topo, repair_tile, turbine_slot0_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     569           0 :   fd_topob_tile_uses( topo, replay_tile, turbine_slot0_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     570           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, turbine_slot0_obj->id, "turbine_slot0" ) );
     571             : 
     572             :   /* turbine_slot is an fseq marking the highest slot we've observed on
     573             :      a shred.  This is continuously updated as the validator is running
     574             :      and is used to determine whether the validator is caught up with
     575             :      the rest of the cluster. */
     576             : 
     577           0 :   fd_topo_obj_t * turbine_slot_obj = fd_topob_obj( topo, "fseq", "slot_fseqs" );
     578           0 :   fd_topob_tile_uses( topo, repair_tile, turbine_slot_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     579           0 :   fd_topob_tile_uses( topo, replay_tile, turbine_slot_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     580           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, turbine_slot_obj->id, "turbine_slot" ) );
     581             : 
     582           0 :   for( ulong i=0UL; i<shred_tile_cnt; i++ ) {
     583           0 :     fd_topo_tile_t * shred_tile = &topo->tiles[ fd_topo_find_tile( topo, "shred", i ) ];
     584           0 :     fd_topob_tile_uses( topo, shred_tile, poh_shred_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     585           0 :   }
     586           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, poh_shred_obj->id, "poh_shred" ) );
     587             : 
     588           0 :   if( FD_LIKELY( !is_auto_affinity ) ) {
     589           0 :     if( FD_UNLIKELY( affinity_tile_cnt<topo->tile_cnt ) )
     590           0 :       FD_LOG_ERR(( "The topology you are using has %lu tiles, but the CPU affinity specified in the config tile as [layout.affinity] only provides for %lu cores. "
     591           0 :                   "You should either increase the number of cores dedicated to Firedancer in the affinity string, or decrease the number of cores needed by reducing "
     592           0 :                   "the total tile count. You can reduce the tile count by decreasing individual tile counts in the [layout] section of the configuration file.",
     593           0 :                   topo->tile_cnt, affinity_tile_cnt ));
     594           0 :     if( FD_UNLIKELY( affinity_tile_cnt>topo->tile_cnt ) )
     595           0 :       FD_LOG_WARNING(( "The topology you are using has %lu tiles, but the CPU affinity specified in the config tile as [layout.affinity] provides for %lu cores. "
     596           0 :                       "Not all cores in the affinity will be used by Firedancer. You may wish to increase the number of tiles in the system by increasing "
     597           0 :                       "individual tile counts in the [layout] section of the configuration file.",
     598           0 :                       topo->tile_cnt, affinity_tile_cnt ));
     599           0 :   }
     600             : 
     601             :   /*                                      topo, tile_name, tile_kind_id, fseq_wksp,   link_name,      link_kind_id, reliable,            polled */
     602           0 :   for( ulong j=0UL; j<shred_tile_cnt; j++ )
     603           0 :                   fd_topos_tile_in_net(  topo,                          "metric_in", "shred_net",    j,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     604           0 :   for( ulong j=0UL; j<quic_tile_cnt; j++ )
     605           0 :                   fd_topos_tile_in_net(  topo,                          "metric_in", "quic_net",     j,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     606           0 :   FOR(quic_tile_cnt) for( ulong j=0UL; j<net_tile_cnt; j++ )
     607           0 :                       fd_topob_tile_in(  topo, "quic",    i,            "metric_in", "net_quic",     j,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     608           0 :   FOR(quic_tile_cnt)   fd_topob_tile_out( topo, "quic",    i,                         "quic_verify",  i                                                  );
     609           0 :   FOR(quic_tile_cnt)   fd_topob_tile_out( topo, "quic",    i,                         "quic_net",     i                                                  );
      610             :   /* All verify tiles read from all QUIC tiles; packets are distributed round-robin. */
     611           0 :   FOR(verify_tile_cnt) for( ulong j=0UL; j<quic_tile_cnt; j++ )
     612           0 :                       fd_topob_tile_in(  topo, "verify",  i,            "metric_in", "quic_verify",   j,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers, verify tiles may be overrun */
     613           0 :   FOR(verify_tile_cnt) fd_topob_tile_out( topo, "verify",  i,                         "verify_dedup", i                                                  );
     614           0 :   FOR(verify_tile_cnt) fd_topob_tile_in(  topo, "verify",  i,            "metric_in", "gossip_verif", 0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     615           0 :   /**/                 fd_topob_tile_in(  topo, "gossip",  0UL,          "metric_in", "send_txns",    0UL,          FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     616           0 :   /**/                 fd_topob_tile_in(  topo, "verify",  0UL,          "metric_in", "send_txns",    0UL,          FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     617           0 :   FOR(verify_tile_cnt) fd_topob_tile_in(  topo, "dedup",   0UL,          "metric_in", "verify_dedup", i,            FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     618           0 :   /**/                 fd_topob_tile_out( topo, "dedup",   0UL,                       "dedup_pack",   0UL                                                );
     619             : //  FOR(resolv_tile_cnt) fd_topob_tile_in(  topo, "resolv",  i,            "metric_in", "dedup_resolv", 0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     620             : //  FOR(resolv_tile_cnt) fd_topob_tile_in(  topo, "resolv",  i,            "metric_in", "replay_resol", 0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     621           0 :   FOR(resolv_tile_cnt) fd_topob_tile_out( topo, "resolv",  i,                         "resolv_pack",  i                                                  );
     622           0 :   /**/                 fd_topob_tile_in(  topo, "pack",    0UL,          "metric_in", "resolv_pack",  0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     623             : 
     624           0 :   /**/             fd_topos_tile_in_net(  topo,                          "metric_in", "gossip_net",   0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     625           0 :   /**/             fd_topos_tile_in_net(  topo,                          "metric_in", "repair_net",   0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     626           0 :   /**/             fd_topos_tile_in_net(  topo,                          "metric_in", "send_net",     0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     627             : 
     628           0 :   FOR(shred_tile_cnt) for( ulong j=0UL; j<net_tile_cnt; j++ )
     629           0 :                       fd_topob_tile_in(  topo, "shred",  i,             "metric_in", "net_shred",     j,            FD_TOPOB_UNRELIABLE,   FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     630           0 :   FOR(shred_tile_cnt)  fd_topob_tile_in(  topo, "shred",  i,             "metric_in", "poh_shred",     0UL,          FD_TOPOB_RELIABLE,     FD_TOPOB_POLLED );
     631           0 :   FOR(shred_tile_cnt)  fd_topob_tile_in(  topo, "shred",  i,             "metric_in", "stake_out",     0UL,          FD_TOPOB_RELIABLE,     FD_TOPOB_POLLED );
     632           0 :   FOR(shred_tile_cnt)  fd_topob_tile_in(  topo, "shred",  i,             "metric_in", "crds_shred",    0UL,          FD_TOPOB_RELIABLE,     FD_TOPOB_POLLED );
     633           0 :   FOR(shred_tile_cnt)  fd_topob_tile_out( topo, "shred",  i,                          "shred_repair",  i                                                    );
     634           0 :   FOR(shred_tile_cnt)  fd_topob_tile_out( topo, "shred",  i,                          "shred_net",     i                                                    );
     635             : 
     636           0 :   FOR(shred_tile_cnt)  fd_topob_tile_in(  topo, "shred",  i,             "metric_in",  "repair_shred", i,            FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     637             : 
     638             :   /**/                 fd_topob_tile_out( topo, "repair",  0UL,                       "repair_net",    0UL                                                  );
     639             : 
     640           0 :   /**/                 fd_topob_tile_in(  topo, "tower",   0UL,          "metric_in", "gossip_tower", 0UL,           FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     641           0 :   /**/                 fd_topob_tile_in(  topo, "tower",   0UL,          "metric_in", "replay_tower", 0UL,           FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     642             : 
     643             :   /**/                 fd_topob_tile_out( topo, "tower",  0UL,                        "tower_replay", 0UL                                                   );
     644           0 :   /**/                 fd_topob_tile_out( topo, "tower",  0UL,                        "tower_send",   0UL                                                   );
     645             : 
     646             :   /* Sign links don't need to be reliable because they are synchronous,
     647             :     so there's at most one fragment in flight at a time anyway.  The
     648             :     sign links are also not polled by fd_stem; instead, the tiles read
     649             :     the sign responses out of band in a dedicated spin loop. */
     650           0 :   for( ulong i=0UL; i<shred_tile_cnt; i++ ) {
     651           0 :     /**/               fd_topob_tile_in(  topo, "sign",   0UL,           "metric_in", "shred_sign",    i,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     652           0 :     /**/               fd_topob_tile_out( topo, "shred",  i,                          "shred_sign",    i                                                    );
     653           0 :     /**/               fd_topob_tile_in(  topo, "shred",  i,             "metric_in", "sign_shred",    i,            FD_TOPOB_UNRELIABLE, FD_TOPOB_UNPOLLED );
     654           0 :     /**/               fd_topob_tile_out( topo, "sign",   0UL,                        "sign_shred",    i                                                    );
     655           0 :   }
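
  /* Illustrative sketch, not part of the original topology.c: what the
     synchronous sign round trip described above looks like from a client
     tile's perspective.  The helper names below are hypothetical
     placeholders for the real keyguard client logic.

       static void
       example_sign_blocking( example_sign_client_t * client,    // hypothetical handle over a shred_sign/sign_shred pair
                              uchar const *           msg,
                              ulong                   msg_sz,
                              uchar                   sig[ 64 ] ) {
         example_publish_sign_request( client, msg, msg_sz );    // at most one request in flight on the sign link
         while( !example_try_read_sign_response( client, sig ) ) {
           FD_SPIN_PAUSE();                                      // dedicated spin loop; the response link is UNPOLLED
         }
       }
  */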
     656             : 
     657           0 :   FOR(net_tile_cnt)    fd_topob_tile_in(  topo, "gossip",   0UL,          "metric_in", "net_gossip",   i,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     658           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_net",   0UL                                                  );
     659           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "crds_shred",   0UL                                                  );
     660           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_repai", 0UL                                                  );
     661           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_verif", 0UL                                                  );
     662           0 :   /**/                 fd_topob_tile_in(  topo, "sign",     0UL,          "metric_in", "gossip_sign",  0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     663           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_sign",  0UL                                                  );
     664           0 :   /**/                 fd_topob_tile_in(  topo, "gossip",   0UL,          "metric_in", "sign_gossip",  0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_UNPOLLED );
     665           0 :   /**/                 fd_topob_tile_out( topo, "sign",     0UL,                       "sign_gossip",  0UL                                                  );
     666           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_send",  0UL                                                  );
     667           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_tower", 0UL                                                  );
     668             : 
     669           0 :   FOR(net_tile_cnt)    fd_topob_tile_in(  topo, "repair",  0UL,          "metric_in", "net_repair",    i,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     670           0 :   /**/                 fd_topob_tile_in(  topo, "repair",  0UL,          "metric_in", "gossip_repai",  0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     671           0 :   /**/                 fd_topob_tile_in(  topo, "repair",  0UL,          "metric_in", "stake_out",     0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     672           0 :   FOR(shred_tile_cnt)  fd_topob_tile_in(  topo, "repair",  0UL,          "metric_in", "shred_repair",  i,            FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     673             : 
     674           0 :   /**/                 fd_topob_tile_in(  topo, "replay",  0UL,          "metric_in", "repair_repla",  0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED  );
     675           0 :   /**/                 fd_topob_tile_out( topo, "replay",  0UL,                       "stake_out",     0UL                                                  );
     676           0 :   /**/                 fd_topob_tile_in(  topo, "replay",  0UL,          "metric_in", "tower_replay",  0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     677           0 :   /**/                 fd_topob_tile_out( topo, "replay",  0UL,                       "replay_tower",  0UL                                                  );
     678           0 :   FOR(bank_tile_cnt)   fd_topob_tile_out( topo, "replay",  0UL,                       "replay_poh",    i                                                    );
     679           0 :   FOR(exec_tile_cnt)   fd_topob_tile_out( topo, "replay",  0UL,                       "replay_exec",   i                                                    ); /* TODO check order in fd_replay.c macros */
     680             : 
     681           0 :   FOR(exec_tile_cnt)   fd_topob_tile_in(  topo, "exec",    i,            "metric_in", "replay_exec",  i,            FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED    );
     682           0 :   FOR(exec_tile_cnt)   fd_topob_tile_out( topo, "exec",    i,                         "exec_writer",  i                                                     );
     683             :   /* All writer tiles read from all exec tiles.  Each exec tile has a
     684             :      single out link, over which all the writer tiles round-robin. */
     685           0 :   FOR(writer_tile_cnt) for( ulong j=0UL; j<exec_tile_cnt; j++ )
     686           0 :                        fd_topob_tile_in(  topo, "writer",  i,            "metric_in", "exec_writer",  j,            FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED    );
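
  /* Illustrative sketch, not part of the original topology.c: one plausible
     way a writer tile could implement the round-robin described above,
     assuming the split is by fragment sequence number (the real scheme
     lives in the exec/writer tiles themselves).

       static inline int
       example_writer_owns_frag( ulong writer_idx,       // this writer tile's index
                                 ulong writer_tile_cnt,  // total writer tiles in the topology
                                 ulong seq ) {           // fragment sequence number on an exec_writer link
         return (seq % writer_tile_cnt)==writer_idx;     // every writer polls the link, exactly one consumes each frag
       }
  */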
     687             : 
     688           0 :   /**/                 fd_topob_tile_in ( topo, "send",   0UL,         "metric_in", "stake_out",     0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     689           0 :   /**/                 fd_topob_tile_in ( topo, "send",   0UL,         "metric_in", "gossip_send",   0UL,    FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     690           0 :   /**/                 fd_topob_tile_in ( topo, "send",   0UL,         "metric_in", "tower_send",    0UL,    FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     691           0 :   /**/                 fd_topob_tile_in ( topo, "send",   0UL,         "metric_in", "net_send",      0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     692           0 :   /**/                 fd_topob_tile_out( topo, "send",   0UL,                      "send_net",      0UL                                            );
     693           0 :   /**/                 fd_topob_tile_out( topo, "send",   0UL,                      "send_txns",     0UL                                            );
     694           0 :   /**/                 fd_topob_tile_out( topo, "send",   0UL,                      "send_sign",     0UL                                            );
     695           0 :   /**/                 fd_topob_tile_in ( topo, "sign",   0UL,         "metric_in", "send_sign",     0UL,    FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     696           0 :   /**/                 fd_topob_tile_out( topo, "sign",   0UL,                      "sign_send",     0UL                                            );
     697           0 :   /**/                 fd_topob_tile_in ( topo, "send",   0UL,         "metric_in", "sign_send",     0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_UNPOLLED );
     698             : 
     699           0 :   /**/                 fd_topob_tile_in ( topo, "pack",   0UL,         "metric_in",  "dedup_pack",   0UL,    FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     700           0 :   /**/                 fd_topob_tile_in ( topo, "pack",   0UL,         "metric_in",  "poh_pack",     0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     701           0 :   FOR(bank_tile_cnt)   fd_topob_tile_in ( topo, "poh",    0UL,         "metric_in",  "replay_poh",   i,      FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   ); /* No reliable consumers, may be dropped or overrun */
     702           0 :   /**/                 fd_topob_tile_in ( topo, "poh",    0UL,         "metric_in",  "stake_out",    0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   ); /* No reliable consumers, may be dropped or overrun */
     703           0 :   /**/                 fd_topob_tile_out( topo, "poh",    0UL,                       "poh_shred",    0UL                                            );
     704             : 
     705           0 :                        fd_topob_tile_out( topo, "poh",    0UL,                       "poh_pack",     0UL                                            );
     706             : 
     707           0 :   /**/                 fd_topob_tile_in(  topo, "sign",   0UL,         "metric_in",  "repair_sign",  0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     708           0 :   /**/                 fd_topob_tile_out( topo, "repair", 0UL,                       "repair_sign",  0UL                                            );
     709           0 :   /**/                 fd_topob_tile_in(  topo, "repair", 0UL,         "metric_in",  "sign_repair",  0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_UNPOLLED );
     710           0 :   /**/                 fd_topob_tile_out( topo, "repair", 0UL,                       "repair_repla", 0UL                                            );
     711           0 :   FOR(shred_tile_cnt)  fd_topob_tile_out( topo, "repair", 0UL,                       "repair_shred", i                                              );
     712           0 :   /**/                 fd_topob_tile_out( topo, "sign",   0UL,                       "sign_repair",  0UL                                            );
     713             : 
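  /* Snapshot load pipeline: snaprd streams the raw (zstd compressed)
     snapshot to snapdc over snap_zstd, snapdc decompresses it onto
     snap_stream for snapin, and snapin publishes the restored state to
     replay over snap_out.  snapdc and snapin report status back to snaprd
     over snapdc_rd and snapin_rd. */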
     714           0 :   fd_topob_tile_out( topo, "snaprd", 0UL, "snap_zstd", 0UL );
     715           0 :   fd_topob_tile_in( topo, "snapdc", 0UL, "metric_in", "snap_zstd", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     716           0 :   fd_topob_tile_out( topo, "snapdc", 0UL, "snap_stream", 0UL );
     717           0 :   fd_topob_tile_in  ( topo, "snapin", 0UL, "metric_in", "snap_stream", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED   );
     718           0 :   fd_topob_tile_out( topo, "snapin", 0UL, "snap_out", 0UL );
     719           0 :   fd_topob_tile_in( topo, "replay", 0UL, "metric_in", "snap_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     720             : 
     721           0 :   fd_topob_tile_in( topo, "snaprd", 0UL, "metric_in", "snapdc_rd", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     722           0 :   fd_topob_tile_out( topo, "snapdc", 0UL, "snapdc_rd", 0UL );
     723           0 :   fd_topob_tile_in( topo, "snaprd", 0UL, "metric_in", "snapin_rd", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     724           0 :   fd_topob_tile_out( topo, "snapin", 0UL, "snapin_rd", 0UL );
     725             : 
     726           0 :   if( config->tiles.archiver.enabled ) {
     727           0 :     fd_topob_wksp( topo, "arch_f" );
     728           0 :     fd_topob_wksp( topo, "arch_w" );
     729           0 :     /**/ fd_topob_tile( topo, "arch_f", "arch_f", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     730           0 :     /**/ fd_topob_tile( topo, "arch_w", "arch_w", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     731             : 
     732           0 :     fd_topob_wksp( topo, "feeder" );
     733           0 :     fd_topob_link( topo, "feeder", "feeder", 65536UL, 4UL*FD_SHRED_STORE_MTU, 4UL+config->tiles.shred.max_pending_shred_sets );
     734           0 :     /**/ fd_topob_tile_out( topo, "replay", 0UL, "feeder", 0UL );
     735           0 :     /**/ fd_topob_tile_in(  topo, "arch_f", 0UL, "metric_in", "feeder", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     736             : 
     737           0 :     fd_topob_wksp( topo, "arch_f2w" );
     738           0 :     fd_topob_link( topo, "arch_f2w", "arch_f2w", 128UL, 4UL*FD_SHRED_STORE_MTU, 1UL );
     739           0 :     /**/ fd_topob_tile_out( topo, "arch_f", 0UL, "arch_f2w", 0UL );
     740           0 :     /**/ fd_topob_tile_in( topo, "arch_w", 0UL, "metric_in", "arch_f2w", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     741           0 :   }
     742             : 
     743           0 :   if( config->tiles.shredcap.enabled ) {
     744           0 :     fd_topob_wksp( topo, "scap" );
     745             : 
     746           0 :     fd_topob_wksp( topo, "repair_scap" );
     747           0 :     fd_topob_wksp( topo, "replay_scap" );
     748             : 
     749           0 :     fd_topo_tile_t * scap_tile = fd_topob_tile( topo, "scap", "scap", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     750             : 
     751           0 :     fd_topob_link( topo, "repair_scap", "repair_scap", 128UL, FD_SLICE_MAX_WITH_HEADERS, 1UL );
     752           0 :     fd_topob_link( topo, "replay_scap", "replay_scap", 128UL, sizeof(fd_hash_t)+sizeof(ulong), 1UL );
     753             : 
     754           0 :     fd_topob_tile_in(  topo, "scap", 0UL, "metric_in", "repair_net", 0UL, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     755           0 :     for( ulong j=0UL; j<net_tile_cnt; j++ ) {
     756           0 :       fd_topob_tile_in(  topo, "scap", 0UL, "metric_in", "net_shred", j, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     757           0 :     }
     758           0 :     for( ulong j=0UL; j<shred_tile_cnt; j++ ) {
     759           0 :       fd_topob_tile_in(  topo, "scap", 0UL, "metric_in", "shred_repair", j, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     760           0 :     }
     761           0 :     fd_topob_tile_in( topo, "scap", 0UL, "metric_in", "crds_shred", 0UL, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     762           0 :     fd_topob_tile_in( topo, "scap", 0UL, "metric_in", "gossip_repai", 0UL, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     763             : 
     764           0 :     fd_topob_tile_in( topo, "scap", 0UL, "metric_in", "repair_scap", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     765           0 :     fd_topob_tile_in( topo, "scap", 0UL, "metric_in", "replay_scap", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     766             : 
     767           0 :     fd_topob_tile_out( topo, "repair", 0UL, "repair_scap", 0UL );
     768           0 :     fd_topob_tile_out( topo, "replay", 0UL, "replay_scap", 0UL );
     769             : 
     770           0 :     fd_topob_tile_uses( topo, scap_tile, root_slot_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     771             :     /* No default fd_topob_tile_in connection to stake_out */
     772           0 :   }
     773             : 
     774           0 :   fd_topob_wksp( topo, "replay_notif" );
     775             :   /* We may be notifying an external service, so always publish on this link. */
     776           0 :   /**/ fd_topob_link( topo, "replay_notif", "replay_notif", FD_REPLAY_NOTIF_DEPTH, FD_REPLAY_NOTIF_MTU, 1UL )->permit_no_consumers = 1;
     777           0 :   /**/ fd_topob_tile_out( topo, "replay",  0UL, "replay_notif", 0UL );
     778             : 
     779           0 :   if( enable_rpc ) {
     780           0 :     fd_topob_tile_in(  topo, "rpcsrv", 0UL, "metric_in",  "replay_notif", 0UL, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     781           0 :     fd_topob_tile_in(  topo, "rpcsrv", 0UL, "metric_in",  "stake_out",    0UL, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     782           0 :   }
     783             : 
     784             :   /* For now, the only plugin consumer is the GUI. */
     785           0 :   int plugins_enabled = config->tiles.gui.enabled;
     786           0 :   if( FD_LIKELY( plugins_enabled ) ) {
     787           0 :     fd_topob_wksp( topo, "plugin_in"    );
     788           0 :     fd_topob_wksp( topo, "plugin_out"   );
     789           0 :     fd_topob_wksp( topo, "plugin"       );
     790             : 
     791             :     /**/                 fd_topob_link( topo, "plugin_out",   "plugin_out",   128UL,                                    8UL+40200UL*(58UL+12UL*34UL), 1UL );
     792           0 :     /**/                 fd_topob_link( topo, "replay_plugi", "plugin_in",    128UL,                                    4098*8UL,               1UL );
     793           0 :     /**/                 fd_topob_link( topo, "gossip_plugi", "plugin_in",    128UL,                                    8UL+40200UL*(58UL+12UL*34UL), 1UL );
     794           0 :     /**/                 fd_topob_link( topo, "votes_plugin", "plugin_in",    128UL,                                    8UL+40200UL*(58UL+12UL*34UL), 1UL );
     795             : 
     796             :     /**/                 fd_topob_tile( topo, "plugin",  "plugin",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     797             : 
     798             :     /**/                 fd_topob_tile_out( topo, "replay", 0UL,                        "replay_plugi", 0UL                                                  );
     799           0 :     /**/                 fd_topob_tile_out( topo, "replay", 0UL,                        "votes_plugin", 0UL                                                  );
     800           0 :     /**/                 fd_topob_tile_out( topo, "gossip", 0UL,                        "gossip_plugi", 0UL                                                  );
     801           0 :     /**/                 fd_topob_tile_out( topo, "plugin", 0UL,                        "plugin_out", 0UL                                                    );
     802             : 
     803           0 :     /**/                 fd_topob_tile_in(  topo, "plugin", 0UL,           "metric_in", "replay_plugi", 0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     804           0 :     /**/                 fd_topob_tile_in(  topo, "plugin", 0UL,           "metric_in", "gossip_plugi", 0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     805           0 :     /**/                 fd_topob_tile_in(  topo, "plugin", 0UL,           "metric_in", "stake_out",    0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     806           0 :     /**/                 fd_topob_tile_in(  topo, "plugin", 0UL,           "metric_in", "votes_plugin", 0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     807           0 :   }
     808             : 
     809           0 :   if( FD_LIKELY( config->tiles.gui.enabled ) ) {
     810           0 :     fd_topob_wksp( topo, "gui"          );
     811           0 :     /**/                 fd_topob_tile(     topo, "gui",     "gui",     "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0, 1 );
     812           0 :     /**/                 fd_topob_tile_in(  topo, "gui",    0UL,        "metric_in",     "plugin_out",   0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     813           0 :   }
     814             : 
     815           0 :   FOR(net_tile_cnt) fd_topos_net_tile_finish( topo, i );
     816             : 
     817           0 :   for( ulong i=0UL; i<topo->tile_cnt; i++ ) {
     818           0 :     fd_topo_tile_t * tile = &topo->tiles[ i ];
     819           0 :     if( !fd_topo_configure_tile( tile, config ) ) {
     820           0 :       FD_LOG_ERR(( "unknown tile %lu with name `%s`", i, tile->name ));
     821           0 :     }
     822           0 :   }
     823             : 
     824           0 :   if( FD_UNLIKELY( is_auto_affinity ) ) fd_topob_auto_layout( topo, 0 );
     825             : 
     826           0 :   fd_topob_finish( topo, CALLBACKS );
     827             : 
     828           0 :   const char * status_cache = config->tiles.replay.status_cache;
     829           0 :   if( strlen( status_cache )>0 ) {
     830             :     /* Make the status cache workspace match the parameters used to create the
     831             :       checkpoint. This is a bit nonintuitive because of the way
     832             :       fd_topo_create_workspace works. */
     833           0 :     fd_wksp_preview_t preview[1];
     834           0 :     int err = fd_wksp_preview( status_cache, preview );
     835           0 :     if( FD_UNLIKELY( err ) ) FD_LOG_ERR(( "unable to preview %s: error %d", status_cache, err ));
     836           0 :     fd_topo_wksp_t * wksp = &topo->workspaces[ topo->objs[ txncache_obj->id ].wksp_id ];
     837           0 :     wksp->part_max = preview->part_max;
     838           0 :     wksp->known_footprint = 0;
     839           0 :     wksp->total_footprint = preview->data_max;
     840           0 :     ulong page_sz = FD_SHMEM_GIGANTIC_PAGE_SZ;
     841           0 :     wksp->page_sz = page_sz;
     842           0 :     ulong footprint = fd_wksp_footprint( preview->part_max, preview->data_max );
     843           0 :     wksp->page_cnt = footprint / page_sz;
     844           0 :   }
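  /* Worked example of the sizing above, with illustrative numbers not taken
     from the original file: if fd_wksp_preview reports part_max=512 and a
     data_max of 8 GiB, fd_wksp_footprint( 512, 8 GiB ) gives the
     checkpoint's total byte footprint, and dividing by the 1 GiB
     FD_SHMEM_GIGANTIC_PAGE_SZ yields a page_cnt of about 8 (the integer
     division truncates, so the footprint is assumed to be an exact multiple
     of the page size). */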
     845             : 
     846           0 :   config->topo = *topo;
     847           0 : }
     848             : 
     849             : int
     850             : fd_topo_configure_tile( fd_topo_tile_t * tile,
     851           0 :                         fd_config_t *    config ) {
     852           0 :     if( FD_UNLIKELY( !strcmp( tile->name, "net" ) || !strcmp( tile->name, "sock" ) ) ) {
     853             : 
     854           0 :       tile->net.shred_listen_port              = config->tiles.shred.shred_listen_port;
     855           0 :       tile->net.quic_transaction_listen_port   = config->tiles.quic.quic_transaction_listen_port;
     856           0 :       tile->net.legacy_transaction_listen_port = config->tiles.quic.regular_transaction_listen_port;
     857           0 :       tile->net.gossip_listen_port             = config->gossip.port;
     858           0 :       tile->net.repair_intake_listen_port      = config->tiles.repair.repair_intake_listen_port;
     859           0 :       tile->net.repair_serve_listen_port       = config->tiles.repair.repair_serve_listen_port;
     860           0 :       tile->net.send_src_port                  = config->tiles.send.send_src_port;
     861             : 
     862           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "netlnk" ) ) ) {
     863             : 
     864             :       /* already configured */
     865             : 
     866           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "quic" ) ) ) {
     867             : 
     868           0 :       tile->quic.reasm_cnt                      = config->tiles.quic.txn_reassembly_count;
     869           0 :       tile->quic.out_depth                      = config->tiles.verify.receive_buffer_size;
     870           0 :       tile->quic.max_concurrent_connections     = config->tiles.quic.max_concurrent_connections;
     871           0 :       tile->quic.max_concurrent_handshakes      = config->tiles.quic.max_concurrent_handshakes;
     872           0 :       tile->quic.quic_transaction_listen_port   = config->tiles.quic.quic_transaction_listen_port;
     873           0 :       tile->quic.idle_timeout_millis            = config->tiles.quic.idle_timeout_millis;
     874           0 :       tile->quic.ack_delay_millis               = config->tiles.quic.ack_delay_millis;
     875           0 :       tile->quic.retry                          = config->tiles.quic.retry;
     876           0 :       fd_cstr_fini( fd_cstr_append_cstr_safe( fd_cstr_init( tile->quic.key_log_path ), config->tiles.quic.ssl_key_log_file, sizeof(tile->quic.key_log_path) ) );
     877             : 
     878           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "verify" ) ) ) {
     879           0 :       tile->verify.tcache_depth = config->tiles.verify.signature_cache_size;
     880             : 
     881           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "dedup" ) ) ) {
     882           0 :       tile->dedup.tcache_depth = config->tiles.dedup.signature_cache_size;
     883           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "resolv" ) ) ) {
     884             : 
     885           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "shred" ) ) ) {
     886           0 :       strncpy( tile->shred.identity_key_path, config->paths.identity_key, sizeof(tile->shred.identity_key_path) );
     887             : 
     888           0 :       tile->shred.depth                         = 65536UL;
     889           0 :       tile->shred.fec_resolver_depth            = config->tiles.shred.max_pending_shred_sets;
     890           0 :       tile->shred.expected_shred_version        = config->consensus.expected_shred_version;
     891           0 :       tile->shred.shred_listen_port             = config->tiles.shred.shred_listen_port;
     892           0 :       tile->shred.larger_shred_limits_per_block = config->development.bench.larger_shred_limits_per_block;
     893             : 
     894           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "gossip" ) ) ) {
     895           0 :       if( FD_UNLIKELY( strcmp( config->gossip.host, "" ) ) ) {
     896           0 :         if( !resolve_address( config->gossip.host, &tile->gossip.ip_addr ) )
     897           0 :           FD_LOG_ERR(( "could not resolve [gossip.host] %s", config->gossip.host ));
     898           0 :       } else {
     899           0 :         tile->gossip.ip_addr = config->net.ip_addr;
     900           0 :       }
     901           0 :       strncpy( tile->gossip.identity_key_path, config->paths.identity_key, sizeof(tile->gossip.identity_key_path) );
     902           0 :       tile->gossip.gossip_listen_port =  config->gossip.port;
     903           0 :       tile->gossip.tvu_port = config->tiles.shred.shred_listen_port;
     904           0 :       if( FD_UNLIKELY( tile->gossip.tvu_port>(ushort)(USHORT_MAX-6) ) )
     905           0 :         FD_LOG_ERR(( "shred_listen_port in the config must not be greater than %hu", (ushort)(USHORT_MAX-6) ));
     906           0 :       tile->gossip.expected_shred_version = config->consensus.expected_shred_version;
     907           0 :       tile->gossip.tpu_port             = config->tiles.quic.regular_transaction_listen_port;
     908           0 :       tile->gossip.tpu_quic_port        = config->tiles.quic.quic_transaction_listen_port;
     909           0 :       tile->gossip.tpu_vote_port        = config->tiles.quic.regular_transaction_listen_port; /* TODO: support separate port for tpu vote */
     910           0 :       tile->gossip.repair_serve_port    = config->tiles.repair.repair_serve_listen_port;
     911           0 :       tile->gossip.entrypoints_cnt      = fd_ulong_min( config->gossip.resolved_entrypoints_cnt, FD_TOPO_GOSSIP_ENTRYPOINTS_MAX );
     912           0 :       fd_memcpy( tile->gossip.entrypoints, config->gossip.resolved_entrypoints, tile->gossip.entrypoints_cnt * sizeof(fd_ip4_port_t) );
     913             : 
     914           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "repair" ) ) ) {
     915           0 :       tile->repair.max_pending_shred_sets    = config->tiles.shred.max_pending_shred_sets;
     916           0 :       tile->repair.repair_intake_listen_port = config->tiles.repair.repair_intake_listen_port;
     917           0 :       tile->repair.repair_serve_listen_port  = config->tiles.repair.repair_serve_listen_port;
     918           0 :       tile->repair.slot_max                  = config->tiles.repair.slot_max;
     919           0 :       strncpy( tile->repair.good_peer_cache_file, config->tiles.repair.good_peer_cache_file, sizeof(tile->repair.good_peer_cache_file) );
     920             : 
     921           0 :       strncpy( tile->repair.identity_key_path, config->paths.identity_key, sizeof(tile->repair.identity_key_path) );
     922             : 
     923           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "replay" ) )) {
     924             : 
     925           0 :       tile->replay.fec_max = config->tiles.shred.max_pending_shred_sets;
     926           0 :       tile->replay.max_vote_accounts = config->firedancer.runtime.limits.max_vote_accounts;
     927             : 
     928             :       /* specified by [tiles.replay] */
     929             : 
     930           0 :       strncpy( tile->replay.blockstore_file,    config->firedancer.blockstore.file,    sizeof(tile->replay.blockstore_file) );
     931           0 :       strncpy( tile->replay.blockstore_checkpt, config->firedancer.blockstore.checkpt, sizeof(tile->replay.blockstore_checkpt) );
     932             : 
     933           0 :       tile->replay.tx_metadata_storage = config->rpc.extended_tx_metadata_storage;
     934           0 :       strncpy( tile->replay.funk_checkpt, config->tiles.replay.funk_checkpt, sizeof(tile->replay.funk_checkpt) );
     935             : 
     936           0 :       tile->replay.funk_obj_id = fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX );
     937           0 :       tile->replay.plugins_enabled = fd_topo_find_tile( &config->topo, "plugin", 0UL ) != ULONG_MAX;
     938             : 
     939           0 :       if( FD_UNLIKELY( !strncmp( config->tiles.replay.genesis,  "", 1 ) &&
     940           0 :                        !strncmp( config->paths.snapshots, "", 1 ) ) ) {
     941           0 :         fd_cstr_printf_check( config->tiles.replay.genesis, PATH_MAX, NULL, "%s/genesis.bin", config->paths.ledger );
     942           0 :       }
     943           0 :       strncpy( tile->replay.genesis, config->tiles.replay.genesis, sizeof(tile->replay.genesis) );
     944             : 
     945           0 :       strncpy( tile->replay.slots_replayed, config->tiles.replay.slots_replayed, sizeof(tile->replay.slots_replayed) );
     946           0 :       strncpy( tile->replay.status_cache, config->tiles.replay.status_cache, sizeof(tile->replay.status_cache) );
     947           0 :       strncpy( tile->replay.cluster_version, config->tiles.replay.cluster_version, sizeof(tile->replay.cluster_version) );
     948           0 :       strncpy( tile->replay.tower_checkpt, config->tiles.replay.tower_checkpt, sizeof(tile->replay.tower_checkpt) );
     949             : 
     950           0 :       tile->replay.max_exec_slices = config->tiles.replay.max_exec_slices;
     951             : 
     952             :       /* not specified by [tiles.replay] */
     953             : 
     954           0 :       strncpy( tile->replay.identity_key_path, config->paths.identity_key, sizeof(tile->replay.identity_key_path) );
     955           0 :       tile->replay.ip_addr = config->net.ip_addr;
     956           0 :       strncpy( tile->replay.vote_account_path, config->paths.vote_account, sizeof(tile->replay.vote_account_path) );
     957           0 :       tile->replay.enable_bank_hash_cmp = 1;
     958             : 
     959           0 :       tile->replay.capture_start_slot = config->capture.capture_start_slot;
     960           0 :       strncpy( tile->replay.solcap_capture, config->capture.solcap_capture, sizeof(tile->replay.solcap_capture) );
     961           0 :       strncpy( tile->replay.dump_proto_dir, config->capture.dump_proto_dir, sizeof(tile->replay.dump_proto_dir) );
     962           0 :       tile->replay.dump_block_to_pb = config->capture.dump_block_to_pb;
     963             : 
     964           0 :       FD_TEST( tile->replay.funk_obj_id == fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX ) );
     965             : 
     966           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "sign" ) ) ) {
     967           0 :       strncpy( tile->sign.identity_key_path, config->paths.identity_key, sizeof(tile->sign.identity_key_path) );
     968             : 
     969           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "metric" ) ) ) {
     970           0 :       if( FD_UNLIKELY( !fd_cstr_to_ip4_addr( config->tiles.metric.prometheus_listen_address, &tile->metric.prometheus_listen_addr ) ) )
     971           0 :         FD_LOG_ERR(( "failed to parse prometheus listen address `%s`", config->tiles.metric.prometheus_listen_address ));
     972           0 :       tile->metric.prometheus_listen_port = config->tiles.metric.prometheus_listen_port;
     973           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "pack" ) ) ) {
     974           0 :       tile->pack.max_pending_transactions      = config->tiles.pack.max_pending_transactions;
     975           0 :       tile->pack.bank_tile_count               = config->layout.bank_tile_count;
     976           0 :       tile->pack.larger_max_cost_per_block     = config->development.bench.larger_max_cost_per_block;
     977           0 :       tile->pack.larger_shred_limits_per_block = config->development.bench.larger_shred_limits_per_block;
     978           0 :       tile->pack.use_consumed_cus              = config->tiles.pack.use_consumed_cus;
     979           0 :       tile->pack.schedule_strategy             = config->tiles.pack.schedule_strategy_enum;
     980           0 :       if( FD_UNLIKELY( tile->pack.use_consumed_cus ) ) FD_LOG_ERR(( "Firedancer does not support CU rebating yet.  [tiles.pack.use_consumed_cus] must be false" ));
     981           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "poh" ) ) ) {
     982           0 :       strncpy( tile->poh.identity_key_path, config->paths.identity_key, sizeof(tile->poh.identity_key_path) );
     983             : 
     984           0 :       tile->poh.bank_cnt = config->layout.bank_tile_count;
     985           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "send" ) ) ) {
     986           0 :       tile->send.send_src_port = config->tiles.send.send_src_port;
     987           0 :       tile->send.ip_addr = config->net.ip_addr;
     988           0 :       strncpy( tile->send.identity_key_path, config->paths.identity_key, sizeof(tile->send.identity_key_path) );
     989           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "tower" ) ) ) {
     990           0 :       tile->tower.funk_obj_id = fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX );
     991           0 :       strncpy( tile->tower.identity_key_path, config->paths.identity_key, sizeof(tile->tower.identity_key_path) );
     992           0 :       strncpy( tile->tower.vote_acc_path, config->paths.vote_account, sizeof(tile->tower.vote_acc_path) );
     993           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "rpcsrv" ) ) ) {
     994           0 :       strncpy( tile->replay.blockstore_file, config->firedancer.blockstore.file, sizeof(tile->replay.blockstore_file) );
     995           0 :       tile->rpcserv.funk_obj_id = fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX );
     996           0 :       tile->rpcserv.rpc_port = config->rpc.port;
     997           0 :       tile->rpcserv.tpu_port = config->tiles.quic.regular_transaction_listen_port;
     998           0 :       tile->rpcserv.tpu_ip_addr = config->net.ip_addr;
     999           0 :       tile->rpcserv.block_index_max = config->rpc.block_index_max;
    1000           0 :       tile->rpcserv.txn_index_max = config->rpc.txn_index_max;
    1001           0 :       tile->rpcserv.acct_index_max = config->rpc.acct_index_max;
    1002           0 :       strncpy( tile->rpcserv.history_file, config->rpc.history_file, sizeof(tile->rpcserv.history_file) );
    1003           0 :       strncpy( tile->rpcserv.identity_key_path, config->paths.identity_key, sizeof(tile->rpcserv.identity_key_path) );
    1004           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "gui" ) ) ) {
    1005           0 :       if( FD_UNLIKELY( !fd_cstr_to_ip4_addr( config->tiles.gui.gui_listen_address, &tile->gui.listen_addr ) ) )
    1006           0 :         FD_LOG_ERR(( "failed to parse gui listen address `%s`", config->tiles.gui.gui_listen_address ));
    1007           0 :       tile->gui.listen_port = config->tiles.gui.gui_listen_port;
    1008           0 :       tile->gui.is_voting = strcmp( config->paths.vote_account, "" );
    1009           0 :       strncpy( tile->gui.cluster, config->cluster, sizeof(tile->gui.cluster) );
    1010           0 :       strncpy( tile->gui.identity_key_path, config->paths.identity_key, sizeof(tile->gui.identity_key_path) );
    1011           0 :       strncpy( tile->gui.vote_key_path, config->paths.vote_account, sizeof(tile->gui.vote_key_path) );
    1012           0 :       tile->gui.max_http_connections      = config->tiles.gui.max_http_connections;
    1013           0 :       tile->gui.max_websocket_connections = config->tiles.gui.max_websocket_connections;
    1014           0 :       tile->gui.max_http_request_length   = config->tiles.gui.max_http_request_length;
    1015           0 :       tile->gui.send_buffer_size_mb       = config->tiles.gui.send_buffer_size_mb;
    1016           0 :       tile->gui.schedule_strategy         = config->tiles.pack.schedule_strategy_enum;
    1017           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "plugin" ) ) ) {
    1018             : 
    1019           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "exec" ) ) ) {
    1020           0 :       tile->exec.funk_obj_id = fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX );
    1021             : 
    1022           0 :       tile->exec.capture_start_slot = config->capture.capture_start_slot;
    1023           0 :       strncpy( tile->exec.dump_proto_dir, config->capture.dump_proto_dir, sizeof(tile->exec.dump_proto_dir) );
    1024           0 :       tile->exec.dump_instr_to_pb = config->capture.dump_instr_to_pb;
    1025           0 :       tile->exec.dump_txn_to_pb = config->capture.dump_txn_to_pb;
    1026           0 :       tile->exec.dump_syscall_to_pb = config->capture.dump_syscall_to_pb;
    1027           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "writer" ) ) ) {
    1028           0 :       tile->writer.funk_obj_id = fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX );
    1029           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "snaprd" ) ) ) {
    1030           0 :       setup_snapshots( config, tile );
    1031           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "snapdc" ) ) ) {
    1032             : 
    1033           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "snapin" ) ) ) {
    1034           0 :       tile->snapin.funk_obj_id            = fd_pod_query_ulong( config->topo.props, "funk",      ULONG_MAX );
    1035           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "arch_f" ) ||
    1036           0 :                             !strcmp( tile->name, "arch_w" ) ) ) {
    1037           0 :       strncpy( tile->archiver.rocksdb_path, config->tiles.archiver.rocksdb_path, sizeof(tile->archiver.rocksdb_path) );
    1038           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "back" ) ) ) {
    1039           0 :         tile->archiver.end_slot = config->tiles.archiver.end_slot;
    1040           0 :         strncpy( tile->archiver.ingest_mode, config->tiles.archiver.ingest_mode, sizeof(tile->archiver.ingest_mode) );
    1041           0 :         if( FD_UNLIKELY( 0==strlen( tile->archiver.ingest_mode ) ) ) {
    1042           0 :           FD_LOG_ERR(( "`archiver.ingest_mode` not specified in toml" ));
    1043           0 :         }
    1044             : 
    1045             :         /* Validate arguments based on the ingest mode */
    1046           0 :         if( !strcmp( tile->archiver.ingest_mode, "rocksdb" ) ) {
    1047           0 :           strncpy( tile->archiver.rocksdb_path, config->tiles.archiver.rocksdb_path, sizeof(tile->archiver.rocksdb_path) );
    1048           0 :           if( FD_UNLIKELY( 0==strlen( tile->archiver.rocksdb_path ) ) ) {
    1049           0 :             FD_LOG_ERR(( "`archiver.rocksdb_path` not specified in toml" ));
    1050           0 :           }
    1051           0 :         } else if( !strcmp( tile->archiver.ingest_mode, "shredcap" ) ) {
    1052           0 :           strncpy( tile->archiver.shredcap_path, config->tiles.archiver.shredcap_path, sizeof(tile->archiver.shredcap_path) );
    1053           0 :           if( FD_UNLIKELY( 0==strlen( tile->archiver.shredcap_path ) ) ) {
    1054           0 :             FD_LOG_ERR(( "`archiver.shredcap_path` not specified in toml" ));
    1055           0 :           }
    1056           0 :           strncpy( tile->archiver.bank_hash_path, config->tiles.archiver.bank_hash_path, sizeof(tile->archiver.bank_hash_path) );
    1057           0 :           if( FD_UNLIKELY( 0==strlen( tile->archiver.bank_hash_path ) ) ) {
    1058           0 :             FD_LOG_ERR(( "`archiver.bank_hash_path` not specified in toml" ));
    1059           0 :           }
    1060           0 :         } else {
    1061           0 :           FD_LOG_ERR(( "Invalid ingest mode: %s", tile->archiver.ingest_mode ));
    1062           0 :         }
    1063           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "scap" ) ) ) {
    1064           0 :       tile->shredcap.repair_intake_listen_port = config->tiles.repair.repair_intake_listen_port;
    1065           0 :       strncpy( tile->shredcap.folder_path, config->tiles.shredcap.folder_path, sizeof(tile->shredcap.folder_path) );
    1066           0 :       tile->shredcap.write_buffer_size = config->tiles.shredcap.write_buffer_size;
    1067           0 :       tile->shredcap.enable_publish_stake_weights = 0; /* this is not part of the config */
    1068           0 :       strncpy( tile->shredcap.manifest_path, "", sizeof(tile->shredcap.manifest_path) ); /* this is not part of the config */
    1069           0 :     } else {
    1070           0 :       return 0;
    1071           0 :     }
    1072           0 :   return 1;
    1073           0 : }

Generated by: LCOV version 1.14