LCOV - code coverage report
Current view: top level - app/firedancer - topology.c (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 0 753 0.0 %
Date: 2025-07-01 05:00:49 Functions: 0 12 0.0 %

          Line data    Source code
       1             : #include "topology.h"
       2             : 
       3             : #include "../../discof/replay/fd_replay_notif.h"
       4             : #include "../../disco/net/fd_net_tile.h"
       5             : #include "../../disco/quic/fd_tpu.h"
       6             : #include "../../disco/tiles.h"
       7             : #include "../../disco/topo/fd_topob.h"
       8             : #include "../../disco/topo/fd_cpu_topo.h"
       9             : #include "../../util/pod/fd_pod_format.h"
      10             : #include "../../flamenco/runtime/fd_blockstore.h"
      11             : #include "../../flamenco/runtime/fd_txncache.h"
      12             : #include "../../flamenco/snapshot/fd_snapshot_base.h"
      13             : #include "../../util/tile/fd_tile_private.h"
      14             : 
      15             : #include <sys/random.h>
      16             : #include <sys/types.h>
      17             : #include <sys/socket.h>
      18             : #include <netdb.h>
      19             : 
      20             : extern fd_topo_obj_callbacks_t * CALLBACKS[];
      21             : 
      22             : fd_topo_obj_t *
      23             : setup_topo_blockstore( fd_topo_t *  topo,
      24             :                        char const * wksp_name,
      25             :                        ulong        shred_max,
      26             :                        ulong        block_max,
      27             :                        ulong        idx_max,
      28             :                        ulong        txn_max,
      29           0 :                        ulong        alloc_max ) {
      30           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "blockstore", wksp_name );
      31             : 
      32           0 :   ulong seed;
      33           0 :   FD_TEST( sizeof(ulong) == getrandom( &seed, sizeof(ulong), 0 ) );
      34             : 
      35           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, 1UL,        "obj.%lu.wksp_tag",   obj->id ) );
      36           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, seed,       "obj.%lu.seed",       obj->id ) );
      37           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, shred_max,  "obj.%lu.shred_max",  obj->id ) );
      38           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, block_max,  "obj.%lu.block_max",  obj->id ) );
      39           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, idx_max,    "obj.%lu.idx_max",    obj->id ) );
      40           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, txn_max,    "obj.%lu.txn_max",    obj->id ) );
      41           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, alloc_max,  "obj.%lu.alloc_max",  obj->id ) );
      42             : 
      43             :   /* DO NOT MODIFY LOOSE WITHOUT CHANGING HOW BLOCKSTORE ALLOCATES INTERNAL STRUCTURES */
      44             : 
      45           0 :   ulong blockstore_footprint = fd_blockstore_footprint( shred_max, block_max, idx_max, txn_max ) + alloc_max;
      46           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, blockstore_footprint,  "obj.%lu.loose", obj->id ) );
      47             : 
      48           0 :   return obj;
      49           0 : }
      50             : 
      51             : fd_topo_obj_t *
      52           0 : setup_topo_bank_hash_cmp( fd_topo_t * topo, char const * wksp_name ) {
      53           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "bh_cmp", wksp_name );
      54           0 :   return obj;
      55           0 : }
      56             : 
      57             : fd_topo_obj_t *
      58             : setup_topo_banks( fd_topo_t *  topo,
      59             :                   char const * wksp_name,
      60           0 :                   ulong        max_banks ) {
      61           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "banks", wksp_name );
      62           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_banks, "obj.%lu.max_banks", obj->id ) );
      63           0 :   return obj;
      64           0 : }
      65             : 
      66             : static fd_topo_obj_t *
      67           0 : setup_topo_fec_sets( fd_topo_t * topo, char const * wksp_name, ulong sz ) {
      68           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "fec_sets", wksp_name );
      69           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, sz, "obj.%lu.sz",   obj->id ) );
      70           0 :   return obj;
      71           0 : }
      72             : 
      73             : fd_topo_obj_t *
      74             : setup_topo_runtime_pub( fd_topo_t *  topo,
      75             :                         char const * wksp_name,
      76           0 :                         ulong        mem_max ) {
      77           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "runtime_pub", wksp_name );
      78           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, mem_max, "obj.%lu.mem_max",  obj->id ) );
      79           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, 12UL,    "obj.%lu.wksp_tag", obj->id ) );
      80           0 :   return obj;
      81           0 : }
      82             : 
      83             : fd_topo_obj_t *
      84             : setup_topo_txncache( fd_topo_t *  topo,
      85             :                      char const * wksp_name,
      86             :                      ulong        max_rooted_slots,
      87             :                      ulong        max_live_slots,
      88             :                      ulong        max_txn_per_slot,
      89           0 :                      ulong        max_constipated_slots ) {
      90           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "txncache", wksp_name );
      91             : 
      92           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_rooted_slots, "obj.%lu.max_rooted_slots", obj->id ) );
      93           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_live_slots,   "obj.%lu.max_live_slots",   obj->id ) );
      94           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_txn_per_slot, "obj.%lu.max_txn_per_slot", obj->id ) );
      95           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_constipated_slots, "obj.%lu.max_constipated_slots", obj->id ) );
      96             : 
      97           0 :   return obj;
      98           0 : }
      99             : 
     100             : fd_topo_obj_t *
     101             : setup_topo_funk( fd_topo_t *  topo,
     102             :                  char const * wksp_name,
     103             :                  ulong        max_account_records,
     104             :                  ulong        max_database_transactions,
     105           0 :                  ulong        heap_size_gib ) {
     106           0 :   fd_topo_obj_t * obj = fd_topob_obj( topo, "funk", wksp_name );
     107           0 :   FD_TEST( fd_pod_insert_ulong(  topo->props, "funk", obj->id ) );
     108           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_account_records,       "obj.%lu.rec_max",  obj->id ) );
     109           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, max_database_transactions, "obj.%lu.txn_max",  obj->id ) );
     110           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, heap_size_gib*(1UL<<30),   "obj.%lu.heap_max", obj->id ) );
     111           0 :   ulong funk_footprint = fd_funk_footprint( max_database_transactions, max_account_records );
     112           0 :   if( FD_UNLIKELY( !funk_footprint ) ) FD_LOG_ERR(( "Invalid [funk] parameters" ));
     113             : 
     114             :   /* Increase workspace partition count */
     115           0 :   ulong wksp_idx = fd_topo_find_wksp( topo, wksp_name );
     116           0 :   FD_TEST( wksp_idx!=ULONG_MAX );
     117           0 :   fd_topo_wksp_t * wksp = &topo->workspaces[ wksp_idx ];
     118           0 :   ulong part_max = fd_wksp_part_max_est( funk_footprint, 1U<<14U );
     119           0 :   if( FD_UNLIKELY( !part_max ) ) FD_LOG_ERR(( "fd_wksp_part_max_est(%lu,16KiB) failed", funk_footprint ));
     120           0 :   wksp->part_max += part_max;
     121             : 
     122           0 :   return obj;
     123           0 : }
     124             : 
     125             : static int
     126             : resolve_gossip_entrypoint( char const *    host_port,
     127           0 :                           fd_ip4_port_t * ip4_port ) {
     128             : 
     129             :   /* Split host:port */
     130             : 
     131           0 :   char const * colon = strrchr( host_port, ':' );
     132           0 :   if( FD_UNLIKELY( !colon ) ) {
     133           0 :     FD_LOG_ERR(( "invalid [gossip.entrypoints] entry \"%s\": no port number", host_port ));
     134           0 :   }
     135             : 
     136           0 :   char fqdn[ 255 ];
     137           0 :   ulong fqdn_len = (ulong)( colon-host_port );
     138           0 :   if( FD_UNLIKELY( fqdn_len>254 ) ) {
     139           0 :     FD_LOG_ERR(( "invalid [gossip.entrypoints] entry \"%s\": hostname too long", host_port ));
     140           0 :   }
     141           0 :   fd_memcpy( fqdn, host_port, fqdn_len );
     142           0 :   fqdn[ fqdn_len ] = '\0';
     143             : 
     144             :   /* Parse port number */
     145             : 
     146           0 :   char const * port_str = colon+1;
     147           0 :   char const * endptr   = NULL;
     148           0 :   ulong port = strtoul( port_str, (char **)&endptr, 10 );
     149           0 :   if( FD_UNLIKELY( !endptr || !port || port>USHORT_MAX || *endptr!='\0' ) ) {
     150           0 :     FD_LOG_ERR(( "invalid [gossip.entrypoints] entry \"%s\": invalid port number", host_port ));
     151           0 :   }
     152           0 :   ip4_port->port = (ushort)fd_ushort_bswap( (ushort)port );
     153             : 
     154             :   /* Resolve hostname */
     155             : 
     156           0 :   struct addrinfo hints = { .ai_family = AF_INET };
     157           0 :   struct addrinfo * res;
     158           0 :   int err = getaddrinfo( fqdn, NULL, &hints, &res );
     159           0 :   if( FD_UNLIKELY( err ) ) {
     160           0 :     FD_LOG_WARNING(( "cannot resolve [gossip.entrypoints] entry \"%s\": %i-%s", fqdn, err, gai_strerror( err ) ));
     161           0 :     return 0;
     162           0 :   }
     163             : 
     164           0 :   int resolved = 0;
     165           0 :   for( struct addrinfo * cur=res; cur; cur=cur->ai_next ) {
     166           0 :     if( FD_UNLIKELY( cur->ai_addr->sa_family!=AF_INET ) ) continue;
     167           0 :     struct sockaddr_in const * addr = (struct sockaddr_in const *)cur->ai_addr;
     168           0 :     ip4_port->addr = addr->sin_addr.s_addr;
     169           0 :     resolved = 1;
     170           0 :     break;
     171           0 :   }
     172             : 
     173           0 :   freeaddrinfo( res );
     174           0 :   return resolved;
     175           0 : }
     176             : 
     177             : static void
     178           0 : resolve_gossip_entrypoints( config_t * config ) {
     179           0 :   ulong entrypoint_cnt = config->gossip.entrypoints_cnt;
     180           0 :   ulong resolved_entrypoints = 0UL;
     181           0 :   for( ulong j=0UL; j<entrypoint_cnt; j++ ) {
     182           0 :     if( resolve_gossip_entrypoint( config->gossip.entrypoints[j], &config->gossip.resolved_entrypoints[resolved_entrypoints] ) ) {
     183           0 :       resolved_entrypoints++;
     184           0 :     }
     185           0 :   }
     186           0 :   config->gossip.resolved_entrypoints_cnt = resolved_entrypoints;
     187           0 : }
     188             : 
     189             : static void
     190             : setup_snapshots( config_t *       config,
     191           0 :                  fd_topo_tile_t * tile ) {
     192           0 :   uchar incremental_is_file, incremental_is_url;
     193           0 :   if( strnlen( config->tiles.replay.incremental, PATH_MAX )>0UL ) {
     194           0 :     incremental_is_file = 1U;
     195           0 :   } else {
     196           0 :     incremental_is_file = 0U;
     197           0 :   }
     198           0 :   if( strnlen( config->tiles.replay.incremental_url, PATH_MAX )>0UL ) {
     199           0 :     incremental_is_url = 1U;
     200           0 :   } else {
     201           0 :     incremental_is_url = 0U;
     202           0 :   }
     203           0 :   if( FD_UNLIKELY( incremental_is_file && incremental_is_url ) ) {
     204           0 :     FD_LOG_ERR(( "At most one of the incremental snapshot source strings in the configuration file under [tiles.replay.incremental] and [tiles.replay.incremental_url] may be set." ));
     205           0 :   }
     206           0 :   tile->replay.incremental_src_type = INT_MAX;
     207           0 :   if( FD_LIKELY( incremental_is_url ) ) {
     208           0 :     strncpy( tile->replay.incremental, config->tiles.replay.incremental_url, sizeof(tile->replay.incremental) );
     209           0 :     tile->replay.incremental_src_type = FD_SNAPSHOT_SRC_HTTP;
     210           0 :   }
     211           0 :   if( FD_UNLIKELY( incremental_is_file ) ) {
     212           0 :     strncpy( tile->replay.incremental, config->tiles.replay.incremental, sizeof(tile->replay.incremental) );
     213           0 :     tile->replay.incremental_src_type = FD_SNAPSHOT_SRC_FILE;
     214           0 :   }
     215           0 :   tile->replay.incremental[ sizeof(tile->replay.incremental)-1UL ] = '\0';
     216             : 
     217           0 :   uchar snapshot_is_file, snapshot_is_url;
     218           0 :   if( strnlen( config->tiles.replay.snapshot, PATH_MAX )>0UL ) {
     219           0 :     snapshot_is_file = 1U;
     220           0 :   } else {
     221           0 :     snapshot_is_file = 0U;
     222           0 :   }
     223           0 :   if( strnlen( config->tiles.replay.snapshot_url, PATH_MAX )>0UL ) {
     224           0 :     snapshot_is_url = 1U;
     225           0 :   } else {
     226           0 :     snapshot_is_url = 0U;
     227           0 :   }
     228           0 :   if( FD_UNLIKELY( snapshot_is_file && snapshot_is_url ) ) {
     229           0 :     FD_LOG_ERR(( "At most one of the full snapshot source strings in the configuration file under [tiles.replay.snapshot] and [tiles.replay.snapshot_url] may be set." ));
     230           0 :   }
     231           0 :   tile->replay.snapshot_src_type = INT_MAX;
     232           0 :   if( FD_LIKELY( snapshot_is_url ) ) {
     233           0 :     strncpy( tile->replay.snapshot, config->tiles.replay.snapshot_url, sizeof(tile->replay.snapshot) );
     234           0 :     tile->replay.snapshot_src_type = FD_SNAPSHOT_SRC_HTTP;
     235           0 :   }
     236           0 :   if( FD_UNLIKELY( snapshot_is_file ) ) {
     237           0 :     strncpy( tile->replay.snapshot, config->tiles.replay.snapshot, sizeof(tile->replay.snapshot) );
     238           0 :     tile->replay.snapshot_src_type = FD_SNAPSHOT_SRC_FILE;
     239           0 :   }
     240           0 :   tile->replay.snapshot[ sizeof(tile->replay.snapshot)-1UL ] = '\0';
     241             : 
     242           0 :   strncpy( tile->replay.snapshot_dir, config->tiles.replay.snapshot_dir, sizeof(tile->replay.snapshot_dir) );
     243           0 :   tile->replay.snapshot_dir[ sizeof(tile->replay.snapshot_dir)-1UL ] = '\0';
     244           0 : }
     245             : 
     246             : void
     247           0 : fd_topo_initialize( config_t * config ) {
     248           0 :   resolve_gossip_entrypoints( config );
     249             : 
     250           0 :   ulong net_tile_cnt    = config->layout.net_tile_count;
     251           0 :   ulong shred_tile_cnt  = config->layout.shred_tile_count;
     252           0 :   ulong quic_tile_cnt   = config->layout.quic_tile_count;
     253           0 :   ulong verify_tile_cnt = config->layout.verify_tile_count;
     254           0 :   ulong bank_tile_cnt   = config->layout.bank_tile_count;
     255           0 :   ulong exec_tile_cnt   = config->firedancer.layout.exec_tile_count;
     256           0 :   ulong writer_tile_cnt = config->firedancer.layout.writer_tile_count;
     257           0 :   ulong resolv_tile_cnt = config->layout.resolv_tile_count;
     258             : 
     259           0 :   int enable_rpc = ( config->rpc.port != 0 );
     260             : 
     261           0 :   fd_topo_t * topo = { fd_topob_new( &config->topo, config->name ) };
     262           0 :   topo->max_page_size = fd_cstr_to_shmem_page_sz( config->hugetlbfs.max_page_size );
     263           0 :   topo->gigantic_page_threshold = config->hugetlbfs.gigantic_page_threshold_mib << 20;
     264             : 
     265             :   /*             topo, name */
     266           0 :   fd_topob_wksp( topo, "metric_in"  );
     267           0 :   fd_topob_wksp( topo, "net_shred"  );
     268           0 :   fd_topob_wksp( topo, "net_gossip" );
     269           0 :   fd_topob_wksp( topo, "net_repair" );
     270           0 :   fd_topob_wksp( topo, "net_quic"   );
     271           0 :   fd_topob_wksp( topo, "net_send"   );
     272             : 
     273           0 :   fd_topob_wksp( topo, "quic_verify"  );
     274           0 :   fd_topob_wksp( topo, "verify_dedup" );
     275           0 :   fd_topob_wksp( topo, "dedup_pack"   );
     276             : 
     277             : //  fd_topob_wksp( topo, "dedup_resolv" );
     278           0 :   fd_topob_wksp( topo, "resolv_pack"  );
     279             : 
     280           0 :   fd_topob_wksp( topo, "shred_repair" );
     281           0 :   fd_topob_wksp( topo, "stake_out"    );
     282             : 
     283           0 :   fd_topob_wksp( topo, "poh_shred"    );
     284             : 
     285           0 :   fd_topob_wksp( topo, "shred_sign"   );
     286           0 :   fd_topob_wksp( topo, "sign_shred"   );
     287             : 
     288           0 :   fd_topob_wksp( topo, "gossip_sign"  );
     289           0 :   fd_topob_wksp( topo, "sign_gossip"  );
     290             : 
     291           0 :   fd_topob_wksp( topo, "replay_exec"  );
     292           0 :   fd_topob_wksp( topo, "exec_writer"  );
     293             : 
     294           0 :   fd_topob_wksp( topo, "send_sign"    );
     295           0 :   fd_topob_wksp( topo, "sign_send"    );
     296             : 
     297           0 :   fd_topob_wksp( topo, "crds_shred"   );
     298           0 :   fd_topob_wksp( topo, "gossip_repai" );
     299           0 :   fd_topob_wksp( topo, "gossip_verif" );
     300           0 :   fd_topob_wksp( topo, "gossip_tower" );
     301           0 :   fd_topob_wksp( topo, "replay_tower" );
     302             : 
     303           0 :   fd_topob_wksp( topo, "repair_sign"  );
     304           0 :   fd_topob_wksp( topo, "sign_repair"  );
     305             : 
     306           0 :   fd_topob_wksp( topo, "repair_repla" );
     307           0 :   fd_topob_wksp( topo, "replay_poh"   );
     308           0 :   fd_topob_wksp( topo, "bank_busy"    );
     309           0 :   fd_topob_wksp( topo, "pack_replay"  );
     310           0 :   fd_topob_wksp( topo, "tower_send"  );
     311           0 :   fd_topob_wksp( topo, "gossip_send"  );
     312           0 :   fd_topob_wksp( topo, "send_txns"    );
     313             : 
     314           0 :   fd_topob_wksp( topo, "quic"        );
     315           0 :   fd_topob_wksp( topo, "verify"      );
     316           0 :   fd_topob_wksp( topo, "dedup"       );
     317           0 :   fd_topob_wksp( topo, "shred"       );
     318           0 :   fd_topob_wksp( topo, "pack"        );
     319           0 :   fd_topob_wksp( topo, "resolv"      );
     320           0 :   fd_topob_wksp( topo, "sign"        );
     321           0 :   fd_topob_wksp( topo, "repair"      );
     322           0 :   fd_topob_wksp( topo, "gossip"      );
     323           0 :   fd_topob_wksp( topo, "metric"      );
     324           0 :   fd_topob_wksp( topo, "replay"      );
     325           0 :   fd_topob_wksp( topo, "runtime_pub" );
     326           0 :   fd_topob_wksp( topo, "banks"       );
     327           0 :   fd_topob_wksp( topo, "bh_cmp" );
     328           0 :   fd_topob_wksp( topo, "exec"        );
     329           0 :   fd_topob_wksp( topo, "writer"      );
     330           0 :   fd_topob_wksp( topo, "blockstore"  );
     331           0 :   fd_topob_wksp( topo, "fec_sets"    );
     332           0 :   fd_topob_wksp( topo, "tcache"      );
     333           0 :   fd_topob_wksp( topo, "poh"         );
     334           0 :   fd_topob_wksp( topo, "send"        );
     335           0 :   fd_topob_wksp( topo, "tower"       );
     336           0 :   fd_topob_wksp( topo, "constipate"  );
     337           0 :   fd_topob_wksp( topo, "exec_spad"   );
     338           0 :   fd_topob_wksp( topo, "exec_fseq"   );
     339           0 :   fd_topob_wksp( topo, "writer_fseq" );
     340           0 :   fd_topob_wksp( topo, "funk" );
     341           0 :   fd_topob_wksp( topo, "slot_fseqs"  ); /* fseqs for marked slots eg. turbine slot */
     342           0 :   if( enable_rpc ) fd_topob_wksp( topo, "rpcsrv" );
     343             : 
     344           0 :   #define FOR(cnt) for( ulong i=0UL; i<cnt; i++ )
     345             : 
     346           0 :   ulong pending_fec_shreds_depth = fd_ulong_min( fd_ulong_pow2_up( config->tiles.shred.max_pending_shred_sets * FD_REEDSOL_DATA_SHREDS_MAX ), USHORT_MAX + 1 /* dcache max */ );
     347             : 
     348             :   /*                                  topo, link_name,      wksp_name,      depth,                                    mtu,                           burst */
     349           0 :   FOR(quic_tile_cnt)   fd_topob_link( topo, "quic_net",     "net_quic",     config->net.ingress_buffer_size,          FD_NET_MTU,                    1UL );
     350           0 :   FOR(shred_tile_cnt)  fd_topob_link( topo, "shred_net",    "net_shred",    config->net.ingress_buffer_size,          FD_NET_MTU,                    1UL );
     351           0 :   FOR(quic_tile_cnt)   fd_topob_link( topo, "quic_verify",  "quic_verify",  config->tiles.verify.receive_buffer_size, FD_TPU_REASM_MTU,              config->tiles.quic.txn_reassembly_count );
     352           0 :   FOR(verify_tile_cnt) fd_topob_link( topo, "verify_dedup", "verify_dedup", config->tiles.verify.receive_buffer_size, FD_TPU_PARSED_MTU,             1UL );
     353           0 :   /**/                 fd_topob_link( topo, "dedup_pack",   "dedup_pack",   config->tiles.verify.receive_buffer_size, FD_TPU_PARSED_MTU,             1UL );
     354             : 
     355             :   /**/                 fd_topob_link( topo, "stake_out",    "stake_out",    128UL,                                    40UL + 40200UL * 40UL,         1UL );
     356             : 
     357           0 :   FOR(shred_tile_cnt)  fd_topob_link( topo, "shred_sign",   "shred_sign",   128UL,                                    32UL,                          1UL );
     358           0 :   FOR(shred_tile_cnt)  fd_topob_link( topo, "sign_shred",   "sign_shred",   128UL,                                    64UL,                          1UL );
     359             : 
     360             :   /**/                 fd_topob_link( topo, "gossip_sign",  "gossip_sign",  128UL,                                    2048UL,                        1UL );
     361           0 :   /**/                 fd_topob_link( topo, "sign_gossip",  "sign_gossip",  128UL,                                    64UL,                          1UL );
     362             : 
     363             : //  /**/                 fd_topob_link( topo, "dedup_resolv", "dedup_resolv", 65536UL,                                  FD_TPU_PARSED_MTU,             1UL );
     364           0 :   FOR(resolv_tile_cnt) fd_topob_link( topo, "resolv_pack",  "resolv_pack",  65536UL,                                  FD_TPU_RESOLVED_MTU,           1UL );
     365             : 
     366             :   /* TODO: The MTU is currently relatively arbitrary and needs to be resized to the size of the largest
     367             :      message that is outbound from the replay to exec. */
     368           0 :   FOR(exec_tile_cnt)   fd_topob_link( topo, "replay_exec",  "replay_exec",  128UL,                                    10240UL,                       exec_tile_cnt );
     369             :   /* Assuming the number of writer tiles is sufficient to keep up with
     370             :      the number of exec tiles, under equilibrium, we should have at least
     371             :      enough link space to buffer worst case input shuffling done by the
     372             :      stem.  That is, when a link is so unlucky, that the stem RNG decided
     373             :      to process every other link except this one, for all writer tiles.
     374             :      This would be fd_ulong_pow2_up( exec_tile_cnt*writer_tile_cnt+1UL ).
     375             : 
     376             :      This is all assuming we have true pipelining between exec and writer
     377             :      tiles.  Right now, we don't.  So in reality there can be at most 1
     378             :      in-flight transaction per exec tile, and hence a depth of 1 is in
     379             :      theory sufficient for each exec_writer link. */
     380           0 :   FOR(exec_tile_cnt)   fd_topob_link( topo, "exec_writer",  "exec_writer",  128UL,                                    FD_EXEC_WRITER_MTU,            1UL );
     381             : 
     382           0 :   /**/                 fd_topob_link( topo, "gossip_verif", "gossip_verif", config->tiles.verify.receive_buffer_size, FD_TPU_MTU,                    1UL );
     383           0 :   /**/                 fd_topob_link( topo, "gossip_tower", "gossip_tower", 128UL,                                    FD_TPU_MTU,                    1UL );
     384           0 :   /**/                 fd_topob_link( topo, "replay_tower", "replay_tower", 128UL,                                    65536UL,                       1UL );
     385           0 :   /**/                 fd_topob_link( topo, "tower_replay", "replay_tower", 128UL,                                    0,                             1UL );
     386             : 
     387             :   /**/                 fd_topob_link( topo, "crds_shred",   "crds_shred",   128UL,                                    8UL  + 40200UL * 38UL,         1UL );
     388           0 :   /**/                 fd_topob_link( topo, "gossip_repai", "gossip_repai", 128UL,                                    40200UL * 38UL, 1UL );
     389           0 :   /**/                 fd_topob_link( topo, "gossip_send",  "gossip_send",  128UL,                                    40200UL * 38UL, 1UL );
     390             : 
     391           0 :   /**/                 fd_topob_link( topo, "gossip_net",   "net_gossip",   config->net.ingress_buffer_size,          FD_NET_MTU,                    1UL );
     392           0 :   /**/                 fd_topob_link( topo, "send_net",     "net_send",     config->net.ingress_buffer_size,          FD_NET_MTU,                    2UL );
     393             : 
     394           0 :   /**/                 fd_topob_link( topo, "repair_net",   "net_repair",   config->net.ingress_buffer_size,          FD_NET_MTU,                    1UL );
     395           0 :   /**/                 fd_topob_link( topo, "repair_sign",  "repair_sign",  128UL,                                    2048UL,                        1UL );
     396           0 :   FOR(shred_tile_cnt)  fd_topob_link( topo, "shred_repair", "shred_repair", pending_fec_shreds_depth,                 FD_SHRED_REPAIR_MTU,           2UL /* at most 2 msgs per after_frag */ );
     397             : 
     398           0 :   FOR(shred_tile_cnt)  fd_topob_link( topo, "repair_shred", "shred_repair", pending_fec_shreds_depth,                 sizeof(fd_ed25519_sig_t),      1UL );
     399           0 :   /**/                 fd_topob_link( topo, "sign_repair",  "sign_repair",  128UL,                                    64UL,                          1UL );
     400           0 :   /**/                 fd_topob_link( topo, "repair_repla", "repair_repla", 65536UL,                                  FD_DISCO_REPAIR_REPLAY_MTU,    1UL );
     401           0 :   FOR(bank_tile_cnt)   fd_topob_link( topo, "replay_poh",   "replay_poh",   128UL,                                    (4096UL*sizeof(fd_txn_p_t))+sizeof(fd_microblock_trailer_t), 1UL  );
     402           0 :   /**/                 fd_topob_link( topo, "poh_shred",    "poh_shred",    16384UL,                                  USHORT_MAX,                    1UL   );
     403           0 :   /**/                 fd_topob_link( topo, "pack_replay",  "pack_replay",  65536UL,                                  USHORT_MAX,                    1UL   );
     404           0 :   /**/                 fd_topob_link( topo, "poh_pack",     "replay_poh",   128UL,                                    sizeof(fd_became_leader_t) ,   1UL   );
     405             : 
     406             :   /**/                 fd_topob_link( topo, "tower_send",   "tower_send", 65536UL,                                  sizeof(fd_txn_p_t),            1UL   );
     407           0 :   /**/                 fd_topob_link( topo, "send_txns",    "send_txns",  128UL,                                    FD_TXN_MTU,                    1UL   );
     408           0 :   /**/                 fd_topob_link( topo, "send_sign",    "send_sign",  128UL,                                    FD_TXN_MTU,                    1UL   );
     409           0 :   /**/                 fd_topob_link( topo, "sign_send",    "sign_send",  128UL,                                    64UL,                          1UL   );
     410             : 
     411           0 :   ushort parsed_tile_to_cpu[ FD_TILE_MAX ];
     412             :   /* Unassigned tiles will be floating, unless auto topology is enabled. */
     413           0 :   for( ulong i=0UL; i<FD_TILE_MAX; i++ ) parsed_tile_to_cpu[ i ] = USHORT_MAX;
     414             : 
     415           0 :   int is_auto_affinity = !strcmp( config->layout.affinity, "auto" );
     416           0 :   int is_bench_auto_affinity = !strcmp( config->development.bench.affinity, "auto" );
     417             : 
     418           0 :   if( FD_UNLIKELY( is_auto_affinity != is_bench_auto_affinity ) ) {
     419           0 :     FD_LOG_ERR(( "The CPU affinity string in the configuration file under [layout.affinity] and [development.bench.affinity] must all be set to 'auto' or all be set to a specific CPU affinity string." ));
     420           0 :   }
     421             : 
     422           0 :   fd_topo_cpus_t cpus[1];
     423           0 :   fd_topo_cpus_init( cpus );
     424             : 
     425           0 :   ulong affinity_tile_cnt = 0UL;
     426           0 :   if( FD_LIKELY( !is_auto_affinity ) ) affinity_tile_cnt = fd_tile_private_cpus_parse( config->layout.affinity, parsed_tile_to_cpu );
     427             : 
     428           0 :   ulong tile_to_cpu[ FD_TILE_MAX ] = {0};
     429           0 :   for( ulong i=0UL; i<affinity_tile_cnt; i++ ) {
     430           0 :     if( FD_UNLIKELY( parsed_tile_to_cpu[ i ]!=USHORT_MAX && parsed_tile_to_cpu[ i ]>=cpus->cpu_cnt ) )
     431           0 :       FD_LOG_ERR(( "The CPU affinity string in the configuration file under [layout.affinity] specifies a CPU index of %hu, but the system "
     432           0 :                   "only has %lu CPUs. You should either change the CPU allocations in the affinity string, or increase the number of CPUs "
     433           0 :                   "in the system.",
     434           0 :                   parsed_tile_to_cpu[ i ], cpus->cpu_cnt ));
     435           0 :     tile_to_cpu[ i ] = fd_ulong_if( parsed_tile_to_cpu[ i ]==USHORT_MAX, ULONG_MAX, (ulong)parsed_tile_to_cpu[ i ] );
     436           0 :   }
     437             : 
     438           0 :   fd_topos_net_tiles( topo, config->layout.net_tile_count, &config->net, config->tiles.netlink.max_routes, config->tiles.netlink.max_neighbors, tile_to_cpu );
     439             : 
     440           0 :   FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_gossip", i, config->net.ingress_buffer_size );
     441           0 :   FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_repair", i, config->net.ingress_buffer_size );
     442           0 :   FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_quic",   i, config->net.ingress_buffer_size );
     443           0 :   FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_shred",  i, config->net.ingress_buffer_size );
     444           0 :   FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_send",   i, config->net.ingress_buffer_size );
     445             : 
     446             :   /*                                              topo, tile_name, tile_wksp, metrics_wksp, cpu_idx,                       is_agave, uses_keyswitch */
     447           0 :   FOR(quic_tile_cnt)               fd_topob_tile( topo, "quic",    "quic",    "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     448           0 :   FOR(verify_tile_cnt)             fd_topob_tile( topo, "verify",  "verify",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     449           0 :   /**/                             fd_topob_tile( topo, "dedup",   "dedup",   "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     450           0 :   FOR(resolv_tile_cnt)             fd_topob_tile( topo, "resolv",  "resolv",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 1,        0 );
     451           0 :   FOR(shred_tile_cnt)              fd_topob_tile( topo, "shred",   "shred",   "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        1 );
     452           0 :   /**/                             fd_topob_tile( topo, "sign",    "sign",    "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        1 );
     453           0 :   /**/                             fd_topob_tile( topo, "metric",  "metric",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     454           0 :   fd_topo_tile_t * pack_tile =     fd_topob_tile( topo, "pack",    "pack",    "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     455           0 :   /**/                             fd_topob_tile( topo, "poh",     "poh",     "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,          1 );
     456           0 :   /**/                             fd_topob_tile( topo, "gossip",  "gossip",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     457           0 :   fd_topo_tile_t * repair_tile =   fd_topob_tile( topo, "repair",  "repair",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     458           0 :   /**/                             fd_topob_tile( topo, "send",    "send",    "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     459             : 
     460           0 :   fd_topo_tile_t * replay_tile =   fd_topob_tile( topo, "replay",  "replay",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     461           0 :   FOR(exec_tile_cnt)               fd_topob_tile( topo, "exec",    "exec",    "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     462           0 :   /**/                             fd_topob_tile( topo, "tower",   "tower",   "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     463           0 :   FOR(writer_tile_cnt)             fd_topob_tile( topo, "writer",  "writer",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     464             : 
     465           0 :   fd_topo_tile_t * rpcserv_tile = NULL;
     466           0 :   if( enable_rpc ) rpcserv_tile =  fd_topob_tile( topo, "rpcsrv",  "rpcsrv",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0,        0 );
     467             : 
     468             :   /* Database cache */
     469             : 
     470           0 :   fd_topo_obj_t * funk_obj = setup_topo_funk( topo, "funk",
     471           0 :       config->firedancer.funk.max_account_records,
     472           0 :       config->firedancer.funk.max_database_transactions,
     473           0 :       config->firedancer.funk.heap_size_gib );
     474             : 
     475           0 :   FOR(exec_tile_cnt)   fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], funk_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     476           0 :   /*                */ fd_topob_tile_uses( topo, replay_tile,  funk_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     477           0 :   if(rpcserv_tile)     fd_topob_tile_uses( topo, rpcserv_tile, funk_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     478           0 :   FOR(writer_tile_cnt) fd_topob_tile_uses( topo,  &topo->tiles[ fd_topo_find_tile( topo, "writer", i ) ], funk_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     479             : 
     480             :   /* Setup a shared wksp object for the blockstore. */
     481           0 :   fd_topo_obj_t * blockstore_obj = setup_topo_blockstore( topo,
     482           0 :                                                           "blockstore",
     483           0 :                                                           config->firedancer.blockstore.shred_max,
     484           0 :                                                           config->firedancer.blockstore.block_max,
     485           0 :                                                           config->firedancer.blockstore.idx_max,
     486           0 :                                                           config->firedancer.blockstore.txn_max,
     487           0 :                                                           config->firedancer.blockstore.alloc_max );
     488           0 :   fd_topob_tile_uses( topo, replay_tile, blockstore_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     489           0 :   fd_topob_tile_uses( topo, repair_tile, blockstore_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     490           0 :   if( enable_rpc ) {
     491           0 :     fd_topob_tile_uses( topo, rpcserv_tile, blockstore_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     492           0 :   }
     493             : 
     494           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, blockstore_obj->id, "blockstore" ) );
     495             : 
     496             :   /* Setup a shared wksp object for banks. */
     497             : 
     498           0 :   fd_topo_obj_t * banks_obj = setup_topo_banks( topo, "banks", config->firedancer.runtime.limits.max_banks );
     499           0 :   fd_topob_tile_uses( topo, replay_tile, banks_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     500           0 :   FOR(exec_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], banks_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     501           0 :   FOR(writer_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "writer", i ) ], banks_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     502           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, banks_obj->id, "banks" ) );
     503             : 
     504             :   /* Setup a shared wksp object for bank hash cmp. */
     505             : 
     506           0 :   fd_topo_obj_t * bank_hash_cmp_obj = setup_topo_bank_hash_cmp( topo, "bh_cmp" );
     507           0 :   fd_topob_tile_uses( topo, replay_tile, bank_hash_cmp_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     508           0 :   FOR(exec_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], bank_hash_cmp_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     509           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, bank_hash_cmp_obj->id, "bh_cmp" ) );
     510             : 
     511             :   /* Setup a shared wksp object for fec sets. */
     512             : 
     513           0 :   ulong shred_depth = 65536UL; /* from fdctl/topology.c shred_store link. MAKE SURE TO KEEP IN SYNC. */
     514           0 :   ulong fec_set_cnt = shred_depth + config->tiles.shred.max_pending_shred_sets + 4UL;
     515           0 :   ulong fec_sets_sz = fec_set_cnt*sizeof(fd_shred34_t)*4; /* mirrors # of dcache entires in frankendancer */
     516           0 :   fd_topo_obj_t * fec_sets_obj = setup_topo_fec_sets( topo, "fec_sets", shred_tile_cnt*fec_sets_sz );
     517           0 :   for( ulong i=0UL; i<shred_tile_cnt; i++ ) {
     518           0 :     fd_topo_tile_t * shred_tile = &topo->tiles[ fd_topo_find_tile( topo, "shred", i ) ];
     519           0 :     fd_topob_tile_uses( topo, shred_tile,  fec_sets_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     520           0 :   }
     521           0 :   fd_topob_tile_uses( topo, repair_tile, fec_sets_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     522           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, fec_sets_obj->id, "fec_sets" ) );
     523             : 
     524             :   /* Setup a shared wksp object for runtime pub. */
     525             : 
     526           0 :   fd_topo_obj_t * runtime_pub_obj = setup_topo_runtime_pub( topo, "runtime_pub", config->firedancer.runtime.heap_size_gib<<30 );
     527             : 
     528           0 :   fd_topob_tile_uses( topo, replay_tile, runtime_pub_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     529           0 :   fd_topob_tile_uses( topo, pack_tile,   runtime_pub_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     530           0 :   FOR(exec_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], runtime_pub_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     531           0 :   FOR(writer_tile_cnt) fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "writer", i ) ], runtime_pub_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     532           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, runtime_pub_obj->id, "runtime_pub" ) );
     533             : 
     534             :   /* Create a txncache to be used by replay. */
     535           0 :   fd_topo_obj_t * txncache_obj = setup_topo_txncache( topo, "tcache",
     536           0 :       config->firedancer.runtime.limits.max_rooted_slots,
     537           0 :       config->firedancer.runtime.limits.max_live_slots,
     538           0 :       config->firedancer.runtime.limits.max_transactions_per_slot,
     539           0 :       fd_txncache_max_constipated_slots_est( config->firedancer.runtime.limits.snapshot_grace_period_seconds ) );
     540           0 :   fd_topob_tile_uses( topo, replay_tile, txncache_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     541           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, txncache_obj->id, "txncache" ) );
     542             : 
     543           0 :   for( ulong i=0UL; i<bank_tile_cnt; i++ ) {
     544           0 :     fd_topo_obj_t * busy_obj = fd_topob_obj( topo, "fseq", "bank_busy" );
     545           0 :     fd_topob_tile_uses( topo, replay_tile, busy_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     546           0 :     fd_topob_tile_uses( topo, pack_tile, busy_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     547           0 :     FD_TEST( fd_pod_insertf_ulong( topo->props, busy_obj->id, "bank_busy.%lu", i ) );
     548           0 :   }
     549             : 
     550           0 :   for( ulong i=0UL; i<exec_tile_cnt; i++ ) {
     551           0 :     fd_topo_obj_t * exec_spad_obj = fd_topob_obj( topo, "exec_spad", "exec_spad" );
     552           0 :     fd_topob_tile_uses( topo, replay_tile, exec_spad_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     553           0 :     fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], exec_spad_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     554           0 :     for( ulong j=0UL; j<writer_tile_cnt; j++ ) {
     555             :       /* For txn_ctx. */
     556           0 :       fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "writer", j ) ], exec_spad_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     557           0 :     }
     558           0 :     FD_TEST( fd_pod_insertf_ulong( topo->props, exec_spad_obj->id, "exec_spad.%lu", i ) );
     559           0 :   }
     560             : 
     561           0 :   for( ulong i=0UL; i<exec_tile_cnt; i++ ) {
     562           0 :     fd_topo_obj_t * exec_fseq_obj = fd_topob_obj( topo, "fseq", "exec_fseq" );
     563           0 :     fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "exec", i ) ], exec_fseq_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     564           0 :     fd_topob_tile_uses( topo, replay_tile, exec_fseq_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     565           0 :     FD_TEST( fd_pod_insertf_ulong( topo->props, exec_fseq_obj->id, "exec_fseq.%lu", i ) );
     566           0 :   }
     567             : 
     568           0 :   for( ulong i=0UL; i<writer_tile_cnt; i++ ) {
     569           0 :     fd_topo_obj_t * writer_fseq_obj = fd_topob_obj( topo, "fseq", "writer_fseq" );
     570           0 :     fd_topob_tile_uses( topo, &topo->tiles[ fd_topo_find_tile( topo, "writer", i ) ], writer_fseq_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     571           0 :     fd_topob_tile_uses( topo, replay_tile, writer_fseq_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     572           0 :     FD_TEST( fd_pod_insertf_ulong( topo->props, writer_fseq_obj->id, "writer_fseq.%lu", i ) );
     573           0 :   }
     574             : 
     575             :   /* There's another special fseq that's used to communicate the shred
     576             :     version from the Agave boot path to the shred tile. */
     577           0 :   fd_topo_obj_t * poh_shred_obj = fd_topob_obj( topo, "fseq", "poh_shred" );
     578           0 :   fd_topo_tile_t * poh_tile = &topo->tiles[ fd_topo_find_tile( topo, "gossip", 0UL ) ];
     579           0 :   fd_topob_tile_uses( topo, poh_tile, poh_shred_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     580             : 
     581             :   /* root_slot is an fseq marking the validator's current Tower root. */
     582             : 
     583           0 :   fd_topo_obj_t * root_slot_obj = fd_topob_obj( topo, "fseq", "slot_fseqs" );
     584           0 :   fd_topob_tile_uses( topo, replay_tile, root_slot_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     585           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, root_slot_obj->id, "root_slot" ) );
     586             : 
     587             :   /* turbine_slot0 is an fseq marking the slot number of the first shred
     588             :      we observed from Turbine.  This is a useful heuristic for
     589             :      determining when replay has progressed past the slot in which we
     590             :      last voted.  The idea is once replay has proceeded past the slot
     591             :      from which validator stopped replaying and therefore also stopped
     592             :      voting (crashed, shutdown, etc.), it will have "read-back" its
     593             :      latest tower in the ledger.  Note this logic is not true in the
     594             :      case our latest tower vote was for a minority fork. */
     595             : 
     596           0 :   fd_topo_obj_t * turbine_slot0_obj = fd_topob_obj( topo, "fseq", "slot_fseqs" );
     597           0 :   fd_topob_tile_uses( topo, repair_tile, turbine_slot0_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     598           0 :   fd_topob_tile_uses( topo, replay_tile, turbine_slot0_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     599           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, turbine_slot0_obj->id, "turbine_slot0" ) );
     600             : 
     601             :   /* turbine_slot is an fseq marking the highest slot we've observed on
     602             :      a shred.  This is continuously updated as the validator is running
     603             :      and is used to determine whether the validator is caught up with
     604             :      the rest of the cluster. */
     605             : 
     606           0 :   fd_topo_obj_t * turbine_slot_obj = fd_topob_obj( topo, "fseq", "slot_fseqs" );
     607           0 :   fd_topob_tile_uses( topo, repair_tile, turbine_slot_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     608           0 :   fd_topob_tile_uses( topo, replay_tile, turbine_slot_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     609           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, turbine_slot_obj->id, "turbine_slot" ) );
     610             : 
     611           0 :   for( ulong i=0UL; i<shred_tile_cnt; i++ ) {
     612           0 :     fd_topo_tile_t * shred_tile = &topo->tiles[ fd_topo_find_tile( topo, "shred", i ) ];
     613           0 :     fd_topob_tile_uses( topo, shred_tile, poh_shred_obj, FD_SHMEM_JOIN_MODE_READ_ONLY );
     614           0 :   }
     615           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, poh_shred_obj->id, "poh_shred" ) );
     616             : 
     617           0 :   fd_topo_obj_t * constipated_obj = fd_topob_obj( topo, "fseq", "constipate" );
     618           0 :   fd_topob_tile_uses( topo, replay_tile, constipated_obj, FD_SHMEM_JOIN_MODE_READ_WRITE );
     619           0 :   FD_TEST( fd_pod_insertf_ulong( topo->props, constipated_obj->id, "constipate" ) );
     620             : 
     621           0 :   if( FD_LIKELY( !is_auto_affinity ) ) {
     622           0 :     if( FD_UNLIKELY( affinity_tile_cnt<topo->tile_cnt ) )
     623           0 :       FD_LOG_ERR(( "The topology you are using has %lu tiles, but the CPU affinity specified in the config tile as [layout.affinity] only provides for %lu cores. "
     624           0 :                   "You should either increase the number of cores dedicated to Firedancer in the affinity string, or decrease the number of cores needed by reducing "
     625           0 :                   "the total tile count. You can reduce the tile count by decreasing individual tile counts in the [layout] section of the configuration file.",
     626           0 :                   topo->tile_cnt, affinity_tile_cnt ));
     627           0 :     if( FD_UNLIKELY( affinity_tile_cnt>topo->tile_cnt ) )
     628           0 :       FD_LOG_WARNING(( "The topology you are using has %lu tiles, but the CPU affinity specified in the config tile as [layout.affinity] provides for %lu cores. "
     629           0 :                       "Not all cores in the affinity will be used by Firedancer. You may wish to increase the number of tiles in the system by increasing "
     630           0 :                       "individual tile counts in the [layout] section of the configuration file.",
     631           0 :                       topo->tile_cnt, affinity_tile_cnt ));
     632           0 :   }
     633             : 
     634             :   /*                                      topo, tile_name, tile_kind_id, fseq_wksp,   link_name,      link_kind_id, reliable,            polled */
     635           0 :   for( ulong j=0UL; j<shred_tile_cnt; j++ )
     636           0 :                   fd_topos_tile_in_net(  topo,                          "metric_in", "shred_net",    j,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     637           0 :   for( ulong j=0UL; j<quic_tile_cnt; j++ )
     638           0 :                   fd_topos_tile_in_net(  topo,                          "metric_in", "quic_net",     j,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     639           0 :   FOR(quic_tile_cnt) for( ulong j=0UL; j<net_tile_cnt; j++ )
     640           0 :                       fd_topob_tile_in(  topo, "quic",    i,            "metric_in", "net_quic",     j,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     641           0 :   FOR(quic_tile_cnt)   fd_topob_tile_out( topo, "quic",    i,                         "quic_verify",  i                                                  );
     642           0 :   FOR(quic_tile_cnt)   fd_topob_tile_out( topo, "quic",    i,                         "quic_net",     i                                                  );
     643             :   /* All verify tiles read from all QUIC tiles, packets are round robin. */
     644           0 :   FOR(verify_tile_cnt) for( ulong j=0UL; j<quic_tile_cnt; j++ )
     645           0 :                       fd_topob_tile_in(  topo, "verify",  i,            "metric_in", "quic_verify",   j,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers, verify tiles may be overrun */
     646           0 :   FOR(verify_tile_cnt) fd_topob_tile_out( topo, "verify",  i,                         "verify_dedup", i                                                  );
     647           0 :   FOR(verify_tile_cnt) fd_topob_tile_in(  topo, "verify",  i,            "metric_in", "gossip_verif", 0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     648           0 :   /**/                 fd_topob_tile_in(  topo, "gossip",  0UL,          "metric_in", "send_txns",    0UL,          FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     649           0 :   /**/                 fd_topob_tile_in(  topo, "verify",  0UL,          "metric_in", "send_txns",    0UL,          FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     650           0 :   FOR(verify_tile_cnt) fd_topob_tile_in(  topo, "dedup",   0UL,          "metric_in", "verify_dedup", i,            FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     651           0 :   /**/                 fd_topob_tile_out( topo, "dedup",   0UL,                       "dedup_pack",   0UL                                                );
     652             : //  FOR(resolv_tile_cnt) fd_topob_tile_in(  topo, "resolv",  i,            "metric_in", "dedup_resolv", 0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     653             : //  FOR(resolv_tile_cnt) fd_topob_tile_in(  topo, "resolv",  i,            "metric_in", "replay_resol", 0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     654           0 :   FOR(resolv_tile_cnt) fd_topob_tile_out( topo, "resolv",  i,                         "resolv_pack",  i                                                  );
     655           0 :   /**/                 fd_topob_tile_in(  topo, "pack",    0UL,          "metric_in", "resolv_pack",  0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     656             : 
     657           0 :   /**/             fd_topos_tile_in_net(  topo,                          "metric_in", "gossip_net",   0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     658           0 :   /**/             fd_topos_tile_in_net(  topo,                          "metric_in", "repair_net",   0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     659           0 :   /**/             fd_topos_tile_in_net(  topo,                          "metric_in", "send_net",     0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     660             : 
     661           0 :   FOR(shred_tile_cnt) for( ulong j=0UL; j<net_tile_cnt; j++ )
     662           0 :                       fd_topob_tile_in(  topo, "shred",  i,             "metric_in", "net_shred",     j,            FD_TOPOB_UNRELIABLE,   FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     663           0 :   FOR(shred_tile_cnt)  fd_topob_tile_in(  topo, "shred",  i,             "metric_in", "poh_shred",     0UL,          FD_TOPOB_RELIABLE,     FD_TOPOB_POLLED );
     664           0 :   FOR(shred_tile_cnt)  fd_topob_tile_in(  topo, "shred",  i,             "metric_in", "stake_out",     0UL,          FD_TOPOB_RELIABLE,     FD_TOPOB_POLLED );
     665           0 :   FOR(shred_tile_cnt)  fd_topob_tile_in(  topo, "shred",  i,             "metric_in", "crds_shred",    0UL,          FD_TOPOB_RELIABLE,     FD_TOPOB_POLLED );
     666           0 :   FOR(shred_tile_cnt)  fd_topob_tile_out( topo, "shred",  i,                          "shred_repair",  i                                                    );
     667           0 :   FOR(shred_tile_cnt)  fd_topob_tile_out( topo, "shred",  i,                          "shred_net",     i                                                    );
     668             : 
     669           0 :   FOR(shred_tile_cnt)  fd_topob_tile_in(  topo, "shred",  i,             "metric_in",  "repair_shred", i,            FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     670             : 
     671             :   /**/                 fd_topob_tile_out( topo, "repair",  0UL,                       "repair_net",    0UL                                                  );
     672             : 
     673           0 :   /**/                 fd_topob_tile_in(  topo, "tower",   0UL,          "metric_in", "gossip_tower", 0UL,           FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     674           0 :   /**/                 fd_topob_tile_in(  topo, "tower",   0UL,          "metric_in", "replay_tower", 0UL,           FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     675             : 
     676             :   /**/                 fd_topob_tile_out( topo, "tower",  0UL,                        "tower_replay", 0UL                                                   );
     677           0 :   /**/                 fd_topob_tile_out( topo, "tower",  0UL,                        "tower_send",   0UL                                                   );
     678             : 
     679             :   /* Sign links don't need to be reliable because they are synchronous,
     680             :     so there's at most one fragment in flight at a time anyway.  The
     681             :     sign links are also not polled by the mux, instead the tiles will
     682             :     read the sign responses out of band in a dedicated spin loop. */
     683           0 :   for( ulong i=0UL; i<shred_tile_cnt; i++ ) {
     684           0 :     /**/               fd_topob_tile_in(  topo, "sign",   0UL,           "metric_in", "shred_sign",    i,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     685           0 :     /**/               fd_topob_tile_out( topo, "shred",  i,                          "shred_sign",    i                                                    );
     686           0 :     /**/               fd_topob_tile_in(  topo, "shred",  i,             "metric_in", "sign_shred",    i,            FD_TOPOB_UNRELIABLE, FD_TOPOB_UNPOLLED );
     687           0 :     /**/               fd_topob_tile_out( topo, "sign",   0UL,                        "sign_shred",    i                                                    );
     688           0 :   }
     689             : 
     690           0 :   FOR(net_tile_cnt)    fd_topob_tile_in(  topo, "gossip",   0UL,          "metric_in", "net_gossip",   i,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     691           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_net",   0UL                                                  );
     692           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "crds_shred",   0UL                                                  );
     693           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_repai", 0UL                                                  );
     694           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_verif", 0UL                                                  );
     695           0 :   /**/                 fd_topob_tile_in(  topo, "sign",     0UL,          "metric_in", "gossip_sign",  0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     696           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_sign",  0UL                                                  );
     697           0 :   /**/                 fd_topob_tile_in(  topo, "gossip",   0UL,          "metric_in", "sign_gossip",  0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_UNPOLLED );
     698           0 :   /**/                 fd_topob_tile_out( topo, "sign",     0UL,                       "sign_gossip",  0UL                                                  );
     699           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_send",  0UL                                                  );
     700           0 :   /**/                 fd_topob_tile_out( topo, "gossip",   0UL,                       "gossip_tower", 0UL                                                  );
     701             : 
     702           0 :   FOR(net_tile_cnt)    fd_topob_tile_in(  topo, "repair",  0UL,          "metric_in", "net_repair",    i,            FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     703           0 :   /**/                 fd_topob_tile_in(  topo, "repair",  0UL,          "metric_in", "gossip_repai",  0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     704           0 :   /**/                 fd_topob_tile_in(  topo, "repair",  0UL,          "metric_in", "stake_out",     0UL,          FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     705           0 :   FOR(shred_tile_cnt)  fd_topob_tile_in(  topo, "repair",  0UL,          "metric_in", "shred_repair",  i,            FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     706             : 
     707           0 :   /**/                 fd_topob_tile_in(  topo, "replay",  0UL,          "metric_in", "repair_repla",  0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED  );
     708           0 :   /**/                 fd_topob_tile_out( topo, "replay",  0UL,                       "stake_out",     0UL                                                  );
     709           0 :   /**/                 fd_topob_tile_in(  topo, "replay",  0UL,          "metric_in", "pack_replay",   0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     710           0 :   /**/                 fd_topob_tile_in(  topo, "replay",  0UL,          "metric_in", "tower_replay",  0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     711           0 :   /**/                 fd_topob_tile_out( topo, "replay",  0UL,                       "replay_tower",  0UL                                                  );
     712           0 :   FOR(bank_tile_cnt)   fd_topob_tile_out( topo, "replay",  0UL,                       "replay_poh",    i                                                    );
     713           0 :   FOR(exec_tile_cnt)   fd_topob_tile_out( topo, "replay",  0UL,                       "replay_exec",   i                                                    ); /* TODO check order in fd_replay.c macros*/
     714             : 
     715           0 :   FOR(exec_tile_cnt)   fd_topob_tile_in(  topo, "exec",    i,            "metric_in", "replay_exec",  i,            FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED    );
     716           0 :   FOR(exec_tile_cnt)   fd_topob_tile_out( topo, "exec",    i,                         "exec_writer",  i                                                     );
     717             :   /* All writer tiles read from all exec tiles.  Each exec tile has a
     718             :      single out link, over which all the writer tiles round-robin. */
     719           0 :   FOR(writer_tile_cnt) for( ulong j=0UL; j<exec_tile_cnt; j++ )
     720           0 :                        fd_topob_tile_in(  topo, "writer",  i,            "metric_in", "exec_writer",  j,            FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED    );
     721             : 
     722           0 :   /**/                 fd_topob_tile_in ( topo, "send",   0UL,         "metric_in", "stake_out",     0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     723           0 :   /**/                 fd_topob_tile_in ( topo, "send",   0UL,         "metric_in", "gossip_send",   0UL,    FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     724           0 :   /**/                 fd_topob_tile_in ( topo, "send",   0UL,         "metric_in", "tower_send",    0UL,    FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     725           0 :   /**/                 fd_topob_tile_in ( topo, "send",   0UL,         "metric_in", "net_send",      0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     726           0 :   /**/                 fd_topob_tile_out( topo, "send",   0UL,                      "send_net",      0UL                                            );
     727           0 :   /**/                 fd_topob_tile_out( topo, "send",   0UL,                      "send_txns",     0UL                                            );
     728           0 :   /**/                 fd_topob_tile_out( topo, "send",   0UL,                      "send_sign",     0UL                                            );
     729           0 :   /**/                 fd_topob_tile_in ( topo, "sign",   0UL,         "metric_in", "send_sign",     0UL,    FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     730           0 :   /**/                 fd_topob_tile_out( topo, "sign",   0UL,                      "sign_send",     0UL                                            );
     731           0 :   /**/                 fd_topob_tile_in ( topo, "send",   0UL,         "metric_in", "sign_send",     0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_UNPOLLED );
     732             : 
     733           0 :   /**/                 fd_topob_tile_in ( topo, "pack",   0UL,         "metric_in",  "dedup_pack",   0UL,    FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     734           0 :   /**/                 fd_topob_tile_in ( topo, "pack",   0UL,         "metric_in",  "poh_pack",     0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     735           0 :   /**/                 fd_topob_tile_out( topo, "pack",   0UL,                       "pack_replay",  0UL                                            );
     736           0 :   FOR(bank_tile_cnt)   fd_topob_tile_in ( topo, "poh",    0UL,         "metric_in",  "replay_poh",   i,      FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     737           0 :   /**/                 fd_topob_tile_in ( topo, "poh",    0UL,         "metric_in",  "stake_out",    0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   ); /* No reliable consumers of networking fragments, may be dropped or overrun */
     738           0 :   /**/                 fd_topob_tile_out( topo, "poh",    0UL,                       "poh_shred",    0UL                                            );
     739             : 
     740           0 :   /**/                 fd_topob_tile_in(  topo, "poh",    0UL,         "metric_in",  "pack_replay",  0UL,    FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED   );
     741           0 :                        fd_topob_tile_out( topo, "poh",    0UL,                       "poh_pack",     0UL                                            );
     742             : 
     743           0 :   /**/                 fd_topob_tile_in(  topo, "sign",   0UL,         "metric_in",  "repair_sign",  0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     744           0 :   /**/                 fd_topob_tile_out( topo, "repair", 0UL,                       "repair_sign",  0UL                                            );
     745           0 :   /**/                 fd_topob_tile_in(  topo, "repair", 0UL,         "metric_in",  "sign_repair",  0UL,    FD_TOPOB_UNRELIABLE, FD_TOPOB_UNPOLLED );
     746           0 :   /**/                 fd_topob_tile_out( topo, "repair", 0UL,                       "repair_repla", 0UL                                            );
     747           0 :   FOR(shred_tile_cnt)  fd_topob_tile_out( topo, "repair", 0UL,                       "repair_shred", i                                              );
     748           0 :   /**/                 fd_topob_tile_out( topo, "sign",   0UL,                       "sign_repair",  0UL                                            );
     749             : 
     750           0 :   if( config->tiles.archiver.enabled ) {
     751           0 :     fd_topob_wksp( topo, "arch_f" );
     752           0 :     fd_topob_wksp( topo, "arch_w" );
     753           0 :     /**/ fd_topob_tile( topo, "arch_f", "arch_f", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     754           0 :     /**/ fd_topob_tile( topo, "arch_w", "arch_w", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     755             : 
     756           0 :     fd_topob_wksp( topo, "feeder" );
     757           0 :     fd_topob_link( topo, "feeder", "feeder", 65536UL, 4UL*FD_SHRED_STORE_MTU, 4UL+config->tiles.shred.max_pending_shred_sets );
     758           0 :     /**/ fd_topob_tile_out( topo, "replay", 0UL, "feeder", 0UL );
     759           0 :     /**/ fd_topob_tile_in(  topo, "arch_f", 0UL, "metric_in", "feeder", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     760             : 
     761           0 :     fd_topob_wksp( topo, "arch_f2w" );
     762           0 :     fd_topob_link( topo, "arch_f2w", "arch_f2w", 128UL, 4UL*FD_SHRED_STORE_MTU, 1UL );
     763           0 :     /**/ fd_topob_tile_out( topo, "arch_f", 0UL, "arch_f2w", 0UL );
     764           0 :     /**/ fd_topob_tile_in( topo, "arch_w", 0UL, "metric_in", "arch_f2w", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
     765           0 :   }
     766             : 
     767           0 :   if( config->tiles.shredcap.enabled ) {
     768           0 :     fd_topob_wksp( topo, "shredcap" );
     769           0 :     fd_topob_tile( topo, "shrdcp", "shredcap", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     770           0 :     fd_topob_tile_in(  topo, "shrdcp", 0UL, "metric_in", "repair_net", 0UL, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     771           0 :     for( ulong j=0UL; j<net_tile_cnt; j++ ) {
     772           0 :       fd_topob_tile_in(  topo, "shrdcp", 0UL, "metric_in", "net_shred", j, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     773           0 :     }
     774           0 :     for( ulong j=0UL; j<shred_tile_cnt; j++ ) {
     775           0 :       fd_topob_tile_in(  topo, "shrdcp", 0UL, "metric_in", "shred_repair", j, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     776           0 :     }
     777           0 :     fd_topob_tile_in( topo, "shrdcp", 0UL, "metric_in", "crds_shred", 0UL, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     778           0 :     fd_topob_tile_in( topo, "shrdcp", 0UL, "metric_in", "gossip_repai", 0UL, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED );
     779           0 :   }
     780             : 
     781           0 :   fd_topob_wksp( topo, "replay_notif" );
     782             :   /* We may be notifying an external service, so always publish on this link. */
     783           0 :   /**/ fd_topob_link( topo, "replay_notif", "replay_notif", FD_REPLAY_NOTIF_DEPTH, FD_REPLAY_NOTIF_MTU, 1UL )->permit_no_consumers = 1;
     784           0 :   /**/ fd_topob_tile_out( topo, "replay",  0UL, "replay_notif", 0UL );
     785             : 
     786           0 :   if( enable_rpc ) {
     787           0 :     fd_topob_tile_in(  topo, "rpcsrv", 0UL, "metric_in",  "replay_notif", 0UL, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     788           0 :     fd_topob_tile_in(  topo, "rpcsrv", 0UL, "metric_in",  "stake_out",    0UL, FD_TOPOB_UNRELIABLE, FD_TOPOB_POLLED   );
     789           0 :   }
     790             : 
     791             :   /* For now the only plugin consumer is the GUI */
     792           0 :   int plugins_enabled = config->tiles.gui.enabled;
     793           0 :   if( FD_LIKELY( plugins_enabled ) ) {
     794           0 :     fd_topob_wksp( topo, "plugin_in"    );
     795           0 :     fd_topob_wksp( topo, "plugin_out"   );
     796           0 :     fd_topob_wksp( topo, "plugin"       );
     797             : 
     798             :     /**/                 fd_topob_link( topo, "plugin_out",   "plugin_out",   128UL,                                    8UL+40200UL*(58UL+12UL*34UL), 1UL );
     799           0 :     /**/                 fd_topob_link( topo, "replay_plugi", "plugin_in",    128UL,                                    4098*8UL,               1UL );
     800           0 :     /**/                 fd_topob_link( topo, "gossip_plugi", "plugin_in",    128UL,                                    8UL+40200UL*(58UL+12UL*34UL), 1UL );
     801           0 :     /**/                 fd_topob_link( topo, "votes_plugin", "plugin_in",    128UL,                                    8UL+40200UL*(58UL+12UL*34UL), 1UL );
     802             : 
     803             :     /**/                 fd_topob_tile( topo, "plugin",  "plugin",  "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0, 0 );
     804             : 
     805             :     /**/                 fd_topob_tile_out( topo, "replay", 0UL,                        "replay_plugi", 0UL                                                  );
     806           0 :     /**/                 fd_topob_tile_out( topo, "replay", 0UL,                        "votes_plugin", 0UL                                                  );
     807           0 :     /**/                 fd_topob_tile_out( topo, "gossip", 0UL,                        "gossip_plugi", 0UL                                                  );
     808           0 :     /**/                 fd_topob_tile_out( topo, "plugin", 0UL,                        "plugin_out", 0UL                                                    );
     809             : 
     810           0 :     /**/                 fd_topob_tile_in(  topo, "plugin", 0UL,           "metric_in", "replay_plugi", 0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     811           0 :     /**/                 fd_topob_tile_in(  topo, "plugin", 0UL,           "metric_in", "gossip_plugi", 0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     812           0 :     /**/                 fd_topob_tile_in(  topo, "plugin", 0UL,           "metric_in", "stake_out",    0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     813           0 :     /**/                 fd_topob_tile_in(  topo, "plugin", 0UL,           "metric_in", "votes_plugin", 0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     814           0 :   }
     815             : 
     816           0 :   if( FD_LIKELY( config->tiles.gui.enabled ) ) {
     817           0 :     fd_topob_wksp( topo, "gui"          );
     818           0 :     /**/                 fd_topob_tile(     topo, "gui",     "gui",     "metric_in",  tile_to_cpu[ topo->tile_cnt ], 0, 1 );
     819           0 :     /**/                 fd_topob_tile_in(  topo, "gui",    0UL,        "metric_in",     "plugin_out",   0UL,          FD_TOPOB_RELIABLE,   FD_TOPOB_POLLED );
     820           0 :   }
     821             : 
     822           0 :   FOR(net_tile_cnt) fd_topos_net_tile_finish( topo, i );
     823             : 
     824           0 :   for( ulong i=0UL; i<topo->tile_cnt; i++ ) {
     825           0 :     fd_topo_tile_t * tile = &topo->tiles[ i ];
     826           0 :     if( !fd_topo_configure_tile( tile, config ) ) {
     827           0 :       FD_LOG_ERR(( "unknown tile name %lu `%s`", i, tile->name ));
     828           0 :     }
     829           0 :   }
     830             : 
     831           0 :   if( FD_UNLIKELY( is_auto_affinity ) ) fd_topob_auto_layout( topo, 0 );
     832             : 
     833           0 :   fd_topob_finish( topo, CALLBACKS );
     834           0 :   FD_TEST( blockstore_obj->id );
     835             : 
     836           0 :   const char * status_cache = config->tiles.replay.status_cache;
     837           0 :   if ( strlen( status_cache ) > 0 ) {
     838             :     /* Make the status cache workspace match the parameters used to create the
     839             :       checkpoint. This is a bit nonintuitive because of the way
     840             :       fd_topo_create_workspace works. */
     841           0 :     fd_wksp_preview_t preview[1];
     842           0 :     int err = fd_wksp_preview( status_cache, preview );
     843           0 :     if( FD_UNLIKELY( err ) ) FD_LOG_ERR(( "unable to preview %s: error %d", status_cache, err ));
     844           0 :     fd_topo_wksp_t * wksp = &topo->workspaces[ topo->objs[ txncache_obj->id ].wksp_id ];
     845           0 :     wksp->part_max = preview->part_max;
     846           0 :     wksp->known_footprint = 0;
     847           0 :     wksp->total_footprint = preview->data_max;
     848           0 :     ulong page_sz = FD_SHMEM_GIGANTIC_PAGE_SZ;
     849           0 :     wksp->page_sz = page_sz;
     850           0 :     ulong footprint = fd_wksp_footprint( preview->part_max, preview->data_max );
     851           0 :     wksp->page_cnt = footprint / page_sz;
     852           0 :   }
     853             : 
     854           0 :   config->topo = *topo;
     855           0 : }
     856             : 
     857             : int
     858             : fd_topo_configure_tile( fd_topo_tile_t * tile,
     859           0 :                         fd_config_t *    config ) {
     860           0 :     if( FD_UNLIKELY( !strcmp( tile->name, "net" ) || !strcmp( tile->name, "sock" ) ) ) {
     861             : 
     862           0 :       tile->net.shred_listen_port              = config->tiles.shred.shred_listen_port;
     863           0 :       tile->net.quic_transaction_listen_port   = config->tiles.quic.quic_transaction_listen_port;
     864           0 :       tile->net.legacy_transaction_listen_port = config->tiles.quic.regular_transaction_listen_port;
     865           0 :       tile->net.gossip_listen_port             = config->gossip.port;
     866           0 :       tile->net.repair_intake_listen_port      = config->tiles.repair.repair_intake_listen_port;
     867           0 :       tile->net.repair_serve_listen_port       = config->tiles.repair.repair_serve_listen_port;
     868           0 :       tile->net.send_src_port                  = config->tiles.send.send_src_port;
     869             : 
     870           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "netlnk" ) ) ) {
     871             : 
     872             :       /* already configured */
     873             : 
     874           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "quic" ) ) ) {
     875             : 
     876           0 :       tile->quic.reasm_cnt                      = config->tiles.quic.txn_reassembly_count;
     877           0 :       tile->quic.out_depth                      = config->tiles.verify.receive_buffer_size;
     878           0 :       tile->quic.max_concurrent_connections     = config->tiles.quic.max_concurrent_connections;
     879           0 :       tile->quic.max_concurrent_handshakes      = config->tiles.quic.max_concurrent_handshakes;
     880           0 :       tile->quic.quic_transaction_listen_port   = config->tiles.quic.quic_transaction_listen_port;
     881           0 :       tile->quic.idle_timeout_millis            = config->tiles.quic.idle_timeout_millis;
     882           0 :       tile->quic.ack_delay_millis               = config->tiles.quic.ack_delay_millis;
     883           0 :       tile->quic.retry                          = config->tiles.quic.retry;
     884             : 
     885           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "verify" ) ) ) {
     886           0 :       tile->verify.tcache_depth = config->tiles.verify.signature_cache_size;
     887             : 
     888           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "dedup" ) ) ) {
     889           0 :       tile->dedup.tcache_depth = config->tiles.dedup.signature_cache_size;
     890           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "resolv" ) ) ) {
     891             : 
     892           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "shred" ) ) ) {
     893           0 :       strncpy( tile->shred.identity_key_path, config->paths.identity_key, sizeof(tile->shred.identity_key_path) );
     894             : 
     895           0 :       tile->shred.depth                         = 65536UL;
     896           0 :       tile->shred.fec_resolver_depth            = config->tiles.shred.max_pending_shred_sets;
     897           0 :       tile->shred.expected_shred_version        = config->consensus.expected_shred_version;
     898           0 :       tile->shred.shred_listen_port             = config->tiles.shred.shred_listen_port;
     899           0 :       tile->shred.larger_shred_limits_per_block = config->development.bench.larger_shred_limits_per_block;
     900             : 
     901           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "gossip" ) ) ) {
     902           0 :       tile->gossip.ip_addr = config->net.ip_addr;
     903           0 :       strncpy( tile->gossip.identity_key_path, config->paths.identity_key, sizeof(tile->gossip.identity_key_path) );
     904           0 :       tile->gossip.gossip_listen_port =  config->gossip.port;
     905           0 :       tile->gossip.tvu_port = config->tiles.shred.shred_listen_port;
     906           0 :       if( FD_UNLIKELY( tile->gossip.tvu_port>(ushort)(USHORT_MAX-6) ) )
     907           0 :         FD_LOG_ERR(( "shred_listen_port in the config must not be greater than %hu", (ushort)(USHORT_MAX-6) ));
     908           0 :       tile->gossip.expected_shred_version = config->consensus.expected_shred_version;
     909           0 :       tile->gossip.tpu_port             = config->tiles.quic.regular_transaction_listen_port;
     910           0 :       tile->gossip.tpu_quic_port        = config->tiles.quic.quic_transaction_listen_port;
     911           0 :       tile->gossip.tpu_vote_port        = config->tiles.quic.regular_transaction_listen_port; /* TODO: support separate port for tpu vote */
     912           0 :       tile->gossip.repair_serve_port    = config->tiles.repair.repair_serve_listen_port;
     913           0 :       tile->gossip.entrypoints_cnt      = fd_ulong_min( config->gossip.resolved_entrypoints_cnt, FD_TOPO_GOSSIP_ENTRYPOINTS_MAX );
     914           0 :       fd_memcpy( tile->gossip.entrypoints, config->gossip.resolved_entrypoints, tile->gossip.entrypoints_cnt * sizeof(fd_ip4_port_t) );
     915             : 
     916           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "repair" ) ) ) {
     917           0 :       tile->repair.max_pending_shred_sets    = config->tiles.shred.max_pending_shred_sets;
     918           0 :       tile->repair.repair_intake_listen_port = config->tiles.repair.repair_intake_listen_port;
     919           0 :       tile->repair.repair_serve_listen_port  = config->tiles.repair.repair_serve_listen_port;
     920           0 :       tile->repair.slot_max                  = config->tiles.repair.slot_max;
     921           0 :       strncpy( tile->repair.good_peer_cache_file, config->tiles.repair.good_peer_cache_file, sizeof(tile->repair.good_peer_cache_file) );
     922             : 
     923           0 :       strncpy( tile->repair.identity_key_path, config->paths.identity_key, sizeof(tile->repair.identity_key_path) );
     924             : 
     925           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "replay" ) )) {
     926             : 
     927           0 :       tile->replay.fec_max = config->tiles.shred.max_pending_shred_sets;
     928           0 :       tile->replay.max_vote_accounts = config->firedancer.runtime.limits.max_vote_accounts;
     929             : 
     930             :       /* specified by [tiles.replay] */
     931             : 
     932           0 :       strncpy( tile->replay.blockstore_file,    config->firedancer.blockstore.file,    sizeof(tile->replay.blockstore_file) );
     933           0 :       strncpy( tile->replay.blockstore_checkpt, config->firedancer.blockstore.checkpt, sizeof(tile->replay.blockstore_checkpt) );
     934             : 
     935           0 :       tile->replay.tx_metadata_storage = config->rpc.extended_tx_metadata_storage;
     936           0 :       strncpy( tile->replay.funk_checkpt, config->tiles.replay.funk_checkpt, sizeof(tile->replay.funk_checkpt) );
     937             : 
     938           0 :       tile->replay.funk_obj_id = fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX );
     939           0 :       tile->replay.plugins_enabled = fd_topo_find_tile( &config->topo, "plugin", 0UL ) != ULONG_MAX;
     940             : 
     941           0 :       if( FD_UNLIKELY( !strncmp( config->tiles.replay.genesis,  "", 1 )
     942           0 :                     && !strncmp( config->tiles.replay.snapshot, "", 1 ) ) ) {
     943           0 :         fd_cstr_printf_check( config->tiles.replay.genesis, PATH_MAX, NULL, "%s/genesis.bin", config->paths.ledger );
     944           0 :       }
     945           0 :       strncpy( tile->replay.genesis, config->tiles.replay.genesis, sizeof(tile->replay.genesis) );
     946             : 
     947           0 :       setup_snapshots( config, tile );
     948             : 
     949           0 :       strncpy( tile->replay.slots_replayed, config->tiles.replay.slots_replayed, sizeof(tile->replay.slots_replayed) );
     950           0 :       strncpy( tile->replay.status_cache, config->tiles.replay.status_cache, sizeof(tile->replay.status_cache) );
     951           0 :       strncpy( tile->replay.cluster_version, config->tiles.replay.cluster_version, sizeof(tile->replay.cluster_version) );
     952           0 :       strncpy( tile->replay.tower_checkpt, config->tiles.replay.tower_checkpt, sizeof(tile->replay.tower_checkpt) );
     953             : 
     954             :       /* not specified by [tiles.replay] */
     955             : 
     956           0 :       strncpy( tile->replay.identity_key_path, config->paths.identity_key, sizeof(tile->replay.identity_key_path) );
     957           0 :       tile->replay.ip_addr = config->net.ip_addr;
     958           0 :       strncpy( tile->replay.vote_account_path, config->paths.vote_account, sizeof(tile->replay.vote_account_path) );
     959           0 :       tile->replay.enable_bank_hash_cmp = 1;
     960             : 
     961           0 :       tile->replay.capture_start_slot = config->capture.capture_start_slot;
     962           0 :       strncpy( tile->replay.solcap_capture, config->capture.solcap_capture, sizeof(tile->replay.solcap_capture) );
     963           0 :       strncpy( tile->replay.dump_proto_dir, config->capture.dump_proto_dir, sizeof(tile->replay.dump_proto_dir) );
     964           0 :       tile->replay.dump_block_to_pb = config->capture.dump_block_to_pb;
     965             : 
     966           0 :       FD_TEST( tile->replay.funk_obj_id == fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX ) );
     967             : 
     968           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "sign" ) ) ) {
     969           0 :       strncpy( tile->sign.identity_key_path, config->paths.identity_key, sizeof(tile->sign.identity_key_path) );
     970             : 
     971           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "metric" ) ) ) {
     972           0 :       if( FD_UNLIKELY( !fd_cstr_to_ip4_addr( config->tiles.metric.prometheus_listen_address, &tile->metric.prometheus_listen_addr ) ) )
     973           0 :         FD_LOG_ERR(( "failed to parse prometheus listen address `%s`", config->tiles.metric.prometheus_listen_address ));
     974           0 :       tile->metric.prometheus_listen_port = config->tiles.metric.prometheus_listen_port;
     975           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "pack" ) ) ) {
     976           0 :       tile->pack.max_pending_transactions      = config->tiles.pack.max_pending_transactions;
     977           0 :       tile->pack.bank_tile_count               = config->layout.bank_tile_count;
     978           0 :       tile->pack.larger_max_cost_per_block     = config->development.bench.larger_max_cost_per_block;
     979           0 :       tile->pack.larger_shred_limits_per_block = config->development.bench.larger_shred_limits_per_block;
     980           0 :       tile->pack.use_consumed_cus              = config->tiles.pack.use_consumed_cus;
     981           0 :       tile->pack.schedule_strategy             = config->tiles.pack.schedule_strategy_enum;
     982           0 :       if( FD_UNLIKELY( tile->pack.use_consumed_cus ) ) FD_LOG_ERR(( "Firedancer does not support CU rebating yet.  [tiles.pack.use_consumed_cus] must be false" ));
     983           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "poh" ) ) ) {
     984           0 :       strncpy( tile->poh.identity_key_path, config->paths.identity_key, sizeof(tile->poh.identity_key_path) );
     985             : 
     986           0 :       tile->poh.bank_cnt = config->layout.bank_tile_count;
     987           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "send" ) ) ) {
     988           0 :       tile->send.send_src_port = config->tiles.send.send_src_port;
     989           0 :       tile->send.ip_addr = config->net.ip_addr;
     990           0 :       strncpy( tile->send.identity_key_path, config->paths.identity_key, sizeof(tile->send.identity_key_path) );
     991           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "tower" ) ) ) {
     992           0 :       tile->tower.funk_obj_id = fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX );
     993           0 :       strncpy( tile->tower.identity_key_path, config->paths.identity_key, sizeof(tile->tower.identity_key_path) );
     994           0 :       strncpy( tile->tower.vote_acc_path, config->paths.vote_account, sizeof(tile->tower.vote_acc_path) );
     995           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "rpcsrv" ) ) ) {
     996           0 :       strncpy( tile->replay.blockstore_file, config->firedancer.blockstore.file, sizeof(tile->replay.blockstore_file) );
     997           0 :       tile->rpcserv.funk_obj_id = fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX );
     998           0 :       tile->rpcserv.rpc_port = config->rpc.port;
     999           0 :       tile->rpcserv.tpu_port = config->tiles.quic.regular_transaction_listen_port;
    1000           0 :       tile->rpcserv.tpu_ip_addr = config->net.ip_addr;
    1001           0 :       tile->rpcserv.block_index_max = config->rpc.block_index_max;
    1002           0 :       tile->rpcserv.txn_index_max = config->rpc.txn_index_max;
    1003           0 :       tile->rpcserv.acct_index_max = config->rpc.acct_index_max;
    1004           0 :       strncpy( tile->rpcserv.history_file, config->rpc.history_file, sizeof(tile->rpcserv.history_file) );
    1005           0 :       strncpy( tile->rpcserv.identity_key_path, config->paths.identity_key, sizeof(tile->rpcserv.identity_key_path) );
    1006           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "gui" ) ) ) {
    1007           0 :       if( FD_UNLIKELY( !fd_cstr_to_ip4_addr( config->tiles.gui.gui_listen_address, &tile->gui.listen_addr ) ) )
    1008           0 :         FD_LOG_ERR(( "failed to parse gui listen address `%s`", config->tiles.gui.gui_listen_address ));
    1009           0 :       tile->gui.listen_port = config->tiles.gui.gui_listen_port;
    1010           0 :       tile->gui.is_voting = strcmp( config->paths.vote_account, "" );
    1011           0 :       strncpy( tile->gui.cluster, config->cluster, sizeof(tile->gui.cluster) );
    1012           0 :       strncpy( tile->gui.identity_key_path, config->paths.identity_key, sizeof(tile->gui.identity_key_path) );
    1013           0 :       tile->gui.max_http_connections      = config->tiles.gui.max_http_connections;
    1014           0 :       tile->gui.max_websocket_connections = config->tiles.gui.max_websocket_connections;
    1015           0 :       tile->gui.max_http_request_length   = config->tiles.gui.max_http_request_length;
    1016           0 :       tile->gui.send_buffer_size_mb       = config->tiles.gui.send_buffer_size_mb;
    1017           0 :       tile->gui.schedule_strategy         = config->tiles.pack.schedule_strategy_enum;
    1018           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "plugin" ) ) ) {
    1019             : 
    1020           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "exec" ) ) ) {
    1021           0 :       tile->exec.funk_obj_id = fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX );
    1022             : 
    1023           0 :       tile->exec.capture_start_slot = config->capture.capture_start_slot;
    1024           0 :       strncpy( tile->exec.dump_proto_dir, config->capture.dump_proto_dir, sizeof(tile->exec.dump_proto_dir) );
    1025           0 :       tile->exec.dump_instr_to_pb = config->capture.dump_instr_to_pb;
    1026           0 :       tile->exec.dump_txn_to_pb = config->capture.dump_txn_to_pb;
    1027           0 :       tile->exec.dump_syscall_to_pb = config->capture.dump_syscall_to_pb;
    1028           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "writer" ) ) ) {
    1029           0 :       tile->writer.funk_obj_id = fd_pod_query_ulong( config->topo.props, "funk", ULONG_MAX );
    1030           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "arch_f" ) ||
    1031           0 :                             !strcmp( tile->name, "arch_w" ) ) ) {
    1032           0 :       strncpy( tile->archiver.archiver_path, config->tiles.archiver.archiver_path, sizeof(tile->archiver.archiver_path) );
    1033           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "back" ) ) ) {
    1034           0 :         strncpy( tile->archiver.archiver_path, config->tiles.archiver.archiver_path, PATH_MAX );
    1035           0 :         tile->archiver.end_slot = config->tiles.archiver.end_slot;
    1036           0 :         if( FD_UNLIKELY( 0==strlen( tile->archiver.archiver_path ) ) ) {
    1037           0 :           FD_LOG_ERR(( "`archiver.archiver_path` not specified in toml" ));
    1038           0 :         }
    1039           0 :     } else if( FD_UNLIKELY( !strcmp( tile->name, "shrdcp" ) ) ) {
    1040           0 :       tile->shredcap.repair_intake_listen_port = config->tiles.repair.repair_intake_listen_port;
    1041           0 :       strncpy( tile->shredcap.folder_path, config->tiles.shredcap.folder_path, sizeof(tile->shredcap.folder_path) );
    1042           0 :       tile->shredcap.write_buffer_size = config->tiles.shredcap.write_buffer_size;
    1043           0 :     } else {
    1044           0 :       return 0;
    1045           0 :     }
    1046           0 :   return 1;
    1047           0 : }

Generated by: LCOV version 1.14