LCOV - code coverage report
Current view: top level - discof/shredcap - fd_shredcap_tile.c (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 0 487 0.0 %
Date: 2025-08-05 05:04:49 Functions: 0 23 0.0 %

          Line data    Source code
       1             : #define _GNU_SOURCE  /* Enable GNU and POSIX extensions */
       2             : #include "../../disco/topo/fd_topo.h"
       3             : #include "../../disco/net/fd_net_tile.h"
       4             : #include "../../flamenco/types/fd_types.h"
       5             : #include "../../flamenco/fd_flamenco_base.h"
       6             : #include "../../util/pod/fd_pod_format.h"
       7             : #include "../../disco/fd_disco.h"
       8             : #include "../../discof/fd_discof.h"
       9             : #include "../../discof/restore/utils/fd_ssmsg.h"
      10             : #include "../../discof/restore/utils/fd_ssmanifest_parser.h"
      11             : #include "../../flamenco/stakes/fd_stakes.h"
      12             : #include "../../flamenco/runtime/sysvar/fd_sysvar_epoch_schedule.h"
      13             : #include "../../disco/fd_disco.h"
      14             : #include "../../util/pod/fd_pod_format.h"
      15             : #include "../replay/fd_exec.h"
      16             : 
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h> /* aligned_alloc, free */
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <linux/if_xdp.h>
#include <linux/unistd.h>
#include "generated/fd_shredcap_tile_seccomp.h"
      28             : 
      29             : 
/* This tile currently has two functionalities.

   The first is spying on the net_shred, repair_net, and shred_repair
   links; it currently writes a csv that can be used to analyze repair
   performance after the fact.

   The second is to capture the bank hashes from the replay tile and
   slices of shreds from the repair tile.  These are written to binary
   files that can be used to reproduce a live replay execution. */
      39             : 
      40           0 : #define FD_SHREDCAP_DEFAULT_WRITER_BUF_SZ  (4096UL)  /* local filesystem block size */
      41           0 : #define FD_SHREDCAP_ALLOC_TAG              (4UL)
      42             : #define MAX_BUFFER_SIZE                    (20000UL * sizeof(fd_shred_dest_wire_t))
      43           0 : #define MANIFEST_MAX_TOTAL_BANKS           (2UL) /* the minimum is 2 */
      44           0 : #define MANIFEST_MAX_FORK_WIDTH            (1UL) /* banks are only needed during publish_stake_weights() */
      45             : 
      46           0 : #define NET_SHRED       (0UL)
      47           0 : #define REPAIR_NET      (1UL)
      48           0 : #define SHRED_REPAIR    (2UL)
      49           0 : #define GOSSIP_SHRED    (3UL)
      50           0 : #define GOSSIP_REPAIR   (4UL)
      51           0 : #define REPAIR_SHREDCAP (5UL)
      52           0 : #define REPLAY_SHREDCAP (6UL)
      53             : 
/* Per-in-link descriptor.  Two overlapping views of the same link:
   plain dcache links use mem/chunk0/wmark (see during_frag's
   fd_chunk_to_laddr_const calls), while NET_SHRED links use net_rx
   with fd_net_rx_translate_frag.  A given link only ever uses one of
   the two views, so a union is safe here. */
typedef union {
  struct {
    fd_wksp_t * mem;    /* workspace backing the link dcache */
    ulong       chunk0; /* first valid chunk */
    ulong       wmark;  /* last valid chunk */
  };
  fd_net_rx_bounds_t net_rx; /* bounds object for net rx links */
} fd_capture_in_ctx_t;
      62             : 
/* Outgoing stake-weights link.  publish_stake_weights_manifest writes
   messages into the dcache via mem/chunk0/wmark/chunk.  The
   idx/mcache/sync/depth/seq fields are not referenced in this portion
   of the file — presumably standard out-link bookkeeping; confirm
   against the tile setup code. */
struct fd_stake_out_link {
  ulong       idx;
  fd_frag_meta_t * mcache;
  ulong *          sync;
  ulong            depth;
  ulong            seq;
  fd_wksp_t * mem;    /* workspace backing the out dcache */
  ulong       chunk0; /* first valid chunk */
  ulong       wmark;  /* last valid chunk */
  ulong       chunk;  /* next chunk to publish into */
};
typedef struct fd_stake_out_link fd_stake_out_link_t;
      75             : 
/* Tile-local state for the shredcap capture tile. */
struct fd_capture_tile_ctx {
  uchar               in_kind[ 32 ];  /* NET_SHRED / REPAIR_NET / ... per in link */
  fd_capture_in_ctx_t in_links[ 32 ]; /* per-link dcache or net rx bounds */

  int skip_frag;                      /* set by during_frag to make after_frag drop the frag */
  ushort repair_intake_listen_port;   /* used to filter outgoing repair traffic by src port */

  /* Staging buffer for a shred packet copied in during_frag (full
     packet for NET_SHRED; FEC-complete msg for SHRED_REPAIR). */
  ulong shred_buffer_sz;
  uchar shred_buffer[ FD_NET_MTU ];

  /* Staging buffer for an outgoing repair request packet. */
  ulong repair_buffer_sz;
  uchar repair_buffer[ FD_NET_MTU ];

  fd_stake_out_link_t  stake_out[1];  /* out link for stake weight msgs */
  int                  enable_publish_stake_weights;
  ulong *              manifest_wmark;       /* fseq updated with the manifest slot */
  uchar *              manifest_bank_mem;
  /* NOTE(review): "mainfest" below looks like a typo for "manifest" —
     renaming would require updating all references elsewhere in the
     file, so it is left as-is here. */
  uchar *              mainfest_exec_slot_ctx_mem;
  fd_exec_slot_ctx_t * manifest_exec_slot_ctx;
  char                 manifest_path[ PATH_MAX ];  /* empty string => no manifest to load */
  int                  manifest_load_done;         /* one-shot latch for after_credit */
  uchar *              manifest_spad_mem;
  fd_spad_t *          manifest_spad;  /* holds the processed manifest between loads */
  uchar *              shared_spad_mem;
  fd_spad_t *          shared_spad;    /* shared by manifest load and manifest banks (not concurrently) */

  fd_ip4_udp_hdrs_t intake_hdr[1];

  ulong now;
  ulong  last_packet_ns;
  double tick_per_ns;

  /* Buffered writers for the capture output files below. */
  fd_io_buffered_ostream_t shred_ostream;
  fd_io_buffered_ostream_t repair_ostream;
  fd_io_buffered_ostream_t fecs_ostream;
  fd_io_buffered_ostream_t peers_ostream;
  fd_io_buffered_ostream_t slices_ostream;
  fd_io_buffered_ostream_t bank_hashes_ostream;

  int shreds_fd; /* shreds snooped from net_shred */
  int requests_fd;
  int fecs_fd;
  int peers_fd;
  int slices_fd; /* all shreds in slices from repair tile */
  int bank_hashes_fd; /* bank hashes from writer tile */

  ulong write_buf_sz;

  /* Backing buffers for the ostreams above, one per output file. */
  uchar * shreds_buf;
  uchar * requests_buf;
  uchar * fecs_buf;
  uchar * peers_buf;
  uchar * slices_buf;
  uchar * bank_hashes_buf;

  fd_alloc_t * alloc;
  uchar contact_info_buffer[ MAX_BUFFER_SIZE ]; /* staging for contact info frags */
};
typedef struct fd_capture_tile_ctx fd_capture_tile_ctx_t;
     135             : 
     136             : FD_FN_CONST static inline ulong
     137           0 : scratch_align( void ) {
     138           0 :   return 4096UL;
     139           0 : }
     140             : 
     141             : FD_FN_CONST static inline ulong
     142           0 : manifest_bank_align( void ) {
     143           0 :   return fd_banks_align();
     144           0 : }
     145             : 
     146             : FD_FN_CONST static inline ulong
     147           0 : manifest_bank_footprint( void ) {
     148           0 :   return fd_banks_footprint( MANIFEST_MAX_TOTAL_BANKS, MANIFEST_MAX_FORK_WIDTH );
     149           0 : }
     150             : 
     151             : FD_FN_CONST static inline ulong
     152           0 : manifest_load_align( void ) {
     153           0 :   return 128UL;
     154           0 : }
     155             : 
     156             : FD_FN_CONST static inline ulong
     157           0 : manifest_load_footprint( void ) {
     158             :   /* A manifest typically requires 1GB, but closer to 2GB
     159             :      have been observed in mainnet.  The footprint is then
     160             :      set to 2GB.  TODO a future adjustment may be needed. */
     161           0 :   return 2UL * FD_SHMEM_GIGANTIC_PAGE_SZ;
     162           0 : }
     163             : 
     164             : FD_FN_CONST static inline ulong
     165           0 : manifest_spad_max_alloc_align( void ) {
     166           0 :   return FD_SPAD_ALIGN;
     167           0 : }
     168             : 
     169             : FD_FN_CONST static inline ulong
     170           0 : manifest_spad_max_alloc_footprint( void ) {
     171             :   /* The amount of memory required in the manifest load
     172             :      scratchpad to process it tends to be slightly larger
     173             :      than the manifest load footprint. */
     174           0 :   return manifest_load_footprint() + 128UL * FD_SHMEM_HUGE_PAGE_SZ;
     175           0 : }
     176             : 
     177             : FD_FN_CONST static inline ulong
     178           0 : shared_spad_max_alloc_align( void ) {
     179           0 :   return FD_SPAD_ALIGN;
     180           0 : }
     181             : 
     182             : FD_FN_CONST static inline ulong
     183           0 : shared_spad_max_alloc_footprint( void ) {
     184             :   /* The shared scratchpad is used by the manifest banks
     185             :      and by the manifest load (but not at the same time).
     186             :      The footprint for the banks needs to be equal to
     187             :      banks footprint (at least for the current setup with
     188             :      MANIFEST_MAX_TOTAL_BANKS==2). */
     189           0 :   return fd_ulong_max( manifest_bank_footprint(), manifest_load_footprint() );
     190           0 : }
     191             : 
     192             : FD_FN_PURE static inline ulong
     193           0 : loose_footprint( fd_topo_tile_t const * tile FD_PARAM_UNUSED ) {
     194           0 :   ulong footprint = sizeof(fd_capture_tile_ctx_t) + FD_EXEC_SLOT_CTX_FOOTPRINT
     195           0 :                     + manifest_bank_footprint()
     196           0 :                     + fd_spad_footprint( manifest_spad_max_alloc_footprint() )
     197           0 :                     + fd_spad_footprint( shared_spad_max_alloc_footprint() )
     198           0 :                     + fd_alloc_footprint();
     199           0 :   return fd_ulong_align_up( footprint, FD_SHMEM_GIGANTIC_PAGE_SZ );
     200           0 : }
     201             : 
     202             : 
     203             : static ulong
     204             : populate_allowed_seccomp( fd_topo_t const *      topo FD_PARAM_UNUSED,
     205             :                           fd_topo_tile_t const * tile,
     206             :                           ulong                  out_cnt,
     207           0 :                           struct sock_filter *   out ) {
     208           0 :   populate_sock_filter_policy_fd_shredcap_tile( out_cnt,
     209           0 :                                              out,
     210           0 :                                              (uint)fd_log_private_logfile_fd(),
     211           0 :                                              (uint)tile->shredcap.shreds_fd,
     212           0 :                                              (uint)tile->shredcap.requests_fd,
     213           0 :                                              (uint)tile->shredcap.fecs_fd,
     214           0 :                                              (uint)tile->shredcap.peers_fd );
     215           0 :   return sock_filter_policy_fd_shredcap_tile_instr_cnt;
     216           0 : }
     217             : 
     218             : FD_FN_PURE static inline ulong
     219           0 : scratch_footprint( fd_topo_tile_t const * tile ) {
     220           0 :   (void)tile;
     221           0 :   ulong l = FD_LAYOUT_INIT;
     222           0 :   l = FD_LAYOUT_APPEND( l, alignof(fd_capture_tile_ctx_t),  sizeof(fd_capture_tile_ctx_t) );
     223           0 :   l = FD_LAYOUT_APPEND( l, FD_EXEC_SLOT_CTX_ALIGN,          FD_EXEC_SLOT_CTX_FOOTPRINT );
     224           0 :   l = FD_LAYOUT_APPEND( l, manifest_bank_align(),           manifest_bank_footprint() );
     225           0 :   l = FD_LAYOUT_APPEND( l, manifest_spad_max_alloc_align(), fd_spad_footprint( manifest_spad_max_alloc_footprint() ) );
     226           0 :   l = FD_LAYOUT_APPEND( l, shared_spad_max_alloc_align(),   fd_spad_footprint( shared_spad_max_alloc_footprint() ) );
     227           0 :   l = FD_LAYOUT_APPEND( l, fd_alloc_align(),                fd_alloc_footprint() );
     228           0 :   return FD_LAYOUT_FINI( l, scratch_align() );
     229           0 : }
     230             : 
     231             : static void
     232             : publish_stake_weights_manifest( fd_capture_tile_ctx_t * ctx,
     233             :                                 fd_stem_context_t *    stem,
     234           0 :                                 fd_snapshot_manifest_t const * manifest ) {
     235           0 :   fd_epoch_schedule_t const * schedule = fd_type_pun_const( &manifest->epoch_schedule_params );
     236           0 :   ulong epoch = fd_slot_to_epoch( schedule, manifest->slot, NULL );
     237             : 
     238             :   /* current epoch */
     239           0 :   ulong * stake_weights_msg = fd_chunk_to_laddr( ctx->stake_out->mem, ctx->stake_out->chunk );
     240           0 :   ulong stake_weights_sz = generate_stake_weight_msg_manifest( epoch, schedule, &manifest->epoch_stakes[0], stake_weights_msg );
     241           0 :   ulong stake_weights_sig = 4UL;
     242           0 :   fd_stem_publish( stem, 0UL, stake_weights_sig, ctx->stake_out->chunk, stake_weights_sz, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
     243           0 :   ctx->stake_out->chunk = fd_dcache_compact_next( ctx->stake_out->chunk, stake_weights_sz, ctx->stake_out->chunk0, ctx->stake_out->wmark );
     244           0 :   FD_LOG_NOTICE(("sending current epoch stake weights - epoch: %lu, stake_weight_cnt: %lu, start_slot: %lu, slot_cnt: %lu", stake_weights_msg[0], stake_weights_msg[1], stake_weights_msg[2], stake_weights_msg[3]));
     245             : 
     246             :   /* next current epoch */
     247           0 :   stake_weights_msg = fd_chunk_to_laddr( ctx->stake_out->mem, ctx->stake_out->chunk );
     248           0 :   stake_weights_sz = generate_stake_weight_msg_manifest( epoch + 1, schedule, &manifest->epoch_stakes[1], stake_weights_msg );
     249           0 :   stake_weights_sig = 4UL;
     250           0 :   fd_stem_publish( stem, 0UL, stake_weights_sig, ctx->stake_out->chunk, stake_weights_sz, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
     251           0 :   ctx->stake_out->chunk = fd_dcache_compact_next( ctx->stake_out->chunk, stake_weights_sz, ctx->stake_out->chunk0, ctx->stake_out->wmark );
     252           0 :   FD_LOG_NOTICE(("sending next epoch stake weights - epoch: %lu, stake_weight_cnt: %lu, start_slot: %lu, slot_cnt: %lu", stake_weights_msg[0], stake_weights_msg[1], stake_weights_msg[2], stake_weights_msg[3]));
     253           0 : }
     254             : 
     255             : static inline int
     256             : before_frag( fd_capture_tile_ctx_t * ctx,
     257             :              ulong            in_idx,
     258             :              ulong            seq FD_PARAM_UNUSED,
     259           0 :              ulong            sig ) {
     260           0 :   if( FD_LIKELY( ctx->in_kind[ in_idx ]==NET_SHRED ) ) {
     261           0 :     return (int)(fd_disco_netmux_sig_proto( sig )!=DST_PROTO_SHRED) & (int)(fd_disco_netmux_sig_proto( sig )!=DST_PROTO_REPAIR);
     262           0 :   }
     263           0 :   return 0;
     264           0 : }
     265             : 
     266             : static inline void
     267             : handle_new_turbine_contact_info( fd_capture_tile_ctx_t * ctx,
     268           0 :                                  uchar const *          buf ) {
     269           0 :   ulong const * header = (ulong const *)fd_type_pun_const( buf );
     270           0 :   ulong dest_cnt = header[ 0 ];
     271             : 
     272           0 :   fd_shred_dest_wire_t const * in_dests = fd_type_pun_const( header+1UL );
     273             : 
     274           0 :   for( ulong i=0UL; i<dest_cnt; i++ ) {
     275             :     // need to bswap the port
     276             :     //ushort port = fd_ushort_bswap( in_dests[i].udp_port );
     277           0 :     char peers_buf[1024];
     278           0 :     snprintf( peers_buf, sizeof(peers_buf),
     279           0 :               "%u,%u,%s,%d\n",
     280           0 :               in_dests[i].ip4_addr, in_dests[i].udp_port, FD_BASE58_ENC_32_ALLOCA(in_dests[i].pubkey), 1);
     281           0 :     int err = fd_io_buffered_ostream_write( &ctx->peers_ostream, peers_buf, strlen(peers_buf) );
     282           0 :     FD_TEST( err==0 );
     283           0 :   }
     284           0 : }
     285             : 
     286             : static int
     287           0 : is_fec_completes_msg( ulong sz ) {
     288           0 :   return sz == FD_SHRED_DATA_HEADER_SZ + FD_SHRED_MERKLE_ROOT_SZ;
     289           0 : }
     290             : 
     291             : static inline void
     292             : during_frag( fd_capture_tile_ctx_t * ctx,
     293             :              ulong                   in_idx,
     294             :              ulong                   seq FD_PARAM_UNUSED,
     295             :              ulong                   sig,
     296             :              ulong                   chunk,
     297             :              ulong                   sz,
     298           0 :              ulong                   ctl ) {
     299           0 :   ctx->skip_frag = 0;
     300           0 :   if( ctx->in_kind[ in_idx ]==SHRED_REPAIR ) {
     301           0 :     if( !is_fec_completes_msg( sz ) ) {
     302           0 :       ctx->skip_frag = 1;
     303           0 :       return;
     304           0 :     }
     305           0 :     fd_memcpy( ctx->shred_buffer, fd_chunk_to_laddr_const( ctx->in_links[ in_idx ].mem, chunk ), sz );
     306           0 :     ctx->shred_buffer_sz = sz;
     307           0 :   } else if( ctx->in_kind[ in_idx ] == NET_SHRED ) {
     308           0 :     uchar const * dcache_entry = fd_net_rx_translate_frag( &ctx->in_links[ in_idx ].net_rx, chunk, ctl, sz );
     309           0 :     ulong hdr_sz = fd_disco_netmux_sig_hdr_sz( sig );
     310           0 :     FD_TEST( hdr_sz <= sz ); /* Should be ensured by the net tile */
     311           0 :     fd_shred_t const * shred = fd_shred_parse( dcache_entry+hdr_sz, sz-hdr_sz );
     312           0 :     if( FD_UNLIKELY( !shred ) ) {
     313           0 :       ctx->skip_frag = 1;
     314           0 :       return;
     315           0 :     };
     316           0 :     fd_memcpy( ctx->shred_buffer, dcache_entry, sz );
     317           0 :     ctx->shred_buffer_sz = sz-hdr_sz;
     318           0 :   } else if( ctx->in_kind[ in_idx ] == REPAIR_NET ) {
     319             :     /* Repair will have outgoing pings, outgoing repair requests, and
     320             :        outgoing served shreds we want to filter everything but the
     321             :        repair requests.
     322             :        1. We can index into the ip4 udp packet hdr and check if the src
     323             :           port is the intake listen port or serve port
     324             :        2. Then we can filter on the discriminant which luckily does not
     325             :           require decoding! */
     326             : 
     327           0 :     uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in_links[ in_idx ].mem, chunk );
     328           0 :     fd_ip4_udp_hdrs_t const * hdr = (fd_ip4_udp_hdrs_t const *)dcache_entry;
     329           0 :     if( hdr->udp->net_sport != fd_ushort_bswap( ctx->repair_intake_listen_port ) ) {
     330           0 :       ctx->skip_frag = 1;
     331           0 :       return;
     332           0 :     }
     333           0 :     const uchar * encoded_protocol = dcache_entry + sizeof(fd_ip4_udp_hdrs_t);
     334           0 :     uint discriminant = FD_LOAD(uint, encoded_protocol);
     335             : 
     336           0 :     if( FD_UNLIKELY( discriminant <= fd_repair_protocol_enum_pong ) ) {
     337           0 :       ctx->skip_frag = 1;
     338           0 :       return;
     339           0 :     }
     340           0 :     fd_memcpy( ctx->repair_buffer, dcache_entry, sz );
     341           0 :     ctx->repair_buffer_sz = sz;
     342           0 :   } else if( ctx->in_kind[ in_idx ] == REPAIR_SHREDCAP ) {
     343             : 
     344           0 :     uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in_links[ in_idx ].mem, chunk );
     345             : 
     346             :     /* FIXME this should all be happening in after_frag */
     347             : 
     348             :     /* We expect to get all of the data shreds in a batch at once.  When
     349             :        we do we will write the header, the shreds, and a trailer. */
     350           0 :     ulong payload_sz = sig;
     351           0 :     fd_shredcap_slice_header_msg_t header = {
     352           0 :       .magic      = FD_SHREDCAP_SLICE_HEADER_MAGIC,
     353           0 :       .version    = FD_SHREDCAP_SLICE_HEADER_V1,
     354           0 :       .payload_sz = payload_sz,
     355           0 :     };
     356           0 :     int err;
     357           0 :     err = fd_io_buffered_ostream_write( &ctx->slices_ostream, &header, FD_SHREDCAP_SLICE_HEADER_FOOTPRINT );
     358           0 :     if( FD_UNLIKELY( err != 0 ) ) {
     359           0 :       FD_LOG_CRIT(( "failed to write slice header %d", err ));
     360           0 :     }
     361           0 :     err = fd_io_buffered_ostream_write( &ctx->slices_ostream, dcache_entry, payload_sz );
     362           0 :     if( FD_UNLIKELY( err != 0 ) ) {
     363           0 :       FD_LOG_CRIT(( "failed to write slice data %d", err ));
     364           0 :     }
     365           0 :     fd_shredcap_slice_trailer_msg_t trailer = {
     366           0 :       .magic   = FD_SHREDCAP_SLICE_TRAILER_MAGIC,
     367           0 :       .version = FD_SHREDCAP_SLICE_TRAILER_V1,
     368           0 :     };
     369           0 :     err = fd_io_buffered_ostream_write( &ctx->slices_ostream, &trailer, FD_SHREDCAP_SLICE_TRAILER_FOOTPRINT );
     370           0 :     if( FD_UNLIKELY( err != 0 ) ) {
     371           0 :       FD_LOG_CRIT(( "failed to write slice trailer %d", err ));
     372           0 :     }
     373             : 
     374           0 :   } else if( ctx->in_kind[ in_idx ] == REPLAY_SHREDCAP ) {
     375             : 
     376             :     /* FIXME this should all be happening in after_frag */
     377             : 
     378           0 :    uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in_links[ in_idx ].mem, chunk );
     379           0 :    fd_shredcap_bank_hash_msg_t bank_hash_msg = {
     380           0 :      .magic   = FD_SHREDCAP_BANK_HASH_MAGIC,
     381           0 :      .version = FD_SHREDCAP_BANK_HASH_V1
     382           0 :    };
     383           0 :    fd_memcpy( &bank_hash_msg.bank_hash, dcache_entry, sizeof(fd_hash_t) );
     384           0 :    fd_memcpy( &bank_hash_msg.slot, dcache_entry+sizeof(fd_hash_t), sizeof(ulong) );
     385             : 
     386           0 :    fd_io_buffered_ostream_write( &ctx->bank_hashes_ostream, &bank_hash_msg, FD_SHREDCAP_BANK_HASH_FOOTPRINT );
     387             : 
     388           0 :   } else {
     389             :     // contact infos can be copied into a buffer
     390           0 :     if( FD_UNLIKELY( chunk<ctx->in_links[ in_idx ].chunk0 || chunk>ctx->in_links[ in_idx ].wmark ) ) {
     391           0 :       FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
     392           0 :                    ctx->in_links[ in_idx ].chunk0, ctx->in_links[ in_idx ].wmark ));
     393           0 :     }
     394           0 :     uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in_links[ in_idx ].mem, chunk );
     395           0 :     fd_memcpy( ctx->contact_info_buffer, dcache_entry, sz );
     396           0 :   }
     397           0 : }
     398             : 
     399             : static void
     400             : after_credit( fd_capture_tile_ctx_t * ctx,
     401             :               fd_stem_context_t *     stem FD_PARAM_UNUSED,
     402             :               int *                   opt_poll_in FD_PARAM_UNUSED,
     403           0 :               int *                   charge_busy FD_PARAM_UNUSED ) {
     404             : 
     405           0 :   if( FD_UNLIKELY( !ctx->manifest_load_done ) ) {
     406           0 :     if( FD_LIKELY( !!strcmp( ctx->manifest_path, "") ) ) {
     407             :       /* ctx->manifest_spad will hold the processed manifest. */
     408           0 :       fd_spad_reset( ctx->manifest_spad );
     409             :       /* do not pop from ctx->manifest_spad, the manifest needs
     410             :          to remain available until a new manifest is processed. */
     411             : 
     412           0 :       int fd = open( ctx->manifest_path, O_RDONLY );
     413           0 :       if( FD_UNLIKELY( fd < 0 ) ) {
     414           0 :         FD_LOG_WARNING(( "open(%s) failed (%d-%s)", ctx->manifest_path, errno, fd_io_strerror( errno ) ));
     415           0 :         return;
     416           0 :       }
     417           0 :       FD_LOG_NOTICE(( "manifest %s.", ctx->manifest_path ));
     418             : 
     419           0 :       fd_snapshot_manifest_t * manifest = NULL;
     420           0 :       FD_SPAD_FRAME_BEGIN( ctx->manifest_spad ) {
     421           0 :         manifest = fd_spad_alloc( ctx->manifest_spad, alignof(fd_snapshot_manifest_t), sizeof(fd_snapshot_manifest_t) );
     422           0 :       } FD_SPAD_FRAME_END;
     423           0 :       FD_TEST( manifest );
     424             : 
     425           0 :       FD_SPAD_FRAME_BEGIN( ctx->shared_spad ) {
     426           0 :         uchar * buf    = fd_spad_alloc( ctx->shared_spad, manifest_load_align(), manifest_load_footprint() );
     427           0 :         ulong   buf_sz = 0;
     428           0 :         FD_TEST( !fd_io_read( fd, buf/*dst*/, 0/*dst_min*/, manifest_load_footprint()-1UL /*dst_max*/, &buf_sz ) );
     429             : 
     430           0 :         fd_ssmanifest_parser_t * parser = fd_ssmanifest_parser_join( fd_ssmanifest_parser_new( aligned_alloc(
     431           0 :                 fd_ssmanifest_parser_align(), fd_ssmanifest_parser_footprint( 1UL<<24UL ) ), 1UL<<24UL, 42UL ) );
     432           0 :         FD_TEST( parser );
     433           0 :         fd_ssmanifest_parser_init( parser, manifest );
     434           0 :         int parser_err = fd_ssmanifest_parser_consume( parser, buf, buf_sz );
     435           0 :         if( FD_UNLIKELY( parser_err ) ) FD_LOG_ERR(( "fd_ssmanifest_parser_consume failed (%d)", parser_err ));
     436           0 :       } FD_SPAD_FRAME_END;
     437           0 :       FD_LOG_NOTICE(( "manifest bank slot %lu", manifest->slot ));
     438             : 
     439           0 :       fd_fseq_update( ctx->manifest_wmark, manifest->slot );
     440             : 
     441           0 :       publish_stake_weights_manifest( ctx, stem, manifest );
     442             :       //*charge_busy = 0;
     443           0 :     }
     444             :     /* No need to strcmp every time after_credit is called. */
     445           0 :     ctx->manifest_load_done = 1;
     446           0 :   }
     447           0 : }
     448             : 
     449             : static inline void
     450             : after_frag( fd_capture_tile_ctx_t * ctx,
     451             :             ulong                   in_idx,
     452             :             ulong                   seq    FD_PARAM_UNUSED,
     453             :             ulong                   sig,
     454             :             ulong                   sz,
     455             :             ulong                   tsorig FD_PARAM_UNUSED,
     456             :             ulong                   tspub  FD_PARAM_UNUSED,
     457           0 :             fd_stem_context_t *     stem   FD_PARAM_UNUSED ) {
     458           0 :   if( FD_UNLIKELY( ctx->skip_frag ) ) return;
     459             : 
     460           0 :   if( ctx->in_kind[ in_idx ] == SHRED_REPAIR ) {
     461             :     /* This is a fec completes message! we can use it to check how long
     462             :        it takes to complete a fec */
     463             : 
     464           0 :     fd_shred_t const * shred = (fd_shred_t *)fd_type_pun( ctx->shred_buffer );
     465           0 :     uint data_cnt = fd_disco_shred_repair_fec_sig_data_cnt( sig );
     466           0 :     uint ref_tick = shred->data.flags & FD_SHRED_DATA_REF_TICK_MASK;
     467           0 :     char fec_complete[1024];
     468           0 :     snprintf( fec_complete, sizeof(fec_complete),
     469           0 :              "%ld,%lu,%u,%u,%u\n",
     470           0 :               fd_log_wallclock(), shred->slot, ref_tick, shred->fec_set_idx, data_cnt );
     471             : 
     472             :     // Last shred is guaranteed to be a data shred
     473             : 
     474             : 
     475           0 :     int err = fd_io_buffered_ostream_write( &ctx->fecs_ostream, fec_complete, strlen(fec_complete) );
     476           0 :     FD_TEST( err==0 );
     477           0 :   } else if( ctx->in_kind[ in_idx ] == NET_SHRED ) {
     478             :     /* TODO: leader schedule early exits in shred tile right around
     479             :        startup, which discards some turbine shreds, but there is a
     480             :        chance we capture this shred here. Currently handled in post, but
     481             :        in the future will want to get the leader schedule here so we can
     482             :        also benchmark whether the excepcted sender in the turbine tree
     483             :        matches the actual sender. */
     484             : 
     485           0 :     ulong hdr_sz     = fd_disco_netmux_sig_hdr_sz( sig );
     486           0 :     fd_ip4_udp_hdrs_t * hdr = (fd_ip4_udp_hdrs_t *)ctx->shred_buffer;
     487           0 :     uint src_ip4_addr = hdr->ip4->saddr;
     488           0 :     ushort src_port   = hdr->udp->net_sport;
     489             : 
     490           0 :     fd_shred_t const * shred = fd_shred_parse( ctx->shred_buffer + hdr_sz, sz - hdr_sz );
     491           0 :     int   is_turbine = fd_disco_netmux_sig_proto( sig ) == DST_PROTO_SHRED;
     492           0 :     uint  nonce      = is_turbine ? 0 : FD_LOAD(uint, ctx->shred_buffer + hdr_sz + fd_shred_sz( shred ) );
     493           0 :     int   is_data    = fd_shred_is_data( fd_shred_type( shred->variant ) );
     494           0 :     ulong slot       = shred->slot;
     495           0 :     uint  idx        = shred->idx;
     496           0 :     uint  fec_idx    = shred->fec_set_idx;
     497           0 :     uint  ref_tick   = 65;
     498           0 :     if( FD_UNLIKELY( is_turbine && is_data ) ) {
     499             :       /* We can then index into the flag and get a REFTICK */
     500           0 :       ref_tick = shred->data.flags & FD_SHRED_DATA_REF_TICK_MASK;
     501           0 :     }
     502             : 
     503           0 :     char repair_data_buf[1024];
     504           0 :     snprintf( repair_data_buf, sizeof(repair_data_buf),
     505           0 :              "%u,%u,%ld,%lu,%u,%u,%u,%d,%d,%u\n",
     506           0 :               src_ip4_addr, src_port, fd_log_wallclock(), slot, ref_tick, fec_idx, idx, is_turbine, is_data, nonce );
     507             : 
     508           0 :     int err = fd_io_buffered_ostream_write( &ctx->shred_ostream, repair_data_buf, strlen(repair_data_buf) );
     509           0 :     FD_TEST( err==0 );
     510           0 :   } else if( ctx->in_kind[ in_idx ] == REPAIR_NET ) {
     511             :     /* We have a valid repair request that we can finally decode.
     512             :        Unfortunately we actually have to decode because we cant cast
     513             :        directly to the protocol */
     514           0 :     fd_ip4_udp_hdrs_t * hdr = (fd_ip4_udp_hdrs_t *)ctx->repair_buffer;
     515           0 :     fd_repair_protocol_t protocol;
     516           0 :     fd_bincode_decode_ctx_t bctx = { .data = ctx->repair_buffer + sizeof(fd_ip4_udp_hdrs_t), .dataend = ctx->repair_buffer + ctx->repair_buffer_sz };
     517           0 :     fd_repair_protocol_t * decoded = fd_repair_protocol_decode( &protocol, &bctx );
     518             : 
     519           0 :     FD_TEST( decoded == &protocol );
     520           0 :     FD_TEST( decoded != NULL );
     521             : 
     522           0 :     uint   peer_ip4_addr = hdr->ip4->daddr;
     523           0 :     ushort peer_port     = hdr->udp->net_dport;
     524           0 :     ulong  slot          = 0UL;
     525           0 :     ulong  shred_index   = UINT_MAX;
     526           0 :     uint   nonce         = 0U;
     527             : 
     528           0 :     switch( protocol.discriminant ) {
     529           0 :       case fd_repair_protocol_enum_window_index: {
     530           0 :         slot        = protocol.inner.window_index.slot;
     531           0 :         shred_index = protocol.inner.window_index.shred_index;
     532           0 :         nonce       = protocol.inner.window_index.header.nonce;
     533           0 :         break;
     534           0 :       }
     535           0 :       case fd_repair_protocol_enum_highest_window_index: {
     536           0 :         slot        = protocol.inner.highest_window_index.slot;
     537           0 :         shred_index = protocol.inner.highest_window_index.shred_index;
     538           0 :         nonce       = protocol.inner.highest_window_index.header.nonce;
     539           0 :         break;
     540           0 :       }
     541           0 :       case fd_repair_protocol_enum_orphan: {
     542           0 :         slot  = protocol.inner.orphan.slot;
     543           0 :         nonce = protocol.inner.orphan.header.nonce;
     544           0 :         break;
     545           0 :       }
     546           0 :       default:
     547           0 :         break;
     548           0 :     }
     549             : 
     550           0 :     char repair_data_buf[1024];
     551           0 :     snprintf( repair_data_buf, sizeof(repair_data_buf),
     552           0 :               "%u,%u,%ld,%u,%lu,%lu\n",
     553           0 :               peer_ip4_addr, peer_port, fd_log_wallclock(), nonce, slot, shred_index );
     554           0 :     int err = fd_io_buffered_ostream_write( &ctx->repair_ostream, repair_data_buf, strlen(repair_data_buf) );
     555           0 :     FD_TEST( err==0 );
     556           0 :   } else if( ctx->in_kind[ in_idx ] == GOSSIP_REPAIR ) {
     557           0 :     fd_shred_dest_wire_t const * in_dests = (fd_shred_dest_wire_t const *)fd_type_pun_const( ctx->contact_info_buffer );
     558           0 :     ulong dest_cnt = sz;
     559           0 :     for( ulong i=0UL; i<dest_cnt; i++ ) {
     560           0 :       char peers_buf[1024];
     561           0 :       snprintf( peers_buf, sizeof(peers_buf),
     562           0 :                 "%u,%u,%s,%d\n",
     563           0 :                  in_dests[i].ip4_addr, in_dests[i].udp_port, FD_BASE58_ENC_32_ALLOCA(in_dests[i].pubkey), 0);
     564           0 :       int err = fd_io_buffered_ostream_write( &ctx->peers_ostream, peers_buf, strlen(peers_buf) );
     565           0 :       FD_TEST( err==0 );
     566           0 :     }
     567           0 :   } else if( ctx->in_kind[ in_idx ] == GOSSIP_SHRED ) { // crds_shred contact infos
     568           0 :     handle_new_turbine_contact_info( ctx, ctx->contact_info_buffer );
     569           0 :   }
     570           0 : }
     571             : 
     572             : static ulong
     573             : populate_allowed_fds( fd_topo_t const      * topo        FD_PARAM_UNUSED,
     574             :                       fd_topo_tile_t const * tile,
     575             :                       ulong                  out_fds_cnt FD_PARAM_UNUSED,
     576           0 :                       int *                  out_fds ) {
     577           0 :   ulong out_cnt = 0UL;
     578             : 
     579           0 :   out_fds[ out_cnt++ ] = 2; /* stderr */
     580           0 :   if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
     581           0 :     out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
     582           0 :   if( FD_LIKELY( -1!=tile->shredcap.shreds_fd ) )
     583           0 :     out_fds[ out_cnt++ ] = tile->shredcap.shreds_fd; /* shred file */
     584           0 :   if( FD_LIKELY( -1!=tile->shredcap.requests_fd ) )
     585           0 :     out_fds[ out_cnt++ ] = tile->shredcap.requests_fd; /* request file */
     586           0 :   if( FD_LIKELY( -1!=tile->shredcap.fecs_fd ) )
     587           0 :     out_fds[ out_cnt++ ] = tile->shredcap.fecs_fd; /* fec complete file */
     588           0 :   if( FD_LIKELY( -1!=tile->shredcap.peers_fd ) )
     589           0 :     out_fds[ out_cnt++ ] = tile->shredcap.peers_fd; /* peers file */
     590           0 :   if( FD_LIKELY( -1!=tile->shredcap.slices_fd ) )
     591           0 :     out_fds[ out_cnt++ ] = tile->shredcap.slices_fd; /* slices file */
     592           0 :   if( FD_LIKELY( -1!=tile->shredcap.bank_hashes_fd ) )
     593           0 :     out_fds[ out_cnt++ ] = tile->shredcap.bank_hashes_fd; /* bank hashes file */
     594             : 
     595           0 :   return out_cnt;
     596           0 : }
     597             : 
     598             : static void
     599             : privileged_init( fd_topo_t *      topo FD_PARAM_UNUSED,
     600           0 :                  fd_topo_tile_t * tile ) {
     601           0 :   char file_path[PATH_MAX];
     602           0 :   strcpy( file_path, tile->shredcap.folder_path );
     603           0 :   strcat( file_path, "/shred_data.csv" );
     604           0 :   tile->shredcap.shreds_fd = open( file_path, O_WRONLY|O_CREAT|O_APPEND /*| O_DIRECT*/, 0644 );
     605           0 :   if ( FD_UNLIKELY( tile->shredcap.shreds_fd == -1 ) ) {
     606           0 :     FD_LOG_ERR(( "failed to open or create shred csv dump file %s %d %s", file_path, errno, strerror(errno) ));
     607           0 :   }
     608             : 
     609           0 :   strcpy( file_path, tile->shredcap.folder_path );
     610           0 :   strcat( file_path, "/request_data.csv" );
     611           0 :   tile->shredcap.requests_fd = open( file_path, O_WRONLY|O_CREAT|O_APPEND /*| O_DIRECT*/, 0644 );
     612           0 :   if ( FD_UNLIKELY( tile->shredcap.requests_fd == -1 ) ) {
     613           0 :     FD_LOG_ERR(( "failed to open or create request csv dump file %s %d %s", file_path, errno, strerror(errno) ));
     614           0 :   }
     615             : 
     616           0 :   strcpy( file_path, tile->shredcap.folder_path );
     617           0 :   strcat( file_path, "/fec_complete.csv" );
     618           0 :   tile->shredcap.fecs_fd = open( file_path, O_WRONLY|O_CREAT|O_APPEND /*| O_DIRECT*/, 0644 );
     619           0 :   if ( FD_UNLIKELY( tile->shredcap.fecs_fd == -1 ) ) {
     620           0 :     FD_LOG_ERR(( "failed to open or create fec complete csv dump file %s %d %s", file_path, errno, strerror(errno) ));
     621           0 :   }
     622           0 :   FD_LOG_NOTICE(( "Opening shred csv dump file at %s", file_path ));
     623             : 
     624           0 :   strcpy( file_path, tile->shredcap.folder_path );
     625           0 :   strcat( file_path, "/peers.csv" );
     626           0 :   tile->shredcap.peers_fd = open( file_path, O_WRONLY|O_CREAT|O_APPEND /*| O_DIRECT*/, 0644 );
     627           0 :   if ( FD_UNLIKELY( tile->shredcap.peers_fd == -1 ) ) {
     628           0 :     FD_LOG_ERR(( "failed to open or create peers csv dump file %s %d %s", file_path, errno, strerror(errno) ));
     629           0 :   }
     630             : 
     631           0 :   strcpy( file_path, tile->shredcap.folder_path );
     632           0 :   strcat( file_path, "/slices.bin" );
     633           0 :   tile->shredcap.slices_fd = open( file_path, O_WRONLY|O_CREAT|O_APPEND /*| O_DIRECT*/, 0644 );
     634           0 :   if ( FD_UNLIKELY( tile->shredcap.slices_fd == -1 ) ) {
     635           0 :     FD_LOG_ERR(( "failed to open or create slices csv dump file %s %d %s", file_path, errno, strerror(errno) ));
     636           0 :   }
     637           0 :   FD_LOG_NOTICE(( "Opening val_shreds binary dump file at %s", file_path ));
     638             : 
     639           0 :   strcpy( file_path, tile->shredcap.folder_path );
     640           0 :   strcat( file_path, "/bank_hashes.bin" );
     641           0 :   tile->shredcap.bank_hashes_fd = open( file_path, O_WRONLY|O_CREAT|O_APPEND /*| O_DIRECT*/, 0644 );
     642           0 :   if ( FD_UNLIKELY( tile->shredcap.bank_hashes_fd == -1 ) ) {
     643           0 :     FD_LOG_ERR(( "failed to open or create bank_hashes csv dump file %s %d %s", file_path, errno, strerror(errno) ));
     644           0 :   }
     645           0 :   FD_LOG_NOTICE(( "Opening bank_hashes binary dump file at %s", file_path ));
     646           0 : }
     647             : 
     648             : static void
     649             : init_file_handlers( fd_capture_tile_ctx_t    * ctx,
     650             :                     int                      * ctx_file,
     651             :                     int                        tile_file,
     652             :                     uchar                   ** ctx_buf,
     653           0 :                     fd_io_buffered_ostream_t * ctx_ostream ) {
     654           0 :   *ctx_file =  tile_file ;
     655             : 
     656           0 :   int err = ftruncate( *ctx_file, 0UL );
     657           0 :   if( FD_UNLIKELY( err ) ) {
     658           0 :     FD_LOG_ERR(( "failed to truncate file (%i-%s)", errno, fd_io_strerror( errno ) ));
     659           0 :   }
     660           0 :   long seek = lseek( *ctx_file, 0UL, SEEK_SET );
     661           0 :   if( FD_UNLIKELY( seek!=0L ) ) {
     662           0 :     FD_LOG_ERR(( "failed to seek to the beginning of file" ));
     663           0 :   }
     664             : 
     665           0 :   *ctx_buf = fd_alloc_malloc( ctx->alloc, 4096, ctx->write_buf_sz );
     666           0 :   if( FD_UNLIKELY( *ctx_buf == NULL ) ) {
     667           0 :     FD_LOG_ERR(( "failed to allocate ostream buffer" ));
     668           0 :   }
     669             : 
     670           0 :   if( FD_UNLIKELY( !fd_io_buffered_ostream_init(
     671           0 :     ctx_ostream,
     672           0 :     *ctx_file,
     673           0 :     *ctx_buf,
     674           0 :     ctx->write_buf_sz ) ) ) {
     675           0 :     FD_LOG_ERR(( "failed to initialize ostream" ));
     676           0 :   }
     677           0 : }
     678             : 
     679             : 
     680             : static void
     681             : unprivileged_init( fd_topo_t *      topo,
     682           0 :                    fd_topo_tile_t * tile ) {
     683             : 
     684           0 :   void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
     685           0 :   FD_SCRATCH_ALLOC_INIT( l, scratch );
     686           0 :   fd_capture_tile_ctx_t * ctx       = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_capture_tile_ctx_t),  sizeof(fd_capture_tile_ctx_t) );
     687           0 :   void * mainfest_exec_slot_ctx_mem = FD_SCRATCH_ALLOC_APPEND( l, FD_EXEC_SLOT_CTX_ALIGN,          FD_EXEC_SLOT_CTX_FOOTPRINT );
     688           0 :   void * manifest_bank_mem          = FD_SCRATCH_ALLOC_APPEND( l, manifest_bank_align(),           manifest_bank_footprint() );
     689           0 :   void * manifest_spad_mem          = FD_SCRATCH_ALLOC_APPEND( l, manifest_spad_max_alloc_align(), fd_spad_footprint( manifest_spad_max_alloc_footprint() ) );
     690           0 :   void * shared_spad_mem            = FD_SCRATCH_ALLOC_APPEND( l, shared_spad_max_alloc_align(),   fd_spad_footprint( shared_spad_max_alloc_footprint() ) );
     691           0 :   void * alloc_mem                  = FD_SCRATCH_ALLOC_APPEND( l, fd_alloc_align(),                fd_alloc_footprint() );
     692           0 :   FD_SCRATCH_ALLOC_FINI( l, scratch_align() );
     693             : 
     694             :   /* Input links */
     695           0 :   for( ulong i=0; i<tile->in_cnt; i++ ) {
     696           0 :     fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
     697           0 :     fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
     698           0 :     if( 0==strcmp( link->name, "net_shred" ) ) {
     699           0 :       ctx->in_kind[ i ] = NET_SHRED;
     700           0 :       fd_net_rx_bounds_init( &ctx->in_links[ i ].net_rx, link->dcache );
     701           0 :       continue;
     702           0 :     } else if( 0==strcmp( link->name, "repair_net" ) ) {
     703           0 :       ctx->in_kind[ i ] = REPAIR_NET;
     704           0 :     } else if( 0==strcmp( link->name, "shred_repair" ) ) {
     705           0 :       ctx->in_kind[ i ] = SHRED_REPAIR;
     706           0 :     } else if( 0==strcmp( link->name, "crds_shred" ) ) {
     707           0 :       ctx->in_kind[ i ] = GOSSIP_SHRED;
     708           0 :     } else if( 0==strcmp( link->name, "gossip_repai" ) ) {
     709           0 :       ctx->in_kind[ i ] = GOSSIP_REPAIR;
     710           0 :     } else if( 0==strcmp( link->name, "repair_scap" ) ) {
     711           0 :       ctx->in_kind[ i ] = REPAIR_SHREDCAP;
     712           0 :     } else if( 0==strcmp( link->name, "replay_scap" ) ) {
     713           0 :       ctx->in_kind[ i ] = REPLAY_SHREDCAP;
     714           0 :     } else {
     715           0 :       FD_LOG_ERR(( "scap tile has unexpected input link %s", link->name ));
     716           0 :     }
     717             : 
     718           0 :     ctx->in_links[ i ].mem    = link_wksp->wksp;
     719           0 :     ctx->in_links[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in_links[ i ].mem, link->dcache );
     720           0 :     ctx->in_links[ i ].wmark  = fd_dcache_compact_wmark ( ctx->in_links[ i ].mem, link->dcache, link->mtu );
     721           0 :   }
     722             : 
     723           0 :   ctx->repair_intake_listen_port = tile->shredcap.repair_intake_listen_port;
     724           0 :   ctx->write_buf_sz = tile->shredcap.write_buffer_size ? tile->shredcap.write_buffer_size : FD_SHREDCAP_DEFAULT_WRITER_BUF_SZ;
     725             : 
     726             :   /* Set up stake weights tile output */
     727           0 :   ctx->stake_out->idx       = fd_topo_find_tile_out_link( topo, tile, "stake_out", 0 );
     728           0 :   if( FD_LIKELY( ctx->stake_out->idx!=ULONG_MAX ) ) {
     729           0 :     fd_topo_link_t * stake_weights_out = &topo->links[ tile->out_link_id[ ctx->stake_out->idx] ];
     730           0 :     ctx->stake_out->mcache  = stake_weights_out->mcache;
     731           0 :     ctx->stake_out->mem     = topo->workspaces[ topo->objs[ stake_weights_out->dcache_obj_id ].wksp_id ].wksp;
     732           0 :     ctx->stake_out->sync    = fd_mcache_seq_laddr     ( ctx->stake_out->mcache );
     733           0 :     ctx->stake_out->depth   = fd_mcache_depth         ( ctx->stake_out->mcache );
     734           0 :     ctx->stake_out->seq     = fd_mcache_seq_query     ( ctx->stake_out->sync );
     735           0 :     ctx->stake_out->chunk0  = fd_dcache_compact_chunk0( ctx->stake_out->mem, stake_weights_out->dcache );
     736           0 :     ctx->stake_out->wmark   = fd_dcache_compact_wmark ( ctx->stake_out->mem, stake_weights_out->dcache, stake_weights_out->mtu );
     737           0 :     ctx->stake_out->chunk   = ctx->stake_out->chunk0;
     738           0 :   } else {
     739           0 :     FD_LOG_WARNING(( "no connection to stake_out link" ));
     740           0 :     memset( ctx->stake_out, 0, sizeof(fd_stake_out_link_t) );
     741           0 :   }
     742             : 
     743             :   /* If the manifest is enabled (for processing), the stake_out link
     744             :      must be connected to the tile.  TODO in principle, it should be
     745             :      possible to gate the remaining of the manifest-related config. */
     746           0 :   ctx->enable_publish_stake_weights = tile->shredcap.enable_publish_stake_weights;
     747           0 :   FD_LOG_NOTICE(( "enable_publish_stake_weights ? %d", ctx->enable_publish_stake_weights ));
     748             : 
     749             :   /* manifest_wmark (root slot) */
     750           0 :   ulong root_slot_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "root_slot" );
     751           0 :   FD_TEST( root_slot_obj_id!=ULONG_MAX );
     752           0 :   ctx->manifest_wmark = fd_fseq_join( fd_topo_obj_laddr( topo, root_slot_obj_id ) );
     753           0 :   if( FD_UNLIKELY( !ctx->manifest_wmark ) ) FD_LOG_ERR(( "no root_slot fseq" ));
     754           0 :   FD_TEST( ULONG_MAX==fd_fseq_query( ctx->manifest_wmark ) );
     755             : 
     756           0 :   ctx->manifest_bank_mem    = manifest_bank_mem;
     757             : 
     758           0 :   ctx->mainfest_exec_slot_ctx_mem    = mainfest_exec_slot_ctx_mem;
     759           0 :   ctx->manifest_exec_slot_ctx        = fd_exec_slot_ctx_join( fd_exec_slot_ctx_new( ctx->mainfest_exec_slot_ctx_mem  ) );
     760           0 :   FD_TEST( ctx->manifest_exec_slot_ctx );
     761           0 :   ctx->manifest_exec_slot_ctx->banks = fd_banks_join( fd_banks_new( ctx->manifest_bank_mem, MANIFEST_MAX_TOTAL_BANKS, MANIFEST_MAX_FORK_WIDTH ) );
     762           0 :   FD_TEST( ctx->manifest_exec_slot_ctx->banks );
     763           0 :   ctx->manifest_exec_slot_ctx->bank  = fd_banks_init_bank( ctx->manifest_exec_slot_ctx->banks, 0UL );
     764           0 :   FD_TEST( ctx->manifest_exec_slot_ctx->bank );
     765             : 
     766           0 :   strncpy( ctx->manifest_path, tile->shredcap.manifest_path, PATH_MAX );
     767           0 :   ctx->manifest_load_done   = 0;
     768           0 :   ctx->manifest_spad_mem    = manifest_spad_mem;
     769           0 :   ctx->manifest_spad        = fd_spad_join( fd_spad_new( ctx->manifest_spad_mem, manifest_spad_max_alloc_footprint() ) );
     770           0 :   ctx->shared_spad_mem      = shared_spad_mem;
     771           0 :   ctx->shared_spad          = fd_spad_join( fd_spad_new( ctx->shared_spad_mem, shared_spad_max_alloc_footprint() ) );
     772             : 
     773             :   /* Allocate the write buffers */
     774           0 :   ctx->alloc = fd_alloc_join( fd_alloc_new( alloc_mem, FD_SHREDCAP_ALLOC_TAG ), fd_tile_idx() );
     775           0 :   if( FD_UNLIKELY( !ctx->alloc ) ) {
     776           0 :     FD_LOG_ERR( ( "fd_alloc_join failed" ) );
     777           0 :   }
     778             : 
     779             :   /* Setup the csv files to be in the expected state */
     780             : 
     781           0 :   init_file_handlers( ctx, &ctx->shreds_fd,      tile->shredcap.shreds_fd,      &ctx->shreds_buf,      &ctx->shred_ostream );
     782           0 :   init_file_handlers( ctx, &ctx->requests_fd,    tile->shredcap.requests_fd,    &ctx->requests_buf,    &ctx->repair_ostream );
     783           0 :   init_file_handlers( ctx, &ctx->fecs_fd,        tile->shredcap.fecs_fd,        &ctx->fecs_buf,        &ctx->fecs_ostream );
     784           0 :   init_file_handlers( ctx, &ctx->peers_fd,       tile->shredcap.peers_fd,       &ctx->peers_buf,       &ctx->peers_ostream );
     785             : 
     786           0 :   int err = fd_io_buffered_ostream_write( &ctx->shred_ostream,  "src_ip,src_port,timestamp,slot,ref_tick,fec_set_idx,idx,is_turbine,is_data,nonce\n", 81UL );
     787           0 :   err    |= fd_io_buffered_ostream_write( &ctx->repair_ostream, "dst_ip,dst_port,timestamp,nonce,slot,idx\n", 41UL );
     788           0 :   err    |= fd_io_buffered_ostream_write( &ctx->fecs_ostream,   "timestamp,slot,ref_tick,fec_set_idx,data_cnt\n", 45UL );
     789           0 :   err    |= fd_io_buffered_ostream_write( &ctx->peers_ostream,  "peer_ip4_addr,peer_port,pubkey,turbine\n", 48UL );
     790             : 
     791           0 :   if( FD_UNLIKELY( err ) ) {
     792           0 :     FD_LOG_ERR(( "failed to write header to any of the 4 csv files (%i-%s)", errno, fd_io_strerror( errno ) ));
     793           0 :   }
     794             : 
     795             :   /* Setup the binary files to be in the expected state. These files are
     796             :      not csv, so we don't need headers. */
     797           0 :   init_file_handlers( ctx, &ctx->slices_fd,      tile->shredcap.slices_fd,      &ctx->slices_buf,      &ctx->slices_ostream );
     798           0 :   init_file_handlers( ctx, &ctx->bank_hashes_fd, tile->shredcap.bank_hashes_fd, &ctx->bank_hashes_buf, &ctx->bank_hashes_ostream );
     799           0 : }
     800             : 
/* Stem runtime configuration for this tile.  STEM_BURST bounds how
   many frags may be published per credit; STEM_LAZY tunes the
   housekeeping cadence (NOTE(review): exact units are defined by
   fd_stem.c -- confirm there). */
#define STEM_BURST (1UL)
#define STEM_LAZY  (50UL)

/* Context object passed to every stem callback. */
#define STEM_CALLBACK_CONTEXT_TYPE  fd_capture_tile_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_capture_tile_ctx_t)

/* Callbacks implemented above in this file. */
#define STEM_CALLBACK_AFTER_CREDIT after_credit
#define STEM_CALLBACK_DURING_FRAG during_frag
#define STEM_CALLBACK_AFTER_FRAG  after_frag
#define STEM_CALLBACK_BEFORE_FRAG before_frag

/* Instantiates stem_run using the macros above. */
#include "../../disco/stem/fd_stem.c"
     813             : 
/* Tile descriptor registering the shredcap ("scap") tile with the
   topology runtime: footprint/alignment queries, sandbox policy
   (seccomp filter and allowed fds), the two-phase init above, and the
   stem_run main loop instantiated by the fd_stem.c include. */
fd_topo_run_tile_t fd_tile_shredcap = {
  .name                     = "scap",
  .loose_footprint          = loose_footprint,
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds     = populate_allowed_fds,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};

Generated by: LCOV version 1.14