Line data Source code
1 : #include "fd_verify_tile.h"
2 : #include "../fd_txn_m.h"
3 : #include "../metrics/fd_metrics.h"
4 : #include "generated/fd_verify_tile_seccomp.h"
5 : #include "../../flamenco/gossip/fd_gossip_types.h"
6 :
/* Tags stored in ctx->in_kind[ i ] identifying what kind of producer
   feeds input link i; assigned in unprivileged_init from the link
   name (quic_verify / bundle_verif / gossip_out / send_out). */
#define IN_KIND_QUIC (0UL)
#define IN_KIND_BUNDLE (1UL)
#define IN_KIND_GOSSIP (2UL)
#define IN_KIND_SEND (3UL)
11 :
12 : FD_FN_CONST static inline ulong
13 15 : scratch_align( void ) {
14 15 : return FD_TCACHE_ALIGN;
15 15 : }
16 :
17 : FD_FN_PURE static inline ulong
18 6 : scratch_footprint( fd_topo_tile_t const * tile ) {
19 6 : ulong l = FD_LAYOUT_INIT;
20 6 : l = FD_LAYOUT_APPEND( l, alignof( fd_verify_ctx_t ), sizeof( fd_verify_ctx_t ) );
21 6 : l = FD_LAYOUT_APPEND( l, fd_tcache_align(), fd_tcache_footprint( tile->verify.tcache_depth, 0UL ) );
22 78 : for( ulong i=0; i<FD_TXN_ACTUAL_SIG_MAX; i++ ) {
23 72 : l = FD_LAYOUT_APPEND( l, fd_sha512_align(), fd_sha512_footprint() );
24 72 : }
25 6 : return FD_LAYOUT_FINI( l, scratch_align() );
26 6 : }
27 :
/* metrics_write publishes this tile's locally-accumulated drop/receive
   counters to the shared metrics region.  Invoked periodically by the
   stem run loop (wired up via STEM_CALLBACK_METRICS_WRITE below). */
static inline void
metrics_write( fd_verify_ctx_t * ctx ) {
  FD_MCNT_SET( VERIFY, TRANSACTION_BUNDLE_PEER_FAILURE, ctx->metrics.bundle_peer_fail_cnt );
  FD_MCNT_SET( VERIFY, TRANSACTION_PARSE_FAILURE, ctx->metrics.parse_fail_cnt );
  FD_MCNT_SET( VERIFY, TRANSACTION_DEDUP_FAILURE, ctx->metrics.dedup_fail_cnt );
  FD_MCNT_SET( VERIFY, GOSSIPED_VOTES_RECEIVED, ctx->metrics.gossiped_votes_cnt );
  FD_MCNT_SET( VERIFY, TRANSACTION_VERIFY_FAILURE, ctx->metrics.verify_fail_cnt );
}
36 :
37 : static int
38 : before_frag( fd_verify_ctx_t * ctx,
39 : ulong in_idx,
40 : ulong seq,
41 42 : ulong sig ) {
42 : /* Bundle tile can produce both "bundles" and "packets", a packet is a
43 : regular transaction and should be round-robined between verify
44 : tiles, while bundles need to go through verify:0 currently to
45 : prevent interleaving of bundle streams. */
46 42 : int is_bundle_packet = (ctx->in_kind[ in_idx ]==IN_KIND_BUNDLE && !sig);
47 :
48 42 : if( FD_LIKELY( is_bundle_packet || ctx->in_kind[ in_idx ]==IN_KIND_QUIC ) ) {
49 30 : return (seq % ctx->round_robin_cnt) != ctx->round_robin_idx;
50 30 : } else if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_BUNDLE ) ) {
51 12 : return ctx->round_robin_idx!=0UL;
52 12 : } else if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
53 0 : return (seq % ctx->round_robin_cnt) != ctx->round_robin_idx ||
54 0 : sig!=FD_GOSSIP_UPDATE_TAG_VOTE;
55 0 : }
56 :
57 0 : return 0;
58 42 : }
59 :
/* during_frag is called between pairs for sequence number checks, as
   we are reading incoming frags.  We don't actually need to copy the
   fragment here, see fd_dedup.c for why we do this.

   TPU-style inputs (quic/bundle/send) already carry an fd_txn_m_t and
   are copied verbatim into the out dcache; gossip inputs carry a
   gossip vote update which is repacked into a fresh fd_txn_m_t at the
   out chunk.  Either way, after_frag finds the staged txn_m at
   ctx->out_chunk. */

static inline void
during_frag( fd_verify_ctx_t * ctx,
             ulong             in_idx,
             ulong             seq FD_PARAM_UNUSED,
             ulong             sig FD_PARAM_UNUSED,
             ulong             chunk,
             ulong             sz,
             ulong             ctl FD_PARAM_UNUSED ) {

  ulong in_kind = ctx->in_kind[ in_idx ];
  if( FD_UNLIKELY( in_kind==IN_KIND_BUNDLE || in_kind==IN_KIND_QUIC || in_kind==IN_KIND_SEND ) ) {
    /* Validate the producer's chunk/size against our view of its
       dcache before dereferencing it. */
    if( FD_UNLIKELY( chunk<ctx->in[in_idx].chunk0 || chunk>ctx->in[in_idx].wmark || sz>FD_TPU_RAW_MTU ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu,%lu]", chunk, sz, ctx->in[in_idx].chunk0, ctx->in[in_idx].wmark, FD_TPU_RAW_MTU ));

    uchar * src = (uchar *)fd_chunk_to_laddr( ctx->in[in_idx].mem, chunk );
    uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
    fd_memcpy( dst, src, sz );

    /* payload_sz came from the producer; reject anything claiming a
       payload larger than a transaction can be before after_frag
       reads that many payload bytes. */
    fd_txn_m_t const * txnm = (fd_txn_m_t const *)dst;
    if( FD_UNLIKELY( txnm->payload_sz>FD_TPU_MTU ) ) {
      FD_LOG_ERR(( "fd_verify: txn payload size %hu exceeds max %lu", txnm->payload_sz, FD_TPU_MTU ));
    }
  } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
    if( FD_UNLIKELY( chunk<ctx->in[in_idx].chunk0 || chunk>ctx->in[in_idx].wmark || sz>2048UL ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[in_idx].chunk0, ctx->in[in_idx].wmark ));

    fd_gossip_update_message_t const * msg = (fd_gossip_update_message_t const *)fd_chunk_to_laddr_const( ctx->in[in_idx].mem, chunk );
    fd_txn_m_t * dst = (fd_txn_m_t *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );

    /* Repack the gossiped vote txn into a txn_m for the downstream
       dedup/pack path.  NOTE(review): txn_sz is not explicitly checked
       against FD_TPU_MTU here -- presumably bounded by the sz<=2048UL
       check above and by the gossip producer; confirm. */
    dst->payload_sz = (ushort)msg->vote.txn_sz;
    dst->block_engine.bundle_id = 0UL;
    dst->source_ipv4 = msg->vote.socket.addr;
    dst->source_tpu = FD_TXN_M_TPU_SOURCE_GOSSIP;
    fd_memcpy( fd_txn_m_payload( dst ), msg->vote.txn, msg->vote.txn_sz );
  }
}
100 :
/* after_frag parses and signature-verifies the transaction that
   during_frag staged at ctx->out_chunk, then publishes it downstream
   on out link 0.  Failures (parse, dedup, bad signature) drop the
   frag without publishing; if the frag belonged to a bundle, the
   failure is latched in ctx->bundle_failed so the remaining
   transactions of that bundle are dropped too. */
static inline void
after_frag( fd_verify_ctx_t *   ctx,
            ulong               in_idx,
            ulong               seq,
            ulong               sig,
            ulong               sz,
            ulong               tsorig,
            ulong               _tspub,
            fd_stem_context_t * stem ) {
  (void)in_idx;
  (void)seq;
  (void)sig;
  (void)sz;
  (void)_tspub;

  /* Parse the payload in place; txn_t_sz==0 signals a parse failure. */
  fd_txn_m_t * txnm = (fd_txn_m_t *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
  fd_txn_t * txnt = fd_txn_m_txn_t( txnm );
  txnm->txn_t_sz = (ushort)fd_txn_parse( fd_txn_m_payload( txnm ), txnm->payload_sz, txnt, NULL );

  /* A non-zero bundle_id marks this txn as part of a bundle. */
  int is_bundle = !!txnm->block_engine.bundle_id;

  /* New bundle id observed: reset the per-bundle failure latch. */
  if( FD_UNLIKELY( is_bundle & (txnm->block_engine.bundle_id!=ctx->bundle_id) ) ) {
    ctx->bundle_failed = 0;
    ctx->bundle_id = txnm->block_engine.bundle_id;
  }

  /* An earlier txn of this same bundle failed: drop the rest of it. */
  if( FD_UNLIKELY( is_bundle & (!!ctx->bundle_failed) ) ) {
    ctx->metrics.bundle_peer_fail_cnt++;
    return;
  }

  if( FD_UNLIKELY( !txnm->txn_t_sz ) ) {
    if( FD_UNLIKELY( is_bundle ) ) ctx->bundle_failed = 1;
    ctx->metrics.parse_fail_cnt++;
    return;
  }

  /* Users sometimes send transactions as part of a bundle (with a tip)
     and via the normal path (without a tip). Regardless of which
     arrives first, we want to pack the one with the tip. Thus, we
     exempt bundles from the normal HA dedup checks. The dedup tile
     will still do a full-bundle dedup check to make sure to drop any
     identical bundles. */
  ulong _txn_sig;
  int res = fd_txn_verify( ctx, fd_txn_m_payload( txnm ), txnm->payload_sz, txnt, !is_bundle, &_txn_sig );
  if( FD_UNLIKELY( res!=FD_TXN_VERIFY_SUCCESS ) ) {
    if( FD_UNLIKELY( is_bundle ) ) ctx->bundle_failed = 1;

    if( FD_LIKELY( res==FD_TXN_VERIFY_DEDUP ) ) ctx->metrics.dedup_fail_cnt++;
    else ctx->metrics.verify_fail_cnt++;

    return;
  }

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) ctx->metrics.gossiped_votes_cnt++;

  /* Publish only the realized (trimmed) footprint of the txn_m, then
     advance the out chunk past it for the next frag. */
  ulong realized_sz = fd_txn_m_realized_footprint( txnm, 1, 0 );
  ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
  fd_stem_publish( stem, 0UL, 0UL, ctx->out_chunk, realized_sz, 0UL, tsorig, tspub );
  ctx->out_chunk = fd_dcache_compact_next( ctx->out_chunk, realized_sz, ctx->out_chunk0, ctx->out_wmark );
}
162 :
/* privileged_init runs before the tile is sandboxed.  The only thing
   that needs privileges here is seeding the sigverify dedup hashmap
   from the system RNG (fd_rng_secure), which is unavailable once the
   seccomp policy below is installed.  The scratch allocation order
   must match scratch_footprint. */
static void
privileged_init( fd_topo_t *      topo,
                 fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_verify_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_verify_ctx_t ), sizeof( fd_verify_ctx_t ) );
  FD_TEST( fd_rng_secure( &ctx->hashmap_seed, 8U ) );
}
172 :
/* unprivileged_init sets up everything else after sandboxing: the
   dedup tcache, per-signature sha512 states, input link classification
   and chunk bounds, and the out dcache cursor.  The scratch allocation
   sequence must exactly mirror scratch_footprint (and privileged_init,
   which already consumed the ctx struct slot). */
static void
unprivileged_init( fd_topo_t *      topo,
                   fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_verify_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_verify_ctx_t ), sizeof( fd_verify_ctx_t ) );
  fd_tcache_t * tcache = fd_tcache_join( fd_tcache_new( FD_SCRATCH_ALLOC_APPEND( l, FD_TCACHE_ALIGN, FD_TCACHE_FOOTPRINT( tile->verify.tcache_depth, 0UL ) ), tile->verify.tcache_depth, 0UL ) );
  if( FD_UNLIKELY( !tcache ) ) FD_LOG_ERR(( "fd_tcache_join failed" ));

  /* Shard position of this tile among all verify tiles; used by
     before_frag for round-robin selection. */
  ctx->round_robin_cnt = fd_topo_tile_name_cnt( topo, tile->name );
  ctx->round_robin_idx = tile->kind_id;

  /* One sha512 state per possible signature so a txn's signatures can
     be verified without reinitializing. */
  for ( ulong i=0; i<FD_TXN_ACTUAL_SIG_MAX; i++ ) {
    fd_sha512_t * sha = fd_sha512_join( fd_sha512_new( FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_sha512_t ), sizeof( fd_sha512_t ) ) ) );
    if( FD_UNLIKELY( !sha ) ) FD_LOG_ERR(( "fd_sha512_join failed" ));
    ctx->sha[i] = sha;
  }

  /* No bundle seen yet (0UL is the "not a bundle" sentinel id). */
  ctx->bundle_failed = 0;
  ctx->bundle_id = 0UL;

  memset( &ctx->metrics, 0, sizeof( ctx->metrics ) );

  /* Cache the tcache's internals so the hot path avoids re-deriving
     them from the tcache header. */
  ctx->tcache_depth = fd_tcache_depth ( tcache );
  ctx->tcache_map_cnt = fd_tcache_map_cnt ( tcache );
  ctx->tcache_sync = fd_tcache_oldest_laddr( tcache );
  ctx->tcache_ring = fd_tcache_ring_laddr ( tcache );
  ctx->tcache_map = fd_tcache_map_laddr ( tcache );

  /* Classify each input link by name and record its dcache bounds for
     the chunk validation done in during_frag. */
  for( ulong i=0UL; i<tile->in_cnt; i++ ) {
    fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];

    fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
    ctx->in[i].mem = link_wksp->wksp;
    ctx->in[i].chunk0 = fd_dcache_compact_chunk0( ctx->in[i].mem, link->dcache );
    ctx->in[i].wmark = fd_dcache_compact_wmark ( ctx->in[i].mem, link->dcache, link->mtu );

    if( !strcmp( link->name, "quic_verify" ) ) ctx->in_kind[ i ] = IN_KIND_QUIC;
    else if( !strcmp( link->name, "bundle_verif" ) ) ctx->in_kind[ i ] = IN_KIND_BUNDLE;
    else if( !strcmp( link->name, "send_out" ) ) ctx->in_kind[ i ] = IN_KIND_SEND;
    else if( !strcmp( link->name, "gossip_out" ) ) ctx->in_kind[ i ] = IN_KIND_GOSSIP;
    else FD_LOG_ERR(( "unexpected link name %s", link->name ));
  }

  /* Single out link (index 0): record its wksp and chunk cursor. */
  ctx->out_mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 0 ] ].dcache_obj_id ].wksp_id ].wksp;
  ctx->out_chunk0 = fd_dcache_compact_chunk0( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache );
  ctx->out_wmark = fd_dcache_compact_wmark ( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache, topo->links[ tile->out_link_id[ 0 ] ].mtu );
  ctx->out_chunk = ctx->out_chunk0;

  /* Sanity check that allocations stayed within scratch_footprint. */
  ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
  if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
    FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
}
227 :
/* populate_allowed_seccomp installs the generated BPF filter for this
   tile into out, allowing only the syscalls the verify tile needs
   (plus writes to the log file descriptor).  Returns the instruction
   count of the filter. */
static ulong
populate_allowed_seccomp( fd_topo_t const *      topo,
                          fd_topo_tile_t const * tile,
                          ulong                  out_cnt,
                          struct sock_filter *   out ) {
  (void)topo;
  (void)tile;

  populate_sock_filter_policy_fd_verify_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
  return sock_filter_policy_fd_verify_tile_instr_cnt;
}
239 :
240 : static ulong
241 : populate_allowed_fds( fd_topo_t const * topo,
242 : fd_topo_tile_t const * tile,
243 : ulong out_fds_cnt,
244 3 : int * out_fds ) {
245 3 : (void)topo;
246 3 : (void)tile;
247 :
248 3 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
249 :
250 3 : ulong out_cnt = 0UL;
251 3 : out_fds[ out_cnt++ ] = 2; /* stderr */
252 3 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
253 3 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
254 3 : return out_cnt;
255 3 : }
256 :
/* Stem configuration: at most one frag is published per incoming frag
   (after_frag publishes zero or one), so a burst of 1 suffices. */
#define STEM_BURST (1UL)

#define STEM_CALLBACK_CONTEXT_TYPE fd_verify_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_verify_ctx_t)

/* Wire this tile's callbacks into the generic stem run loop. */
#define STEM_CALLBACK_METRICS_WRITE metrics_write
#define STEM_CALLBACK_BEFORE_FRAG before_frag
#define STEM_CALLBACK_DURING_FRAG during_frag
#define STEM_CALLBACK_AFTER_FRAG after_frag

#include "../stem/fd_stem.c"
268 :
/* Public tile descriptor consumed by the topology runner; excluded
   from unit-test builds which link the callbacks directly. */
#ifndef FD_TILE_TEST
fd_topo_run_tile_t fd_tile_verify = {
  .name = "verify",
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds = populate_allowed_fds,
  .scratch_align = scratch_align,
  .scratch_footprint = scratch_footprint,
  .privileged_init = privileged_init,
  .unprivileged_init = unprivileged_init,
  .run = stem_run,
};
#endif
|