#include "fd_verify_tile.h"
#include "../metrics/fd_metrics.h"
#include "generated/fd_verify_tile_seccomp.h"

#include <linux/unistd.h>

#define IN_KIND_QUIC   (0UL)
#define IN_KIND_BUNDLE (1UL)
#define IN_KIND_GOSSIP (2UL)

/* The verify tile is a wrapper around the mux tile that also verifies
   that incoming transaction signatures match the data being signed.
   Non-matching transactions are filtered out of the frag stream. */

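/* Scratch layout: the tile context, a tcache used for HA dedup of
   transaction signatures, and one fd_sha512 state for each of the
   FD_TXN_ACTUAL_SIG_MAX signatures a transaction can carry. */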
FD_FN_CONST static inline ulong
scratch_align( void ) {
  return FD_TCACHE_ALIGN;
}

FD_FN_PURE static inline ulong
scratch_footprint( fd_topo_tile_t const * tile ) {
  ulong l = FD_LAYOUT_INIT;
  l = FD_LAYOUT_APPEND( l, alignof( fd_verify_ctx_t ), sizeof( fd_verify_ctx_t ) );
  l = FD_LAYOUT_APPEND( l, fd_tcache_align(), fd_tcache_footprint( tile->verify.tcache_depth, 0UL ) );
  for( ulong i=0; i<FD_TXN_ACTUAL_SIG_MAX; i++ ) {
    l = FD_LAYOUT_APPEND( l, fd_sha512_align(), fd_sha512_footprint() );
  }
  return FD_LAYOUT_FINI( l, scratch_align() );
}

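/* metrics_write copies the tile's locally accumulated failure
   counters out to the shared metrics subsystem. */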
static inline void
metrics_write( fd_verify_ctx_t * ctx ) {
  FD_MCNT_SET( VERIFY, TRANSACTION_BUNDLE_PEER_FAILURE, ctx->metrics.bundle_peer_fail_cnt );
  FD_MCNT_SET( VERIFY, TRANSACTION_PARSE_FAILURE,       ctx->metrics.parse_fail_cnt );
  FD_MCNT_SET( VERIFY, TRANSACTION_DEDUP_FAILURE,       ctx->metrics.dedup_fail_cnt );
  FD_MCNT_SET( VERIFY, TRANSACTION_VERIFY_FAILURE,      ctx->metrics.verify_fail_cnt );
}

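/* before_frag returns non-zero to tell stem to skip a frag without
   copying it.  Regular transactions are round-robined across the
   verify tiles by sequence number; bundles are handled specially as
   described below. */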
static int
before_frag( fd_verify_ctx_t * ctx,
             ulong             in_idx,
             ulong             seq,
             ulong             sig ) {
  /* The bundle tile can produce both "bundles" and "packets".  A
     packet is a regular transaction and should be round-robined
     between verify tiles, while bundles currently need to go through
     verify:0 to prevent interleaving of bundle streams. */
  int is_bundle_packet = (ctx->in_kind[ in_idx ]==IN_KIND_BUNDLE && !sig);

  if( FD_LIKELY( is_bundle_packet || ctx->in_kind[ in_idx ]==IN_KIND_QUIC || ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
    return (seq % ctx->round_robin_cnt) != ctx->round_robin_idx;
  } else if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_BUNDLE ) ) {
    return ctx->round_robin_idx!=0UL;
  }

  return 0;
}

/* during_frag is called between pairs of sequence number checks, as
   we are reading incoming frags.  We don't actually need to copy the
   fragment here; see fd_dedup.c for why we do this. */

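/* Note that the copy performed here is speculative: the producer may
   overrun us while we read, in which case stem's surrounding sequence
   number checks catch the overrun and after_frag is never called for
   this frag. */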
static inline void
during_frag( fd_verify_ctx_t * ctx,
             ulong             in_idx,
             ulong             seq    FD_PARAM_UNUSED,
             ulong             sig    FD_PARAM_UNUSED,
             ulong             chunk,
             ulong             sz,
             ulong             ctl    FD_PARAM_UNUSED ) {

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_QUIC || ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
    if( FD_UNLIKELY( chunk<ctx->in[in_idx].chunk0 || chunk>ctx->in[in_idx].wmark || sz>FD_TPU_MTU ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[in_idx].chunk0, ctx->in[in_idx].wmark ));

    uchar * src      = (uchar *)fd_chunk_to_laddr( ctx->in[in_idx].mem, chunk );
    fd_txn_m_t * dst = (fd_txn_m_t *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );

    dst->payload_sz             = (ushort)sz;
    dst->block_engine.bundle_id = 0UL;
    fd_memcpy( fd_txn_m_payload( dst ), src, sz );
  } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_BUNDLE ) ) {
    if( FD_UNLIKELY( chunk<ctx->in[in_idx].chunk0 || chunk>ctx->in[in_idx].wmark || sz>FD_TPU_RAW_MTU ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu,%lu]", chunk, sz, ctx->in[in_idx].chunk0, ctx->in[in_idx].wmark, FD_TPU_RAW_MTU ));

    uchar * src = (uchar *)fd_chunk_to_laddr( ctx->in[in_idx].mem, chunk );
    uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
    fd_memcpy( dst, src, sz );

    fd_txn_m_t const * txnm = (fd_txn_m_t const *)dst;
    if( FD_UNLIKELY( txnm->payload_sz>FD_TPU_MTU ) ) {
      FD_LOG_ERR(( "fd_verify: txn payload size %hu exceeds max %lu", txnm->payload_sz, FD_TPU_MTU ));
    }
  }
}

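/* after_frag runs once the frag is known good (no overrun during the
   copy).  It parses the copied transaction in place, applies the
   bundle failure tracking, dedup, and signature verification checks,
   and publishes transactions that pass all checks downstream. */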
static inline void
after_frag( fd_verify_ctx_t *   ctx,
            ulong               in_idx,
            ulong               seq,
            ulong               sig,
            ulong               sz,
            ulong               tsorig,
            ulong               _tspub,
            fd_stem_context_t * stem ) {
  (void)in_idx;
  (void)seq;
  (void)sig;
  (void)sz;
  (void)_tspub;

  fd_txn_m_t * txnm = (fd_txn_m_t *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
  fd_txn_t * txnt   = fd_txn_m_txn_t( txnm );
  txnm->txn_t_sz = (ushort)fd_txn_parse( fd_txn_m_payload( txnm ), txnm->payload_sz, txnt, NULL );

  int is_bundle = !!txnm->block_engine.bundle_id;

  if( FD_UNLIKELY( is_bundle & (txnm->block_engine.bundle_id!=ctx->bundle_id) ) ) {
    ctx->bundle_failed = 0;
    ctx->bundle_id     = txnm->block_engine.bundle_id;
  }

  if( FD_UNLIKELY( is_bundle & (!!ctx->bundle_failed) ) ) {
    ctx->metrics.bundle_peer_fail_cnt++;
    return;
  }

  if( FD_UNLIKELY( !txnm->txn_t_sz ) ) {
    if( FD_UNLIKELY( is_bundle ) ) ctx->bundle_failed = 1;
    ctx->metrics.parse_fail_cnt++;
    return;
  }

  /* Users sometimes send transactions as part of a bundle (with a tip)
     and via the normal path (without a tip).  Regardless of which
     arrives first, we want to pack the one with the tip.  Thus, we
     exempt bundles from the normal HA dedup checks.  The dedup tile
     will still do a full-bundle dedup check to make sure to drop any
     identical bundles. */
  ulong _txn_sig;
  int res = fd_txn_verify( ctx, fd_txn_m_payload( txnm ), txnm->payload_sz, txnt, !is_bundle, &_txn_sig );
  if( FD_UNLIKELY( res!=FD_TXN_VERIFY_SUCCESS ) ) {
    if( FD_UNLIKELY( is_bundle ) ) ctx->bundle_failed = 1;

    if( FD_LIKELY( res==FD_TXN_VERIFY_DEDUP ) ) ctx->metrics.dedup_fail_cnt++;
    else                                        ctx->metrics.verify_fail_cnt++;

    return;
  }

  ulong realized_sz = fd_txn_m_realized_footprint( txnm, 1, 0 );
  ulong tspub       = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
  fd_stem_publish( stem, 0UL, 0UL, ctx->out_chunk, realized_sz, 0UL, tsorig, tspub );
  ctx->out_chunk = fd_dcache_compact_next( ctx->out_chunk, realized_sz, ctx->out_chunk0, ctx->out_wmark );
}

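/* privileged_init runs before the sandbox is installed.  The only
   privileged work this tile needs is drawing a secure random seed for
   the signature dedup hashmap. */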
static void
privileged_init( fd_topo_t *      topo,
                 fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_verify_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_verify_ctx_t ), sizeof( fd_verify_ctx_t ) );
  FD_TEST( fd_rng_secure( &ctx->hashmap_seed, 8U ) );
}

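/* unprivileged_init lays out the rest of the scratch region (tcache
   and sha512 states), derives this tile's round-robin position from
   the topology, and caches the in/out dcache bounds used by the frag
   callbacks above. */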
static void
unprivileged_init( fd_topo_t *      topo,
                   fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_verify_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_verify_ctx_t ), sizeof( fd_verify_ctx_t ) );
  fd_tcache_t * tcache = fd_tcache_join( fd_tcache_new( FD_SCRATCH_ALLOC_APPEND( l, FD_TCACHE_ALIGN, FD_TCACHE_FOOTPRINT( tile->verify.tcache_depth, 0UL ) ), tile->verify.tcache_depth, 0UL ) );
  if( FD_UNLIKELY( !tcache ) ) FD_LOG_ERR(( "fd_tcache_join failed" ));

  ctx->round_robin_cnt = fd_topo_tile_name_cnt( topo, tile->name );
  ctx->round_robin_idx = tile->kind_id;

  for( ulong i=0; i<FD_TXN_ACTUAL_SIG_MAX; i++ ) {
    fd_sha512_t * sha = fd_sha512_join( fd_sha512_new( FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_sha512_t ), sizeof( fd_sha512_t ) ) ) );
    if( FD_UNLIKELY( !sha ) ) FD_LOG_ERR(( "fd_sha512_join failed" ));
    ctx->sha[i] = sha;
  }

  ctx->bundle_failed = 0;
  ctx->bundle_id     = 0UL;

  memset( &ctx->metrics, 0, sizeof( ctx->metrics ) );

  ctx->tcache_depth   = fd_tcache_depth       ( tcache );
  ctx->tcache_map_cnt = fd_tcache_map_cnt     ( tcache );
  ctx->tcache_sync    = fd_tcache_oldest_laddr( tcache );
  ctx->tcache_ring    = fd_tcache_ring_laddr  ( tcache );
  ctx->tcache_map     = fd_tcache_map_laddr   ( tcache );

  for( ulong i=0UL; i<tile->in_cnt; i++ ) {
    fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];

    fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
    ctx->in[i].mem    = link_wksp->wksp;
    ctx->in[i].chunk0 = fd_dcache_compact_chunk0( ctx->in[i].mem, link->dcache );
    ctx->in[i].wmark  = fd_dcache_compact_wmark ( ctx->in[i].mem, link->dcache, link->mtu );

    if(      FD_UNLIKELY( !strcmp( link->name, "quic_verify"  ) ) ) ctx->in_kind[ i ] = IN_KIND_QUIC;
    else if( FD_UNLIKELY( !strcmp( link->name, "bundle_verif" ) ) ) ctx->in_kind[ i ] = IN_KIND_BUNDLE;
    else if( FD_UNLIKELY( !strcmp( link->name, "gossip_verif" ) ) ) ctx->in_kind[ i ] = IN_KIND_GOSSIP;
    else FD_LOG_ERR(( "unexpected link name %s", link->name ));
  }

  ctx->out_mem    = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 0 ] ].dcache_obj_id ].wksp_id ].wksp;
  ctx->out_chunk0 = fd_dcache_compact_chunk0( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache );
  ctx->out_wmark  = fd_dcache_compact_wmark ( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache, topo->links[ tile->out_link_id[ 0 ] ].mtu );
  ctx->out_chunk  = ctx->out_chunk0;

  ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
  if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
    FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
}

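/* After boot the verify tile only needs to be able to write to its
   log, so the seccomp allowlist (the generated policy below) and the
   allowed file descriptors (stderr plus the logfile) are kept
   minimal. */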
static ulong
populate_allowed_seccomp( fd_topo_t const *      topo,
                          fd_topo_tile_t const * tile,
                          ulong                  out_cnt,
                          struct sock_filter *   out ) {
  (void)topo;
  (void)tile;

  populate_sock_filter_policy_fd_verify_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
  return sock_filter_policy_fd_verify_tile_instr_cnt;
}

static ulong
populate_allowed_fds( fd_topo_t const *      topo,
                      fd_topo_tile_t const * tile,
                      ulong                  out_fds_cnt,
                      int *                  out_fds ) {
  (void)topo;
  (void)tile;

  if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));

  ulong out_cnt = 0UL;
  out_fds[ out_cnt++ ] = 2; /* stderr */
  if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
    out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
  return out_cnt;
}

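/* STEM_BURST is 1 because this tile publishes at most one frag (in
   after_frag) for each frag it consumes. */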
#define STEM_BURST (1UL)

#define STEM_CALLBACK_CONTEXT_TYPE  fd_verify_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_verify_ctx_t)

#define STEM_CALLBACK_METRICS_WRITE metrics_write
#define STEM_CALLBACK_BEFORE_FRAG   before_frag
#define STEM_CALLBACK_DURING_FRAG   during_frag
#define STEM_CALLBACK_AFTER_FRAG    after_frag

#include "../stem/fd_stem.c"

fd_topo_run_tile_t fd_tile_verify = {
  .name                     = "verify",
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds     = populate_allowed_fds,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};