#include "fd_verify_tile.h"
#include "../fd_txn_m_t.h"
#include "../metrics/fd_metrics.h"
#include "generated/fd_verify_tile_seccomp.h"
#include "../../flamenco/gossip/fd_gossip_types.h"

#define IN_KIND_QUIC   (0UL)
#define IN_KIND_BUNDLE (1UL)
#define IN_KIND_GOSSIP (2UL)
#define IN_KIND_SEND   (3UL)

FD_FN_CONST static inline ulong
scratch_align( void ) {
  return FD_TCACHE_ALIGN;
}

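/* Scratch layout: the tile context, the dedup tcache, and one
   fd_sha512_t state per signature a transaction can carry
   (FD_TXN_ACTUAL_SIG_MAX), so the signature verification hot path
   never needs to allocate. */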
FD_FN_PURE static inline ulong
scratch_footprint( fd_topo_tile_t const * tile ) {
  ulong l = FD_LAYOUT_INIT;
  l = FD_LAYOUT_APPEND( l, alignof( fd_verify_ctx_t ), sizeof( fd_verify_ctx_t ) );
  l = FD_LAYOUT_APPEND( l, fd_tcache_align(), fd_tcache_footprint( tile->verify.tcache_depth, 0UL ) );
  for( ulong i=0; i<FD_TXN_ACTUAL_SIG_MAX; i++ ) {
    l = FD_LAYOUT_APPEND( l, fd_sha512_align(), fd_sha512_footprint() );
  }
  return FD_LAYOUT_FINI( l, scratch_align() );
}

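/* Called periodically from stem housekeeping to snapshot this tile's
   counters into the shared metrics workspace. */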
static inline void
metrics_write( fd_verify_ctx_t * ctx ) {
  FD_MCNT_SET( VERIFY, TRANSACTION_BUNDLE_PEER_FAILURE, ctx->metrics.bundle_peer_fail_cnt );
  FD_MCNT_SET( VERIFY, TRANSACTION_PARSE_FAILURE,       ctx->metrics.parse_fail_cnt );
  FD_MCNT_SET( VERIFY, TRANSACTION_DEDUP_FAILURE,       ctx->metrics.dedup_fail_cnt );
  FD_MCNT_SET( VERIFY, TRANSACTION_VERIFY_FAILURE,      ctx->metrics.verify_fail_cnt );
}

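/* Returning non-zero from before_frag tells stem to skip the fragment
   without copying it; this is how the incoming transaction firehose is
   partitioned across the verify tiles. */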
static int
before_frag( fd_verify_ctx_t * ctx,
             ulong             in_idx,
             ulong             seq,
             ulong             sig ) {
  /* The bundle tile can produce both "bundles" and "packets".  A
     packet is a regular transaction and is round-robined between
     verify tiles (e.g. with 3 verify tiles, verify:1 keeps only frags
     with seq%3==1), while bundles currently all go through verify:0
     to prevent interleaving of bundle streams. */
  int is_bundle_packet = (ctx->in_kind[ in_idx ]==IN_KIND_BUNDLE && !sig);

  if( FD_LIKELY( is_bundle_packet || ctx->in_kind[ in_idx ]==IN_KIND_QUIC ) ) {
    return (seq % ctx->round_robin_cnt) != ctx->round_robin_idx;
  } else if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_BUNDLE ) ) {
    return ctx->round_robin_idx!=0UL;
  } else if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
    return (seq % ctx->round_robin_cnt) != ctx->round_robin_idx ||
           sig!=FD_GOSSIP_UPDATE_TAG_VOTE;
  }

  return 0;
}

/* during_frag is called between the pair of sequence number checks
   that bracket a speculative read of an incoming frag.  We don't
   strictly need to copy the fragment at this point; see fd_dedup.c
   for why we do it anyway. */

static inline void
during_frag( fd_verify_ctx_t * ctx,
             ulong             in_idx,
             ulong             seq FD_PARAM_UNUSED,
             ulong             sig FD_PARAM_UNUSED,
             ulong             chunk,
             ulong             sz,
             ulong             ctl FD_PARAM_UNUSED ) {

  ulong in_kind = ctx->in_kind[ in_idx ];
  if( FD_UNLIKELY( in_kind==IN_KIND_BUNDLE || in_kind==IN_KIND_QUIC || in_kind==IN_KIND_SEND ) ) {
    if( FD_UNLIKELY( chunk<ctx->in[in_idx].chunk0 || chunk>ctx->in[in_idx].wmark || sz>FD_TPU_RAW_MTU ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu,%lu]", chunk, sz, ctx->in[in_idx].chunk0, ctx->in[in_idx].wmark, FD_TPU_RAW_MTU ));

    uchar * src = (uchar *)fd_chunk_to_laddr( ctx->in[in_idx].mem, chunk );
    uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
    fd_memcpy( dst, src, sz );

    fd_txn_m_t const * txnm = (fd_txn_m_t const *)dst;
    if( FD_UNLIKELY( txnm->payload_sz>FD_TPU_MTU ) ) {
      FD_LOG_ERR(( "fd_verify: txn payload size %hu exceeds max %lu", txnm->payload_sz, FD_TPU_MTU ));
    }
  } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
    if( FD_UNLIKELY( chunk<ctx->in[in_idx].chunk0 || chunk>ctx->in[in_idx].wmark || sz>2048UL ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[in_idx].chunk0, ctx->in[in_idx].wmark ));

    fd_gossip_update_message_t const * msg = (fd_gossip_update_message_t const *)fd_chunk_to_laddr_const( ctx->in[in_idx].mem, chunk );
    fd_txn_m_t * dst = (fd_txn_m_t *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );

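    /* Gossip votes arrive as bare transaction bytes, so wrap them in
       an fd_txn_m_t header here to match what the rest of the pipeline
       expects.  A bundle_id of 0 marks the transaction as not being
       part of a bundle. */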
    dst->payload_sz             = (ushort)msg->vote.txn_sz;
    dst->block_engine.bundle_id = 0UL;
    fd_memcpy( fd_txn_m_payload( dst ), msg->vote.txn, msg->vote.txn_sz );
  }
}


static inline void
after_frag( fd_verify_ctx_t *   ctx,
            ulong               in_idx,
            ulong               seq,
            ulong               sig,
            ulong               sz,
            ulong               tsorig,
            ulong               _tspub,
            fd_stem_context_t * stem ) {
  (void)in_idx;
  (void)seq;
  (void)sig;
  (void)sz;
  (void)_tspub;

  fd_txn_m_t * txnm = (fd_txn_m_t *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
  fd_txn_t *   txnt = fd_txn_m_txn_t( txnm );
  txnm->txn_t_sz = (ushort)fd_txn_parse( fd_txn_m_payload( txnm ), txnm->payload_sz, txnt, NULL );

  int is_bundle = !!txnm->block_engine.bundle_id;

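  /* Bundles must land atomically, so once any transaction in the
     current bundle fails, the remainder of that bundle is dropped as
     well.  bundle_id identifies the bundle in flight and bundle_failed
     is reset whenever a new one begins. */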
  if( FD_UNLIKELY( is_bundle & (txnm->block_engine.bundle_id!=ctx->bundle_id) ) ) {
    ctx->bundle_failed = 0;
    ctx->bundle_id     = txnm->block_engine.bundle_id;
  }

  if( FD_UNLIKELY( is_bundle & (!!ctx->bundle_failed) ) ) {
    ctx->metrics.bundle_peer_fail_cnt++;
    return;
  }

  if( FD_UNLIKELY( !txnm->txn_t_sz ) ) {
    if( FD_UNLIKELY( is_bundle ) ) ctx->bundle_failed = 1;
    ctx->metrics.parse_fail_cnt++;
    return;
  }

  /* Users sometimes send transactions as part of a bundle (with a tip)
     and via the normal path (without a tip).  Regardless of which
     arrives first, we want to pack the one with the tip.  Thus, we
     exempt bundles from the normal HA dedup checks.  The dedup tile
     will still do a full-bundle dedup check to make sure to drop any
     identical bundles. */
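  /* fd_txn_verify checks the transaction's Ed25519 signatures and,
     when the dedup flag (here !is_bundle) is set, also consults the
     tcache so duplicate transactions are dropped early. */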
  ulong _txn_sig;
  int res = fd_txn_verify( ctx, fd_txn_m_payload( txnm ), txnm->payload_sz, txnt, !is_bundle, &_txn_sig );
  if( FD_UNLIKELY( res!=FD_TXN_VERIFY_SUCCESS ) ) {
    if( FD_UNLIKELY( is_bundle ) ) ctx->bundle_failed = 1;

    if( FD_LIKELY( res==FD_TXN_VERIFY_DEDUP ) ) ctx->metrics.dedup_fail_cnt++;
    else                                        ctx->metrics.verify_fail_cnt++;

    return;
  }

  ulong realized_sz = fd_txn_m_realized_footprint( txnm, 1, 0 );
  ulong tspub       = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
  fd_stem_publish( stem, 0UL, 0UL, ctx->out_chunk, realized_sz, 0UL, tsorig, tspub );
  ctx->out_chunk = fd_dcache_compact_next( ctx->out_chunk, realized_sz, ctx->out_chunk0, ctx->out_wmark );
}

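/* privileged_init runs before the tile is sandboxed, so the hashmap
   seed can still be drawn from the kernel's secure RNG here. */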
static void
privileged_init( fd_topo_t *      topo,
                 fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_verify_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_verify_ctx_t ), sizeof( fd_verify_ctx_t ) );
  FD_TEST( fd_rng_secure( &ctx->hashmap_seed, 8U ) );
}

static void
unprivileged_init( fd_topo_t *      topo,
                   fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_verify_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_verify_ctx_t ), sizeof( fd_verify_ctx_t ) );
  fd_tcache_t * tcache = fd_tcache_join( fd_tcache_new( FD_SCRATCH_ALLOC_APPEND( l, FD_TCACHE_ALIGN, FD_TCACHE_FOOTPRINT( tile->verify.tcache_depth, 0UL ) ), tile->verify.tcache_depth, 0UL ) );
  if( FD_UNLIKELY( !tcache ) ) FD_LOG_ERR(( "fd_tcache_join failed" ));

  ctx->round_robin_cnt = fd_topo_tile_name_cnt( topo, tile->name );
  ctx->round_robin_idx = tile->kind_id;

  for( ulong i=0; i<FD_TXN_ACTUAL_SIG_MAX; i++ ) {
    fd_sha512_t * sha = fd_sha512_join( fd_sha512_new( FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_sha512_t ), sizeof( fd_sha512_t ) ) ) );
    if( FD_UNLIKELY( !sha ) ) FD_LOG_ERR(( "fd_sha512_join failed" ));
    ctx->sha[i] = sha;
  }

  ctx->bundle_failed = 0;
  ctx->bundle_id     = 0UL;

  memset( &ctx->metrics, 0, sizeof( ctx->metrics ) );

  ctx->tcache_depth   = fd_tcache_depth       ( tcache );
  ctx->tcache_map_cnt = fd_tcache_map_cnt     ( tcache );
  ctx->tcache_sync    = fd_tcache_oldest_laddr( tcache );
  ctx->tcache_ring    = fd_tcache_ring_laddr  ( tcache );
  ctx->tcache_map     = fd_tcache_map_laddr   ( tcache );

  for( ulong i=0UL; i<tile->in_cnt; i++ ) {
    fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];

    fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
    ctx->in[i].mem    = link_wksp->wksp;
    ctx->in[i].chunk0 = fd_dcache_compact_chunk0( ctx->in[i].mem, link->dcache );
    ctx->in[i].wmark  = fd_dcache_compact_wmark ( ctx->in[i].mem, link->dcache, link->mtu );

    if(      !strcmp( link->name, "quic_verify"  ) ) ctx->in_kind[ i ] = IN_KIND_QUIC;
    else if( !strcmp( link->name, "bundle_verif" ) ) ctx->in_kind[ i ] = IN_KIND_BUNDLE;
    else if( !strcmp( link->name, "send_txns"    ) ) ctx->in_kind[ i ] = IN_KIND_SEND;
    else if( !strcmp( link->name, "gossip_out"   ) ) ctx->in_kind[ i ] = IN_KIND_GOSSIP;
    else FD_LOG_ERR(( "unexpected link name %s", link->name ));
  }

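  /* All verified transactions leave on the tile's single output link,
     which (in the standard topology) feeds the dedup tile. */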
  ctx->out_mem    = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 0 ] ].dcache_obj_id ].wksp_id ].wksp;
  ctx->out_chunk0 = fd_dcache_compact_chunk0( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache );
  ctx->out_wmark  = fd_dcache_compact_wmark ( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache, topo->links[ tile->out_link_id[ 0 ] ].mtu );
  ctx->out_chunk  = ctx->out_chunk0;

  ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
  if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
    FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
}

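/* The verify tile opens no sockets or files of its own, so the seccomp
   policy and fd allowlist below only need to cover logging. */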
static ulong
populate_allowed_seccomp( fd_topo_t const *      topo,
                          fd_topo_tile_t const * tile,
                          ulong                  out_cnt,
                          struct sock_filter *   out ) {
  (void)topo;
  (void)tile;

  populate_sock_filter_policy_fd_verify_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
  return sock_filter_policy_fd_verify_tile_instr_cnt;
}

static ulong
populate_allowed_fds( fd_topo_t const *      topo,
                      fd_topo_tile_t const * tile,
                      ulong                  out_fds_cnt,
                      int *                  out_fds ) {
  (void)topo;
  (void)tile;

  if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));

  ulong out_cnt = 0UL;
  out_fds[ out_cnt++ ] = 2; /* stderr */
  if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
    out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
  return out_cnt;
}

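/* after_frag publishes at most one downstream fragment per input
   fragment, so a burst of 1 is sufficient. */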
#define STEM_BURST (1UL)

#define STEM_CALLBACK_CONTEXT_TYPE  fd_verify_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_verify_ctx_t)

#define STEM_CALLBACK_METRICS_WRITE metrics_write
#define STEM_CALLBACK_BEFORE_FRAG   before_frag
#define STEM_CALLBACK_DURING_FRAG   during_frag
#define STEM_CALLBACK_AFTER_FRAG    after_frag

#include "../stem/fd_stem.c"

#ifndef FD_TILE_TEST
fd_topo_run_tile_t fd_tile_verify = {
  .name                     = "verify",
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds     = populate_allowed_fds,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};
#endif