Line data Source code
1 :
2 : #include "../fd_txn_m.h"
3 : #include "generated/fd_dedup_tile_seccomp.h"
4 :
5 : #include "../topo/fd_topo.h"
6 : #include "../metrics/fd_metrics.h"
7 :
8 : /* fd_dedup provides services to deduplicate multiple streams of input
9 : fragments and present them to a mix of reliable and unreliable
10 : consumers as though they were generated by a single multi-stream
11 : producer. */
12 :
13 0 : #define IN_KIND_GOSSIP (0UL)
14 0 : #define IN_KIND_VERIFY (1UL)
15 0 : #define IN_KIND_EXECUTED_TXN (2UL)
16 :
/* fd_dedup_in_ctx_t is a context object for each in (producer) mcache
   connected to the dedup tile.  It caches the parameters needed to
   translate and bounds-check chunk indices arriving on that link. */

typedef struct {
  fd_wksp_t * mem;    /* workspace backing the link's dcache */
  ulong       chunk0; /* lowest valid chunk index in the dcache */
  ulong       wmark;  /* highest valid chunk index in the dcache */
  ulong       mtu;    /* largest fragment size this link may carry */
} fd_dedup_in_ctx_t;
26 :
/* fd_dedup_ctx_t is the context object provided to callbacks from
   fd_stem, and contains all state needed to progress the tile. */

typedef struct {
  ulong   tcache_depth;   /* == fd_tcache_depth( tcache ), depth of this dedups's tcache (const) */
  ulong   tcache_map_cnt; /* == fd_tcache_map_cnt( tcache ), number of slots to use for tcache map (const) */
  ulong * tcache_sync;    /* == fd_tcache_oldest_laddr( tcache ), local join to the oldest key in the tcache */
  ulong * tcache_ring;    /* == fd_tcache_ring_laddr( tcache ) */
  ulong * tcache_map;     /* == fd_tcache_map_laddr( tcache ) */

  ulong in_kind[ 64UL ];       /* IN_KIND_* of each in link; at most 64 in links (checked in unprivileged_init) */
  fd_dedup_in_ctx_t in[ 64UL ]; /* per in-link dcache translation parameters, parallel to in_kind */

  int   bundle_failed;             /* non-zero if a txn of the in-progress bundle failed dedup */
  ulong bundle_id;                 /* block engine bundle id of the bundle currently in progress */
  ulong bundle_idx;                /* count of txns seen so far in the current bundle */
  uchar bundle_signatures[ 4UL ][ 64UL ]; /* signatures of up to the first 4 txns of the current bundle */

  fd_wksp_t * out_mem;   /* workspace backing the out dcache */
  ulong       out_chunk0; /* lowest valid out chunk index */
  ulong       out_wmark;  /* highest valid out chunk index */
  ulong       out_chunk;  /* next out chunk to write / publish */

  ulong hashmap_seed; /* random fd_hash seed, drawn in privileged_init */

  struct {
    ulong bundle_peer_failure_cnt; /* txns dropped because a peer in their bundle failed dedup */
    ulong dedup_fail_cnt;          /* txns dropped as duplicates */
  } metrics;
} fd_dedup_ctx_t;
57 :
/* scratch_align returns the alignment required for this tile's scratch
   region: that of the fd_dedup_ctx_t placed at its start. */

FD_FN_CONST static inline ulong
scratch_align( void ) {
  return alignof( fd_dedup_ctx_t );
}
62 :
63 : FD_FN_PURE static inline ulong
64 0 : scratch_footprint( fd_topo_tile_t const * tile ) {
65 0 : ulong l = FD_LAYOUT_INIT;
66 0 : l = FD_LAYOUT_APPEND( l, alignof( fd_dedup_ctx_t ), sizeof( fd_dedup_ctx_t ) );
67 0 : l = FD_LAYOUT_APPEND( l, fd_tcache_align(), fd_tcache_footprint( tile->dedup.tcache_depth, 0UL ) );
68 0 : return FD_LAYOUT_FINI( l, scratch_align() );
69 0 : }
70 :
71 : static inline void
72 0 : metrics_write( fd_dedup_ctx_t * ctx ) {
73 0 : FD_MCNT_SET( DEDUP, TRANSACTION_BUNDLE_PEER_FAILURE, ctx->metrics.bundle_peer_failure_cnt );
74 0 : FD_MCNT_SET( DEDUP, TRANSACTION_DEDUP_FAILURE, ctx->metrics.dedup_fail_cnt );
75 0 : }
76 :
77 : /* during_frag is called between pairs for sequence number checks, as
78 : we are reading incoming frags. We don't actually need to copy the
79 : fragment here, flow control prevents it getting overrun, and
80 : downstream consumers could reuse the same chunk and workspace to
81 : improve performance.
82 :
83 : The bounds checking and copying here are defensive measures,
84 :
85 : * In a functioning system, the bounds checking should never fail,
86 : but we want to prevent an attacker with code execution on a producer
87 : tile from trivially being able to jump to a consumer tile with
88 : out of bounds chunks.
89 :
90 : * For security reasons, we have chosen to isolate all workspaces from
91 : one another, so for example, if the QUIC tile is compromised with
92 : RCE, it cannot wait until the sigverify tile has verified a transaction,
93 : and then overwrite the transaction while it's being processed by the
94 : banking stage. */
95 :
96 : static inline void
97 : during_frag( fd_dedup_ctx_t * ctx,
98 : ulong in_idx,
99 : ulong seq FD_PARAM_UNUSED,
100 : ulong sig FD_PARAM_UNUSED,
101 : ulong chunk,
102 : ulong sz,
103 0 : ulong ctl FD_PARAM_UNUSED ) {
104 :
105 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>ctx->in[ in_idx ].mtu ) )
106 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
107 :
108 0 : uchar * src = (uchar *)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
109 0 : uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
110 :
111 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
112 0 : if( FD_UNLIKELY( sz>FD_TPU_RAW_MTU ) ) FD_LOG_ERR(( "received a gossip transaction that was too large" ));
113 0 : fd_memcpy( dst, src, sz );
114 :
115 0 : fd_txn_m_t const * txnm = (fd_txn_m_t const *)dst;
116 0 : if( FD_UNLIKELY( txnm->payload_sz>FD_TPU_MTU ) ) {
117 0 : FD_LOG_ERR(( "vote txn payload size %hu exceeds max %lu", txnm->payload_sz, FD_TPU_MTU ));
118 0 : }
119 0 : } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EXECUTED_TXN ) ) {
120 0 : if( FD_UNLIKELY( sz!=FD_TXN_SIGNATURE_SZ ) ) FD_LOG_ERR(( "received an executed transaction signature message with the wrong size %lu", sz ));
121 : /* Executed txns just have their signature inserted into the tcache
122 : so we can dedup them easily. */
123 0 : ulong ha_dedup_tag = fd_hash( ctx->hashmap_seed, src, FD_TXN_SIGNATURE_SZ );
124 0 : int _is_dup;
125 0 : FD_TCACHE_INSERT( _is_dup, *ctx->tcache_sync, ctx->tcache_ring, ctx->tcache_depth, ctx->tcache_map, ctx->tcache_map_cnt, ha_dedup_tag );
126 0 : (void)_is_dup;
127 0 : } else {
128 0 : fd_memcpy( dst, src, sz );
129 0 : }
130 0 : }
131 :
132 : /* After the transaction has been fully received, and we know we were
133 : not overrun while reading it, check if it's a duplicate of a prior
134 : transaction.
135 :
136 : If the transaction came in from the gossip link, then it hasn't been
137 : parsed by us. So parse it here if necessary. */
138 :
139 : static inline void
140 : after_frag( fd_dedup_ctx_t * ctx,
141 : ulong in_idx,
142 : ulong seq,
143 : ulong sig,
144 : ulong sz,
145 : ulong tsorig,
146 : ulong _tspub,
147 0 : fd_stem_context_t * stem ) {
148 0 : (void)seq;
149 0 : (void)sig;
150 0 : (void)sz;
151 0 : (void)_tspub;
152 :
153 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EXECUTED_TXN ) ) return;
154 :
155 0 : fd_txn_m_t * txnm = (fd_txn_m_t *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
156 0 : FD_TEST( txnm->payload_sz<=FD_TPU_MTU );
157 0 : fd_txn_t * txn = fd_txn_m_txn_t( txnm );
158 :
159 0 : if( FD_UNLIKELY( txnm->block_engine.bundle_id && (txnm->block_engine.bundle_id!=ctx->bundle_id) ) ) {
160 0 : ctx->bundle_failed = 0;
161 0 : ctx->bundle_id = txnm->block_engine.bundle_id;
162 0 : ctx->bundle_idx = 0UL;
163 0 : }
164 :
165 0 : if( FD_UNLIKELY( txnm->block_engine.bundle_id && ctx->bundle_failed ) ) {
166 0 : ctx->metrics.bundle_peer_failure_cnt++;
167 0 : return;
168 0 : }
169 :
170 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
171 : /* Transactions coming in from these links are not parsed.
172 :
173 : We'll need to parse it so it's ready for downstream consumers.
174 : Equally importantly, we need to parse to extract the signature
175 : for dedup. Just parse it right into the output dcache. */
176 0 : txnm->txn_t_sz = (ushort)fd_txn_parse( fd_txn_m_payload( txnm ), txnm->payload_sz, txn, NULL );
177 0 : if( FD_UNLIKELY( !txnm->txn_t_sz ) ) FD_LOG_ERR(( "fd_txn_parse failed for vote transactions that should have been sigverified" ));
178 :
179 0 : FD_MCNT_INC( DEDUP, GOSSIPED_VOTES_RECEIVED, 1UL );
180 0 : }
181 :
182 0 : int is_dup = 0;
183 0 : if( FD_LIKELY( !txnm->block_engine.bundle_id ) ) {
184 : /* Compute fd_hash(signature) for dedup. */
185 0 : ulong ha_dedup_tag = fd_hash( ctx->hashmap_seed, fd_txn_m_payload( txnm )+txn->signature_off, 64UL );
186 :
187 0 : FD_TCACHE_INSERT( is_dup, *ctx->tcache_sync, ctx->tcache_ring, ctx->tcache_depth, ctx->tcache_map, ctx->tcache_map_cnt, ha_dedup_tag );
188 0 : } else {
189 : /* Make sure bundles don't contain a duplicate transaction inside
190 : the bundle, which would not be valid. */
191 :
192 0 : if( FD_UNLIKELY( ctx->bundle_idx>4UL ) ) FD_LOG_ERR(( "bundle_idx %lu > 4", ctx->bundle_idx ));
193 :
194 0 : for( ulong i=0UL; i<ctx->bundle_idx; i++ ) {
195 0 : if( !memcmp( ctx->bundle_signatures[ i ], fd_txn_m_payload( txnm )+txn->signature_off, 64UL ) ) {
196 0 : is_dup = 1;
197 0 : break;
198 0 : }
199 0 : }
200 :
201 0 : if( FD_UNLIKELY( ctx->bundle_idx==4UL ) ) ctx->bundle_idx++;
202 0 : else fd_memcpy( ctx->bundle_signatures[ ctx->bundle_idx++ ], fd_txn_m_payload( txnm )+txn->signature_off, 64UL );
203 0 : }
204 :
205 0 : if( FD_LIKELY( is_dup ) ) {
206 0 : if( FD_UNLIKELY( txnm->block_engine.bundle_id ) ) ctx->bundle_failed = 1;
207 :
208 0 : ctx->metrics.dedup_fail_cnt++;
209 0 : } else {
210 0 : ulong realized_sz = fd_txn_m_realized_footprint( txnm, 1, 0 );
211 0 : ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
212 0 : fd_stem_publish( stem, 0UL, 0, ctx->out_chunk, realized_sz, 0UL, tsorig, tspub );
213 0 : ctx->out_chunk = fd_dcache_compact_next( ctx->out_chunk, realized_sz, ctx->out_chunk0, ctx->out_wmark );
214 0 : }
215 0 : }
216 :
217 : static void
218 : privileged_init( fd_topo_t * topo,
219 0 : fd_topo_tile_t * tile ) {
220 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
221 :
222 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
223 0 : fd_dedup_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_dedup_ctx_t ), sizeof( fd_dedup_ctx_t ) );
224 0 : FD_TEST( fd_rng_secure( &ctx->hashmap_seed, 8U ) );
225 0 : }
226 :
227 : static void
228 : unprivileged_init( fd_topo_t * topo,
229 0 : fd_topo_tile_t * tile ) {
230 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
231 :
232 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
233 0 : fd_dedup_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_dedup_ctx_t ), sizeof( fd_dedup_ctx_t ) );
234 0 : fd_tcache_t * tcache = fd_tcache_join( fd_tcache_new( FD_SCRATCH_ALLOC_APPEND( l, fd_tcache_align(), fd_tcache_footprint( tile->dedup.tcache_depth, 0) ), tile->dedup.tcache_depth, 0 ) );
235 0 : if( FD_UNLIKELY( !tcache ) ) FD_LOG_ERR(( "fd_tcache_new failed" ));
236 :
237 0 : ctx->bundle_failed = 0;
238 0 : ctx->bundle_id = 0UL;
239 0 : ctx->bundle_idx = 0UL;
240 :
241 0 : memset( &ctx->metrics, 0, sizeof( ctx->metrics ) );
242 :
243 0 : ctx->tcache_depth = fd_tcache_depth ( tcache );
244 0 : ctx->tcache_map_cnt = fd_tcache_map_cnt ( tcache );
245 0 : ctx->tcache_sync = fd_tcache_oldest_laddr( tcache );
246 0 : ctx->tcache_ring = fd_tcache_ring_laddr ( tcache );
247 0 : ctx->tcache_map = fd_tcache_map_laddr ( tcache );
248 :
249 0 : FD_TEST( tile->in_cnt<=sizeof( ctx->in )/sizeof( ctx->in[ 0 ] ) );
250 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
251 0 : fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
252 0 : fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
253 :
254 0 : ctx->in[i].mem = link_wksp->wksp;
255 0 : ctx->in[i].mtu = link->mtu;
256 0 : ctx->in[i].chunk0 = fd_dcache_compact_chunk0( ctx->in[i].mem, link->dcache );
257 0 : ctx->in[i].wmark = fd_dcache_compact_wmark ( ctx->in[i].mem, link->dcache, link->mtu );
258 :
259 0 : if( !strcmp( link->name, "gossip_dedup" ) ) {
260 0 : ctx->in_kind[ i ] = IN_KIND_GOSSIP;
261 0 : } else if( !strcmp( link->name, "verify_dedup" ) ) {
262 0 : ctx->in_kind[ i ] = IN_KIND_VERIFY;
263 0 : } else if( !strcmp( link->name, "executed_txn" ) ) {
264 0 : ctx->in_kind[ i ] = IN_KIND_EXECUTED_TXN;
265 0 : } else if( !strcmp( link->name, "exec_sig" ) ) {
266 0 : ctx->in_kind[ i ] = IN_KIND_EXECUTED_TXN;
267 0 : } else {
268 0 : FD_LOG_ERR(( "unexpected link name %s", link->name ));
269 0 : }
270 0 : }
271 :
272 0 : ctx->out_mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 0 ] ].dcache_obj_id ].wksp_id ].wksp;
273 0 : ctx->out_chunk0 = fd_dcache_compact_chunk0( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache );
274 0 : ctx->out_wmark = fd_dcache_compact_wmark ( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache, topo->links[ tile->out_link_id[ 0 ] ].mtu );
275 0 : ctx->out_chunk = ctx->out_chunk0;
276 :
277 0 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
278 0 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
279 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
280 0 : }
281 :
282 : static ulong
283 : populate_allowed_seccomp( fd_topo_t const * topo,
284 : fd_topo_tile_t const * tile,
285 : ulong out_cnt,
286 0 : struct sock_filter * out ) {
287 0 : (void)topo;
288 0 : (void)tile;
289 :
290 0 : populate_sock_filter_policy_fd_dedup_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
291 0 : return sock_filter_policy_fd_dedup_tile_instr_cnt;
292 0 : }
293 :
294 : static ulong
295 : populate_allowed_fds( fd_topo_t const * topo,
296 : fd_topo_tile_t const * tile,
297 : ulong out_fds_cnt,
298 0 : int * out_fds ) {
299 0 : (void)topo;
300 0 : (void)tile;
301 :
302 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
303 :
304 0 : ulong out_cnt = 0UL;
305 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
306 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
307 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
308 0 : return out_cnt;
309 0 : }
310 :
/* after_frag publishes at most one frag per consumed frag, so a burst
   of 1 is sufficient. */
#define STEM_BURST (1UL)

#define STEM_CALLBACK_CONTEXT_TYPE  fd_dedup_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_dedup_ctx_t)

#define STEM_CALLBACK_METRICS_WRITE metrics_write
#define STEM_CALLBACK_DURING_FRAG   during_frag
#define STEM_CALLBACK_AFTER_FRAG    after_frag

#include "../stem/fd_stem.c"
321 :
/* fd_tile_dedup is the topology run descriptor that binds this tile's
   callbacks into the tile runner. */

fd_topo_run_tile_t fd_tile_dedup = {
  .name                     = "dedup",
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds     = populate_allowed_fds,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};
|