Line data Source code
1 : #include "../tiles.h"
2 :
3 : #include "generated/fd_dedup_tile_seccomp.h"
4 :
5 : #include "../verify/fd_verify_tile.h"
6 : #include "../metrics/fd_metrics.h"
7 :
8 : #include <linux/unistd.h>
9 :
10 : /* fd_dedup provides services to deduplicate multiple streams of input
11 : fragments and present them to a mix of reliable and unreliable
12 : consumers as though they were generated by a single multi-stream
13 : producer.
14 :
15 : The dedup tile is simply a wrapper around the mux tile, that also
16 : checks the transaction signature field for duplicates and filters
17 : them out. */
18 :
19 0 : #define IN_KIND_GOSSIP (0UL)
20 0 : #define IN_KIND_VERIFY (1UL)
21 0 : #define IN_KIND_EXECUTED_TXN (2UL)
22 :
23 : /* fd_dedup_in_ctx_t is a context object for each in (producer) mcache
24 : connected to the dedup tile. */
25 :
typedef struct {
  fd_wksp_t * mem;    /* workspace backing this producer's dcache */
  ulong       chunk0; /* lowest valid chunk index into mem for this link */
  ulong       wmark;  /* highest valid chunk index into mem for this link */
  ulong       mtu;    /* largest frag size this link may carry, bytes */
} fd_dedup_in_ctx_t;
32 :
33 : /* fd_dedup_ctx_t is the context object provided to callbacks from the
34 : mux tile, and contains all state needed to progress the tile. */
35 :
typedef struct {
  ulong   tcache_depth;   /* == fd_tcache_depth( tcache ), depth of this dedup's tcache (const) */
  ulong   tcache_map_cnt; /* == fd_tcache_map_cnt( tcache ), number of slots to use for tcache map (const) */
  ulong * tcache_sync;    /* == fd_tcache_oldest_laddr( tcache ), local join to the oldest key in the tcache */
  ulong * tcache_ring;    /* local join to the tcache ring of recently seen dedup tags */
  ulong * tcache_map;     /* local join to the tcache map used for duplicate lookup */

  ulong             in_kind[ 64UL ]; /* IN_KIND_* discriminator per in link, indexed by in_idx */
  fd_dedup_in_ctx_t in[ 64UL ];      /* per in-link dcache bounds, indexed by in_idx */

  int   bundle_failed; /* non-zero if a txn in the current bundle was a duplicate; poisons its peers */
  ulong bundle_id;     /* bundle id currently being processed */
  ulong bundle_idx;    /* index of the next txn within the current bundle */
  uchar bundle_signatures[ 4UL ][ 64UL ]; /* signatures of the first 4 txns of the current bundle,
                                             used to reject intra-bundle duplicates in after_frag */

  fd_wksp_t * out_mem;    /* workspace backing the out dcache */
  ulong       out_chunk0; /* lowest valid out chunk index */
  ulong       out_wmark;  /* highest valid out chunk index */
  ulong       out_chunk;  /* next out chunk to publish into */

  ulong hashmap_seed;     /* secure random seed for fd_hash so dedup tag collisions are unpredictable */

  struct {
    ulong bundle_peer_failure_cnt; /* txns dropped because a peer txn in their bundle failed dedup */
    ulong dedup_fail_cnt;          /* txns dropped because they were duplicates */
  } metrics;
} fd_dedup_ctx_t;
63 :
64 : FD_FN_CONST static inline ulong
65 0 : scratch_align( void ) {
66 0 : return alignof( fd_dedup_ctx_t );
67 0 : }
68 :
/* scratch_footprint returns the number of bytes of scratch memory the
   dedup tile needs: the context object followed by a tcache sized to
   the configured depth.  Layout must match unprivileged_init exactly. */
FD_FN_PURE static inline ulong
scratch_footprint( fd_topo_tile_t const * tile ) {
  ulong l = FD_LAYOUT_INIT;
  l = FD_LAYOUT_APPEND( l, alignof( fd_dedup_ctx_t ), sizeof( fd_dedup_ctx_t ) );
  l = FD_LAYOUT_APPEND( l, fd_tcache_align(), fd_tcache_footprint( tile->dedup.tcache_depth, 0UL ) );
  return FD_LAYOUT_FINI( l, scratch_align() );
}
76 :
/* metrics_write publishes the tile's local counters to the metrics
   system (registered as STEM_CALLBACK_METRICS_WRITE below). */
static inline void
metrics_write( fd_dedup_ctx_t * ctx ) {
  FD_MCNT_SET( DEDUP, TRANSACTION_BUNDLE_PEER_FAILURE, ctx->metrics.bundle_peer_failure_cnt );
  FD_MCNT_SET( DEDUP, TRANSACTION_DEDUP_FAILURE, ctx->metrics.dedup_fail_cnt );
}
82 :
83 : /* during_frag is called between pairs for sequence number checks, as
84 : we are reading incoming frags. We don't actually need to copy the
85 : fragment here, flow control prevents it getting overrun, and
86 : downstream consumers could reuse the same chunk and workspace to
87 : improve performance.
88 :
89 : The bounds checking and copying here are defensive measures,
90 :
91 : * In a functioning system, the bounds checking should never fail,
92 : but we want to prevent an attacker with code execution on a producer
93 : tile from trivially being able to jump to a consumer tile with
94 : out of bounds chunks.
95 :
96 : * For security reasons, we have chosen to isolate all workspaces from
97 : one another, so for example, if the QUIC tile is compromised with
98 : RCE, it cannot wait until the sigverify tile has verified a transaction,
99 : and then overwrite the transaction while it's being processed by the
100 : banking stage. */
101 :
static inline void
during_frag( fd_dedup_ctx_t * ctx,
             ulong            in_idx,
             ulong            seq FD_PARAM_UNUSED,
             ulong            sig FD_PARAM_UNUSED,
             ulong            chunk,
             ulong            sz,
             ulong            ctl FD_PARAM_UNUSED ) {

  /* Defensive bounds check: a compromised producer must not be able to
     make us read outside its own dcache region. */
  if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>ctx->in[ in_idx ].mtu ) )
    FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

  uchar * src = (uchar *)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
  uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
    if( FD_UNLIKELY( sz>FD_TPU_MTU ) ) FD_LOG_ERR(( "received a gossip transaction that was too large" ));

    /* Gossip frags are raw txn payloads: wrap them in an fd_txn_m_t in
       the out dcache here; the txn itself is parsed later in after_frag. */
    fd_txn_m_t * txnm = (fd_txn_m_t *)dst;
    txnm->payload_sz = (ushort)sz;
    fd_memcpy( fd_txn_m_payload( txnm ), src, sz );
    txnm->block_engine.bundle_id = 0UL; /* gossip txns are never part of a bundle */
  } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EXECUTED_TXN ) ) {
    /* Executed txns just have their signature inserted into the tcache
       so we can dedup them easily.  NOTE(review): assumes the 64-byte
       signature lives at byte offset 64 of the frag -- confirm against
       the executed_txn producer's frag layout. */
    ulong ha_dedup_tag = fd_hash( ctx->hashmap_seed, src+64UL, 64UL );
    int _is_dup;
    FD_TCACHE_INSERT( _is_dup, *ctx->tcache_sync, ctx->tcache_ring, ctx->tcache_depth, ctx->tcache_map, ctx->tcache_map_cnt, ha_dedup_tag );
    (void)_is_dup;
  } else {
    /* IN_KIND_VERIFY: frag is already a fully formed fd_txn_m_t; copy
       it verbatim into our out dcache (isolated workspace, see above). */
    fd_memcpy( dst, src, sz );
  }
}
135 :
136 : /* After the transaction has been fully received, and we know we were
137 : not overrun while reading it, check if it's a duplicate of a prior
138 : transaction.
139 :
140 : If the transaction came in from the gossip link, then it hasn't been
141 : parsed by us. So parse it here if necessary. */
142 :
static inline void
after_frag( fd_dedup_ctx_t *    ctx,
            ulong               in_idx,
            ulong               seq,
            ulong               sig,
            ulong               sz,
            ulong               tsorig,
            ulong               _tspub,
            fd_stem_context_t * stem ) {
  (void)seq;
  (void)sig;
  (void)sz;
  (void)_tspub;

  /* Executed txns were fully handled (tcache insert) in during_frag and
     are never forwarded downstream. */
  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EXECUTED_TXN ) ) return;

  /* We were not overrun (flow control), so the copy made in during_frag
     is intact in our out dcache. */
  fd_txn_m_t * txnm = (fd_txn_m_t *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
  FD_TEST( txnm->payload_sz<=FD_TPU_MTU );
  fd_txn_t * txn = fd_txn_m_txn_t( txnm );

  /* First txn of a new bundle: reset the per-bundle dedup state. */
  if( FD_UNLIKELY( txnm->block_engine.bundle_id && (txnm->block_engine.bundle_id!=ctx->bundle_id) ) ) {
    ctx->bundle_failed = 0;
    ctx->bundle_id     = txnm->block_engine.bundle_id;
    ctx->bundle_idx    = 0UL;
  }

  /* If an earlier txn in this bundle already failed dedup, the whole
     bundle is invalid: drop the remaining peers too. */
  if( FD_UNLIKELY( txnm->block_engine.bundle_id && ctx->bundle_failed ) ) {
    ctx->metrics.bundle_peer_failure_cnt++;
    return;
  }

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
    /* Transactions coming in from these links are not parsed.

       We'll need to parse it so it's ready for downstream consumers.
       Equally importantly, we need to parse to extract the signature
       for dedup.  Just parse it right into the output dcache. */
    txnm->txn_t_sz = (ushort)fd_txn_parse( fd_txn_m_payload( txnm ), txnm->payload_sz, txn, NULL );
    if( FD_UNLIKELY( !txnm->txn_t_sz ) ) FD_LOG_ERR(( "fd_txn_parse failed for vote transactions that should have been sigverified" ));

    FD_MCNT_INC( DEDUP, GOSSIPED_VOTES_RECEIVED, 1UL );
  }

  int is_dup = 0;
  if( FD_LIKELY( !txnm->block_engine.bundle_id ) ) {
    /* Compute fd_hash(signature) for dedup.  Non-bundle txns dedup
       against the global tcache. */
    ulong ha_dedup_tag = fd_hash( ctx->hashmap_seed, fd_txn_m_payload( txnm )+txn->signature_off, 64UL );

    FD_TCACHE_INSERT( is_dup, *ctx->tcache_sync, ctx->tcache_ring, ctx->tcache_depth, ctx->tcache_map, ctx->tcache_map_cnt, ha_dedup_tag );
  } else {
    /* Make sure bundles don't contain a duplicate transaction inside
       the bundle, which would not be valid.  Only the first 4
       signatures are stored; the 5th txn is checked against them but
       not stored (bundles hold at most 5 txns, so it has no later
       peers to check against). */

    if( FD_UNLIKELY( ctx->bundle_idx>4UL ) ) FD_LOG_ERR(( "bundle_idx %lu > 4", ctx->bundle_idx ));

    for( ulong i=0UL; i<ctx->bundle_idx; i++ ) {
      if( !memcmp( ctx->bundle_signatures[ i ], fd_txn_m_payload( txnm )+txn->signature_off, 64UL ) ) {
        is_dup = 1;
        break;
      }
    }

    if( FD_UNLIKELY( ctx->bundle_idx==4UL ) ) ctx->bundle_idx++;
    else fd_memcpy( ctx->bundle_signatures[ ctx->bundle_idx++ ], fd_txn_m_payload( txnm )+txn->signature_off, 64UL );
  }

  /* NOTE(review): FD_LIKELY marks the duplicate path as the hot case --
     plausible for a dedup tile under retransmit-heavy traffic, but
     confirm this matches observed frag mixes. */
  if( FD_LIKELY( is_dup ) ) {
    if( FD_UNLIKELY( txnm->block_engine.bundle_id ) ) ctx->bundle_failed = 1;

    ctx->metrics.dedup_fail_cnt++;
  } else {
    /* Not a duplicate: publish the txn downstream and advance the out
       dcache chunk past the bytes actually used. */
    ulong realized_sz = fd_txn_m_realized_footprint( txnm, 1, 0 );
    ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
    fd_stem_publish( stem, 0UL, 0, ctx->out_chunk, realized_sz, 0UL, tsorig, tspub );
    ctx->out_chunk = fd_dcache_compact_next( ctx->out_chunk, realized_sz, ctx->out_chunk0, ctx->out_wmark );
  }
}
220 :
/* privileged_init runs before the tile is sandboxed.  The only
   privileged work the dedup tile needs is drawing a secure random seed
   for its dedup hash, so an attacker cannot precompute tag collisions. */
static void
privileged_init( fd_topo_t *      topo,
                 fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_dedup_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_dedup_ctx_t ), sizeof( fd_dedup_ctx_t ) );
  FD_TEST( fd_rng_secure( &ctx->hashmap_seed, 8U ) );
}
230 :
/* unprivileged_init wires the tile up to the topology: it carves the
   context and tcache out of scratch (same layout as scratch_footprint),
   classifies each in link by name, and records the dcache bounds for
   every in link and the single out link. */
static void
unprivileged_init( fd_topo_t *      topo,
                   fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  /* Must carve scratch in the same order as privileged_init so ctx (and
     the hashmap_seed written there) lands at the same address. */
  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_dedup_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_dedup_ctx_t ), sizeof( fd_dedup_ctx_t ) );
  fd_tcache_t * tcache = fd_tcache_join( fd_tcache_new( FD_SCRATCH_ALLOC_APPEND( l, fd_tcache_align(), fd_tcache_footprint( tile->dedup.tcache_depth, 0) ), tile->dedup.tcache_depth, 0 ) );
  if( FD_UNLIKELY( !tcache ) ) FD_LOG_ERR(( "fd_tcache_new failed" ));

  ctx->bundle_failed = 0;
  ctx->bundle_id     = 0UL;
  ctx->bundle_idx    = 0UL;

  memset( &ctx->metrics, 0, sizeof( ctx->metrics ) );

  /* Cache the tcache joins so the frag callbacks avoid re-deriving them. */
  ctx->tcache_depth   = fd_tcache_depth       ( tcache );
  ctx->tcache_map_cnt = fd_tcache_map_cnt     ( tcache );
  ctx->tcache_sync    = fd_tcache_oldest_laddr( tcache );
  ctx->tcache_ring    = fd_tcache_ring_laddr  ( tcache );
  ctx->tcache_map     = fd_tcache_map_laddr   ( tcache );

  FD_TEST( tile->in_cnt<=sizeof( ctx->in )/sizeof( ctx->in[ 0 ] ) );
  for( ulong i=0UL; i<tile->in_cnt; i++ ) {
    fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
    fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];

    /* Record per-link dcache bounds used by during_frag's bounds check. */
    ctx->in[i].mem    = link_wksp->wksp;
    ctx->in[i].mtu    = link->mtu;
    ctx->in[i].chunk0 = fd_dcache_compact_chunk0( ctx->in[i].mem, link->dcache );
    ctx->in[i].wmark  = fd_dcache_compact_wmark ( ctx->in[i].mem, link->dcache, link->mtu );

    /* Classify the link by its topology name; unknown names are fatal. */
    if( !strcmp( link->name, "gossip_dedup" ) ) {
      ctx->in_kind[ i ] = IN_KIND_GOSSIP;
    } else if( !strcmp( link->name, "verify_dedup" ) ) {
      ctx->in_kind[ i ] = IN_KIND_VERIFY;
    } else if( !strcmp( link->name, "executed_txn" ) ) {
      ctx->in_kind[ i ] = IN_KIND_EXECUTED_TXN;
    } else {
      FD_LOG_ERR(( "unexpected link name %s", link->name ));
    }
  }

  ctx->out_mem    = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 0 ] ].dcache_obj_id ].wksp_id ].wksp;
  ctx->out_chunk0 = fd_dcache_compact_chunk0( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache );
  ctx->out_wmark  = fd_dcache_compact_wmark ( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache, topo->links[ tile->out_link_id[ 0 ] ].mtu );
  ctx->out_chunk  = ctx->out_chunk0;

  /* Sanity check: the allocations above must fit in scratch_footprint. */
  ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
  if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
    FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
}
283 :
284 : static ulong
285 : populate_allowed_seccomp( fd_topo_t const * topo,
286 : fd_topo_tile_t const * tile,
287 : ulong out_cnt,
288 0 : struct sock_filter * out ) {
289 0 : (void)topo;
290 0 : (void)tile;
291 :
292 0 : populate_sock_filter_policy_fd_dedup_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
293 0 : return sock_filter_policy_fd_dedup_tile_instr_cnt;
294 0 : }
295 :
296 : static ulong
297 : populate_allowed_fds( fd_topo_t const * topo,
298 : fd_topo_tile_t const * tile,
299 : ulong out_fds_cnt,
300 0 : int * out_fds ) {
301 0 : (void)topo;
302 0 : (void)tile;
303 :
304 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
305 :
306 0 : ulong out_cnt = 0UL;
307 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
308 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
309 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
310 0 : return out_cnt;
311 0 : }
312 :
/* At most one downstream frag (the forwarded txn) is published per
   input frag processed, so a burst of 1 suffices for flow control. */
#define STEM_BURST (1UL)

#define STEM_CALLBACK_CONTEXT_TYPE  fd_dedup_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_dedup_ctx_t)

#define STEM_CALLBACK_METRICS_WRITE metrics_write
#define STEM_CALLBACK_DURING_FRAG   during_frag
#define STEM_CALLBACK_AFTER_FRAG    after_frag

/* Instantiate the stem run loop with the callbacks configured above. */
#include "../stem/fd_stem.c"

/* Tile descriptor registered with the topology runner. */
fd_topo_run_tile_t fd_tile_dedup = {
  .name                     = "dedup",
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds     = populate_allowed_fds,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};
|