Line data Source code
1 : #include "../tiles.h"
2 :
3 : #include "generated/fd_dedup_tile_seccomp.h"
4 :
5 : #include "../verify/fd_verify_tile.h"
6 : #include "../metrics/fd_metrics.h"
7 :
8 : #include <linux/unistd.h>
9 :
10 : /* fd_dedup provides services to deduplicate multiple streams of input
11 : fragments and present them to a mix of reliable and unreliable
12 : consumers as though they were generated by a single multi-stream
13 : producer. */
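/* A minimal sketch of the dedup pattern used throughout this tile,
   assuming a joined fd_tcache_t * tcache, a hash seed, and a 64-byte
   transaction signature sig:

     ulong tag = fd_hash( seed, sig, 64UL );
     int   is_dup;
     FD_TCACHE_INSERT( is_dup, *fd_tcache_oldest_laddr( tcache ),
                       fd_tcache_ring_laddr( tcache ), fd_tcache_depth  ( tcache ),
                       fd_tcache_map_laddr ( tcache ), fd_tcache_map_cnt( tcache ),
                       tag );
     if( is_dup ) { /* drop the transaction */ }

   is_dup is set non-zero iff tag was already present in the tcache;
   otherwise tag is inserted, evicting the oldest tag once the tcache
   is at depth. */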
14 :
15 0 : #define IN_KIND_GOSSIP (0UL)
16 0 : #define IN_KIND_VERIFY (1UL)
17 0 : #define IN_KIND_EXECUTED_TXN (2UL)
18 :
19 : /* fd_dedup_in_ctx_t is a context object for each in (producer) mcache
20 : connected to the dedup tile. */
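/* mem is a local join to the workspace backing the in link's dcache,
   chunk0 and wmark bound the valid chunk indices for frags on that
   dcache, and mtu is the link's maximum fragment size.  during_frag
   below treats any frag outside these bounds as corrupt. */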
21 :
22 : typedef struct {
23 : fd_wksp_t * mem;
24 : ulong chunk0;
25 : ulong wmark;
26 : ulong mtu;
27 : } fd_dedup_in_ctx_t;
28 :
29 : /* fd_dedup_ctx_t is the context object provided to callbacks from
30 : fd_stem, and contains all state needed to progress the tile. */
31 :
32 : typedef struct {
33 : ulong tcache_depth; /* == fd_tcache_depth( tcache ), depth of this dedup's tcache (const) */
34 : ulong tcache_map_cnt; /* == fd_tcache_map_cnt( tcache ), number of slots to use for tcache map (const) */
35 : ulong * tcache_sync; /* == fd_tcache_oldest_laddr( tcache ), local join to the oldest key in the tcache */
36 : ulong * tcache_ring;
37 : ulong * tcache_map;
38 :
39 : ulong in_kind[ 64UL ];
40 : fd_dedup_in_ctx_t in[ 64UL ];
41 :
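/* Bundle tracking state: bundle_id is the block engine bundle id of
   the bundle currently being processed, bundle_idx counts the
   transactions seen so far within it, and bundle_failed is set once
   any transaction in the bundle fails dedup so the remaining
   transactions in the bundle can be dropped (and counted as peer
   failures).  bundle_signatures holds the signatures of the first
   four transactions of the bundle, used to reject duplicates within
   the bundle itself; at most five transactions per bundle are
   expected, so the fifth signature only needs to be checked, never
   stored. */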
42 : int bundle_failed;
43 : ulong bundle_id;
44 : ulong bundle_idx;
45 : uchar bundle_signatures[ 4UL ][ 64UL ];
46 :
47 : fd_wksp_t * out_mem;
48 : ulong out_chunk0;
49 : ulong out_wmark;
50 : ulong out_chunk;
51 :
52 : ulong hashmap_seed;
53 :
54 : struct {
55 : ulong bundle_peer_failure_cnt;
56 : ulong dedup_fail_cnt;
57 : } metrics;
58 : } fd_dedup_ctx_t;
59 :
60 : FD_FN_CONST static inline ulong
61 0 : scratch_align( void ) {
62 0 : return alignof( fd_dedup_ctx_t );
63 0 : }
64 :
65 : FD_FN_PURE static inline ulong
66 0 : scratch_footprint( fd_topo_tile_t const * tile ) {
67 0 : ulong l = FD_LAYOUT_INIT;
68 0 : l = FD_LAYOUT_APPEND( l, alignof( fd_dedup_ctx_t ), sizeof( fd_dedup_ctx_t ) );
69 0 : l = FD_LAYOUT_APPEND( l, fd_tcache_align(), fd_tcache_footprint( tile->dedup.tcache_depth, 0UL ) );
70 0 : return FD_LAYOUT_FINI( l, scratch_align() );
71 0 : }
72 :
73 : static inline void
74 0 : metrics_write( fd_dedup_ctx_t * ctx ) {
75 0 : FD_MCNT_SET( DEDUP, TRANSACTION_BUNDLE_PEER_FAILURE, ctx->metrics.bundle_peer_failure_cnt );
76 0 : FD_MCNT_SET( DEDUP, TRANSACTION_DEDUP_FAILURE, ctx->metrics.dedup_fail_cnt );
77 0 : }
78 :
79 : /* during_frag is called between the pair of sequence number checks
80 : that bracket reading an incoming frag. We don't strictly need to
81 : copy the fragment here: flow control prevents it from being overrun,
82 : and downstream consumers could reuse the same chunk and workspace to
83 : improve performance.
84 :
85 : The bounds checking and copying here are defensive measures:
86 :
87 : * In a functioning system, the bounds checking should never fail,
88 : but we want to prevent an attacker with code execution on a producer
89 : tile from trivially being able to jump to a consumer tile with
90 : out of bounds chunks.
91 :
92 : * For security reasons, we have chosen to isolate all workspaces from
93 : one another, so for example, if the QUIC tile is compromised with
94 : RCE, it cannot wait until the sigverify tile has verified a transaction,
95 : and then overwrite the transaction while it's being processed by the
96 : banking stage. */
97 :
98 : static inline void
99 : during_frag( fd_dedup_ctx_t * ctx,
100 : ulong in_idx,
101 : ulong seq FD_PARAM_UNUSED,
102 : ulong sig FD_PARAM_UNUSED,
103 : ulong chunk,
104 : ulong sz,
105 0 : ulong ctl FD_PARAM_UNUSED ) {
106 :
107 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>ctx->in[ in_idx ].mtu ) )
108 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
109 :
110 0 : uchar * src = (uchar *)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
111 0 : uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
112 :
113 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
114 0 : if( FD_UNLIKELY( sz>FD_TPU_MTU ) ) FD_LOG_ERR(( "received a gossip transaction that was too large" ));
115 :
116 0 : fd_txn_m_t * txnm = (fd_txn_m_t *)dst;
117 0 : txnm->payload_sz = (ushort)sz;
118 0 : fd_memcpy( fd_txn_m_payload( txnm ), src, sz );
119 0 : txnm->block_engine.bundle_id = 0UL;
120 0 : } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EXECUTED_TXN ) ) {
121 : /* Executed txns just have their signature inserted into the tcache
122 : so we can dedup them easily. */
123 0 : ulong ha_dedup_tag = fd_hash( ctx->hashmap_seed, src+64UL, 64UL );
124 0 : int _is_dup;
125 0 : FD_TCACHE_INSERT( _is_dup, *ctx->tcache_sync, ctx->tcache_ring, ctx->tcache_depth, ctx->tcache_map, ctx->tcache_map_cnt, ha_dedup_tag );
126 0 : (void)_is_dup;
127 0 : } else {
128 0 : fd_memcpy( dst, src, sz );
129 0 : }
130 0 : }
131 :
132 : /* After the transaction has been fully received, and we know we were
133 : not overrun while reading it, check if it's a duplicate of a prior
134 : transaction.
135 :
136 : If the transaction came in from the gossip link, then it hasn't been
137 : parsed by us. So parse it here if necessary. */
138 :
139 : static inline void
140 : after_frag( fd_dedup_ctx_t * ctx,
141 : ulong in_idx,
142 : ulong seq,
143 : ulong sig,
144 : ulong sz,
145 : ulong tsorig,
146 : ulong _tspub,
147 0 : fd_stem_context_t * stem ) {
148 0 : (void)seq;
149 0 : (void)sig;
150 0 : (void)sz;
151 0 : (void)_tspub;
152 :
153 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EXECUTED_TXN ) ) return;
154 :
155 0 : fd_txn_m_t * txnm = (fd_txn_m_t *)fd_chunk_to_laddr( ctx->out_mem, ctx->out_chunk );
156 0 : FD_TEST( txnm->payload_sz<=FD_TPU_MTU );
157 0 : fd_txn_t * txn = fd_txn_m_txn_t( txnm );
158 :
159 0 : if( FD_UNLIKELY( txnm->block_engine.bundle_id && (txnm->block_engine.bundle_id!=ctx->bundle_id) ) ) {
160 0 : ctx->bundle_failed = 0;
161 0 : ctx->bundle_id = txnm->block_engine.bundle_id;
162 0 : ctx->bundle_idx = 0UL;
163 0 : }
164 :
165 0 : if( FD_UNLIKELY( txnm->block_engine.bundle_id && ctx->bundle_failed ) ) {
166 0 : ctx->metrics.bundle_peer_failure_cnt++;
167 0 : return;
168 0 : }
169 :
170 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
171 : /* Transactions coming in from the gossip link are not parsed.
172 :
173 : We need to parse them here so they're ready for downstream
174 : consumers and, equally importantly, to extract the signature
175 : for dedup. Just parse right into the output dcache. */
176 0 : txnm->txn_t_sz = (ushort)fd_txn_parse( fd_txn_m_payload( txnm ), txnm->payload_sz, txn, NULL );
177 0 : if( FD_UNLIKELY( !txnm->txn_t_sz ) ) FD_LOG_ERR(( "fd_txn_parse failed for a vote transaction that should have been sigverified" ));
178 :
179 0 : FD_MCNT_INC( DEDUP, GOSSIPED_VOTES_RECEIVED, 1UL );
180 0 : }
181 :
182 0 : int is_dup = 0;
183 0 : if( FD_LIKELY( !txnm->block_engine.bundle_id ) ) {
184 : /* Compute fd_hash(signature) for dedup. */
185 0 : ulong ha_dedup_tag = fd_hash( ctx->hashmap_seed, fd_txn_m_payload( txnm )+txn->signature_off, 64UL );
186 :
187 0 : FD_TCACHE_INSERT( is_dup, *ctx->tcache_sync, ctx->tcache_ring, ctx->tcache_depth, ctx->tcache_map, ctx->tcache_map_cnt, ha_dedup_tag );
188 0 : } else {
189 : /* Make sure the bundle doesn't contain a duplicate of one of its
190 : own transactions, which would make the bundle invalid. */
191 :
192 0 : if( FD_UNLIKELY( ctx->bundle_idx>4UL ) ) FD_LOG_ERR(( "bundle_idx %lu > 4", ctx->bundle_idx ));
193 :
194 0 : for( ulong i=0UL; i<ctx->bundle_idx; i++ ) {
195 0 : if( !memcmp( ctx->bundle_signatures[ i ], fd_txn_m_payload( txnm )+txn->signature_off, 64UL ) ) {
196 0 : is_dup = 1;
197 0 : break;
198 0 : }
199 0 : }
200 :
201 0 : if( FD_UNLIKELY( ctx->bundle_idx==4UL ) ) ctx->bundle_idx++;
202 0 : else fd_memcpy( ctx->bundle_signatures[ ctx->bundle_idx++ ], fd_txn_m_payload( txnm )+txn->signature_off, 64UL );
203 0 : }
204 :
205 0 : if( FD_LIKELY( is_dup ) ) {
206 0 : if( FD_UNLIKELY( txnm->block_engine.bundle_id ) ) ctx->bundle_failed = 1;
207 :
208 0 : ctx->metrics.dedup_fail_cnt++;
209 0 : } else {
210 0 : ulong realized_sz = fd_txn_m_realized_footprint( txnm, 1, 0 );
211 0 : ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
212 0 : fd_stem_publish( stem, 0UL, 0, ctx->out_chunk, realized_sz, 0UL, tsorig, tspub );
213 0 : ctx->out_chunk = fd_dcache_compact_next( ctx->out_chunk, realized_sz, ctx->out_chunk0, ctx->out_wmark );
214 0 : }
215 0 : }
216 :
217 : static void
218 : privileged_init( fd_topo_t * topo,
219 0 : fd_topo_tile_t * tile ) {
220 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
221 :
222 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
223 0 : fd_dedup_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_dedup_ctx_t ), sizeof( fd_dedup_ctx_t ) );
224 0 : FD_TEST( fd_rng_secure( &ctx->hashmap_seed, 8U ) );
225 0 : }
226 :
227 : static void
228 : unprivileged_init( fd_topo_t * topo,
229 0 : fd_topo_tile_t * tile ) {
230 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
231 :
232 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
233 0 : fd_dedup_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_dedup_ctx_t ), sizeof( fd_dedup_ctx_t ) );
234 0 : fd_tcache_t * tcache = fd_tcache_join( fd_tcache_new( FD_SCRATCH_ALLOC_APPEND( l, fd_tcache_align(), fd_tcache_footprint( tile->dedup.tcache_depth, 0) ), tile->dedup.tcache_depth, 0 ) );
235 0 : if( FD_UNLIKELY( !tcache ) ) FD_LOG_ERR(( "fd_tcache_new failed" ));
236 :
237 0 : ctx->bundle_failed = 0;
238 0 : ctx->bundle_id = 0UL;
239 0 : ctx->bundle_idx = 0UL;
240 :
241 0 : memset( &ctx->metrics, 0, sizeof( ctx->metrics ) );
242 :
243 0 : ctx->tcache_depth = fd_tcache_depth ( tcache );
244 0 : ctx->tcache_map_cnt = fd_tcache_map_cnt ( tcache );
245 0 : ctx->tcache_sync = fd_tcache_oldest_laddr( tcache );
246 0 : ctx->tcache_ring = fd_tcache_ring_laddr ( tcache );
247 0 : ctx->tcache_map = fd_tcache_map_laddr ( tcache );
248 :
249 0 : FD_TEST( tile->in_cnt<=sizeof( ctx->in )/sizeof( ctx->in[ 0 ] ) );
250 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
251 0 : fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
252 0 : fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
253 :
254 0 : ctx->in[i].mem = link_wksp->wksp;
255 0 : ctx->in[i].mtu = link->mtu;
256 0 : ctx->in[i].chunk0 = fd_dcache_compact_chunk0( ctx->in[i].mem, link->dcache );
257 0 : ctx->in[i].wmark = fd_dcache_compact_wmark ( ctx->in[i].mem, link->dcache, link->mtu );
258 :
259 0 : if( !strcmp( link->name, "gossip_dedup" ) ) {
260 0 : ctx->in_kind[ i ] = IN_KIND_GOSSIP;
261 0 : } else if( !strcmp( link->name, "verify_dedup" ) ) {
262 0 : ctx->in_kind[ i ] = IN_KIND_VERIFY;
263 0 : } else if( !strcmp( link->name, "executed_txn" ) ) {
264 0 : ctx->in_kind[ i ] = IN_KIND_EXECUTED_TXN;
265 0 : } else {
266 0 : FD_LOG_ERR(( "unexpected link name %s", link->name ));
267 0 : }
268 0 : }
269 :
270 0 : ctx->out_mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ 0 ] ].dcache_obj_id ].wksp_id ].wksp;
271 0 : ctx->out_chunk0 = fd_dcache_compact_chunk0( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache );
272 0 : ctx->out_wmark = fd_dcache_compact_wmark ( ctx->out_mem, topo->links[ tile->out_link_id[ 0 ] ].dcache, topo->links[ tile->out_link_id[ 0 ] ].mtu );
273 0 : ctx->out_chunk = ctx->out_chunk0;
274 :
275 0 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
276 0 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
277 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
278 0 : }
279 :
280 : static ulong
281 : populate_allowed_seccomp( fd_topo_t const * topo,
282 : fd_topo_tile_t const * tile,
283 : ulong out_cnt,
284 0 : struct sock_filter * out ) {
285 0 : (void)topo;
286 0 : (void)tile;
287 :
288 0 : populate_sock_filter_policy_fd_dedup_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
289 0 : return sock_filter_policy_fd_dedup_tile_instr_cnt;
290 0 : }
291 :
292 : static ulong
293 : populate_allowed_fds( fd_topo_t const * topo,
294 : fd_topo_tile_t const * tile,
295 : ulong out_fds_cnt,
296 0 : int * out_fds ) {
297 0 : (void)topo;
298 0 : (void)tile;
299 :
300 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
301 :
302 0 : ulong out_cnt = 0UL;
303 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
304 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
305 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
306 0 : return out_cnt;
307 0 : }
308 :
309 0 : #define STEM_BURST (1UL)
310 :
311 0 : #define STEM_CALLBACK_CONTEXT_TYPE fd_dedup_ctx_t
312 0 : #define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_dedup_ctx_t)
313 :
314 0 : #define STEM_CALLBACK_METRICS_WRITE metrics_write
315 0 : #define STEM_CALLBACK_DURING_FRAG during_frag
316 0 : #define STEM_CALLBACK_AFTER_FRAG after_frag
317 :
318 : #include "../stem/fd_stem.c"
319 :
320 : fd_topo_run_tile_t fd_tile_dedup = {
321 : .name = "dedup",
322 : .populate_allowed_seccomp = populate_allowed_seccomp,
323 : .populate_allowed_fds = populate_allowed_fds,
324 : .scratch_align = scratch_align,
325 : .scratch_footprint = scratch_footprint,
326 : .privileged_init = privileged_init,
327 : .unprivileged_init = unprivileged_init,
328 : .run = stem_run,
329 : };