Line data Source code
1 : #define _GNU_SOURCE
2 : #include "../../disco/tiles.h"
3 : #include "generated/fd_writer_tile_seccomp.h"
4 :
5 : #include "../../util/pod/fd_pod_format.h"
6 :
7 : #include "../../flamenco/runtime/fd_bank.h"
8 : #include "../../flamenco/runtime/fd_runtime.h"
9 : #include "../../discof/replay/fd_exec.h"
10 : #include "../../discof/replay/fd_vote_tracker.h"
11 :
12 : #include "../../funk/fd_funk.h"
13 :
/* fd_writer_tile_in_ctx_t describes one in-link this tile consumes
   (an exec_writer link or the send_writer link).  mem is the wksp
   backing the link's dcache; [chunk0,wmark] is the valid compact
   chunk range used to bounds-check incoming frags in during_frag. */
struct fd_writer_tile_in_ctx {
  fd_wksp_t * mem;    /* wksp containing the in-link dcache */
  ulong       chunk0; /* lowest valid chunk index */
  ulong       wmark;  /* highest valid chunk index */
};
typedef struct fd_writer_tile_in_ctx fd_writer_tile_in_ctx_t;
20 :
/* fd_writer_tile_out_ctx_t is used by the writer tile to send account updates
   to the replay tile for solcap writing.

   TODO: remove this when solcap v2 is here. */
struct fd_writer_tile_out_ctx {
  ulong            idx;    /* out-link index of this link in the tile's out_link_id table */

  /* mcache publishing state for the out-link. */
  fd_frag_meta_t * mcache; /* frag metadata ring */
  ulong *          sync;   /* mcache seq sync counter (laddr) */
  ulong            depth;  /* mcache depth */
  ulong            seq;    /* next sequence number to publish */

  /* dcache chunk state for the out-link payloads. */
  fd_wksp_t *      mem;    /* wksp containing the out-link dcache */
  ulong            chunk0; /* lowest valid chunk index */
  ulong            wmark;  /* highest valid chunk index */
  ulong            chunk;  /* next chunk to write */
};
typedef struct fd_writer_tile_out_ctx fd_writer_tile_out_ctx_t;
39 :
/* fd_writer_tile_ctx_t is the per-tile state of one writer tile.  It
   lives in the tile's scratch region and is zero-initialized in
   unprivileged_init. */
struct fd_writer_tile_ctx {
  fd_wksp_t * wksp;          /* wksp backing this tile's scratch */
  ulong       tile_cnt;      /* total number of writer tiles (for round-robin) */
  ulong       tile_idx;      /* this tile's index among writer tiles */
  ulong       exec_tile_cnt; /* number of exec tiles / exec_writer in-links */

  /* Capture ctx.  capture_ctx is NULL unless solcap capture is
     configured; solcap_publish_buffer_ptr is the read cursor into
     capture_ctx->account_updates_buffer used when draining buffered
     account updates to the replay tile. */
  fd_capture_ctx_t * capture_ctx;
  FILE *             capture_file;
  uchar *            solcap_publish_buffer_ptr;
  ulong              account_updates_flushed; /* # updates already published this batch */

  /* Local join of Funk.  R/W.  funk_txn caches the in-preparation funk
     transaction for the slot currently being finalized. */
  fd_funk_t       funk[1];
  fd_funk_txn_t * funk_txn;

  /* Link management. */
  fd_writer_tile_in_ctx_t  exec_writer_in[ FD_PACK_MAX_BANK_TILES ];
  fd_writer_tile_in_ctx_t  send_writer_in[1];
  fd_writer_tile_out_ctx_t writer_replay_out[1];
  fd_writer_tile_out_ctx_t capture_replay_out[1];

  /* Local joins of exec spads.  Read-only. */
  fd_spad_t * exec_spad[ FD_PACK_MAX_BANK_TILES ];
  fd_wksp_t * exec_spad_wksp[ FD_PACK_MAX_BANK_TILES ];

  /* Local joins of exec tile txn ctx.  Read-only.  Joined lazily on
     receipt of each exec tile's boot message (see join_txn_ctx). */
  fd_exec_txn_ctx_t * txn_ctx[ FD_PACK_MAX_BANK_TILES ];

  /* Local join of bank manager.  R/W. */
  fd_banks_t * banks;
  fd_bank_t *  bank;

  /* Local join of vote tracker.  R/W. */
  fd_vote_tracker_t * vote_tracker;

  /* Buffers to hold fragments received during during_frag */
  fd_exec_writer_boot_msg_t boot_msg;
  fd_exec_writer_txn_msg_t  txn_msg;

  /* Copy of the 64-byte txn signature received from the send tile. */
  uchar vote_msg[64];

  /* Buffer to hold the writer->replay notification that a txn has been finalized.
     We need to store it here before publishing it, because we need to ensure that
     all solcap updates have been published before this message. */
  fd_writer_replay_txn_finalized_msg_t txn_finalized_buffer;
  int                                  pending_txn_finalized_msg; /* nonzero if txn_finalized_buffer awaits publish */
};
typedef struct fd_writer_tile_ctx fd_writer_tile_ctx_t;
89 :
/* scratch_align returns the alignment, in bytes, required for this
   tile's scratch region. */
FD_FN_CONST static inline ulong
scratch_align( void ) {
  return 128UL;
}
94 :
/* scratch_footprint returns the total scratch footprint for this tile.
   The append order here must match the FD_SCRATCH_ALLOC_APPEND order in
   unprivileged_init exactly (ctx, capture ctx, vote tracker). */
FD_FN_PURE static inline ulong
scratch_footprint( fd_topo_tile_t const * tile ) {
  (void)tile;
  ulong l = FD_LAYOUT_INIT;
  l = FD_LAYOUT_APPEND( l, alignof(fd_writer_tile_ctx_t), sizeof(fd_writer_tile_ctx_t) );
  l = FD_LAYOUT_APPEND( l, FD_CAPTURE_CTX_ALIGN,          FD_CAPTURE_CTX_FOOTPRINT );
  l = FD_LAYOUT_APPEND( l, fd_vote_tracker_align(),       fd_vote_tracker_footprint() );
  return FD_LAYOUT_FINI( l, scratch_align() );
}
104 :
105 : static void
106 : join_txn_ctx( fd_writer_tile_ctx_t * ctx,
107 : ulong exec_tile_idx,
108 0 : uint txn_ctx_offset ) {
109 :
110 0 : ulong exec_spad_gaddr = fd_wksp_gaddr( ctx->exec_spad_wksp[ exec_tile_idx ], ctx->exec_spad[ exec_tile_idx ] );
111 0 : if( FD_UNLIKELY( !exec_spad_gaddr ) ) {
112 0 : FD_LOG_CRIT(( "Unable to get gaddr of exec_spad %lu", exec_tile_idx ));
113 0 : }
114 :
115 0 : ulong txn_ctx_gaddr = exec_spad_gaddr + txn_ctx_offset;
116 0 : uchar * txn_ctx_laddr = fd_wksp_laddr( ctx->exec_spad_wksp[ exec_tile_idx ], txn_ctx_gaddr );
117 0 : if( FD_UNLIKELY( !txn_ctx_laddr ) ) {
118 0 : FD_LOG_CRIT(( "Unable to get laddr of the txn ctx at gaddr 0x%lx from exec_spad %lu", txn_ctx_gaddr, exec_tile_idx ));
119 0 : }
120 :
121 0 : ctx->txn_ctx[ exec_tile_idx ] = fd_exec_txn_ctx_join( txn_ctx_laddr,
122 0 : ctx->exec_spad[ exec_tile_idx ],
123 0 : ctx->exec_spad_wksp[ exec_tile_idx ] );
124 0 : if( FD_UNLIKELY( !ctx->txn_ctx[ exec_tile_idx ] ) ) {
125 0 : FD_LOG_CRIT(( "Unable to join txn ctx at gaddr 0x%lx laddr 0x%lx from exec_spad %lu", txn_ctx_gaddr, (ulong)txn_ctx_laddr, exec_tile_idx ));
126 0 : }
127 0 : }
128 :
/* Publish the next account update event buffered in the capture tile to the replay tile

   TODO: remove this when solcap v2 is here. */
static void
publish_next_capture_ctx_account_update( fd_writer_tile_ctx_t * ctx, fd_stem_context_t * stem ) {
  /* No-op unless solcap capture is enabled for this tile. */
  if( FD_UNLIKELY( !ctx->capture_ctx ) ) {
    return;
  }

  /* The capture buffer holds back-to-back (header, account data) pairs;
     solcap_publish_buffer_ptr is our read cursor into it.  Copy the
     fixed-size account update header into the out dcache. */
  ulong chunk = ctx->capture_replay_out->chunk;
  uchar * out_ptr = fd_chunk_to_laddr( ctx->capture_replay_out->mem, chunk );
  fd_capture_ctx_account_update_msg_t * msg = (fd_capture_ctx_account_update_msg_t *)ctx->solcap_publish_buffer_ptr;
  memcpy( out_ptr, msg, sizeof(fd_capture_ctx_account_update_msg_t) );
  ctx->solcap_publish_buffer_ptr += sizeof(fd_capture_ctx_account_update_msg_t);
  out_ptr += sizeof(fd_capture_ctx_account_update_msg_t);

  /* Copy the variable-size account data that follows the header in the
     capture buffer.  NOTE: msg still points at the header we just
     consumed, so msg->data_sz remains valid here. */
  ulong data_sz = msg->data_sz;
  memcpy( out_ptr, ctx->solcap_publish_buffer_ptr, data_sz );
  ctx->solcap_publish_buffer_ptr += data_sz;
  out_ptr += data_sz;

  /* Stem publish the account update event */
  ulong msg_sz = sizeof(fd_capture_ctx_account_update_msg_t) + msg->data_sz;
  fd_stem_publish( stem, ctx->capture_replay_out->idx, 0UL, chunk, msg_sz, 0UL, 0UL, 0UL );
  ctx->capture_replay_out->chunk = fd_dcache_compact_next(
      chunk,
      msg_sz,
      ctx->capture_replay_out->chunk0,
      ctx->capture_replay_out->wmark );

  /* Advance the number of account updates flushed */
  ctx->account_updates_flushed++;

  /* If we have published all the account updates, reset the buffer pointer and length */
  if( ctx->account_updates_flushed == ctx->capture_ctx->account_updates_len ) {
    ctx->capture_ctx->account_updates_buffer_ptr = ctx->capture_ctx->account_updates_buffer;
    ctx->solcap_publish_buffer_ptr = ctx->capture_ctx->account_updates_buffer;
    ctx->capture_ctx->account_updates_len = 0UL;
    ctx->account_updates_flushed = 0UL;
  }
}
172 :
173 : /* Publish the txn finalized message to the replay tile */
174 : static void
175 0 : publish_txn_finalized_msg( fd_writer_tile_ctx_t * ctx, fd_stem_context_t * stem ) {
176 0 : if( FD_UNLIKELY( !ctx->pending_txn_finalized_msg ) ) {
177 0 : return;
178 0 : }
179 :
180 : /* Copy the txn finalized message to the buffer */
181 0 : uchar * out_ptr = fd_chunk_to_laddr( ctx->writer_replay_out->mem, ctx->writer_replay_out->chunk );
182 0 : memcpy( out_ptr, &ctx->txn_finalized_buffer, sizeof(fd_writer_replay_txn_finalized_msg_t) );
183 :
184 : /* Publish the txn finalized message */
185 0 : fd_stem_publish(
186 0 : stem,
187 0 : ctx->writer_replay_out->idx,
188 0 : 0UL,
189 0 : ctx->writer_replay_out->chunk,
190 0 : sizeof(fd_writer_replay_txn_finalized_msg_t),
191 0 : 0UL,
192 0 : 0UL,
193 0 : 0UL );
194 0 : ctx->writer_replay_out->chunk = fd_dcache_compact_next(
195 0 : ctx->writer_replay_out->chunk,
196 0 : sizeof(fd_writer_replay_txn_finalized_msg_t),
197 0 : ctx->writer_replay_out->chunk0,
198 0 : ctx->writer_replay_out->wmark );
199 0 : ctx->pending_txn_finalized_msg = 0;
200 0 : }
201 :
202 : static void
203 : after_credit( fd_writer_tile_ctx_t * ctx,
204 : fd_stem_context_t * stem,
205 : int * opt_poll_in,
206 0 : int * charge_busy ) {
207 0 : (void)charge_busy;
208 :
209 : /* If we have outstanding account updates to send to solcap, send them.
210 : Note that we set opt_poll_in to 0 here because we must not consume
211 : any more fragments from the exec tiles before publishing our messages,
212 : so that solcap updates are not interleaved between slots.
213 : */
214 0 : if( ctx->capture_ctx && ctx->account_updates_flushed < ctx->capture_ctx->account_updates_len ) {
215 0 : publish_next_capture_ctx_account_update( ctx, stem );
216 0 : *opt_poll_in = 0;
217 0 : } else if ( ctx->pending_txn_finalized_msg ) {
218 0 : publish_txn_finalized_msg( ctx, stem );
219 0 : *opt_poll_in = 0;
220 0 : }
221 0 : }
222 :
223 : static int
224 : before_frag( fd_writer_tile_ctx_t * ctx,
225 : ulong in_idx,
226 : ulong seq,
227 0 : ulong sig ) {
228 :
229 : /* Round-robin.
230 :
231 : The usual round-robin strategy of returning
232 : (seq % ctx->tile_cnt) != ctx->tile_idx
233 : here suffers somewhat from a sort of convoy effect.
234 : This is because exec tiles do not proceed to the next transaction
235 : until transaction finalization has been done. In other words, exec
236 : tiles block on writer tiles, rather than truly pipelining. As a
237 : result, when all the exec tiles publish to seq 0, the 0th writer
238 : tile becomes busy, and all exec tiles block on it. Then writer
239 : tile 1 becomes busy, while all other writer tiles sit idle. So on
240 : and so forth.
241 :
242 : So we offset by in_idx to try to mitigate this.
243 : */
244 0 : return ((seq+in_idx) % ctx->tile_cnt) != ctx->tile_idx && sig != FD_WRITER_BOOT_SIG; /* The boot message should go through to all writer tiles. */
245 0 : }
246 :
247 : static void
248 : during_frag( fd_writer_tile_ctx_t * ctx,
249 : ulong in_idx,
250 : ulong seq FD_PARAM_UNUSED,
251 : ulong sig,
252 : ulong chunk,
253 : ulong sz,
254 0 : ulong ctl FD_PARAM_UNUSED ) {
255 :
256 0 : if( in_idx<ctx->exec_tile_cnt ) {
257 :
258 0 : fd_writer_tile_in_ctx_t * in_ctx = &(ctx->exec_writer_in[ in_idx ]);
259 :
260 0 : if( FD_UNLIKELY( chunk < in_ctx->chunk0 || chunk > in_ctx->wmark ) ) {
261 0 : FD_LOG_CRIT(( "chunk %lu %lu corrupt, not in range [%lu,%lu]",
262 0 : chunk,
263 0 : sz,
264 0 : in_ctx->chunk0,
265 0 : in_ctx->wmark ));
266 0 : }
267 :
268 0 : if( FD_UNLIKELY( sig==FD_WRITER_BOOT_SIG ) ) {
269 0 : fd_exec_writer_boot_msg_t * msg = fd_type_pun( fd_chunk_to_laddr( in_ctx->mem, chunk ) );
270 0 : ctx->boot_msg = *msg;
271 0 : }
272 :
273 0 : if( FD_LIKELY( sig==FD_WRITER_TXN_SIG ) ) {
274 0 : fd_exec_writer_txn_msg_t * msg = fd_type_pun( fd_chunk_to_laddr( in_ctx->mem, chunk ) );
275 0 : ctx->txn_msg = *msg;
276 0 : }
277 0 : } else if( in_idx==ctx->exec_tile_cnt ) {
278 : /* This is a message from the send tile. */
279 0 : fd_writer_tile_in_ctx_t * in_ctx = &ctx->send_writer_in[ 0 ];
280 :
281 0 : fd_txn_m_t * txnm = fd_type_pun( fd_chunk_to_laddr( in_ctx->mem, chunk ) );
282 0 : uchar * payload = ((uchar *)txnm) + sizeof(fd_txn_m_t);
283 0 : fd_txn_t txn;
284 0 : if( FD_UNLIKELY( !fd_txn_parse( payload, txnm->payload_sz, &txn, NULL ) ) ) {
285 0 : FD_LOG_CRIT(( "Could not parse txn from send tile" ));
286 0 : }
287 0 : uchar * signature = payload + txn.signature_off;
288 0 : memcpy( ctx->vote_msg, signature, 64UL );
289 0 : return;
290 0 : }
291 0 : }
292 :
/* after_frag processes the frag buffered in during_frag.  For exec
   tile frags, sig selects between boot handling (join the exec tile's
   txn ctx) and txn finalization (write account updates into funk and
   notify replay).  A frag at in_idx==exec_tile_cnt is a vote signature
   from the send tile. */
static void
after_frag( fd_writer_tile_ctx_t * ctx,
            ulong                  in_idx,
            ulong                  seq FD_PARAM_UNUSED,
            ulong                  sig,
            ulong                  sz FD_PARAM_UNUSED,
            ulong                  tsorig FD_PARAM_UNUSED,
            ulong                  tspub FD_PARAM_UNUSED,
            fd_stem_context_t *    stem FD_PARAM_UNUSED ) {

  /* Process messages from exec tiles. */

  if( in_idx<ctx->exec_tile_cnt ) {
    switch( sig ) {

    case FD_WRITER_BOOT_SIG: {
      /* Join this exec tile's txn ctx; once every exec tile has booted
         we are fully operational. */
      fd_exec_writer_boot_msg_t * msg = &ctx->boot_msg;
      join_txn_ctx( ctx, in_idx, msg->txn_ctx_offset );
      ulong txn_ctx_cnt = 0UL;
      for( ulong i=0UL; i<ctx->exec_tile_cnt; i++ ) {
        txn_ctx_cnt += fd_ulong_if( ctx->txn_ctx[ i ]!=NULL, 1UL, 0UL );
      }
      if( txn_ctx_cnt==ctx->exec_tile_cnt ) {
        FD_LOG_INFO(( "writer tile %lu fully booted", ctx->tile_idx ));
      }
      break;
    } case FD_WRITER_TXN_SIG: {
      fd_exec_writer_txn_msg_t * msg = &ctx->txn_msg;
      /* Each exec tile publishes only on its own link; a mismatch means
         frag corruption. */
      if( FD_UNLIKELY( msg->exec_tile_id!=in_idx ) ) {
        FD_LOG_CRIT(( "exec_tile_id %u should be == in_idx %lu", msg->exec_tile_id, in_idx ));
      }
      fd_exec_txn_ctx_t * txn_ctx = ctx->txn_ctx[ in_idx ];

      ctx->bank = fd_banks_get_bank_idx( ctx->banks, txn_ctx->bank_idx );
      if( FD_UNLIKELY( !ctx->bank ) ) {
        FD_LOG_CRIT(( "Could not find bank for slot %lu", txn_ctx->slot ));
      }

      /* Re-query the funk txn only when the slot changed since the last
         frag (funk_txn is cached across txns of the same slot).
         NOTE(review): the staleness check compares txn_ctx->slot while
         the lookup xid is built from fd_bank_slot_get( ctx->bank ) —
         presumably these always agree; confirm. */
      if( !ctx->funk_txn || txn_ctx->slot != ctx->funk_txn->xid.ul[0] ) {
        fd_funk_txn_map_t * txn_map = fd_funk_txn_map( ctx->funk );
        if( FD_UNLIKELY( !txn_map->map ) ) {
          FD_LOG_CRIT(( "Could not find valid funk transaction map" ));
        }
        fd_funk_txn_xid_t xid = { .ul = { fd_bank_slot_get( ctx->bank ), fd_bank_slot_get( ctx->bank ) } };
        fd_funk_txn_start_read( ctx->funk );
        ctx->funk_txn = fd_funk_txn_query( &xid, txn_map );
        if( FD_UNLIKELY( !ctx->funk_txn ) ) {
          FD_LOG_CRIT(( "Could not find valid funk transaction" ));
        }
        fd_funk_txn_end_read( ctx->funk );
      }

      /* Point the txn ctx at our local joins of this exec tile's spad. */
      txn_ctx->spad      = ctx->exec_spad[ in_idx ];
      txn_ctx->spad_wksp = ctx->exec_spad_wksp[ in_idx ];

      /* Query the vote signature against the recently generated vote txn
         signatures.  If the query is successful, then we have seen our
         own vote transaction and this should be marked in the bank. */
      uchar * txn_signature = txn_ctx->txn.payload + TXN( &txn_ctx->txn )->signature_off;
      if( fd_vote_tracker_query_sig( ctx->vote_tracker, (fd_signature_t *)txn_signature ) ) {
        /* Atomic because other writer tiles may update the same bank
           field concurrently. */
        FD_ATOMIC_FETCH_AND_ADD( fd_bank_has_identity_vote_modify( ctx->bank ), 1 );
      }

      if( FD_LIKELY( txn_ctx->flags & FD_TXN_P_FLAGS_EXECUTE_SUCCESS ) ) {
        fd_runtime_finalize_txn(
            ctx->funk,
            ctx->funk_txn,
            txn_ctx,
            ctx->bank,
            ctx->capture_ctx );
      } else {
        /* This means that we should mark the block as dead. */
        fd_banks_mark_bank_dead( ctx->banks, ctx->bank );
      }

      /* Notify the replay tile that we are done with this txn.  The
         actual publish is deferred to after_credit so that it is
         ordered after any pending solcap account updates. */
      ctx->txn_finalized_buffer.exec_tile_id = msg->exec_tile_id;
      ctx->pending_txn_finalized_msg = 1;
      break;
    } default:
      FD_LOG_CRIT(( "Unknown sig %lu", sig ));
    }
  } else if( in_idx==ctx->exec_tile_cnt ) {
    /* This means that the send tile has signed and sent a vote.  Add
       this vote to the vote tracker. */
    fd_vote_tracker_insert( ctx->vote_tracker, (fd_signature_t *)ctx->vote_msg );
  } else {
    FD_LOG_CRIT(( "Unknown in_idx %lu", in_idx ));
  }
}
383 :
384 : static void
385 : unprivileged_init( fd_topo_t * topo,
386 0 : fd_topo_tile_t * tile ) {
387 :
388 : /********************************************************************/
389 : /* Validate allocations */
390 : /********************************************************************/
391 :
392 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
393 :
394 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
395 0 : fd_writer_tile_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_writer_tile_ctx_t), sizeof(fd_writer_tile_ctx_t) );
396 0 : void * capture_ctx_mem = FD_SCRATCH_ALLOC_APPEND( l, FD_CAPTURE_CTX_ALIGN, FD_CAPTURE_CTX_FOOTPRINT );
397 0 : void * vote_tracker_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_vote_tracker_align(), fd_vote_tracker_footprint() );
398 0 : ulong scratch_alloc_mem = FD_SCRATCH_ALLOC_FINI( l, scratch_align() );
399 0 : if( FD_UNLIKELY( scratch_alloc_mem - (ulong)scratch - scratch_footprint( tile ) ) ) {
400 0 : FD_LOG_CRIT( ( "scratch_alloc_mem did not match scratch_footprint diff: %lu alloc: %lu footprint: %lu",
401 0 : scratch_alloc_mem - (ulong)scratch - scratch_footprint( tile ),
402 0 : scratch_alloc_mem,
403 0 : (ulong)scratch + scratch_footprint( tile ) ) );
404 0 : }
405 0 : fd_memset( ctx, 0, sizeof(*ctx) );
406 0 : ctx->wksp = topo->workspaces[ topo->objs[ tile->tile_obj_id ].wksp_id ].wksp;
407 :
408 : /********************************************************************/
409 : /* Links */
410 : /********************************************************************/
411 :
412 0 : ctx->tile_cnt = fd_topo_tile_name_cnt( topo, tile->name );
413 0 : ctx->tile_idx = tile->kind_id;
414 :
415 0 : ulong exec_tile_cnt = fd_topo_tile_name_cnt( topo, "exec" );
416 0 : ctx->exec_tile_cnt = exec_tile_cnt;
417 :
418 0 : ulong send_tile_cnt = fd_topo_tile_name_cnt( topo, "send" );
419 :
420 : /* Find and setup all the exec_writer links. */
421 :
422 0 : for( ulong i=0UL; i<exec_tile_cnt; i++ ) {
423 0 : ulong exec_writer_idx = fd_topo_find_tile_in_link( topo, tile, "exec_writer", i );
424 0 : if( FD_UNLIKELY( exec_writer_idx==ULONG_MAX ) ) {
425 0 : FD_LOG_CRIT(( "Could not find exec_writer in-link %lu", i ));
426 0 : }
427 :
428 0 : fd_topo_link_t * exec_writer_in_link = &topo->links[ tile->in_link_id[ i ] ];
429 0 : if( FD_UNLIKELY( !exec_writer_in_link ) ) {
430 0 : FD_LOG_CRIT(( "Invalid exec_writer in-link %lu", i ));
431 0 : }
432 0 : ctx->exec_writer_in[ i ].mem = topo->workspaces[ topo->objs[ exec_writer_in_link->dcache_obj_id ].wksp_id ].wksp;
433 0 : ctx->exec_writer_in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->exec_writer_in[ i ].mem, exec_writer_in_link->dcache );
434 0 : ctx->exec_writer_in[ i ].wmark = fd_dcache_compact_wmark( ctx->exec_writer_in[ i ].mem,
435 0 : exec_writer_in_link->dcache,
436 0 : exec_writer_in_link->mtu );
437 :
438 0 : }
439 :
440 : /* Find and setup the send_writer link. */
441 :
442 0 : if( send_tile_cnt>0UL ) {
443 0 : ulong send_writer_idx = fd_topo_find_tile_in_link( topo, tile, "send_txns", 0UL );
444 0 : if( FD_UNLIKELY( send_writer_idx==ULONG_MAX ) ) {
445 0 : FD_LOG_CRIT(( "Could not find send_writer in-link" ));
446 0 : }
447 0 : fd_topo_link_t * send_writer_in_link = &topo->links[ tile->in_link_id[ send_writer_idx ] ];
448 0 : ctx->send_writer_in[ 0 ].mem = topo->workspaces[ topo->objs[ send_writer_in_link->dcache_obj_id ].wksp_id ].wksp;
449 0 : ctx->send_writer_in[ 0 ].chunk0 = fd_dcache_compact_chunk0( ctx->send_writer_in[ 0 ].mem, send_writer_in_link->dcache );
450 0 : ctx->send_writer_in[ 0 ].wmark = fd_dcache_compact_wmark( ctx->send_writer_in[ 0 ].mem,
451 0 : send_writer_in_link->dcache,
452 0 : send_writer_in_link->mtu );
453 0 : }
454 :
455 : /********************************************************************/
456 : /* Spad */
457 : /********************************************************************/
458 :
459 : /* Join all of the exec spads. */
460 0 : for( ulong i=0UL; i<exec_tile_cnt; i++ ) {
461 0 : ulong exec_spad_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "exec_spad.%lu", i );
462 0 : if( FD_UNLIKELY( exec_spad_obj_id==ULONG_MAX ) ) {
463 0 : FD_LOG_CRIT(( "Could not find topology object for exec_spad.%lu", i ));
464 0 : }
465 :
466 0 : ctx->exec_spad[ i ] = fd_spad_join( fd_topo_obj_laddr( topo, exec_spad_obj_id ) );
467 0 : if( FD_UNLIKELY( !ctx->exec_spad[ i ] ) ) {
468 0 : FD_LOG_CRIT(( "Failed to join exec_spad.%lu", i ));
469 0 : }
470 0 : ctx->exec_spad_wksp[ i ] = fd_wksp_containing( ctx->exec_spad[ i ] );
471 0 : if( FD_UNLIKELY( !ctx->exec_spad_wksp[ i ] ) ) {
472 0 : FD_LOG_CRIT(( "Failed to find wksp for exec_spad.%lu", i ));
473 0 : }
474 0 : }
475 :
476 : /********************************************************************/
477 : /* Funk */
478 : /********************************************************************/
479 :
480 0 : if( FD_UNLIKELY( !fd_funk_join( ctx->funk, fd_topo_obj_laddr( topo, tile->writer.funk_obj_id ) ) ) ) {
481 0 : FD_LOG_ERR(( "Failed to join database cache" ));
482 0 : }
483 :
484 : /********************************************************************/
485 : /* Bank */
486 : /********************************************************************/
487 :
488 0 : ulong banks_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "banks" );
489 0 : if( FD_UNLIKELY( banks_obj_id==ULONG_MAX ) ) {
490 0 : FD_LOG_ERR(( "Could not find topology object for banks" ));
491 0 : }
492 :
493 0 : ctx->banks = fd_banks_join( fd_topo_obj_laddr( topo, banks_obj_id ) );
494 0 : if( FD_UNLIKELY( !ctx->banks ) ) {
495 0 : FD_LOG_ERR(( "Failed to join banks" ));
496 0 : }
497 :
498 : /********************************************************************/
499 : /* Vote tracker */
500 : /********************************************************************/
501 :
502 0 : ctx->vote_tracker = fd_vote_tracker_join( fd_vote_tracker_new( vote_tracker_mem, 0UL ) );
503 0 : if( FD_UNLIKELY( !ctx->vote_tracker ) ) {
504 0 : FD_LOG_ERR(( "Failed to join and create vote tracker object" ));
505 0 : }
506 :
507 : /********************************************************************/
508 : /* Capture ctx */
509 : /********************************************************************/
510 0 : if( strlen( tile->writer.solcap_capture ) ) {
511 0 : ctx->capture_ctx = fd_capture_ctx_new( capture_ctx_mem );
512 0 : ctx->capture_ctx->capture_txns = 0;
513 0 : ctx->capture_ctx->solcap_start_slot = tile->writer.capture_start_slot;
514 0 : ctx->pending_txn_finalized_msg = 0;
515 0 : ctx->account_updates_flushed = 0;
516 0 : ctx->solcap_publish_buffer_ptr = ctx->capture_ctx->account_updates_buffer;
517 0 : }
518 :
519 : /********************************************************************************/
520 : /* writer_replay output link for notifying replay of txn finalization */
521 : /********************************************************************************/
522 :
523 0 : ctx->writer_replay_out->idx = fd_topo_find_tile_out_link( topo, tile, "writ_repl", ctx->tile_idx );
524 0 : if( FD_LIKELY( ctx->writer_replay_out->idx!=ULONG_MAX ) ) {
525 0 : fd_topo_link_t * writer_replay_link = &topo->links[ tile->out_link_id[ ctx->writer_replay_out->idx ] ];
526 0 : ctx->writer_replay_out->mcache = writer_replay_link->mcache;
527 0 : ctx->writer_replay_out->sync = fd_mcache_seq_laddr( ctx->writer_replay_out->mcache );
528 0 : ctx->writer_replay_out->depth = fd_mcache_depth( ctx->writer_replay_out->mcache );
529 0 : ctx->writer_replay_out->seq = fd_mcache_seq_query( ctx->writer_replay_out->sync );
530 0 : ctx->writer_replay_out->mem = topo->workspaces[ topo->objs[ writer_replay_link->dcache_obj_id ].wksp_id ].wksp;
531 0 : ctx->writer_replay_out->chunk0 = fd_dcache_compact_chunk0( ctx->writer_replay_out->mem, writer_replay_link->dcache );
532 0 : ctx->writer_replay_out->wmark = fd_dcache_compact_wmark( ctx->writer_replay_out->mem, writer_replay_link->dcache, writer_replay_link->mtu );
533 0 : ctx->writer_replay_out->chunk = ctx->writer_replay_out->chunk0;
534 0 : }
535 :
536 : /********************************************************************************/
537 : /* capture_replay output link for notifying replay's solcap of account updates */
538 : /********************************************************************************/
539 :
540 0 : ctx->capture_replay_out->idx = fd_topo_find_tile_out_link( topo, tile, "capt_replay", ctx->tile_idx );
541 0 : if( FD_UNLIKELY( ctx->capture_replay_out->idx!=ULONG_MAX ) ) {
542 0 : fd_topo_link_t * capture_replay_link = &topo->links[ tile->out_link_id[ ctx->capture_replay_out->idx ] ];
543 0 : ctx->capture_replay_out->mcache = capture_replay_link->mcache;
544 0 : ctx->capture_replay_out->sync = fd_mcache_seq_laddr( ctx->capture_replay_out->mcache );
545 0 : ctx->capture_replay_out->depth = fd_mcache_depth( ctx->capture_replay_out->mcache );
546 0 : ctx->capture_replay_out->seq = fd_mcache_seq_query( ctx->capture_replay_out->sync );
547 0 : ctx->capture_replay_out->mem = topo->workspaces[ topo->objs[ capture_replay_link->dcache_obj_id ].wksp_id ].wksp;
548 0 : ctx->capture_replay_out->chunk0 = fd_dcache_compact_chunk0( ctx->capture_replay_out->mem, capture_replay_link->dcache );
549 0 : ctx->capture_replay_out->wmark = fd_dcache_compact_wmark( ctx->capture_replay_out->mem, capture_replay_link->dcache, capture_replay_link->mtu );
550 0 : ctx->capture_replay_out->chunk = ctx->capture_replay_out->chunk0;
551 0 : }
552 0 : }
553 :
554 : static ulong
555 : populate_allowed_seccomp( fd_topo_t const * topo,
556 : fd_topo_tile_t const * tile,
557 : ulong out_cnt,
558 0 : struct sock_filter * out ) {
559 0 : (void)topo;
560 0 : (void)tile;
561 :
562 0 : populate_sock_filter_policy_fd_writer_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
563 0 : return sock_filter_policy_fd_writer_tile_instr_cnt;
564 0 : }
565 :
566 : static ulong
567 : populate_allowed_fds( fd_topo_t const * topo,
568 : fd_topo_tile_t const * tile,
569 : ulong out_fds_cnt,
570 0 : int * out_fds ) {
571 0 : (void)topo;
572 0 : (void)tile;
573 :
574 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
575 :
576 0 : ulong out_cnt = 0UL;
577 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
578 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
579 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
580 0 : return out_cnt;
581 0 : }
582 :
/* STEM_BURST: at most one frag is published per iteration (either a
   solcap account update or a txn-finalized message, never both). */
#define STEM_BURST (1UL)

/* STEM_LAZY is calculated as cr_max/(frag production rate * 1.5).  We
   have cr_max ~ 16K and frag production rate ~ 1M/s.  In reality, we
   probably need more than one writer tile to get to 1M TPS, so we
   forget about the 1.5 factor.  That gives O(10^7 ns). */
#define STEM_LAZY ((long)1e7)

#define STEM_CALLBACK_CONTEXT_TYPE  fd_writer_tile_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_writer_tile_ctx_t)

/* Wire our callbacks into the generic stem run loop. */
#define STEM_CALLBACK_AFTER_CREDIT after_credit
#define STEM_CALLBACK_BEFORE_FRAG  before_frag
#define STEM_CALLBACK_DURING_FRAG  during_frag
#define STEM_CALLBACK_AFTER_FRAG   after_frag

#include "../../disco/stem/fd_stem.c"
600 :
/* fd_tile_writer is the topology descriptor exported by this file; the
   launcher uses it to allocate scratch, sandbox, and run the tile. */
fd_topo_run_tile_t fd_tile_writer = {
  .name                     = "writer",
  .loose_footprint          = 0UL, /* no loose (non-scratch) workspace usage */
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds     = populate_allowed_fds,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};
|