Line data Source code
1 : /* Vinyl database server (Firedancer adaptation)
2 :
3 : This implementation is a fork of src/vinyl/fd_vinyl_exec with some
4 : Firedancer-specific changes:
5 : - All clients are joined on startup
6 : - Some errors (invalid link_id, invalid comp_gaddr) result in hard
7 : crashes instead of silent drops
8 : - Sandboxing */
9 :
10 : #include "../../disco/topo/fd_topo.h"
11 : #include "../../disco/metrics/fd_metrics.h"
12 : #include "../../vinyl/fd_vinyl.h"
13 : #include "../../vinyl/fd_vinyl_base.h"
14 : #include "../../vinyl/io/fd_vinyl_io_ur.h"
15 : #include "../../util/pod/fd_pod_format.h"
16 : #include "generated/fd_accdb_tile_seccomp.h"
17 :
18 : #include <errno.h>
19 : #include <fcntl.h>
20 : #include <lz4.h>
21 : #if FD_HAS_LIBURING
22 : #include <liburing.h>
23 : #endif
24 :
25 : #define NAME "accdb"
26 : #define MAX_INS 8
27 :
28 0 : #define IO_SPAD_MAX (32UL<<20)
29 :
30 : #define FD_VINYL_CLIENT_MAX (1024UL)
31 0 : #define FD_VINYL_REQ_MAX (1024UL)
32 :
33 : struct fd_vinyl_client {
34 : fd_vinyl_rq_t * rq; /* Channel for requests from this client (could be shared by multiple vinyl instances) */
35 : fd_vinyl_cq_t * cq; /* Channel for completions from this client to this vinyl instance
36 : (could be shared by multiple receivers of completions from this vinyl instance). */
37 : ulong burst_max; /* Max requests receive from this client at a time */
38 : ulong seq; /* Sequence number of the next request to receive in the rq */
39 : ulong link_id; /* Identifies requests from this client to this vinyl instance in the rq */
40 : ulong laddr0; /* A valid non-zero gaddr from this client maps to the vinyl instance's laddr laddr0 + gaddr ... */
41 : ulong laddr1; /* ... and thus is in (laddr0,laddr1). A zero gaddr maps to laddr NULL. */
42 : ulong quota_rem; /* Num of remaining acquisitions this client is allowed on this vinyl instance */
43 : ulong quota_max; /* Max quota */
44 : };
45 :
46 : typedef struct fd_vinyl_client fd_vinyl_client_t;
47 :
48 : /* MAP_REQ_GADDR maps a request global address req_gaddr to an array of
49 : cnt T's into the local address space as a T * pointer. If the result
50 : is not properly aligned or the entire range does not completely fall
51 : within the shared region with the client, returns NULL. Likewise,
52 : gaadr 0 maps to NULL. Assumes sizeof(T)*(n) does not overflow (which
53 : is true where as n is at most batch_cnt which is at most 2^32 and
54 : sizeof(T) is at most 40. */
55 :
56 0 : #define MAP_REQ_GADDR( gaddr, T, n ) ((T *)fd_vinyl_laddr( (gaddr), alignof(T), sizeof(T)*(n), client_laddr0, client_laddr1 ))
57 :
58 : FD_FN_CONST static inline void *
59 : fd_vinyl_laddr( ulong req_gaddr,
60 : ulong align,
61 : ulong footprint,
62 : ulong client_laddr0,
63 0 : ulong client_laddr1 ) {
64 0 : ulong req_laddr0 = client_laddr0 + req_gaddr;
65 0 : ulong req_laddr1 = req_laddr0 + footprint;
66 0 : return (void *)fd_ulong_if( (!!req_gaddr) & fd_ulong_is_aligned( req_laddr0, align ) &
67 0 : (client_laddr0<=req_laddr0) & (req_laddr0<=req_laddr1) & (req_laddr1<=client_laddr1),
68 0 : req_laddr0, 0UL );
69 0 : }
70 :
/* fd_vinyl_tile is the complete state of one accdb tile: the vinyl
   instance it serves, its I/O backend, the set of joined clients and
   the ring of received-but-unexecuted requests. */

struct fd_vinyl_tile {

  /* Vinyl objects */

  fd_vinyl_t vinyl[1]; /* The vinyl instance served by this tile */
  void * io_mem;       /* Memory backing the vinyl I/O backend, carved from tile scratch in privileged_init */

  /* Tile architecture */

  uint booted : 1;   /* 0 until the snapshot load completes and vinyl->io is initialized (see during_housekeeping) */
  uint shutdown : 1; /* Non-zero once a shutdown has been requested */
  ulong volatile const * snapct_state;    /* snapct tile's STATE gauge, polled while waiting to boot */
  ulong volatile const * snapwm_pair_cnt; /* snapwm tile's ACCOUNTS_ACTIVE gauge, read once at boot */

  /* I/O */

  int bstream_fd;           /* fd of the bstream device/file, opened O_RDWR|O_CLOEXEC in privileged_init */
  struct io_uring * ring;   /* Points at _ring below when io_uring I/O is configured, NULL otherwise */

  /* Clients */

  fd_vinyl_client_t _client[ FD_VINYL_CLIENT_MAX ]; /* Joined clients; [0,client_cnt) are valid */
  ulong client_cnt; /* Number of joined clients */
  ulong client_idx; /* Client most recently polled (clients are polled round robin) */

  /* Received requests (ring buffer indexed mod FD_VINYL_REQ_MAX) */

  fd_vinyl_req_t _req[ FD_VINYL_REQ_MAX ];
  ulong req_head; /* Requests [0,req_head) have been processed */
  ulong req_tail; /* Requests [req_head,req_tail) are pending */
  /* Requests [req_tail,ULONG_MAX) have not been received */
  ulong exec_max; /* Max requests to execute per run loop iteration */

  /* accum_dead_cnt is the number of dead blocks that have been
     written since the last partition block.

     accum_garbage_cnt / sz is the number of items / bytes garbage in
     the bstream that have accumulated since the last time we compacted
     the bstream.  We use this to estimate the number of rounds of
     compaction to do in async handling.

     NOTE(review): an earlier revision of this comment also described an
     accum_move_cnt field which does not exist below -- presumably
     removed; confirm and prune. */

  ulong accum_dead_cnt;
  ulong accum_garbage_cnt;
  ulong accum_garbage_sz;

  /* Run loop state */

  ulong seq_part; /* Bstream seq just past the most recently appended partition block */

  /* Periodic syncing */

  long sync_next_ns; /* Wallclock deadline of the next bstream sync (advanced by 30s per sync) */

  /* Place optional/external data structures last so the above struct
     offsets are reasonably stable across builds */
# if FD_HAS_LIBURING
  struct io_uring _ring[1]; /* Storage for the io_uring instance (ring points here when in use) */
# endif

};

typedef struct fd_vinyl_tile fd_vinyl_tile_t;
136 :
137 : #if FD_HAS_LIBURING
138 :
139 : static struct io_uring_params *
140 : vinyl_io_uring_params( struct io_uring_params * params,
141 : uint uring_depth ) {
142 : memset( params, 0, sizeof(struct io_uring_params) );
143 : params->flags |= IORING_SETUP_CQSIZE;
144 : params->cq_entries = uring_depth;
145 : params->flags |= IORING_SETUP_COOP_TASKRUN;
146 : params->flags |= IORING_SETUP_SINGLE_ISSUER;
147 : params->flags |= IORING_SETUP_R_DISABLED;
148 : params->features |= IORING_SETUP_DEFER_TASKRUN;
149 : return params;
150 : }
151 :
152 : #endif
153 :
154 : /* Vinyl state object */
155 :
156 : static ulong
157 0 : scratch_align( void ) {
158 0 : return FD_SHMEM_HUGE_PAGE_SZ;
159 0 : }
160 :
161 : static ulong
162 0 : scratch_footprint( fd_topo_tile_t const * tile ) {
163 0 : (void)tile;
164 0 : ulong l = FD_LAYOUT_INIT;
165 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_vinyl_tile_t), sizeof(fd_vinyl_tile_t) );
166 0 : if( tile->accdb.io_type==FD_VINYL_IO_TYPE_UR ) {
167 : # if FD_HAS_LIBURING
168 : l = FD_LAYOUT_APPEND( l, fd_vinyl_io_ur_align(), fd_vinyl_io_ur_footprint( IO_SPAD_MAX ) );
169 : # endif
170 0 : } else {
171 0 : l = FD_LAYOUT_APPEND( l, fd_vinyl_io_bd_align(), fd_vinyl_io_bd_footprint( IO_SPAD_MAX ) );
172 0 : }
173 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_vinyl_line_t), sizeof(fd_vinyl_line_t)*tile->accdb.line_max );
174 0 : return FD_LAYOUT_FINI( l, scratch_align() );
175 0 : }
176 :
177 : #if FD_HAS_LIBURING
178 :
/* vinyl_io_uring_init creates and configures the tile's io_uring
   instance of depth uring_depth against the bstream device dev_fd.
   The sequence matters: the ring is created disabled
   (IORING_SETUP_R_DISABLED via vinyl_io_uring_params), dev_fd is
   registered as fixed file index 0, the op/flag restrictions are
   installed (only possible while the ring is disabled), and only then
   are the rings enabled.  Any failure is fatal (FD_LOG_ERR). */

static void
vinyl_io_uring_init( fd_vinyl_tile_t * ctx,
                     uint uring_depth,
                     int dev_fd ) {
  ctx->ring = ctx->_ring;

  /* Setup io_uring instance (created disabled so restrictions below
     can be registered) */
  struct io_uring_params params[1];
  vinyl_io_uring_params( params, uring_depth );
  int init_err = io_uring_queue_init_params( uring_depth, ctx->ring, params );
  if( FD_UNLIKELY( init_err<0 ) ) FD_LOG_ERR(( "io_uring_queue_init_params failed (%i-%s)", init_err, fd_io_strerror( -init_err ) ));

  /* Setup io_uring file access: dev_fd becomes fixed file index 0 */
  FD_TEST( 0==io_uring_register_files( ctx->ring, &dev_fd, 1 ) );

  /* Register restrictions: only IORING_OP_READ, only on fixed files,
     and at most the IO_LINK / CQE_SKIP_SUCCESS sqe flags */
  struct io_uring_restriction res[3] = {
    { .opcode = IORING_RESTRICTION_SQE_OP,
      .sqe_op = IORING_OP_READ },
    { .opcode = IORING_RESTRICTION_SQE_FLAGS_REQUIRED,
      .sqe_flags = IOSQE_FIXED_FILE },
    { .opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED,
      .sqe_flags = IOSQE_IO_LINK | IOSQE_CQE_SKIP_SUCCESS }
  };
  int res_err = io_uring_register_restrictions( ctx->ring, res, 3U );
  if( FD_UNLIKELY( res_err<0 ) ) FD_LOG_ERR(( "io_uring_register_restrictions failed (%i-%s)", res_err, fd_io_strerror( -res_err ) ));

  /* Enable rings (submissions allowed from here on) */
  int enable_err = io_uring_enable_rings( ctx->ring );
  if( FD_UNLIKELY( enable_err<0 ) ) FD_LOG_ERR(( "io_uring_enable_rings failed (%i-%s)", enable_err, fd_io_strerror( -enable_err ) ));
}
210 :
211 : #else /* no io_uring */
212 :
213 : static void
214 : vinyl_io_uring_init( fd_vinyl_tile_t * ctx,
215 : uint uring_depth,
216 0 : int dev_fd ) {
217 0 : (void)ctx; (void)uring_depth; (void)dev_fd;
218 0 : FD_LOG_ERR(( "Sorry, this build does not support io_uring" ));
219 0 : }
220 :
221 : #endif
222 :
223 : static ulong
224 : populate_allowed_fds( fd_topo_t const * topo,
225 : fd_topo_tile_t const * tile,
226 : ulong out_fds_cnt,
227 0 : int * out_fds ) {
228 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
229 :
230 0 : ulong out_cnt = 0;
231 0 : out_fds[ out_cnt++ ] = 2UL; /* stderr */
232 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) ) {
233 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
234 0 : }
235 :
236 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
237 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
238 0 : fd_vinyl_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_vinyl_tile_t), sizeof(fd_vinyl_tile_t) );
239 :
240 0 : out_fds[ out_cnt++ ] = ctx->bstream_fd;
241 :
242 : #if FD_HAS_LIBURING
243 : if( FD_LIKELY( !!ctx->ring ) ) out_fds[ out_cnt++ ] = ctx->ring->ring_fd;
244 : #endif
245 :
246 0 : return out_cnt;
247 0 : }
248 :
249 : static ulong
250 : populate_allowed_seccomp( fd_topo_t const * topo,
251 : fd_topo_tile_t const * tile,
252 : ulong out_cnt,
253 0 : struct sock_filter * out ) {
254 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
255 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
256 0 : fd_vinyl_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_vinyl_tile_t), sizeof(fd_vinyl_tile_t) );
257 :
258 0 : int ring_fd = INT_MAX;
259 : #if FD_HAS_LIBURING
260 : if( FD_LIKELY( !!ctx->ring ) ) ring_fd = ctx->ring->ring_fd;
261 : #endif
262 0 : populate_sock_filter_policy_fd_accdb_tile( out_cnt, out, (uint)fd_log_private_logfile_fd(), (uint)ctx->bstream_fd, (uint)ring_fd );
263 0 : return sock_filter_policy_fd_accdb_tile_instr_cnt;
264 0 : }
265 :
/* privileged_init runs before the tile is sandboxed.  It validates the
   line cache size, lays out the scratch region (layout must match
   scratch_footprint), opens the bstream file and, if configured, sets
   up the io_uring instance -- all operations that cannot be performed
   once the seccomp policy is applied. */

static void
privileged_init( fd_topo_t *      topo,
                 fd_topo_tile_t * tile ) {
  /* Reject a zero line_max and multiplication overflow when computing
     the line cache footprint */
  ulong line_footprint;
  if( FD_UNLIKELY( !tile->accdb.line_max || __builtin_umull_overflow( tile->accdb.line_max, sizeof(fd_vinyl_line_t), &line_footprint ) ) ) {
    FD_LOG_ERR(( "invalid vinyl_line_max %lu", tile->accdb.line_max ));
  }

  /* Carve the tile context, I/O backend region and line cache out of
     the tile's scratch region */
  void * tile_mem = fd_topo_obj_laddr( topo, tile->tile_obj_id );
  FD_SCRATCH_ALLOC_INIT( l, tile_mem );
  fd_vinyl_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_vinyl_tile_t), sizeof(fd_vinyl_tile_t) );
  fd_vinyl_t * vinyl = ctx->vinyl;
  void * _io = NULL;
  if( tile->accdb.io_type==FD_VINYL_IO_TYPE_UR ) {
# if FD_HAS_LIBURING
    _io = FD_SCRATCH_ALLOC_APPEND( l, fd_vinyl_io_ur_align(), fd_vinyl_io_ur_footprint( IO_SPAD_MAX ) );
# endif
  } else {
    _io = FD_SCRATCH_ALLOC_APPEND( l, fd_vinyl_io_bd_align(), fd_vinyl_io_bd_footprint( IO_SPAD_MAX ) );
  }
  fd_vinyl_line_t * _line = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_vinyl_line_t), line_footprint );
  ulong _end = FD_SCRATCH_ALLOC_FINI( l, scratch_align() );
  /* Verify the layout agrees byte-for-byte with scratch_footprint */
  FD_TEST( (ulong)tile_mem==(ulong)ctx );
  FD_TEST( (ulong)_end-(ulong)tile_mem==scratch_footprint( tile ) );

  memset( ctx, 0, sizeof(fd_vinyl_tile_t) );

  ctx->io_mem = _io;

  vinyl->cnc  = NULL;
  vinyl->io   = NULL; /* initialized at boot time in during_housekeeping */
  vinyl->line = (fd_vinyl_line_t *)_line;
  vinyl->line_footprint = line_footprint;

  /* FIXME use O_DIRECT? */
  int dev_fd = open( tile->accdb.bstream_path, O_RDWR|O_CLOEXEC );
  if( FD_UNLIKELY( dev_fd<0 ) ) FD_LOG_ERR(( "open(%s,O_RDWR|O_CLOEXEC) failed (%i-%s)", tile->accdb.bstream_path, errno, fd_io_strerror( errno ) ));

  ctx->bstream_fd = dev_fd;

  /* io_uring setup must happen here (needs privileges not available
     after sandboxing) */
  int io_type = tile->accdb.io_type;
  if( io_type==FD_VINYL_IO_TYPE_UR ) {
    vinyl_io_uring_init( ctx, tile->accdb.uring_depth, dev_fd );
  } else if( io_type!=FD_VINYL_IO_TYPE_BD ) {
    FD_LOG_ERR(( "Unsupported vinyl io_type %d", io_type ));
  }
}
313 :
/* unprivileged_init runs after sandboxing.  It wires the vinyl
   instance to its topology objects (meta map, meta pool, data region),
   initializes the line cache, locates the snapct/snapwm metric gauges
   used during boot, and joins every configured vinyl client up front
   (clients are all joined on startup in this Firedancer adaptation). */

static void
unprivileged_init( fd_topo_t *      topo,
                   fd_topo_tile_t * tile ) {

  fd_vinyl_tile_t * ctx   = fd_topo_obj_laddr( topo, tile->tile_obj_id );
  fd_vinyl_t *      vinyl = ctx->vinyl;

  ctx->sync_next_ns = fd_log_wallclock();

  void * _meta = fd_topo_obj_laddr( topo, tile->accdb.meta_map_obj_id );
  void * _ele  = fd_topo_obj_laddr( topo, tile->accdb.meta_pool_obj_id );
  void * _obj  = fd_topo_obj_laddr( topo, tile->accdb.data_obj_id );

# define TEST( c ) do { if( FD_UNLIKELY( !(c) ) ) { FD_LOG_ERR(( "FAIL: %s", #c )); } } while(0)

  vinyl->cnc_footprint  = 0UL;
  vinyl->meta_footprint = topo->objs[ tile->accdb.meta_map_obj_id ].footprint;
  vinyl->ele_footprint  = topo->objs[ tile->accdb.meta_pool_obj_id ].footprint;
  vinyl->obj_footprint  = topo->objs[ tile->accdb.data_obj_id ].footprint;

  void * obj_laddr0  = fd_wksp_containing( _obj );
  ulong  part_thresh = 64UL<<20;
  ulong  gc_thresh   = 128UL<<20;
  int    gc_eager    = 2;

  /* Derive capacities from the object footprints: ele_max is the
     largest power of two of elements that fit, pair_max leaves one
     element of slack, and the line cache is capped at pair_max */
  ulong ele_max = fd_ulong_pow2_dn( vinyl->ele_footprint / sizeof( fd_vinyl_meta_ele_t ) );

  ulong pair_max = ele_max - 1UL;
  ulong line_cnt = fd_ulong_min( vinyl->line_footprint / sizeof( fd_vinyl_line_t ), pair_max );

  TEST( (3UL<=line_cnt) & (line_cnt<=FD_VINYL_LINE_MAX) );

  /* seed is arb */

  /* part_thresh is arb */

  /* gc_thresh is arb */

  TEST( (-1<=gc_eager) & (gc_eager<=63) );

  vinyl->line_cnt = line_cnt;
  vinyl->pair_max = pair_max;

  vinyl->part_thresh  = part_thresh;
  vinyl->gc_thresh    = gc_thresh;
  vinyl->gc_eager     = gc_eager;
  vinyl->style        = FD_VINYL_BSTREAM_CTL_STYLE_RAW;
  vinyl->line_idx_lru = 0U;
  vinyl->pair_cnt     = 0UL;
  vinyl->garbage_sz   = 0UL;

  TEST( fd_vinyl_meta_join( vinyl->meta, _meta, _ele )==vinyl->meta );

  TEST( fd_vinyl_data_init( vinyl->data, _obj, vinyl->obj_footprint, obj_laddr0 )==vinyl->data );
  fd_vinyl_data_reset( NULL, 0UL, 0UL, 0, vinyl->data );

  /* Initialize every cache line as empty and thread the circular
     LRU list (older/newer links wrap around) */
  fd_vinyl_line_t * line = vinyl->line;
  for( ulong line_idx=0UL; line_idx<line_cnt; line_idx++ ) {
    line[ line_idx ].obj            = NULL;
    line[ line_idx ].ele_idx        = ULONG_MAX;
    line[ line_idx ].ctl            = fd_vinyl_line_ctl( 0UL, 0L);
    line[ line_idx ].line_idx_older = (uint)fd_ulong_if( line_idx!=0UL, line_idx-1UL, line_cnt-1UL );
    line[ line_idx ].line_idx_newer = (uint)fd_ulong_if( line_idx!=line_cnt-1UL, line_idx+1UL, 0UL );
  }

# undef TEST

  /* Find snapct tile status (polled in during_housekeeping until the
     snapshot load completes) */
  ulong snapct_tile_idx = fd_topo_find_tile( topo, "snapct", 0UL );
  FD_TEST( snapct_tile_idx!=ULONG_MAX );
  fd_topo_tile_t const * snapct_tile = &topo->tiles[ snapct_tile_idx ];
  FD_TEST( snapct_tile->metrics );
  ctx->snapct_state = &fd_metrics_tile( snapct_tile->metrics )[ MIDX( GAUGE, SNAPCT, STATE ) ];

  /* Find snapwm pair_cnt (read once at boot to seed vinyl->pair_cnt) */
  ulong snapwm_tile_idx = fd_topo_find_tile( topo, "snapwm", 0UL );
  FD_TEST( snapwm_tile_idx!=ULONG_MAX );
  fd_topo_tile_t const * snapwm_tile = &topo->tiles[ snapwm_tile_idx ];
  FD_TEST( snapwm_tile->metrics );
  ctx->snapwm_pair_cnt = &fd_metrics_tile( snapwm_tile->metrics )[ MIDX( GAUGE, SNAPWM, ACCOUNTS_ACTIVE ) ];

  /* Discover mapped clients: every "vinyl_rq" object this tile uses
     describes one client (rq, cq, link_id, quota) */

  ulong burst_free = FD_VINYL_REQ_MAX;            /* request ring slots not yet reserved */
  ulong quota_free = vinyl->line_cnt - 1UL;       /* acquire quota not yet handed out */
  ctx->exec_max = 0UL;

  for( ulong i=0UL; i<(tile->uses_obj_cnt); i++ ) {

    ulong rq_obj_id = tile->uses_obj_id[ i ];
    fd_topo_obj_t const * rq_obj = &topo->objs[ rq_obj_id ];
    if( strcmp( rq_obj->name, "vinyl_rq" ) ) continue;

    if( FD_UNLIKELY( ctx->client_cnt>=FD_VINYL_CLIENT_MAX ) ) {
      FD_LOG_ERR(( "too many vinyl clients (increase FD_VINYL_CLIENT_MAX)" ));
    }

    /* burst_max is currently fixed at 1 request per client per poll */
    ulong burst_max       = 1UL;
    ulong link_id         = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "obj.%lu.link_id", rq_obj_id );
    ulong quota_max       = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "obj.%lu.quota_max", rq_obj_id );
    ulong req_pool_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "obj.%lu.req_pool_obj_id", rq_obj_id );
    ulong cq_obj_id       = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "obj.%lu.cq_obj_id", rq_obj_id );
    FD_TEST( link_id        !=ULONG_MAX );
    FD_TEST( quota_max      !=ULONG_MAX );
    FD_TEST( req_pool_obj_id!=ULONG_MAX );
    FD_TEST( cq_obj_id      !=ULONG_MAX );

    if( FD_UNLIKELY( burst_max > burst_free ) ) {
      FD_LOG_ERR(( "too large burst_max (increase FD_VINYL_REQ_MAX or decrease burst_max)" ));
    }

    if( FD_UNLIKELY( quota_max > fd_ulong_min( quota_free, FD_VINYL_COMP_QUOTA_MAX ) ) ) {
      FD_LOG_ERR(( "too large quota_max (increase line_cnt (currently %lu, free %lu) or decrease quota_max (currently %lu))",
                   vinyl->line_cnt, quota_free, quota_max ));
    }

    /* link_ids must be unique across clients */
    for( ulong client_idx=0UL; client_idx<ctx->client_cnt; client_idx++ ) {
      if( FD_UNLIKELY( ctx->_client[ client_idx ].link_id==link_id ) ) {
        FD_LOG_ERR(( "client already joined with this link_id (%lu)", link_id ));
      }
    }

    /* The client's request pool workspace bounds the gaddr range this
       client may reference (see fd_vinyl_laddr) */
    fd_topo_obj_t const *  req_pool_obj = &topo->objs[ req_pool_obj_id ];
    fd_topo_wksp_t const * client_wksp  = &topo->workspaces[ req_pool_obj->wksp_id ];

    fd_vinyl_rq_t * rq; FD_TEST( (rq = fd_vinyl_rq_join( fd_topo_obj_laddr( topo, rq_obj_id ) )) );
    fd_vinyl_cq_t * cq; FD_TEST( (cq = fd_vinyl_cq_join( fd_topo_obj_laddr( topo, cq_obj_id ) )) );

    fd_shmem_join_info_t join_info;
    FD_TEST( fd_shmem_join_query_by_join( client_wksp->wksp, &join_info)==0 );
    FD_LOG_INFO(( "registered client %lu: req_gaddr=%s:%lu cq_gaddr=%s:%lu",
                  ctx->client_cnt,
                  fd_wksp_containing( rq )->name, fd_wksp_gaddr_fast( fd_wksp_containing( rq ), rq ),
                  fd_wksp_containing( cq )->name, fd_wksp_gaddr_fast( fd_wksp_containing( cq ), cq ) ));
    ctx->_client[ ctx->client_cnt ] = (fd_vinyl_client_t) {
      .rq        = rq,
      .cq        = cq,
      .burst_max = 1UL,
      .seq       = 0UL,
      .link_id   = link_id,
      .laddr0    = (ulong)join_info.shmem,
      .laddr1    = (ulong)join_info.shmem + join_info.page_cnt*join_info.page_sz,
      .quota_rem = quota_max,
      .quota_max = quota_max
    };
    ctx->client_cnt++;

    quota_free -= quota_max;
    burst_free -= burst_max;

    /* Every client_cnt run loop iterations we receive at most:

         sum_clients recv_max = FD_VINYL_REQ_MAX - burst_free

       requests.  To guarantee we process requests fast enough
       that we never overrun our receive queue, under maximum
       client load, we need to process:

         sum_clients recv_max / client_cnt

       requests per run loop iteration.  We thus set exec_max
       to the ceil sum_clients recv_max / client_cnt. */

    ctx->exec_max = (FD_VINYL_REQ_MAX - burst_free + ctx->client_cnt - 1UL) / ctx->client_cnt;

  } /* client join loop */

}
482 :
483 : /* during_housekeeping is called periodically (approx every STEM_LAZY ns) */
484 :
/* during_housekeeping handles the tile's async work: boot-time
   initialization of the vinyl I/O backend (once the snapct tile
   reports the snapshot load is done), periodic appending of parallel
   recovery partition blocks, background compaction (garbage
   collection) and a periodic bstream sync. */

static void
during_housekeeping( fd_vinyl_tile_t * ctx ) {

  fd_vinyl_t * vinyl = ctx->vinyl;

  if( FD_UNLIKELY( !ctx->booted ) ) {
    /* Wait for the snapshot loader to finish.
       NOTE(review): 16UL is the snapct "done" state -- keep in sync
       with the snapct tile's state enum. */
    ulong const snapct_state = FD_VOLATILE_CONST( *ctx->snapct_state );
    if( snapct_state!=16UL ) {
      fd_log_sleep( 1e6 ); /* 1 ms */
      return;
    }

    /* Initialize the I/O backend selected in privileged_init */
    if( ctx->ring ) {
      vinyl->io = fd_vinyl_io_ur_init( ctx->io_mem, IO_SPAD_MAX, ctx->bstream_fd, ctx->ring );
      if( FD_UNLIKELY( !vinyl->io ) ) FD_LOG_ERR(( "Failed to initialize io_uring I/O backend for account database" ));
    } else {
      vinyl->io = fd_vinyl_io_bd_init( ctx->io_mem, IO_SPAD_MAX, ctx->bstream_fd, 0, NULL, 0UL, 0UL );
      if( FD_UNLIKELY( !vinyl->io ) ) FD_LOG_ERR(( "Failed to initialize blocking I/O backend for account database" ));
    }
    /* Seed pair_cnt from the snapshot writer's account count */
    vinyl->pair_cnt = FD_VOLATILE_CONST( *ctx->snapwm_pair_cnt );

    /* Once snapct tile exits, boot up vinyl */
    FD_LOG_INFO(( "vinyl server starting with %lu active records", vinyl->pair_cnt ));

    ctx->booted = 1;
  }

  /* If we've written enough to justify appending a parallel
     recovery partition, append one. */

  ulong seq_future = fd_vinyl_io_seq_future( vinyl->io );
  if( FD_UNLIKELY( (seq_future - ctx->seq_part) > vinyl->part_thresh ) ) {

    ulong seq = fd_vinyl_io_append_part( vinyl->io, ctx->seq_part, ctx->accum_dead_cnt, 0UL, NULL, 0UL );
    FD_CRIT( fd_vinyl_seq_eq( seq, seq_future ), "corruption detected" );
    ctx->seq_part = seq + FD_VINYL_BSTREAM_BLOCK_SZ;

    ctx->accum_dead_cnt = 0UL;

    /* The partition block itself will eventually be garbage */
    ctx->accum_garbage_cnt++;
    ctx->accum_garbage_sz += FD_VINYL_BSTREAM_BLOCK_SZ;

    fd_vinyl_io_commit( vinyl->io, FD_VINYL_IO_FLAG_BLOCKING );
    FD_MCNT_INC( ACCDB, BLOCKS_PART, 1UL );

  }

  /* Let the number of items of garbage generated since the last
     compaction be accum_garbage_cnt and let the steady state
     average number of live / garbage items in the bstream's past be
     L / G (i.e. L is the average value of pair_cnt).  The average
     number pieces of garbage collected per garbage collection round
     is thus G / (L + G).  If we do compact_max rounds garbage
     collection this async handling, we expect to collect

       compact_max G / (L + G)

     items of garbage on average.  To make sure we collect garbage
     faster than we generate it on average, we then require:

       accum_garbage_cnt <~ compact_max G / (L + G)
       -> compact_max >~ (L + G) accum_garbage_cnt / G

     Let 2^-gc_eager be the maximum fraction of items in the
     bstream's past we are willing to tolerate as garbage on average.
     We then have G = 2^-gc_eager (L + G).  This implies:

       -> compact_max >~ accum_garbage_cnt 2^gc_eager

     When accum_garbage_cnt is 0, we use a compact_max of 1 to do
     compaction rounds at a minimum rate all the time.  This allows
     transients (e.g. a sudden change to new steady state
     equilibrium, temporary disabling of garbage collection at key
     times for highest performance, etc) and unaccounted zero
     padding garbage to be absorbed when nothing else is going on. */

  int gc_eager = vinyl->gc_eager;
  if( FD_LIKELY( gc_eager>=0 ) ) {

    /* Saturating wide left shift */
    ulong overflow = (ctx->accum_garbage_cnt >> (63-gc_eager) >> 1); /* sigh ... avoid wide shift UB */
    ulong compact_max = fd_ulong_max( fd_ulong_if( !overflow, ctx->accum_garbage_cnt << gc_eager, ULONG_MAX ), 1UL );

    /* Fold the accumulated garbage into the instance totals */
    /**/ ctx->accum_garbage_cnt = 0UL;
    vinyl->garbage_sz += ctx->accum_garbage_sz; ctx->accum_garbage_sz = 0UL;

    ulong garbage_pre = vinyl->garbage_sz;
    fd_vinyl_compact( vinyl, compact_max );
    FD_MCNT_INC( ACCDB, CUM_GC_BYTES, garbage_pre - vinyl->garbage_sz );

  }

  /* Update vinyl sync block
     (Required to reclaim bstream space freed by compaction) */

  long now = fd_log_wallclock();
  if( now >= ctx->sync_next_ns ) {
    ctx->sync_next_ns = now + (long)30e9; /* every 30 seconds */
    fd_vinyl_io_sync( vinyl->io, FD_VINYL_IO_FLAG_BLOCKING );
  }

}
587 :
588 : /* If should_shutdown returns non-zero, the vinyl tile is shut down */
589 :
590 : static int
591 0 : should_shutdown( fd_vinyl_tile_t * ctx ) {
592 0 : if( FD_UNLIKELY( !ctx->booted ) ) return 0;
593 0 : if( FD_LIKELY( !ctx->shutdown ) ) return 0;
594 :
595 0 : fd_vinyl_t * vinyl = ctx->vinyl;
596 0 : fd_vinyl_io_t * io = vinyl->io;
597 :
598 0 : ulong discard_cnt = ctx->req_tail - ctx->req_head;
599 :
600 : /* Append the final partition and sync so we can resume with a fast
601 : parallel recovery */
602 :
603 0 : FD_MCNT_INC( ACCDB, BLOCKS_PART, 1UL );
604 0 : fd_vinyl_io_append_part( io, ctx->seq_part, ctx->accum_dead_cnt, 0UL, NULL, 0UL );
605 :
606 0 : ctx->accum_dead_cnt = 0UL;
607 :
608 0 : ctx->accum_garbage_cnt++;
609 0 : ctx->accum_garbage_sz += FD_VINYL_BSTREAM_BLOCK_SZ;
610 :
611 0 : fd_vinyl_io_commit( io, FD_VINYL_IO_FLAG_BLOCKING );
612 :
613 0 : fd_vinyl_io_sync( io, FD_VINYL_IO_FLAG_BLOCKING );
614 :
615 : /* Drain outstanding accumulators */
616 :
617 : /**/ ctx->accum_garbage_cnt = 0UL;
618 0 : vinyl->garbage_sz += ctx->accum_garbage_sz; ctx->accum_garbage_sz = 0UL;
619 :
620 : /* Disconnect from the clients */
621 :
622 0 : ulong released_cnt = 0UL;
623 0 : for( ulong client_idx=0UL; client_idx<ctx->client_cnt; client_idx++ ) {
624 0 : released_cnt += (ctx->_client[ client_idx ].quota_max - ctx->_client[ client_idx ].quota_rem);
625 0 : }
626 :
627 0 : if( FD_UNLIKELY( discard_cnt ) ) FD_LOG_WARNING(( "halt discarded %lu received requests", discard_cnt ));
628 0 : if( FD_UNLIKELY( released_cnt ) ) FD_LOG_WARNING(( "halt released %lu outstanding acquires", released_cnt ));
629 0 : if( FD_UNLIKELY( ctx->client_cnt ) ) FD_LOG_WARNING(( "halt disconneced %lu clients", ctx->client_cnt ));
630 :
631 0 : return 1;
632 0 : }
633 :
634 : static void
635 0 : metrics_write( fd_vinyl_tile_t * ctx ) {
636 0 : if( FD_UNLIKELY( !ctx->booted ) ) return;
637 0 : fd_vinyl_t * vinyl = ctx->vinyl;
638 0 : fd_vinyl_io_t * io = vinyl->io;
639 :
640 0 : FD_MGAUGE_SET( ACCDB, BSTREAM_SEQ_ANCIENT, io->seq_ancient );
641 0 : FD_MGAUGE_SET( ACCDB, BSTREAM_SEQ_PAST, io->seq_past );
642 0 : FD_MGAUGE_SET( ACCDB, BSTREAM_SEQ_PRESENT, io->seq_present );
643 0 : FD_MGAUGE_SET( ACCDB, BSTREAM_SEQ_FUTURE, io->seq_future );
644 :
645 0 : FD_MGAUGE_SET( ACCDB, GARBAGE_BYTES, vinyl->garbage_sz );
646 0 : }
647 :
648 : /* before_credit runs every main loop iteration */
649 :
650 : static void
651 : before_credit( fd_vinyl_tile_t * ctx,
652 : fd_stem_context_t * stem,
653 0 : int * charge_busy ) {
654 0 : (void)stem;
655 0 : if( FD_UNLIKELY( !ctx->booted ) ) return;
656 :
657 0 : fd_vinyl_t * vinyl = ctx->vinyl;
658 :
659 0 : fd_vinyl_io_t * io = vinyl->io;
660 0 : fd_vinyl_meta_t * meta = vinyl->meta;
661 0 : fd_vinyl_line_t * line = vinyl->line;
662 0 : fd_vinyl_data_t * data = vinyl->data;
663 :
664 0 : ulong pair_max = vinyl->pair_max;
665 :
666 0 : fd_vinyl_meta_ele_t * ele0 = meta->ele;
667 0 : ulong ele_max = meta->ele_max;
668 0 : ulong meta_seed = meta->seed;
669 0 : ulong * lock = meta->lock;
670 0 : int lock_shift = meta->lock_shift;
671 :
672 0 : ulong data_laddr0 = (ulong)data->laddr0;
673 0 : fd_vinyl_data_vol_t const * vol = data->vol;
674 0 : ulong vol_cnt = data->vol_cnt;
675 :
676 0 : ulong line_cnt = vinyl->line_cnt;
677 :
678 : /* Select client to poll this run loop iteration */
679 :
680 0 : ctx->client_idx = fd_ulong_if( ctx->client_idx+1UL<ctx->client_cnt, ctx->client_idx+1UL, 0UL );
681 :
682 0 : fd_vinyl_client_t * client = ctx->_client + ctx->client_idx;
683 :
684 0 : fd_vinyl_rq_t * rq = client->rq;
685 0 : ulong seq = client->seq;
686 0 : ulong burst_max = client->burst_max;
687 0 : ulong link_id = client->link_id;
688 :
689 0 : ulong accum_dead_cnt = ctx->accum_dead_cnt;
690 0 : ulong accum_garbage_cnt = ctx->accum_garbage_cnt;
691 0 : ulong accum_garbage_sz = ctx->accum_garbage_sz;
692 0 : ulong accum_cache_hit = 0UL;
693 :
694 : /* Enqueue up to burst_max requests from this client into the
695 : local request queue. Using burst_max << FD_VINYL_REQ_MAX
696 : allows applications to prevent a bursty client from starving
697 : other clients of resources while preserving the spatial and
698 : temporal locality of reasonably sized O(burst_max) bursts from
699 : an individual client in processing below. Each run loop
700 : iteration can enqueue up to burst_max requests per iterations. */
701 :
702 0 : for( ulong recv_rem=fd_ulong_min( FD_VINYL_REQ_MAX-(ctx->req_tail-ctx->req_head), burst_max ); recv_rem; recv_rem-- ) {
703 0 : fd_vinyl_req_t * req = ctx->_req + (ctx->req_tail & (FD_VINYL_REQ_MAX-1UL));
704 :
705 0 : long diff = fd_vinyl_rq_recv( rq, seq, req );
706 :
707 0 : if( FD_LIKELY( diff>0L ) ) break; /* No requests waiting in rq at this time */
708 :
709 0 : if( FD_UNLIKELY( diff ) ) FD_LOG_CRIT(( "client overran request queue" ));
710 :
711 0 : *charge_busy = 1;
712 0 : seq++;
713 :
714 : /* We got the next request. Decide if we should accept it.
715 :
716 : Specifically, we ignore requests whose link_id don't match
717 : link_id (e.g. an unknown link_id or matches a different
718 : client's link_id ... don't know if it is where or even if it
719 : is safe to the completion). Even if the request provided an
720 : out-of-band location to send the completion (comp_gaddr!=0),
721 : we have no reason to trust it given the mismatch.
722 :
723 : This also gives a mechanism for a client use a single rq to
724 : send requests to multiple vinyl instances ... the client
725 : should use a different link_id for each vinyl instance. Each
726 : vinyl instance will quickly filter out the requests not
727 : addressed to it.
728 :
729 : Since we know the client_idx at this point, given a matching
730 : link_id, we stash the client_idx in the pending req link_id
731 : to eliminate the need to maintain a link_id<>client_idx map
732 : in the execution loop below. */
733 :
734 0 : if( FD_UNLIKELY( req->link_id!=link_id ) ) {
735 0 : FD_LOG_CRIT(( "received request from link_id %lu, but request specifies incorrect link_id %lu",
736 0 : link_id, req->link_id ));
737 0 : }
738 :
739 0 : req->link_id = ctx->client_idx;
740 :
741 0 : ctx->req_tail++;
742 0 : }
743 :
744 0 : client->seq = seq;
745 :
746 : /* Execute received requests */
747 :
748 0 : for( ulong exec_rem=fd_ulong_min( ctx->req_tail-ctx->req_head, ctx->exec_max ); exec_rem; exec_rem-- ) {
749 0 : fd_vinyl_req_t * req = ctx->_req + ((ctx->req_head++) & (FD_VINYL_REQ_MAX-1UL));
750 :
751 : /* Determine the client that sent this request and unpack the
752 : completion fields. We ignore requests with non-NULL but
753 : unmappable out-of-band completion because we can't send the
754 : completion in the expected manner and, in lieu of that, the
755 : receivers aren't expecting any completion to come via the cq
756 : (if any). Note that this implies requests that don't produce a
757 : completion (e.g. FETCH and FLUSH) need to either provide NULL
758 : or a valid non-NULL location for comp_gaddr to pass this
759 : validation (this is not a burden practically). */
760 :
761 0 : ulong req_id = req->req_id;
762 0 : ulong client_idx = req->link_id; /* See note above about link_id / client_idx conversion */
763 0 : ulong batch_cnt = (ulong)req->batch_cnt;
764 0 : ulong comp_gaddr = req->comp_gaddr;
765 :
766 0 : fd_vinyl_client_t * client = ctx->_client + client_idx;
767 :
768 0 : fd_vinyl_cq_t * cq = client->cq;
769 0 : ulong link_id = client->link_id;
770 0 : ulong client_laddr0 = client->laddr0;
771 0 : ulong client_laddr1 = client->laddr1;
772 0 : ulong quota_rem = client->quota_rem;
773 :
774 0 : FD_CRIT( quota_rem<=client->quota_max, "corruption detected" );
775 :
776 0 : fd_vinyl_comp_t * comp = MAP_REQ_GADDR( comp_gaddr, fd_vinyl_comp_t, 1UL );
777 0 : if( FD_UNLIKELY( (!comp) & (!!comp_gaddr) ) ) {
778 0 : FD_LOG_CRIT(( "client with link_id=%lu requested completion at invalid gaddr %lu",
779 0 : link_id, comp_gaddr ));
780 0 : }
781 :
782 0 : int comp_err = 1;
783 0 : ulong fail_cnt = 0UL;
784 :
785 0 : ulong read_cnt = 0UL;
786 0 : ulong append_cnt = 0UL;
787 :
788 0 : switch( req->type ) {
789 :
790 0 : # include "../../vinyl/fd_vinyl_case_acquire.c"
791 0 : # include "../../vinyl/fd_vinyl_case_release.c"
792 0 : # include "../../vinyl/fd_vinyl_case_erase.c"
793 : /* FIXME support more request types */
794 :
795 0 : default:
796 0 : FD_LOG_CRIT(( "unsupported request type %u", (uint)req->type ));
797 0 : comp_err = FD_VINYL_ERR_INVAL;
798 0 : break;
799 0 : }
800 :
801 0 : FD_MCNT_INC( ACCDB, REQUEST_BATCHES, 1UL );
802 0 : switch( req->type ) {
803 0 : case FD_VINYL_REQ_TYPE_ACQUIRE:
804 0 : FD_MCNT_INC( ACCDB, REQUESTS_ACQUIRE, batch_cnt );
805 0 : break;
806 0 : case FD_VINYL_REQ_TYPE_RELEASE:
807 0 : FD_MCNT_INC( ACCDB, REQUESTS_RELEASE, batch_cnt );
808 0 : break;
809 0 : case FD_VINYL_REQ_TYPE_ERASE:
810 0 : FD_MCNT_INC( ACCDB, REQUESTS_ERASE, batch_cnt );
811 0 : break;
812 0 : }
813 :
814 0 : for( ; read_cnt; read_cnt-- ) {
815 0 : fd_vinyl_io_rd_t * _rd; /* avoid pointer escape */
816 0 : fd_vinyl_io_poll( io, &_rd, FD_VINYL_IO_FLAG_BLOCKING );
817 0 : fd_vinyl_io_rd_t * rd = _rd;
818 :
819 0 : fd_vinyl_data_obj_t * obj = (fd_vinyl_data_obj_t *) rd->ctx;
820 0 : ulong seq = rd->seq; (void)seq;
821 0 : fd_vinyl_bstream_phdr_t * cphdr = (fd_vinyl_bstream_phdr_t *)rd->dst;
822 0 : ulong cpair_sz = rd->sz; (void)cpair_sz;
823 :
824 0 : fd_vinyl_data_obj_t * cobj = (fd_vinyl_data_obj_t *)fd_ulong_align_dn( (ulong)rd, FD_VINYL_BSTREAM_BLOCK_SZ );
825 :
826 0 : FD_CRIT( cphdr==fd_vinyl_data_obj_phdr( cobj ), "corruption detected" );
827 :
828 0 : ulong cpair_ctl = cphdr->ctl;
829 :
830 0 : int cpair_type = fd_vinyl_bstream_ctl_type ( cpair_ctl );
831 0 : int cpair_style = fd_vinyl_bstream_ctl_style( cpair_ctl );
832 0 : ulong cpair_val_esz = fd_vinyl_bstream_ctl_sz ( cpair_ctl );
833 :
834 0 : FD_CRIT( cpair_type==FD_VINYL_BSTREAM_CTL_TYPE_PAIR, "corruption detected" );
835 0 : FD_CRIT( cpair_sz ==fd_vinyl_bstream_pair_sz( cpair_val_esz ), "corruption detected" );
836 :
837 0 : schar * rd_err = cobj->rd_err;
838 :
839 0 : FD_CRIT ( rd_err, "corruption detected" );
840 0 : FD_ALERT( fd_vinyl_data_is_valid_obj( obj, vol, vol_cnt ), "corruption detected" );
841 :
842 0 : ulong line_idx = obj->line_idx;
843 :
844 0 : FD_CRIT( line_idx<line_cnt, "corruption detected" );
845 0 : FD_CRIT( line[ line_idx ].obj==obj, "corruption detected" );
846 :
847 0 : ulong ele_idx = line[ line_idx ].ele_idx;
848 :
849 0 : FD_CRIT ( ele_idx<ele_max, "corruption detected" );
850 0 : FD_ALERT( !memcmp( &ele0[ ele_idx ].phdr, cphdr, sizeof(fd_vinyl_bstream_phdr_t) ), "corruption detected" );
851 0 : FD_CRIT ( ele0[ ele_idx ].seq ==seq, "corruption detected" );
852 0 : FD_CRIT ( ele0[ ele_idx ].line_idx==line_idx, "corruption detected" );
853 :
854 : /* Verify data integrity */
855 :
856 0 : FD_ALERT( !fd_vinyl_bstream_pair_test( io_seed, seq, (fd_vinyl_bstream_block_t *)cphdr, cpair_sz ), "corruption detected" );
857 :
858 : /* Decode the pair */
859 :
860 0 : char * val = (char *)fd_vinyl_data_obj_val( obj );
861 0 : ulong val_sz = (ulong)cphdr->info.val_sz;
862 :
863 0 : FD_CRIT( val_sz <= FD_VINYL_VAL_MAX, "corruption detected" );
864 0 : FD_CRIT( fd_vinyl_data_obj_val_max( obj ) >= val_sz, "corruption detected" );
865 :
866 0 : if( FD_LIKELY( cpair_style==FD_VINYL_BSTREAM_CTL_STYLE_RAW ) ) {
867 :
868 0 : FD_CRIT( obj==cobj, "corruption detected" );
869 0 : FD_CRIT( cpair_val_esz==val_sz, "corruption detected" );
870 :
871 0 : } else {
872 :
873 0 : char const * cval = (char const *)fd_vinyl_data_obj_val( cobj );
874 0 : ulong cval_sz = fd_vinyl_bstream_ctl_sz( cpair_ctl );
875 :
876 0 : ulong _val_sz = (ulong)LZ4_decompress_safe( cval, val, (int)cval_sz, (int)val_sz );
877 0 : if( FD_UNLIKELY( _val_sz!=val_sz ) ) FD_LOG_CRIT(( "LZ4_decompress_safe failed" ));
878 :
879 0 : fd_vinyl_data_free( data, cobj );
880 :
881 0 : fd_vinyl_bstream_phdr_t * phdr = fd_vinyl_data_obj_phdr( obj );
882 :
883 0 : phdr->ctl = fd_vinyl_bstream_ctl( FD_VINYL_BSTREAM_CTL_TYPE_PAIR, FD_VINYL_BSTREAM_CTL_STYLE_RAW, val_sz );
884 0 : phdr->key = cphdr->key;
885 0 : phdr->info = cphdr->info;
886 :
887 0 : }
888 :
889 0 : obj->rd_active = (short)0;
890 :
891 : /* Fill any trailing region with zeros (there is at least
892 : FD_VINYL_BSTREAM_FTR_SZ) and tell the client the item was
893 : successfully processed. */
894 :
895 0 : memset( val + val_sz, 0, fd_vinyl_data_szc_obj_footprint( (ulong)obj->szc )
896 0 : - (sizeof(fd_vinyl_data_obj_t) + sizeof(fd_vinyl_bstream_phdr_t) + val_sz) );
897 :
898 0 : FD_COMPILER_MFENCE();
899 0 : *rd_err = (schar)FD_VINYL_SUCCESS;
900 0 : FD_COMPILER_MFENCE();
901 :
902 0 : }
903 :
904 0 : if( FD_UNLIKELY( append_cnt ) ) fd_vinyl_io_commit( io, FD_VINYL_IO_FLAG_BLOCKING );
905 :
906 0 : if( FD_LIKELY( comp_err<=0 ) ) fd_vinyl_cq_send( cq, comp, req_id, link_id, comp_err, batch_cnt, fail_cnt, quota_rem );
907 :
908 0 : client->quota_rem = quota_rem;
909 :
910 : /* Update metrics. Derive counters from vinyl locals
911 :
912 : append_cnt is incremented in these places:
913 : - fd_vinyl_case_erase.c (fd_vinyl_io_append_dead, with accum_dead_cnt)
914 : - fd_vinyl_case_move.c (fd_vinyl_io_append_move, with accum_move_cnt)
915 : - fd_vinyl_case_move.c (fd_vinyl_io_append(pair))
916 : - fd_vinyl_case_release.c (fd_vinyl_io_append_pair_inplace)
917 : - fd_vinyl_case_release.c (fd_vinyl_io_append_dead, with accum_dead_cnt)
918 :
919 : We can thus infer the number of pair blocks appended by
920 : subtracting accum_* */
921 :
922 0 : ulong const dead_cnt = accum_dead_cnt - ctx->accum_dead_cnt;
923 0 : FD_MCNT_INC( ACCDB, BLOCKS_PAIR, append_cnt - dead_cnt );
924 0 : FD_MCNT_INC( ACCDB, BLOCKS_DEAD, dead_cnt );
925 :
926 0 : }
927 :
928 0 : ctx->accum_dead_cnt = accum_dead_cnt;
929 0 : ctx->accum_garbage_cnt = accum_garbage_cnt;
930 0 : ctx->accum_garbage_sz = accum_garbage_sz;
931 0 : FD_MCNT_INC( ACCDB, CACHE_HITS, accum_cache_hit );
932 0 : }
933 :
/* Configuration for the fd_stem run-loop template instantiated below.
   This tile delivers completions out-of-band through vinyl cqs rather
   than through stem links, so a publish burst of 1 suffices.
   NOTE(review): STEM_BURST semantics (max frags published per credit)
   assumed from fd_stem conventions -- confirm against fd_stem.c. */
#define STEM_BURST (1UL)
#define STEM_LAZY (10000) /* housekeep every 10 us */
/* Stem passes a fd_vinyl_tile_t * ctx (aligned per fd_vinyl_align)
   to every callback.  The callback implementations (before_credit,
   during_housekeeping, metrics_write, should_shutdown) are static
   functions defined earlier in this file. */
#define STEM_CALLBACK_CONTEXT_TYPE fd_vinyl_tile_t
#define STEM_CALLBACK_CONTEXT_ALIGN fd_vinyl_align()
#define STEM_CALLBACK_BEFORE_CREDIT before_credit
#define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
#define STEM_CALLBACK_METRICS_WRITE metrics_write
#define STEM_CALLBACK_SHOULD_SHUTDOWN should_shutdown

/* Instantiate the stem run loop (defines stem_run used below). */
#include "../../disco/stem/fd_stem.c"
944 :
/* Topology descriptor for this tile: wires the file-local static
   callbacks (sandboxing fd/seccomp population, scratch sizing, the
   two-phase init, and the stem-generated run loop) into the topo
   runtime under the tile name "accdb" (NAME).
   NOTE(review): the exported symbol is fd_tile_vinyl while .name is
   "accdb" -- confirm which identifier topo lookup keys on and whether
   the symbol should be fd_tile_accdb for consistency. */
fd_topo_run_tile_t fd_tile_vinyl = {
  .name = NAME, /* "accdb" */
  .populate_allowed_fds = populate_allowed_fds,
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .scratch_align = scratch_align,
  .scratch_footprint = scratch_footprint,
  .privileged_init = privileged_init, /* runs before sandbox entry */
  .unprivileged_init = unprivileged_init, /* runs after sandbox entry */
  .run = stem_run /* provided by the fd_stem.c instantiation above */
};

#undef NAME
|