LCOV - code coverage report
Current view: top level - util/io_uring - fd_io_uring.h (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 0 41 0.0 %
Date: 2026-02-13 06:06:24 Functions: 0 56 0.0 %

          Line data    Source code
       1             : #ifndef HEADER_fd_src_util_io_uring_fd_io_uring_h
       2             : #define HEADER_fd_src_util_io_uring_fd_io_uring_h
       3             : 
/* fd_io_uring.h provides APIs for job submission and completion polling
   against io_uring instances.

   These APIs are suitable for cooperative or interrupt-driven I/O only
   (completions delivered on the thread that submitted requests).  These
   APIs do not support busy polling with kernel worker threads. */
      10             : 
      11             : #include "fd_io_uring_sys.h"
      12             : #include <stdatomic.h>
      13             : #include <linux/io_uring.h>
      14             : 
struct fd_io_uring_sq {

  /* Submission-queue state bits shared with the kernel.

     The kernel might update these from an interrupt context, therefore
     accesses have to be explicit C11 atomics.  We assume no concurrent
     operation (although io_uring supports such operation), so accesses
     to these do not need stronger consistency than C11 relaxed. */

  atomic_uint * khead;    /* kernel consumer index; read here and cached into sqe_head */
  atomic_uint * ktail;    /* producer index; sqe_tail is published here on submit */
  atomic_uint * kflags;   /* ring flags; not inspected by the helpers in this header */
  atomic_uint * kdropped; /* running count of SQEs dropped by the kernel */

  uint *                array; /* SQE indirection array; not touched by these helpers */
  struct io_uring_sqe * sqes;  /* SQE ring storage */

  uint sqe_head; /* cached copy of *khead (refreshed on submit / space_left) */
  uint sqe_tail; /* next free SQE slot, not yet published to the kernel */
  uint depth;    /* ring capacity; assumed a power of two (mask indexing in get_sqe) */
};

typedef struct fd_io_uring_sq fd_io_uring_sq_t;
      38             : 
struct fd_io_uring_cq {
  ulong depth; /* ring capacity; assumed a power of two (mask indexing in cq_head) */

  /* Completion-queue state bits shared with the kernel; accessed via
     explicit C11 atomics for the same reasons as fd_io_uring_sq. */

  atomic_uint * khead;     /* consumer index; advanced by userspace in cq_advance */
  atomic_uint * ktail;     /* producer index advanced by the kernel */
  atomic_uint * koverflow; /* running count of completions lost to CQ overflow */

  struct io_uring_cqe * cqes; /* CQE ring storage */
};

typedef struct fd_io_uring_cq fd_io_uring_cq_t;
      50             : 
struct fd_io_uring {
  int ioring_fd; /* io_uring instance file descriptor */

  fd_io_uring_sq_t sq[1]; /* submission queue state */
  fd_io_uring_cq_t cq[1]; /* completion queue state */

  /* Kernel-allocated memory regions backing the rings, retained with
     their sizes for teardown.  NOTE(review): presumably mmap'd by the
     setup path in the companion .c -- confirm there. */

  void * kern_sq_mem;
  ulong  kern_sq_sz;
  void * kern_cq_mem;
  ulong  kern_cq_sz;
  void * kern_sqe_mem;
  ulong  kern_sqe_sz;
};

typedef struct fd_io_uring fd_io_uring_t;
      68             : 
      69             : FD_PROTOTYPES_BEGIN
      70             : 
      71             : /* fd_io_uring_submit flushes the submission queue and waits for
      72             :    wait_cnt completions to arrive.  Returns the number of submitted
      73             :    entries on success, or a negative errno value on error. */
      74             : 
      75             : FD_FN_UNUSED static int
      76             : fd_io_uring_submit( fd_io_uring_sq_t * sq,
      77             :                     int                ring_fd,
      78             :                     uint               wait_cnt,
      79           0 :                     uint               flags ) {
      80           0 :   uint tail = sq->sqe_tail;
      81           0 :   atomic_store_explicit( sq->ktail, tail, memory_order_release );
      82           0 :   uint head = atomic_load_explicit( sq->khead, memory_order_relaxed );
      83           0 :   sq->sqe_head = head;
      84           0 :   uint to_submit = tail - head;
      85           0 :   return fd_io_uring_enter( ring_fd, to_submit, wait_cnt, flags, NULL, 0 );
      86           0 : }
      87             : 
      88             : static inline uint
      89           0 : fd_io_uring_sq_dropped( fd_io_uring_sq_t const * sq ) {
      90           0 :   return atomic_load_explicit( sq->kdropped, memory_order_relaxed );
      91           0 : }
      92             : 
      93             : static inline uint
      94           0 : fd_io_uring_cq_overflow( fd_io_uring_cq_t const * cq ) {
      95           0 :   return atomic_load_explicit( cq->koverflow, memory_order_relaxed );
      96           0 : }
      97             : 
      98             : static inline struct io_uring_sqe *
      99           0 : fd_io_uring_get_sqe( fd_io_uring_sq_t * sq ) {
     100           0 :   uint tail  = sq->sqe_tail;
     101           0 :   uint depth = sq->depth;
     102           0 :   if( tail+1U - sq->sqe_head > depth ) {
     103           0 :     return NULL;
     104           0 :   }
     105           0 :   sq->sqe_tail = tail+1U;
     106           0 :   return &sq->sqes[ tail & (depth-1U) ];
     107           0 : }
     108             : 
     109             : /* fd_io_uring_sq_space_left returns the lower bound on the number of
     110             :    free SQEs. */
     111             : 
     112             : static inline uint
     113           0 : fd_io_uring_sq_space_left( fd_io_uring_sq_t * sq ) {
     114           0 :   uint head    = atomic_load_explicit( sq->khead, memory_order_acquire );
     115           0 :   uint pending = sq->sqe_tail - head;
     116           0 :   sq->sqe_head = head;
     117           0 :   return (uint)sq->depth - pending;
     118           0 : }
     119             : 
     120             : /* fd_io_uring_cq_ready returns the lower bound on the number of CQEs
     121             :    not yet received. */
     122             : 
     123             : static inline uint
     124           0 : fd_io_uring_cq_ready( fd_io_uring_cq_t const * cq ) {
     125           0 :   uint tail = atomic_load_explicit( cq->ktail, memory_order_acquire );
     126           0 :   uint head = atomic_load_explicit( cq->khead, memory_order_relaxed );
     127           0 :   return tail - head;
     128           0 : }
     129             : 
     130             : static inline void
     131             : fd_io_uring_cq_advance( fd_io_uring_cq_t * cq,
     132           0 :                         uint               cnt ) {
     133           0 :   uint head = atomic_load_explicit( cq->khead, memory_order_relaxed );
     134           0 :   atomic_store_explicit( cq->khead, head + cnt, memory_order_release );
     135           0 : }
     136             : 
     137             : static inline struct io_uring_cqe *
     138           0 : fd_io_uring_cq_head( fd_io_uring_cq_t const * cq ) {
     139             :   uint head = atomic_load_explicit( cq->khead, memory_order_relaxed );
     140           0 :   return &cq->cqes[ head & (cq->depth - 1U) ];
     141           0 : }
     142             : 
     143             : FD_PROTOTYPES_END
     144             : 
     145             : #endif /* HEADER_fd_src_util_io_uring_fd_io_uring_h */

Generated by: LCOV version 1.14