#ifndef HEADER_fd_src_waltz_h2_fd_h2_rbuf_h
#define HEADER_fd_src_waltz_h2_fd_h2_rbuf_h

/* fd_h2_rbuf.h provides a byte-oriented unaligned ring buffer. */

#include "fd_h2_base.h"
#include "../../util/log/fd_log.h"

struct fd_h2_rbuf {
  uchar * buf0;   /* points to first byte of buffer */
  uchar * buf1;   /* points one past last byte of buffer */
  uchar * lo;     /* in [buf0,buf1) */
  uchar * hi;     /* in [buf0,buf1) */
  ulong   lo_off;
  ulong   hi_off;
  ulong   bufsz;
};
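
/* bufsz equals (ulong)( buf1-buf0 ). lo_off and hi_off are cumulative
   stream offsets: hi_off counts bytes ever appended, lo_off bytes ever
   consumed, so hi_off-lo_off is the number of unconsumed bytes
   currently buffered. */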

FD_PROTOTYPES_BEGIN

/* fd_h2_rbuf_init initializes an h2_rbuf backed by the given buffer.
   On return, h2_rbuf has a read-write interest in buf. bufsz has no
   alignment requirements. */

static inline fd_h2_rbuf_t *
fd_h2_rbuf_init( fd_h2_rbuf_t * rbuf,
                 void *         buf,
                 ulong          bufsz ) {
  *rbuf = (fd_h2_rbuf_t) {
    .buf0  = (uchar *)buf,
    .buf1  = (uchar *)buf+bufsz,
    .lo    = (uchar *)buf,
    .hi    = (uchar *)buf,
    .bufsz = bufsz
  };
  return rbuf;
}
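
/* Example: a minimal setup sketch. The backing store and its size are
   illustrative; any byte buffer works:

     static uchar buf[ 4096 ];
     fd_h2_rbuf_t rbuf[1];
     fd_h2_rbuf_init( rbuf, buf, sizeof(buf) );
*/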

/* fd_h2_rbuf_used_sz returns the number of unconsumed bytes in rbuf. */

FD_FN_PURE static inline ulong
fd_h2_rbuf_used_sz( fd_h2_rbuf_t const * rbuf ) {
  return rbuf->hi_off - rbuf->lo_off;
}

/* fd_h2_rbuf_free_sz returns the number of bytes that can be appended
   using fd_h2_rbuf_push. */

FD_FN_PURE static inline ulong
fd_h2_rbuf_free_sz( fd_h2_rbuf_t const * rbuf ) {
  long used = (long)fd_h2_rbuf_used_sz( rbuf );
  return (ulong)fd_long_max( 0L, rbuf->buf1 - rbuf->buf0 - used );
}

/* fd_h2_rbuf_push appends a series of newly received bytes into rbuf.

   WARNING: The caller must not pass a chunk_sz larger than
   fd_h2_rbuf_free_sz bytes. */

static inline void
fd_h2_rbuf_push( fd_h2_rbuf_t * rbuf,
                 void const *   chunk,
                 ulong          chunk_sz ) {
  uchar * buf0 = rbuf->buf0;
  uchar * buf1 = rbuf->buf1;
  uchar * lo   = rbuf->lo;
  uchar * hi   = rbuf->hi;
  rbuf->hi_off += chunk_sz;

  if( FD_UNLIKELY( hi+chunk_sz > buf1 ) ) {
    /* Split copy */
    if( FD_UNLIKELY( lo>hi ) ) {
      FD_LOG_CRIT(( "rbuf overflow: buf_sz=%lu lo=%ld hi=%ld chunk_sz=%lu",
                    rbuf->bufsz, rbuf->lo-buf0, rbuf->hi-buf0, chunk_sz ));
    }
    ulong part1 = (ulong)( buf1-hi );
    ulong part2 = (ulong)( chunk_sz-part1 );
    fd_memcpy( hi, chunk, part1 );
    fd_memcpy( buf0, (void *)( (ulong)chunk+part1 ), part2 );
    rbuf->hi = buf0+part2;
    return;
  }

  /* One-shot copy */
  uchar * new_hi = hi+chunk_sz;
  if( new_hi==buf1 ) new_hi = buf0;
  fd_memcpy( hi, chunk, chunk_sz );
  rbuf->hi = new_hi;
  return;
}
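
/* Example: buffering bytes received from a socket. sock_fd and the use
   of recv(2) are illustrative assumptions, not part of this API:

     uchar tmp[ 2048 ];
     ulong max = fd_ulong_min( sizeof(tmp), fd_h2_rbuf_free_sz( rbuf ) );
     long  sz  = recv( sock_fd, tmp, max, 0 );
     if( sz>0L ) fd_h2_rbuf_push( rbuf, tmp, (ulong)sz );
*/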

/* fd_h2_rbuf_peek_used returns a pointer to the first contiguous
   fragment of unconsumed data. *sz is set to the number of contiguous
   bytes starting at rbuf->lo. *split_sz is set to the number of bytes
   that are unconsumed, but in a separate fragment. The caller may
   mangle bytes in [retval,retval+sz) if it consumes these bytes
   immediately afterwards. */

static inline uchar *
fd_h2_rbuf_peek_used( fd_h2_rbuf_t * rbuf,
                      ulong *        sz,
                      ulong *        split_sz ) {
  ulong   used_sz = fd_h2_rbuf_used_sz( rbuf );
  uchar * buf0    = rbuf->buf0;
  uchar * buf1    = rbuf->buf1;
  uchar * lo      = rbuf->lo;
  uchar * hi      = rbuf->hi;
  uchar * end     = lo+used_sz;
  /* FIXME make this branchless */
  if( end<=buf1 ) {
    /* used region is contiguous (covers hi having wrapped to buf0) */
    *sz       = used_sz;
    *split_sz = 0UL;
  } else {
    *sz       = (ulong)( buf1 - lo );
    *split_sz = (ulong)( hi - buf0 );
  }
  return lo;
}
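
/* Example: draining the buffer fragment by fragment. At most two
   fragments exist at any time; handle_bytes is a hypothetical consumer:

     ulong   sz, split_sz;
     uchar * frag = fd_h2_rbuf_peek_used( rbuf, &sz, &split_sz );
     handle_bytes( frag, sz );
     fd_h2_rbuf_skip( rbuf, sz );
     if( split_sz ) {
       frag = fd_h2_rbuf_peek_used( rbuf, &sz, &split_sz );
       handle_bytes( frag, sz );
       fd_h2_rbuf_skip( rbuf, sz );
     }
*/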

/* fd_h2_rbuf_peek_free is like fd_h2_rbuf_peek_used, but refers to the
   free region. */

static inline uchar *
fd_h2_rbuf_peek_free( fd_h2_rbuf_t * rbuf,
                      ulong *        sz,
                      ulong *        split_sz ) {
  ulong   free_sz = fd_h2_rbuf_free_sz( rbuf );
  uchar * buf0    = rbuf->buf0;
  uchar * buf1    = rbuf->buf1;
  uchar * lo      = rbuf->lo;
  uchar * hi      = rbuf->hi;
  uchar * end     = hi+free_sz;
  /* FIXME make this branchless */
  if( end<=buf1 ) {
    /* free region is contiguous (covers the wrapped case hi<lo) */
    *sz       = free_sz;
    *split_sz = 0UL;
  } else {
    *sz       = (ulong)( buf1 - hi );
    *split_sz = (ulong)( lo - buf0 );
  }
  return hi;
}
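
/* Example: receiving directly into free buffer space (zero copy), then
   committing the bytes with fd_h2_rbuf_alloc (defined below). sock_fd
   and recv(2) are illustrative assumptions:

     ulong   sz, split_sz;
     uchar * dst = fd_h2_rbuf_peek_free( rbuf, &sz, &split_sz );
     long    res = recv( sock_fd, dst, sz, 0 );
     if( res>0L ) fd_h2_rbuf_alloc( rbuf, (ulong)res );
*/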

/* fd_h2_rbuf_skip frees n bytes from rbuf. Freeing more bytes than
   returned by fd_h2_rbuf_used_sz corrupts the buffer state. */

static inline void
fd_h2_rbuf_skip( fd_h2_rbuf_t * rbuf,
                 ulong          n ) {
  uchar * lo    = rbuf->lo;
  ulong   bufsz = rbuf->bufsz;
  uchar * buf1  = rbuf->buf1;
  rbuf->lo_off += n;
  lo += n;
  if( FD_UNLIKELY( lo>=buf1 ) ) {
    lo -= bufsz;
  }
  rbuf->lo = lo;
}

/* fd_h2_rbuf_alloc marks the next n free bytes as used. */

static inline void
fd_h2_rbuf_alloc( fd_h2_rbuf_t * rbuf,
                  ulong          n ) {
  uchar * hi    = rbuf->hi;
  ulong   bufsz = rbuf->bufsz;
  uchar * buf1  = rbuf->buf1;
  rbuf->hi_off += n;
  hi += n;
  if( FD_UNLIKELY( hi>=buf1 ) ) {
    hi -= bufsz;
  }
  rbuf->hi = hi;
}

/* fd_h2_rbuf_pop consumes n bytes from rbuf. n is the number of bytes
   to consume. n is assumed to be <= fd_h2_rbuf_used_sz(rbuf). scratch
   points to scratch memory with space for n bytes.

   If the bytes are available contiguously in rbuf, returns a pointer to
   them. Otherwise, the bytes are copied into scratch. The returned
   pointer is valid until the next mutating rbuf operation. */

static inline uchar *
fd_h2_rbuf_pop( fd_h2_rbuf_t * rbuf,
                uchar *        scratch,
                ulong          n ) {
  uchar * lo    = rbuf->lo;
  uchar * buf0  = rbuf->buf0;
  uchar * buf1  = rbuf->buf1;
  ulong   bufsz = rbuf->bufsz;
  uchar * ret   = lo;
  rbuf->lo_off += n;
  uchar * end = lo+n;
  if( FD_UNLIKELY( (lo+n)>=buf1 ) ) {
    end -= bufsz;
  }
  if( FD_UNLIKELY( (lo+n)>buf1 ) ) {
    ulong part0 = (ulong)( buf1-lo );
    ulong part1 = n-part0;
    fd_memcpy( scratch, lo, part0 );
    fd_memcpy( scratch+part0, buf0, part1 );
    ret = scratch;
  }
  rbuf->lo = end;
  return ret;
}
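
/* Example: consuming a fixed-size record, here the 9 byte HTTP/2 frame
   header, assuming at least 9 buffered bytes. hdr points into rbuf if
   the header is contiguous, else into scratch:

     uchar   scratch[ 9 ];
     uchar * hdr = fd_h2_rbuf_pop( rbuf, scratch, 9UL );
*/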
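
/* fd_h2_rbuf_pop_copy consumes n bytes from rbuf, copying them into
   out. n is assumed to be <= fd_h2_rbuf_used_sz(rbuf). out points to
   a buffer with space for n bytes. */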

static inline void
fd_h2_rbuf_pop_copy( fd_h2_rbuf_t * rbuf,
                     void *         out,
                     ulong          n ) {
  uchar * lo    = rbuf->lo;
  uchar * buf0  = rbuf->buf0;
  uchar * buf1  = rbuf->buf1;
  ulong   bufsz = rbuf->bufsz;
  rbuf->lo_off += n;
  uchar * end = lo+n;
  if( FD_UNLIKELY( (lo+n)>=buf1 ) ) {
    end -= bufsz;
  }
  if( FD_UNLIKELY( (lo+n)>buf1 ) ) {
    ulong part0 = (ulong)( buf1-lo );
    ulong part1 = n-part0;
    fd_memcpy( out, lo, part0 );
    fd_memcpy( (void *)( (ulong)out+part0 ), buf0, part1 );
  } else {
    fd_memcpy( out, lo, n );
  }
  rbuf->lo = end;
}
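
/* fd_h2_rbuf_is_empty returns 1 if rbuf holds no unconsumed bytes and
   0 otherwise. */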

FD_FN_PURE static inline int
fd_h2_rbuf_is_empty( fd_h2_rbuf_t const * rbuf ) {
  return rbuf->lo_off==rbuf->hi_off;
}

FD_PROTOTYPES_END

#endif /* HEADER_fd_src_waltz_h2_fd_h2_rbuf_h */