Line data Source code
1 : #ifndef HEADER_fd_src_waltz_h2_fd_h2_rbuf_h
2 : #define HEADER_fd_src_waltz_h2_fd_h2_rbuf_h
3 :
/* fd_h2_rbuf.h provides a byte-oriented unaligned ring buffer. */
5 :
6 : #include "fd_h2_base.h"
7 : #include "../../util/log/fd_log.h"
8 :
struct fd_h2_rbuf {
  uchar * buf0;   /* points to first byte of buffer */
  uchar * buf1;   /* points one past last byte of buffer */
  uchar * lo;     /* consume (read) cursor, in [buf0,buf1) */
  uchar * hi;     /* produce (write) cursor, in [buf0,buf1) */
  ulong   lo_off; /* cumulative count of bytes consumed */
  ulong   hi_off; /* cumulative count of bytes appended; hi_off-lo_off == unconsumed bytes */
  ulong   bufsz;  /* buf1-buf0 */
};
18 :
19 : FD_PROTOTYPES_BEGIN
20 :
/* fd_h2_rbuf_init initializes an h2_rbuf backed by the given buffer.
   On return, h2_rbuf has a read-write interest in buf.  bufsz has no
   alignment requirements. */
24 :
25 : static inline fd_h2_rbuf_t *
26 : fd_h2_rbuf_init( fd_h2_rbuf_t * rbuf,
27 : void * buf,
28 0 : ulong bufsz ) {
29 0 : *rbuf = (fd_h2_rbuf_t) {
30 0 : .buf0 = (uchar *)buf,
31 0 : .buf1 = (uchar *)buf+bufsz,
32 0 : .lo = (uchar *)buf,
33 0 : .hi = (uchar *)buf,
34 0 : .bufsz = bufsz
35 0 : };
36 0 : return rbuf;
37 0 : }
38 :
39 : /* fd_h2_rbuf_used_sz returns the number of unconsumed bytes in rbuf. */
40 :
41 : FD_FN_PURE static inline ulong
42 0 : fd_h2_rbuf_used_sz( fd_h2_rbuf_t const * rbuf ) {
43 0 : return rbuf->hi_off - rbuf->lo_off;
44 0 : }
45 :
46 : /* fd_h2_rbuf_free_sz returns the number of bytes that can be appended
47 : using fd_h2_rbuf_push. */
48 :
49 : FD_FN_PURE static inline ulong
50 0 : fd_h2_rbuf_free_sz( fd_h2_rbuf_t const * rbuf ) {
51 0 : long used = (long)fd_h2_rbuf_used_sz( rbuf );
52 0 : return (ulong)fd_long_max( 0L, rbuf->buf1 - rbuf->buf0 - used );
53 0 : }
54 :
/* fd_h2_rbuf_push appends a series of newly received bytes into rbuf,
   wrapping around the end of the backing buffer as needed.

   WARNING: The caller must not pass a chunk_sz larger than
   fd_h2_rbuf_free_sz bytes. */
60 :
61 : static inline void
62 : fd_h2_rbuf_push( fd_h2_rbuf_t * rbuf,
63 : void const * chunk,
64 0 : ulong chunk_sz ) {
65 0 : uchar * buf0 = rbuf->buf0;
66 0 : uchar * buf1 = rbuf->buf1;
67 0 : uchar * lo = rbuf->lo;
68 0 : uchar * hi = rbuf->hi;
69 0 : rbuf->hi_off += chunk_sz;
70 :
71 0 : if( FD_UNLIKELY( hi+chunk_sz > rbuf->buf1 ) ) {
72 : /* Split copy */
73 0 : if( FD_UNLIKELY( lo>hi ) ) {
74 0 : FD_LOG_CRIT(( "rbuf overflow: buf_sz=%lu lo=%ld hi=%ld chunk_sz=%lu",
75 0 : rbuf->bufsz, rbuf->lo-buf0, rbuf->hi-buf0, chunk_sz ));
76 0 : }
77 0 : ulong part1 = (ulong)( buf1-hi );
78 0 : ulong part2 = (ulong)( chunk_sz-part1 );
79 0 : fd_memcpy( hi, chunk, part1 );
80 0 : fd_memcpy( buf0, (void *)( (ulong)chunk+part1 ), part2 );
81 0 : rbuf->hi = buf0+part2;
82 0 : return;
83 0 : }
84 :
85 : /* One-shot copy */
86 0 : uchar * new_hi = hi+chunk_sz;
87 0 : if( new_hi==buf1 ) new_hi = buf0;
88 0 : fd_memcpy( hi, chunk, chunk_sz );
89 0 : rbuf->hi = new_hi;
90 0 : return;
91 0 : }
92 :
93 : /* fd_h2_rbuf_peek_used returns a pointer to the first contiguous
94 : fragment of unconsumed data. *sz is set to the number of contiguous
95 : bytes starting at rbuf->lo. *split_sz is set to the number of bytes
96 : that are unconsumed, but in a separate fragment. The caller may
97 : mangle bytes in [retval,retval+sz) if it consumes these bytes
98 : immediately afterwards. */
99 :
100 : static inline uchar *
101 : fd_h2_rbuf_peek_used( fd_h2_rbuf_t * rbuf,
102 : ulong * sz,
103 0 : ulong * split_sz ) {
104 0 : ulong used_sz = fd_h2_rbuf_used_sz( rbuf );
105 0 : uchar * buf0 = rbuf->buf0;
106 0 : uchar * buf1 = rbuf->buf1;
107 0 : uchar * lo = rbuf->lo;
108 0 : uchar * hi = rbuf->hi;
109 0 : uchar * end = lo+used_sz;
110 : /* FIXME make this branchless */
111 0 : if( end<=buf1 ) {
112 0 : *sz = (ulong)( hi - lo );
113 0 : *split_sz = 0UL;
114 0 : } else {
115 0 : *sz = (ulong)( buf1 - lo );
116 0 : *split_sz = (ulong)( hi - buf0 );
117 0 : }
118 0 : return lo;
119 0 : }
120 :
121 : /* fd_h2_rbuf_peek_free is like fd_h2_rbuf_peek_used, but refers to the
122 : free region. */
123 :
124 : static inline uchar *
125 : fd_h2_rbuf_peek_free( fd_h2_rbuf_t * rbuf,
126 : ulong * sz,
127 0 : ulong * split_sz ) {
128 0 : ulong free_sz = fd_h2_rbuf_free_sz( rbuf );
129 0 : uchar * buf0 = rbuf->buf0;
130 0 : uchar * buf1 = rbuf->buf1;
131 0 : uchar * lo = rbuf->lo;
132 0 : uchar * hi = rbuf->hi;
133 0 : uchar * end = hi+free_sz;
134 : /* FIXME make this branchless */
135 0 : if( end<=buf1 ) {
136 0 : *sz = (ulong)( buf1 - hi );
137 0 : *split_sz = 0UL;
138 0 : } else {
139 0 : *sz = (ulong)( buf1 - hi );
140 0 : *split_sz = (ulong)( buf0 - lo );
141 0 : }
142 0 : return hi;
143 0 : }
144 :
145 : /* fd_h2_rbuf_skip frees n bytes from rbuf. Freeing more bytes than
146 : returned by fd_h2_rbuf_used_sz corrupts the buffer state. */
147 :
148 : static inline void
149 : fd_h2_rbuf_skip( fd_h2_rbuf_t * rbuf,
150 0 : ulong n ) {
151 0 : uchar * lo = rbuf->lo;
152 0 : ulong bufsz = rbuf->bufsz;
153 0 : uchar * buf1 = rbuf->buf1;
154 0 : rbuf->lo_off += n;
155 0 : lo += n;
156 0 : if( FD_UNLIKELY( lo>=buf1 ) ) {
157 0 : lo -= bufsz;
158 0 : }
159 0 : rbuf->lo = lo;
160 0 : }
161 :
162 : /* fd_h2_rbuf_alloc marks the next n free bytes as used. */
163 :
164 : static inline void
165 : fd_h2_rbuf_alloc( fd_h2_rbuf_t * rbuf,
166 0 : ulong n ) {
167 0 : uchar * hi = rbuf->hi;
168 0 : ulong bufsz = rbuf->bufsz;
169 0 : uchar * buf1 = rbuf->buf1;
170 0 : rbuf->hi_off += n;
171 0 : hi += n;
172 0 : if( FD_UNLIKELY( hi>=buf1 ) ) {
173 0 : hi -= bufsz;
174 0 : }
175 0 : rbuf->hi = hi;
176 0 : }
177 :
/* fd_h2_rbuf_pop consumes n bytes from rbuf.  n is the number of bytes
   to consume.  n is assumed to be <= fd_h2_rbuf_used_sz(rbuf).  scratch
   points to scratch memory with space for n bytes.

   If the bytes are available contiguously in rbuf, returns a pointer to
   them.  Otherwise, the bytes are copied into scratch.  The returned
   pointer is valid until the next mutating rbuf operation. */
185 :
186 : static inline uchar *
187 : fd_h2_rbuf_pop( fd_h2_rbuf_t * rbuf,
188 : uchar * scratch,
189 0 : ulong n ) {
190 0 : uchar * lo = rbuf->lo;
191 0 : uchar * buf0 = rbuf->buf0;
192 0 : uchar * buf1 = rbuf->buf1;
193 0 : ulong bufsz = rbuf->bufsz;
194 0 : uchar * ret = lo;
195 0 : rbuf->lo_off += n;
196 0 : uchar * end = lo+n;
197 0 : if( FD_UNLIKELY( (lo+n)>=buf1 ) ) {
198 0 : end -= bufsz;
199 0 : }
200 0 : if( FD_UNLIKELY( (lo+n)>buf1 ) ) {
201 0 : ulong part0 = (ulong)( buf1-lo );
202 0 : ulong part1 = n-part0;
203 0 : fd_memcpy( scratch, lo, part0 );
204 0 : fd_memcpy( scratch+part0, buf0, part1 );
205 0 : ret = scratch;
206 0 : }
207 0 : rbuf->lo = end;
208 0 : return ret;
209 0 : }
210 :
211 : static inline void
212 : fd_h2_rbuf_pop_copy( fd_h2_rbuf_t * rbuf,
213 : void * out,
214 0 : ulong n ) {
215 0 : uchar * lo = rbuf->lo;
216 0 : uchar * buf0 = rbuf->buf0;
217 0 : uchar * buf1 = rbuf->buf1;
218 0 : ulong bufsz = rbuf->bufsz;
219 0 : rbuf->lo_off += n;
220 0 : uchar * end = lo+n;
221 0 : if( FD_UNLIKELY( (lo+n)>=buf1 ) ) {
222 0 : end -= bufsz;
223 0 : }
224 0 : if( FD_UNLIKELY( (lo+n)>buf1 ) ) {
225 0 : ulong part0 = (ulong)( buf1-lo );
226 0 : ulong part1 = n-part0;
227 0 : fd_memcpy( out, lo, part0 );
228 0 : fd_memcpy( (void *)( (ulong)out+part0 ), buf0, part1 );
229 0 : } else {
230 0 : fd_memcpy( out, lo, n );
231 0 : }
232 0 : rbuf->lo = end;
233 0 : }
234 :
235 : FD_FN_PURE static inline int
236 0 : fd_h2_rbuf_is_empty( fd_h2_rbuf_t const * rbuf ) {
237 0 : return rbuf->lo_off==rbuf->hi_off;
238 0 : }
239 :
240 : FD_PROTOTYPES_END
241 :
242 : #endif /* HEADER_fd_src_waltz_h2_fd_h2_rbuf_h */
|