Line | Count | Source (jump to first uncovered line) |
1 | | /*************************************************************************** |
2 | | * _ _ ____ _ |
3 | | * Project ___| | | | _ \| | |
4 | | * / __| | | | |_) | | |
5 | | * | (__| |_| | _ <| |___ |
6 | | * \___|\___/|_| \_\_____| |
7 | | * |
8 | | * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al. |
9 | | * |
10 | | * This software is licensed as described in the file COPYING, which |
11 | | * you should have received as part of this distribution. The terms |
12 | | * are also available at https://curl.se/docs/copyright.html. |
13 | | * |
14 | | * You may opt to use, copy, modify, merge, publish, distribute and/or sell |
15 | | * copies of the Software, and permit persons to whom the Software is |
16 | | * furnished to do so, under the terms of the COPYING file. |
17 | | * |
18 | | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY |
19 | | * KIND, either express or implied. |
20 | | * |
21 | | * SPDX-License-Identifier: curl |
22 | | * |
23 | | ***************************************************************************/ |
24 | | |
25 | | #include "curl_setup.h" |
26 | | #include "bufq.h" |
27 | | |
28 | | /* The last 3 #include files should be in this order */ |
29 | | #include "curl_printf.h" |
30 | | #include "curl_memory.h" |
31 | | #include "memdebug.h" |
32 | | |
33 | | static bool chunk_is_empty(const struct buf_chunk *chunk) |
34 | 0 | { |
35 | 0 | return chunk->r_offset >= chunk->w_offset; |
36 | 0 | } |
37 | | |
38 | | static bool chunk_is_full(const struct buf_chunk *chunk) |
39 | 0 | { |
40 | 0 | return chunk->w_offset >= chunk->dlen; |
41 | 0 | } |
42 | | |
43 | | static size_t chunk_len(const struct buf_chunk *chunk) |
44 | 0 | { |
45 | 0 | return chunk->w_offset - chunk->r_offset; |
46 | 0 | } |
47 | | |
48 | | static size_t chunk_space(const struct buf_chunk *chunk) |
49 | 0 | { |
50 | 0 | return chunk->dlen - chunk->w_offset; |
51 | 0 | } |
52 | | |
53 | | static void chunk_reset(struct buf_chunk *chunk) |
54 | 0 | { |
55 | 0 | chunk->next = NULL; |
56 | 0 | chunk->r_offset = chunk->w_offset = 0; |
57 | 0 | } |
58 | | |
59 | | static size_t chunk_append(struct buf_chunk *chunk, |
60 | | const unsigned char *buf, size_t len) |
61 | 0 | { |
62 | 0 | unsigned char *p = &chunk->x.data[chunk->w_offset]; |
63 | 0 | size_t n = chunk->dlen - chunk->w_offset; |
64 | 0 | DEBUGASSERT(chunk->dlen >= chunk->w_offset); |
65 | 0 | if(n) { |
66 | 0 | n = CURLMIN(n, len); |
67 | 0 | memcpy(p, buf, n); |
68 | 0 | chunk->w_offset += n; |
69 | 0 | } |
70 | 0 | return n; |
71 | 0 | } |
72 | | |
73 | | static size_t chunk_read(struct buf_chunk *chunk, |
74 | | unsigned char *buf, size_t len) |
75 | 0 | { |
76 | 0 | unsigned char *p = &chunk->x.data[chunk->r_offset]; |
77 | 0 | size_t n = chunk->w_offset - chunk->r_offset; |
78 | 0 | DEBUGASSERT(chunk->w_offset >= chunk->r_offset); |
79 | 0 | if(!n) { |
80 | 0 | return 0; |
81 | 0 | } |
82 | 0 | else if(n <= len) { |
83 | 0 | memcpy(buf, p, n); |
84 | 0 | chunk->r_offset = chunk->w_offset = 0; |
85 | 0 | return n; |
86 | 0 | } |
87 | 0 | else { |
88 | 0 | memcpy(buf, p, len); |
89 | 0 | chunk->r_offset += len; |
90 | 0 | return len; |
91 | 0 | } |
92 | 0 | } |
93 | | |
/* Let `reader` write directly into the free tail space of `chunk`.
 * At most `max_len` bytes are requested; a `max_len` of 0 means no cap
 * beyond the chunk's free space.
 * Returns the reader's result: >0 bytes added, 0 on reader EOF, or -1
 * with *err set. When the chunk has no free space at all, returns -1
 * with *err = CURLE_AGAIN without invoking the reader. */
static ssize_t chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
                            Curl_bufq_reader *reader,
                            void *reader_ctx, CURLcode *err)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset; /* free amount */
  ssize_t nread;

  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(!n) {
    /* no room to receive anything */
    *err = CURLE_AGAIN;
    return -1;
  }
  if(max_len && n > max_len)
    n = max_len;
  nread = reader(reader_ctx, p, n, err);
  if(nread > 0) {
    /* account for what the reader actually produced */
    DEBUGASSERT((size_t)nread <= n);
    chunk->w_offset += nread;
  }
  return nread;
}
116 | | |
117 | | static void chunk_peek(const struct buf_chunk *chunk, |
118 | | const unsigned char **pbuf, size_t *plen) |
119 | 0 | { |
120 | 0 | DEBUGASSERT(chunk->w_offset >= chunk->r_offset); |
121 | 0 | *pbuf = &chunk->x.data[chunk->r_offset]; |
122 | 0 | *plen = chunk->w_offset - chunk->r_offset; |
123 | 0 | } |
124 | | |
125 | | static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset, |
126 | | const unsigned char **pbuf, size_t *plen) |
127 | 0 | { |
128 | 0 | offset += chunk->r_offset; |
129 | 0 | DEBUGASSERT(chunk->w_offset >= offset); |
130 | 0 | *pbuf = &chunk->x.data[offset]; |
131 | 0 | *plen = chunk->w_offset - offset; |
132 | 0 | } |
133 | | |
134 | | static size_t chunk_skip(struct buf_chunk *chunk, size_t amount) |
135 | 0 | { |
136 | 0 | size_t n = chunk->w_offset - chunk->r_offset; |
137 | 0 | DEBUGASSERT(chunk->w_offset >= chunk->r_offset); |
138 | 0 | if(n) { |
139 | 0 | n = CURLMIN(n, amount); |
140 | 0 | chunk->r_offset += n; |
141 | 0 | if(chunk->r_offset == chunk->w_offset) |
142 | 0 | chunk->r_offset = chunk->w_offset = 0; |
143 | 0 | } |
144 | 0 | return n; |
145 | 0 | } |
146 | | |
147 | | static void chunk_shift(struct buf_chunk *chunk) |
148 | 0 | { |
149 | 0 | if(chunk->r_offset) { |
150 | 0 | if(!chunk_is_empty(chunk)) { |
151 | 0 | size_t n = chunk->w_offset - chunk->r_offset; |
152 | 0 | memmove(chunk->x.data, chunk->x.data + chunk->r_offset, n); |
153 | 0 | chunk->w_offset -= chunk->r_offset; |
154 | 0 | chunk->r_offset = 0; |
155 | 0 | } |
156 | 0 | else { |
157 | 0 | chunk->r_offset = chunk->w_offset = 0; |
158 | 0 | } |
159 | 0 | } |
160 | 0 | } |
161 | | |
162 | | static void chunk_list_free(struct buf_chunk **anchor) |
163 | 0 | { |
164 | 0 | struct buf_chunk *chunk; |
165 | 0 | while(*anchor) { |
166 | 0 | chunk = *anchor; |
167 | 0 | *anchor = chunk->next; |
168 | 0 | free(chunk); |
169 | 0 | } |
170 | 0 | } |
171 | | |
172 | | |
173 | | |
174 | | void Curl_bufcp_init(struct bufc_pool *pool, |
175 | | size_t chunk_size, size_t spare_max) |
176 | 0 | { |
177 | 0 | DEBUGASSERT(chunk_size > 0); |
178 | 0 | DEBUGASSERT(spare_max > 0); |
179 | 0 | memset(pool, 0, sizeof(*pool)); |
180 | 0 | pool->chunk_size = chunk_size; |
181 | 0 | pool->spare_max = spare_max; |
182 | 0 | } |
183 | | |
184 | | static CURLcode bufcp_take(struct bufc_pool *pool, |
185 | | struct buf_chunk **pchunk) |
186 | 0 | { |
187 | 0 | struct buf_chunk *chunk = NULL; |
188 | |
|
189 | 0 | if(pool->spare) { |
190 | 0 | chunk = pool->spare; |
191 | 0 | pool->spare = chunk->next; |
192 | 0 | --pool->spare_count; |
193 | 0 | chunk_reset(chunk); |
194 | 0 | *pchunk = chunk; |
195 | 0 | return CURLE_OK; |
196 | 0 | } |
197 | | |
198 | 0 | chunk = calloc(1, sizeof(*chunk) + pool->chunk_size); |
199 | 0 | if(!chunk) { |
200 | 0 | *pchunk = NULL; |
201 | 0 | return CURLE_OUT_OF_MEMORY; |
202 | 0 | } |
203 | 0 | chunk->dlen = pool->chunk_size; |
204 | 0 | *pchunk = chunk; |
205 | 0 | return CURLE_OK; |
206 | 0 | } |
207 | | |
208 | | static void bufcp_put(struct bufc_pool *pool, |
209 | | struct buf_chunk *chunk) |
210 | 0 | { |
211 | 0 | if(pool->spare_count >= pool->spare_max) { |
212 | 0 | free(chunk); |
213 | 0 | } |
214 | 0 | else { |
215 | 0 | chunk_reset(chunk); |
216 | 0 | chunk->next = pool->spare; |
217 | 0 | pool->spare = chunk; |
218 | 0 | ++pool->spare_count; |
219 | 0 | } |
220 | 0 | } |
221 | | |
222 | | void Curl_bufcp_free(struct bufc_pool *pool) |
223 | 0 | { |
224 | 0 | chunk_list_free(&pool->spare); |
225 | 0 | pool->spare_count = 0; |
226 | 0 | } |
227 | | |
228 | | static void bufq_init(struct bufq *q, struct bufc_pool *pool, |
229 | | size_t chunk_size, size_t max_chunks, int opts) |
230 | 0 | { |
231 | 0 | DEBUGASSERT(chunk_size > 0); |
232 | 0 | DEBUGASSERT(max_chunks > 0); |
233 | 0 | memset(q, 0, sizeof(*q)); |
234 | 0 | q->chunk_size = chunk_size; |
235 | 0 | q->max_chunks = max_chunks; |
236 | 0 | q->pool = pool; |
237 | 0 | q->opts = opts; |
238 | 0 | } |
239 | | |
240 | | void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks, |
241 | | int opts) |
242 | 0 | { |
243 | 0 | bufq_init(q, NULL, chunk_size, max_chunks, opts); |
244 | 0 | } |
245 | | |
246 | | void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks) |
247 | 0 | { |
248 | 0 | bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE); |
249 | 0 | } |
250 | | |
251 | | void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool, |
252 | | size_t max_chunks, int opts) |
253 | 0 | { |
254 | 0 | bufq_init(q, pool, pool->chunk_size, max_chunks, opts); |
255 | 0 | } |
256 | | |
257 | | void Curl_bufq_free(struct bufq *q) |
258 | 0 | { |
259 | 0 | chunk_list_free(&q->head); |
260 | 0 | chunk_list_free(&q->spare); |
261 | 0 | q->tail = NULL; |
262 | 0 | q->chunk_count = 0; |
263 | 0 | } |
264 | | |
265 | | void Curl_bufq_reset(struct bufq *q) |
266 | 0 | { |
267 | 0 | struct buf_chunk *chunk; |
268 | 0 | while(q->head) { |
269 | 0 | chunk = q->head; |
270 | 0 | q->head = chunk->next; |
271 | 0 | chunk->next = q->spare; |
272 | 0 | q->spare = chunk; |
273 | 0 | } |
274 | 0 | q->tail = NULL; |
275 | 0 | } |
276 | | |
277 | | size_t Curl_bufq_len(const struct bufq *q) |
278 | 0 | { |
279 | 0 | const struct buf_chunk *chunk = q->head; |
280 | 0 | size_t len = 0; |
281 | 0 | while(chunk) { |
282 | 0 | len += chunk_len(chunk); |
283 | 0 | chunk = chunk->next; |
284 | 0 | } |
285 | 0 | return len; |
286 | 0 | } |
287 | | |
288 | | size_t Curl_bufq_space(const struct bufq *q) |
289 | 0 | { |
290 | 0 | size_t space = 0; |
291 | 0 | if(q->tail) |
292 | 0 | space += chunk_space(q->tail); |
293 | 0 | if(q->spare) { |
294 | 0 | struct buf_chunk *chunk = q->spare; |
295 | 0 | while(chunk) { |
296 | 0 | space += chunk->dlen; |
297 | 0 | chunk = chunk->next; |
298 | 0 | } |
299 | 0 | } |
300 | 0 | if(q->chunk_count < q->max_chunks) { |
301 | 0 | space += (q->max_chunks - q->chunk_count) * q->chunk_size; |
302 | 0 | } |
303 | 0 | return space; |
304 | 0 | } |
305 | | |
306 | | bool Curl_bufq_is_empty(const struct bufq *q) |
307 | 0 | { |
308 | 0 | return !q->head || chunk_is_empty(q->head); |
309 | 0 | } |
310 | | |
311 | | bool Curl_bufq_is_full(const struct bufq *q) |
312 | 0 | { |
313 | 0 | if(!q->tail || q->spare) |
314 | 0 | return FALSE; |
315 | 0 | if(q->chunk_count < q->max_chunks) |
316 | 0 | return FALSE; |
317 | 0 | if(q->chunk_count > q->max_chunks) |
318 | 0 | return TRUE; |
319 | | /* we have no spares and cannot make more, is the tail full? */ |
320 | 0 | return chunk_is_full(q->tail); |
321 | 0 | } |
322 | | |
323 | | static struct buf_chunk *get_spare(struct bufq *q) |
324 | 0 | { |
325 | 0 | struct buf_chunk *chunk = NULL; |
326 | |
|
327 | 0 | if(q->spare) { |
328 | 0 | chunk = q->spare; |
329 | 0 | q->spare = chunk->next; |
330 | 0 | chunk_reset(chunk); |
331 | 0 | return chunk; |
332 | 0 | } |
333 | | |
334 | 0 | if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT))) |
335 | 0 | return NULL; |
336 | | |
337 | 0 | if(q->pool) { |
338 | 0 | if(bufcp_take(q->pool, &chunk)) |
339 | 0 | return NULL; |
340 | 0 | ++q->chunk_count; |
341 | 0 | return chunk; |
342 | 0 | } |
343 | 0 | else { |
344 | 0 | chunk = calloc(1, sizeof(*chunk) + q->chunk_size); |
345 | 0 | if(!chunk) |
346 | 0 | return NULL; |
347 | 0 | chunk->dlen = q->chunk_size; |
348 | 0 | ++q->chunk_count; |
349 | 0 | return chunk; |
350 | 0 | } |
351 | 0 | } |
352 | | |
/* Remove fully-read chunks from the front of the queue. Each drained
 * chunk is returned to the pool (when one is used), freed (when we are
 * over the soft limit or spares are disabled), or kept on the queue's
 * own spare list for reuse. */
static void prune_head(struct bufq *q)
{
  struct buf_chunk *chunk;

  while(q->head && chunk_is_empty(q->head)) {
    chunk = q->head;
    q->head = chunk->next;
    if(q->tail == chunk)  /* last chunk removed, tail follows head */
      q->tail = q->head;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      /* keep as spare for the next write */
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}
380 | | |
381 | | static struct buf_chunk *get_non_full_tail(struct bufq *q) |
382 | 0 | { |
383 | 0 | struct buf_chunk *chunk; |
384 | |
|
385 | 0 | if(q->tail && !chunk_is_full(q->tail)) |
386 | 0 | return q->tail; |
387 | 0 | chunk = get_spare(q); |
388 | 0 | if(chunk) { |
389 | | /* new tail, and possibly new head */ |
390 | 0 | if(q->tail) { |
391 | 0 | q->tail->next = chunk; |
392 | 0 | q->tail = chunk; |
393 | 0 | } |
394 | 0 | else { |
395 | 0 | DEBUGASSERT(!q->head); |
396 | 0 | q->head = q->tail = chunk; |
397 | 0 | } |
398 | 0 | } |
399 | 0 | return chunk; |
400 | 0 | } |
401 | | |
/* Append up to `len` bytes from `buf` to the queue, spreading the data
 * over as many chunks as needed and allowed.
 * Returns the number of bytes written, or -1 with *err set:
 * CURLE_OUT_OF_MEMORY when a chunk allocation failed below the limit,
 * CURLE_AGAIN when nothing could be written because the queue is full. */
ssize_t Curl_bufq_write(struct bufq *q,
                        const unsigned char *buf, size_t len,
                        CURLcode *err)
{
  struct buf_chunk *tail;
  ssize_t nwritten = 0;
  size_t n;

  DEBUGASSERT(q->max_chunks > 0);
  while(len) {
    tail = get_non_full_tail(q);
    if(!tail) {
      /* below the chunk limit, so this was an allocation failure */
      if(q->chunk_count < q->max_chunks) {
        *err = CURLE_OUT_OF_MEMORY;
        return -1;
      }
      break;  /* queue full */
    }
    n = chunk_append(tail, buf, len);
    if(!n)
      break;
    nwritten += n;
    buf += n;
    len -= n;
  }
  if(nwritten == 0 && len) {
    /* nothing taken although data was offered: report blocked */
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}
434 | | |
435 | | ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len, |
436 | | CURLcode *err) |
437 | 0 | { |
438 | 0 | ssize_t nread = 0; |
439 | 0 | size_t n; |
440 | |
|
441 | 0 | *err = CURLE_OK; |
442 | 0 | while(len && q->head) { |
443 | 0 | n = chunk_read(q->head, buf, len); |
444 | 0 | if(n) { |
445 | 0 | nread += n; |
446 | 0 | buf += n; |
447 | 0 | len -= n; |
448 | 0 | } |
449 | 0 | prune_head(q); |
450 | 0 | } |
451 | 0 | if(nread == 0) { |
452 | 0 | *err = CURLE_AGAIN; |
453 | 0 | return -1; |
454 | 0 | } |
455 | 0 | return nread; |
456 | 0 | } |
457 | | |
458 | | bool Curl_bufq_peek(struct bufq *q, |
459 | | const unsigned char **pbuf, size_t *plen) |
460 | 0 | { |
461 | 0 | if(q->head && chunk_is_empty(q->head)) { |
462 | 0 | prune_head(q); |
463 | 0 | } |
464 | 0 | if(q->head && !chunk_is_empty(q->head)) { |
465 | 0 | chunk_peek(q->head, pbuf, plen); |
466 | 0 | return TRUE; |
467 | 0 | } |
468 | 0 | *pbuf = NULL; |
469 | 0 | *plen = 0; |
470 | 0 | return FALSE; |
471 | 0 | } |
472 | | |
473 | | bool Curl_bufq_peek_at(struct bufq *q, size_t offset, |
474 | | const unsigned char **pbuf, size_t *plen) |
475 | 0 | { |
476 | 0 | struct buf_chunk *c = q->head; |
477 | 0 | size_t clen; |
478 | |
|
479 | 0 | while(c) { |
480 | 0 | clen = chunk_len(c); |
481 | 0 | if(!clen) |
482 | 0 | break; |
483 | 0 | if(offset >= clen) { |
484 | 0 | offset -= clen; |
485 | 0 | c = c->next; |
486 | 0 | continue; |
487 | 0 | } |
488 | 0 | chunk_peek_at(c, offset, pbuf, plen); |
489 | 0 | return TRUE; |
490 | 0 | } |
491 | 0 | *pbuf = NULL; |
492 | 0 | *plen = 0; |
493 | 0 | return FALSE; |
494 | 0 | } |
495 | | |
496 | | void Curl_bufq_skip(struct bufq *q, size_t amount) |
497 | 0 | { |
498 | 0 | size_t n; |
499 | |
|
500 | 0 | while(amount && q->head) { |
501 | 0 | n = chunk_skip(q->head, amount); |
502 | 0 | amount -= n; |
503 | 0 | prune_head(q); |
504 | 0 | } |
505 | 0 | } |
506 | | |
507 | | void Curl_bufq_skip_and_shift(struct bufq *q, size_t amount) |
508 | 0 | { |
509 | 0 | Curl_bufq_skip(q, amount); |
510 | 0 | if(q->tail) |
511 | 0 | chunk_shift(q->tail); |
512 | 0 | } |
513 | | |
/* Drain the queue by passing its data to `writer` until the queue is
 * empty or the writer stops accepting.
 * Returns the total bytes written, or -1 with *err set when the very
 * first write blocked or any write failed with a real error. A block
 * after partial progress returns the partial count instead. */
ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
                       void *writer_ctx, CURLcode *err)
{
  const unsigned char *buf;
  size_t blen;
  ssize_t nwritten = 0;

  while(Curl_bufq_peek(q, &buf, &blen)) {
    ssize_t chunk_written;

    chunk_written = writer(writer_ctx, buf, blen, err);
    if(chunk_written < 0) {
      if(!nwritten || *err != CURLE_AGAIN) {
        /* blocked on first write or real error, fail */
        nwritten = -1;
      }
      break;
    }
    if(!chunk_written) {
      if(!nwritten) {
        /* treat as blocked */
        *err = CURLE_AGAIN;
        nwritten = -1;
      }
      break;
    }
    /* consume what the writer took and keep going */
    Curl_bufq_skip(q, (size_t)chunk_written);
    nwritten += chunk_written;
  }
  return nwritten;
}
545 | | |
/* Add `buf`/`len` to the queue, draining it through `writer` whenever
 * it is full so more data fits.
 * Returns the number of input bytes accepted into the queue (data may
 * additionally have been passed on to the writer), or -1 with *err
 * set: real writer/queue errors propagate; CURLE_AGAIN is reported
 * only when no input byte could be accepted at all. */
ssize_t Curl_bufq_write_pass(struct bufq *q,
                             const unsigned char *buf, size_t len,
                             Curl_bufq_writer *writer, void *writer_ctx,
                             CURLcode *err)
{
  ssize_t nwritten = 0, n;

  *err = CURLE_OK;
  while(len) {
    if(Curl_bufq_is_full(q)) {
      /* try to make room in case we are full */
      n = Curl_bufq_pass(q, writer, writer_ctx, err);
      if(n < 0) {
        if(*err != CURLE_AGAIN) {
          /* real error, fail */
          return -1;
        }
        /* would block, bufq is full, give up */
        break;
      }
    }

    /* Add whatever is remaining now to bufq */
    n = Curl_bufq_write(q, buf, len, err);
    if(n < 0) {
      if(*err != CURLE_AGAIN) {
        /* real error, fail */
        return -1;
      }
      /* no room in bufq */
      break;
    }
    /* edge case of writer returning 0 (and len is >0)
     * break or we might enter an infinite loop here */
    if(n == 0)
      break;

    /* Maybe only part of `data` has been added, continue to loop */
    buf += (size_t)n;
    len -= (size_t)n;
    nwritten += (size_t)n;
  }

  if(!nwritten && len) {
    /* nothing accepted although input remained: report blocked */
    *err = CURLE_AGAIN;
    return -1;
  }
  return nwritten;
}
595 | | |
/* Make a single `reader` call into the queue's tail chunk, reading at
 * most `max_len` bytes (0 means no cap beyond chunk space).
 * Returns bytes added, 0 on reader EOF (with *err = CURLE_OK), or -1
 * with *err set (CURLE_AGAIN when the queue is full,
 * CURLE_OUT_OF_MEMORY when a chunk could not be allocated). */
ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
                       Curl_bufq_reader *reader, void *reader_ctx,
                       CURLcode *err)
{
  struct buf_chunk *tail = NULL;
  ssize_t nread;

  *err = CURLE_AGAIN;
  tail = get_non_full_tail(q);
  if(!tail) {
    if(q->chunk_count < q->max_chunks) {
      /* below the limit, so the chunk allocation itself failed */
      *err = CURLE_OUT_OF_MEMORY;
      return -1;
    }
    /* full, blocked */
    *err = CURLE_AGAIN;
    return -1;
  }

  nread = chunk_slurpn(tail, max_len, reader, reader_ctx, err);
  if(nread < 0) {
    return -1;
  }
  else if(nread == 0) {
    /* eof */
    *err = CURLE_OK;
  }
  return nread;
}
625 | | |
/**
 * Read from `reader` and append to the buffer queue until `max_len`
 * bytes have been read, the reader blocks/errors, EOF is reached, or a
 * reader call returns less than was asked for.
 * A `max_len` of 0 imposes no limit (the behavior `Curl_bufq_slurp()`
 * exposes publicly).
 * Returns the total amount of bytes read (may be 0 on immediate EOF)
 * or -1 on reader errors, with *err set. A block (CURLE_AGAIN) after
 * some bytes were already read returns the partial count with
 * *err = CURLE_OK instead of failing.
 * Note that even in case of a -1 chunks may have been read and
 * the buffer queue will have different length than before.
 */
static ssize_t bufq_slurpn(struct bufq *q, size_t max_len,
                           Curl_bufq_reader *reader, void *reader_ctx,
                           CURLcode *err)
{
  ssize_t nread = 0, n;

  *err = CURLE_AGAIN;
  while(1) {

    n = Curl_bufq_sipn(q, max_len, reader, reader_ctx, err);
    if(n < 0) {
      if(!nread || *err != CURLE_AGAIN) {
        /* blocked on first read or real error, fail */
        nread = -1;
      }
      else
        *err = CURLE_OK;
      break;
    }
    else if(n == 0) {
      /* eof */
      *err = CURLE_OK;
      break;
    }
    nread += (size_t)n;
    if(max_len) {
      /* keep the remaining cap in sync with what was read */
      DEBUGASSERT((size_t)n <= max_len);
      max_len -= (size_t)n;
      if(!max_len)
        break;
    }
    /* give up slurping when we get less bytes than we asked for */
    if(q->tail && !chunk_is_full(q->tail))
      break;
  }
  return nread;
}
672 | | |
673 | | ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader, |
674 | | void *reader_ctx, CURLcode *err) |
675 | 0 | { |
676 | 0 | return bufq_slurpn(q, 0, reader, reader_ctx, err); |
677 | 0 | } |