Line | Count | Source |
1 | | /*************************************************************************** |
2 | | * _ _ ____ _ |
3 | | * Project ___| | | | _ \| | |
4 | | * / __| | | | |_) | | |
5 | | * | (__| |_| | _ <| |___ |
6 | | * \___|\___/|_| \_\_____| |
7 | | * |
8 | | * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al. |
9 | | * |
10 | | * This software is licensed as described in the file COPYING, which |
11 | | * you should have received as part of this distribution. The terms |
12 | | * are also available at https://curl.se/docs/copyright.html. |
13 | | * |
14 | | * You may opt to use, copy, modify, merge, publish, distribute and/or sell |
15 | | * copies of the Software, and permit persons to whom the Software is |
16 | | * furnished to do so, under the terms of the COPYING file. |
17 | | * |
18 | | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY |
19 | | * KIND, either express or implied. |
20 | | * |
21 | | * SPDX-License-Identifier: curl |
22 | | * |
23 | | ***************************************************************************/ |
24 | | #include "curl_setup.h" |
25 | | |
26 | | #include "bufq.h" |
27 | | |
28 | | static bool chunk_is_empty(const struct buf_chunk *chunk) |
29 | 71.7M | { |
30 | 71.7M | return chunk->r_offset >= chunk->w_offset; |
31 | 71.7M | } |
32 | | |
33 | | static bool chunk_is_full(const struct buf_chunk *chunk) |
34 | 26.8M | { |
35 | 26.8M | return chunk->w_offset >= chunk->dlen; |
36 | 26.8M | } |
37 | | |
38 | | static size_t chunk_len(const struct buf_chunk *chunk) |
39 | 928M | { |
40 | 928M | return chunk->w_offset - chunk->r_offset; |
41 | 928M | } |
42 | | |
43 | | static void chunk_reset(struct buf_chunk *chunk) |
44 | 15.9M | { |
45 | 15.9M | chunk->next = NULL; |
46 | 15.9M | chunk->r_offset = chunk->w_offset = 0; |
47 | 15.9M | } |
48 | | |
49 | | static size_t chunk_append(struct buf_chunk *chunk, |
50 | | const uint8_t *buf, size_t len) |
51 | 12.7M | { |
52 | 12.7M | uint8_t *p = &chunk->x.data[chunk->w_offset]; |
53 | 12.7M | size_t n = chunk->dlen - chunk->w_offset; |
54 | 12.7M | DEBUGASSERT(chunk->dlen >= chunk->w_offset); |
55 | 12.7M | if(n) { |
56 | 12.7M | n = CURLMIN(n, len); |
57 | 12.7M | memcpy(p, buf, n); |
58 | 12.7M | chunk->w_offset += n; |
59 | 12.7M | } |
60 | 12.7M | return n; |
61 | 12.7M | } |
62 | | |
63 | | static size_t chunk_read(struct buf_chunk *chunk, |
64 | | uint8_t *buf, size_t len) |
65 | 1.89M | { |
66 | 1.89M | uint8_t *p = &chunk->x.data[chunk->r_offset]; |
67 | 1.89M | size_t n = chunk->w_offset - chunk->r_offset; |
68 | 1.89M | DEBUGASSERT(chunk->w_offset >= chunk->r_offset); |
69 | 1.89M | if(!n) { |
70 | 368 | return 0; |
71 | 368 | } |
72 | 1.89M | else if(n <= len) { |
73 | 1.88M | memcpy(buf, p, n); |
74 | 1.88M | chunk->r_offset = chunk->w_offset = 0; |
75 | 1.88M | return n; |
76 | 1.88M | } |
77 | 3.15k | else { |
78 | 3.15k | memcpy(buf, p, len); |
79 | 3.15k | chunk->r_offset += len; |
80 | 3.15k | return len; |
81 | 3.15k | } |
82 | 1.89M | } |
83 | | |
/* Let `reader` fill the free tail space of `chunk` directly. When
 * `max_len` is non-zero, at most that many bytes are offered. On
 * success, the chunk's write offset is advanced by the amount read,
 * which is reported in `*pnread`.
 * Returns CURLE_AGAIN when the chunk has no free space, otherwise
 * whatever `reader` returned. */
static CURLcode chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
                             Curl_bufq_reader *reader,
                             void *reader_ctx, size_t *pnread)
{
  uint8_t *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset; /* free amount */
  CURLcode result;

  *pnread = 0;
  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(!n)
    return CURLE_AGAIN;
  if(max_len && n > max_len)
    n = max_len;
  result = reader(reader_ctx, p, n, pnread);
  if(!result) {
    /* only account for the bytes on success; on error the reader is
     * expected not to have consumed anything lasting */
    DEBUGASSERT(*pnread <= n);
    chunk->w_offset += *pnread;
  }
  return result;
}
105 | | |
106 | | static void chunk_peek(const struct buf_chunk *chunk, |
107 | | const uint8_t **pbuf, size_t *plen) |
108 | 3.34M | { |
109 | 3.34M | DEBUGASSERT(chunk->w_offset >= chunk->r_offset); |
110 | 3.34M | *pbuf = &chunk->x.data[chunk->r_offset]; |
111 | 3.34M | *plen = chunk->w_offset - chunk->r_offset; |
112 | 3.34M | } |
113 | | |
114 | | static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset, |
115 | | const uint8_t **pbuf, size_t *plen) |
116 | 4.10k | { |
117 | 4.10k | offset += chunk->r_offset; |
118 | 4.10k | DEBUGASSERT(chunk->w_offset >= offset); |
119 | 4.10k | *pbuf = &chunk->x.data[offset]; |
120 | 4.10k | *plen = chunk->w_offset - offset; |
121 | 4.10k | } |
122 | | |
123 | | static size_t chunk_skip(struct buf_chunk *chunk, size_t amount) |
124 | 4.50M | { |
125 | 4.50M | size_t n = chunk->w_offset - chunk->r_offset; |
126 | 4.50M | DEBUGASSERT(chunk->w_offset >= chunk->r_offset); |
127 | 4.50M | if(n) { |
128 | 4.50M | n = CURLMIN(n, amount); |
129 | 4.50M | chunk->r_offset += n; |
130 | 4.50M | if(chunk->r_offset == chunk->w_offset) |
131 | 4.49M | chunk->r_offset = chunk->w_offset = 0; |
132 | 4.50M | } |
133 | 4.50M | return n; |
134 | 4.50M | } |
135 | | |
136 | | static void chunk_list_free(struct buf_chunk **anchor) |
137 | 442k | { |
138 | 442k | struct buf_chunk *chunk; |
139 | 722k | while(*anchor) { |
140 | 279k | chunk = *anchor; |
141 | 279k | *anchor = chunk->next; |
142 | 279k | curlx_free(chunk); |
143 | 279k | } |
144 | 442k | } |
145 | | |
146 | | void Curl_bufcp_init(struct bufc_pool *pool, |
147 | | size_t chunk_size, size_t spare_max) |
148 | 19.0k | { |
149 | 19.0k | DEBUGASSERT(chunk_size > 0); |
150 | 19.0k | DEBUGASSERT(spare_max > 0); |
151 | 19.0k | memset(pool, 0, sizeof(*pool)); |
152 | 19.0k | pool->chunk_size = chunk_size; |
153 | 19.0k | pool->spare_max = spare_max; |
154 | 19.0k | } |
155 | | |
/* Take a chunk from `pool`: reuse a spare when one is available,
 * otherwise allocate a fresh chunk of `pool->chunk_size` data bytes.
 * Sets `*pchunk` on success. Returns CURLE_OK or CURLE_OUT_OF_MEMORY. */
static CURLcode bufcp_take(struct bufc_pool *pool,
                           struct buf_chunk **pchunk)
{
  struct buf_chunk *chunk = NULL;

  if(pool->spare) {
    /* cheapest path: pop a chunk off the spare list */
    chunk = pool->spare;
    pool->spare = chunk->next;
    --pool->spare_count;
    chunk_reset(chunk);
    *pchunk = chunk;
    return CURLE_OK;
  }

  /* Check for integer overflow before allocation */
  if(pool->chunk_size > SIZE_MAX - sizeof(*chunk)) {
    *pchunk = NULL;
    return CURLE_OUT_OF_MEMORY;
  }

  /* header and data are one allocation; data storage presumably
   * trails the struct via `x.data` — see chunk_append/chunk_read */
  chunk = curlx_calloc(1, sizeof(*chunk) + pool->chunk_size);
  if(!chunk) {
    *pchunk = NULL;
    return CURLE_OUT_OF_MEMORY;
  }
  chunk->dlen = pool->chunk_size;
  *pchunk = chunk;
  return CURLE_OK;
}
185 | | |
186 | | static void bufcp_put(struct bufc_pool *pool, |
187 | | struct buf_chunk *chunk) |
188 | 3.24M | { |
189 | 3.24M | if(pool->spare_count >= pool->spare_max) { |
190 | 87.4k | curlx_free(chunk); |
191 | 87.4k | } |
192 | 3.16M | else { |
193 | 3.16M | chunk_reset(chunk); |
194 | 3.16M | chunk->next = pool->spare; |
195 | 3.16M | pool->spare = chunk; |
196 | 3.16M | ++pool->spare_count; |
197 | 3.16M | } |
198 | 3.24M | } |
199 | | |
200 | | void Curl_bufcp_free(struct bufc_pool *pool) |
201 | 19.0k | { |
202 | 19.0k | chunk_list_free(&pool->spare); |
203 | 19.0k | pool->spare_count = 0; |
204 | 19.0k | } |
205 | | |
206 | | static void bufq_init(struct bufq *q, struct bufc_pool *pool, |
207 | | size_t chunk_size, size_t max_chunks, int opts) |
208 | 211k | { |
209 | 211k | DEBUGASSERT(chunk_size > 0); |
210 | 211k | DEBUGASSERT(max_chunks > 0); |
211 | 211k | memset(q, 0, sizeof(*q)); |
212 | 211k | q->chunk_size = chunk_size; |
213 | 211k | q->max_chunks = max_chunks; |
214 | 211k | q->pool = pool; |
215 | 211k | q->opts = opts; |
216 | 211k | } |
217 | | |
/* Initialize a pool-less queue with explicit options. */
void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
                     int opts)
{
  bufq_init(q, NULL, chunk_size, max_chunks, opts);
}
223 | | |
224 | | void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks) |
225 | 575 | { |
226 | 575 | bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE); |
227 | 575 | } |
228 | | |
229 | | void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool, |
230 | | size_t max_chunks, int opts) |
231 | 56.2k | { |
232 | 56.2k | bufq_init(q, pool, pool->chunk_size, max_chunks, opts); |
233 | 56.2k | } |
234 | | |
235 | | void Curl_bufq_free(struct bufq *q) |
236 | 211k | { |
237 | 211k | chunk_list_free(&q->head); |
238 | 211k | chunk_list_free(&q->spare); |
239 | 211k | q->tail = NULL; |
240 | 211k | q->chunk_count = 0; |
241 | 211k | } |
242 | | |
243 | | void Curl_bufq_reset(struct bufq *q) |
244 | 2.45M | { |
245 | 2.45M | struct buf_chunk *chunk; |
246 | 9.05M | while(q->head) { |
247 | 6.60M | chunk = q->head; |
248 | 6.60M | q->head = chunk->next; |
249 | 6.60M | chunk->next = q->spare; |
250 | 6.60M | q->spare = chunk; |
251 | 6.60M | } |
252 | 2.45M | q->tail = NULL; |
253 | 2.45M | } |
254 | | |
255 | | size_t Curl_bufq_len(const struct bufq *q) |
256 | 10.5M | { |
257 | 10.5M | const struct buf_chunk *chunk = q->head; |
258 | 10.5M | size_t len = 0; |
259 | 936M | while(chunk) { |
260 | 925M | len += chunk_len(chunk); |
261 | 925M | chunk = chunk->next; |
262 | 925M | } |
263 | 10.5M | return len; |
264 | 10.5M | } |
265 | | |
266 | | bool Curl_bufq_is_empty(const struct bufq *q) |
267 | 112M | { |
268 | 112M | return !q->head || chunk_is_empty(q->head); |
269 | 112M | } |
270 | | |
/* TRUE iff `q` cannot accept another byte without growing beyond its
 * chunk limit. */
bool Curl_bufq_is_full(const struct bufq *q)
{
  /* no tail yet, or spare chunks available: we can always write */
  if(!q->tail || q->spare)
    return FALSE;
  /* below the limit, another chunk may be created */
  if(q->chunk_count < q->max_chunks)
    return FALSE;
  /* above the limit — can happen via BUFQ_OPT_SOFT_LIMIT in
   * get_spare() — always report full */
  if(q->chunk_count > q->max_chunks)
    return TRUE;
  /* we have no spares and cannot make more, is the tail full? */
  return chunk_is_full(q->tail);
}
282 | | |
/* Get an empty chunk ready for writing: from the queue's local spare
 * list, from the shared pool (when the queue has one), or by a fresh
 * allocation. Returns NULL when the chunk limit is reached (and
 * BUFQ_OPT_SOFT_LIMIT is not set) or on allocation failure. */
static struct buf_chunk *get_spare(struct bufq *q)
{
  struct buf_chunk *chunk = NULL;

  if(q->spare) {
    /* reuse a chunk from the local spare list */
    chunk = q->spare;
    q->spare = chunk->next;
    chunk_reset(chunk);
    return chunk;
  }

  /* at the limit without the soft-limit option: refuse to grow */
  if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
    return NULL;

  if(q->pool) {
    /* pooled queue: the pool allocates or recycles for us */
    if(bufcp_take(q->pool, &chunk))
      return NULL;
    ++q->chunk_count;
    return chunk;
  }
  else {
    /* Check for integer overflow before allocation */
    if(q->chunk_size > SIZE_MAX - sizeof(*chunk)) {
      return NULL;
    }

    chunk = curlx_calloc(1, sizeof(*chunk) + q->chunk_size);
    if(!chunk)
      return NULL;
    chunk->dlen = q->chunk_size;
    ++q->chunk_count;
    return chunk;
  }
}
317 | | |
/* Remove empty chunks from the front of `q`. Depending on the setup,
 * a removed chunk goes back to the shared pool, onto the queue's own
 * spare list, or is freed outright. */
static void prune_head(struct bufq *q)
{
  struct buf_chunk *chunk;

  while(q->head && chunk_is_empty(q->head)) {
    chunk = q->head;
    q->head = chunk->next;
    if(q->tail == chunk)
      /* we removed the last chunk; q->head is NULL here */
      q->tail = q->head;
    if(q->pool) {
      /* pooled queue: return the chunk to the shared pool */
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      curlx_free(chunk);
      --q->chunk_count;
    }
    else {
      /* keep the chunk as a local spare for cheap reuse */
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}
345 | | |
/* Return a tail chunk with free write space, creating and linking a
 * new tail when necessary. Returns NULL when no chunk can be obtained
 * (limit reached or allocation failure, see get_spare()). */
static struct buf_chunk *get_non_full_tail(struct bufq *q)
{
  struct buf_chunk *chunk;

  if(q->tail && !chunk_is_full(q->tail))
    return q->tail;
  chunk = get_spare(q);
  if(chunk) {
    /* new tail, and possibly new head */
    if(q->tail) {
      q->tail->next = chunk;
      q->tail = chunk;
    }
    else {
      DEBUGASSERT(!q->head);
      q->head = q->tail = chunk;
    }
  }
  return chunk;
}
366 | | |
/* Append up to `len` bytes from `buf` to the queue, growing it chunk
 * by chunk as needed. The number of bytes actually stored is reported
 * in `*pnwritten`; it can be less than `len` when the queue runs full.
 * Returns CURLE_AGAIN when nothing could be stored,
 * CURLE_OUT_OF_MEMORY when a chunk could not be allocated although the
 * limit permitted one, CURLE_OK otherwise. */
CURLcode Curl_bufq_write(struct bufq *q,
                         const uint8_t *buf, size_t len,
                         size_t *pnwritten)
{
  struct buf_chunk *tail;
  size_t n;

  DEBUGASSERT(q->max_chunks > 0);
  *pnwritten = 0;
  while(len) {
    tail = get_non_full_tail(q);
    if(!tail) {
      /* no tail: either at the chunk limit (break, report what we
       * have) or an allocation failed while we were allowed to grow */
      if((q->chunk_count < q->max_chunks) || (q->opts & BUFQ_OPT_SOFT_LIMIT))
        /* should have gotten a tail, but did not */
        return CURLE_OUT_OF_MEMORY;
      break;
    }
    n = chunk_append(tail, buf, len);
    if(!n)
      break;
    *pnwritten += n;
    buf += n;
    len -= n;
  }
  /* blocked only when nothing was stored and bytes remain */
  return (!*pnwritten && len) ? CURLE_AGAIN : CURLE_OK;
}
393 | | |
394 | | CURLcode Curl_bufq_cwrite(struct bufq *q, |
395 | | const char *buf, size_t len, |
396 | | size_t *pnwritten) |
397 | 6.26M | { |
398 | 6.26M | return Curl_bufq_write(q, (const uint8_t *)buf, len, pnwritten); |
399 | 6.26M | } |
400 | | |
401 | | CURLcode Curl_bufq_read(struct bufq *q, uint8_t *buf, size_t len, |
402 | | size_t *pnread) |
403 | 247k | { |
404 | 247k | *pnread = 0; |
405 | 2.13M | while(len && q->head) { |
406 | 1.89M | size_t n = chunk_read(q->head, buf, len); |
407 | 1.89M | if(n) { |
408 | 1.89M | *pnread += n; |
409 | 1.89M | buf += n; |
410 | 1.89M | len -= n; |
411 | 1.89M | } |
412 | 1.89M | prune_head(q); |
413 | 1.89M | } |
414 | 247k | return (!*pnread) ? CURLE_AGAIN : CURLE_OK; |
415 | 247k | } |
416 | | |
417 | | CURLcode Curl_bufq_cread(struct bufq *q, char *buf, size_t len, |
418 | | size_t *pnread) |
419 | 4.29k | { |
420 | 4.29k | return Curl_bufq_read(q, (uint8_t *)buf, len, pnread); |
421 | 4.29k | } |
422 | | |
423 | | bool Curl_bufq_peek(struct bufq *q, |
424 | | const uint8_t **pbuf, size_t *plen) |
425 | 4.10M | { |
426 | 4.10M | if(q->head && chunk_is_empty(q->head)) { |
427 | 5.35k | prune_head(q); |
428 | 5.35k | } |
429 | 4.10M | if(q->head && !chunk_is_empty(q->head)) { |
430 | 3.34M | chunk_peek(q->head, pbuf, plen); |
431 | 3.34M | return TRUE; |
432 | 3.34M | } |
433 | 762k | *pbuf = NULL; |
434 | 762k | *plen = 0; |
435 | 762k | return FALSE; |
436 | 4.10M | } |
437 | | |
438 | | bool Curl_bufq_peek_at(struct bufq *q, size_t offset, |
439 | | const uint8_t **pbuf, size_t *plen) |
440 | 228k | { |
441 | 228k | struct buf_chunk *c = q->head; |
442 | 228k | size_t clen; |
443 | | |
444 | 2.75M | while(c) { |
445 | 2.52M | clen = chunk_len(c); |
446 | 2.52M | if(!clen) |
447 | 273 | break; |
448 | 2.52M | if(offset >= clen) { |
449 | 2.52M | offset -= clen; |
450 | 2.52M | c = c->next; |
451 | 2.52M | continue; |
452 | 2.52M | } |
453 | 4.10k | chunk_peek_at(c, offset, pbuf, plen); |
454 | 4.10k | return TRUE; |
455 | 2.52M | } |
456 | 224k | *pbuf = NULL; |
457 | 224k | *plen = 0; |
458 | 224k | return FALSE; |
459 | 228k | } |
460 | | |
461 | | void Curl_bufq_skip(struct bufq *q, size_t amount) |
462 | 4.06M | { |
463 | 4.06M | size_t n; |
464 | | |
465 | 8.56M | while(amount && q->head) { |
466 | 4.50M | n = chunk_skip(q->head, amount); |
467 | 4.50M | amount -= n; |
468 | 4.50M | prune_head(q); |
469 | 4.50M | } |
470 | 4.06M | } |
471 | | |
/* Pass buffered data to `writer`, chunk by chunk, de-queueing whatever
 * it accepts. The total amount written is reported in `*pwritten`.
 * Returns CURLE_AGAIN when `writer` blocked before anything was
 * written; blocking after partial success is reported as CURLE_OK. */
CURLcode Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
                        void *writer_ctx, size_t *pwritten)
{
  const uint8_t *buf;
  size_t blen;
  CURLcode result = CURLE_OK;

  *pwritten = 0;
  while(Curl_bufq_peek(q, &buf, &blen)) {
    size_t chunk_written;

    result = writer(writer_ctx, buf, blen, &chunk_written);
    if(result) {
      if((result == CURLE_AGAIN) && *pwritten) {
        /* blocked on subsequent write, report success */
        result = CURLE_OK;
      }
      break;
    }
    if(!chunk_written) {
      if(!*pwritten) {
        /* treat as blocked */
        result = CURLE_AGAIN;
      }
      break;
    }
    /* only skip (consume) what the writer actually took */
    *pwritten += chunk_written;
    Curl_bufq_skip(q, chunk_written);
  }
  return result;
}
503 | | |
/* Add `buf`/`len` to the queue, flushing the queue to `writer` whenever
 * it runs full to make room. `*pwritten` reports how many bytes of
 * `buf` were taken into the queue (not how many reached `writer`).
 * Returns CURLE_AGAIN when none of `buf` could be stored, other
 * CURLcodes on real errors from writing or allocating. */
CURLcode Curl_bufq_write_pass(struct bufq *q,
                              const uint8_t *buf, size_t len,
                              Curl_bufq_writer *writer, void *writer_ctx,
                              size_t *pwritten)
{
  CURLcode result = CURLE_OK;
  size_t n;

  *pwritten = 0;
  while(len) {
    if(Curl_bufq_is_full(q)) {
      /* try to make room in case we are full */
      result = Curl_bufq_pass(q, writer, writer_ctx, &n);
      if(result) {
        if(result != CURLE_AGAIN) {
          /* real error, fail */
          return result;
        }
        /* would block, bufq is full, give up */
        break;
      }
    }

    /* Add to bufq as much as there is room for */
    result = Curl_bufq_write(q, buf, len, &n);
    if(result) {
      if(result != CURLE_AGAIN)
        /* real error, fail */
        return result;
      /* result == CURLE_AGAIN */
      if(*pwritten)
        /* we did write successfully before */
        result = CURLE_OK;
      return result;
    }
    else if(n == 0)
      /* edge case of writer returning 0 (and len is >0)
       * break or we might enter an infinite loop here */
      break;

    /* Track what we added to bufq */
    buf += n;
    len -= n;
    *pwritten += n;
  }

  return (!*pwritten && len) ? CURLE_AGAIN : CURLE_OK;
}
552 | | |
553 | | CURLcode Curl_bufq_sipn(struct bufq *q, size_t max_len, |
554 | | Curl_bufq_reader *reader, void *reader_ctx, |
555 | | size_t *pnread) |
556 | 9.91M | { |
557 | 9.91M | struct buf_chunk *tail = NULL; |
558 | | |
559 | 9.91M | *pnread = 0; |
560 | 9.91M | tail = get_non_full_tail(q); |
561 | 9.91M | if(!tail) { |
562 | 1.02M | if(q->chunk_count < q->max_chunks) |
563 | 0 | return CURLE_OUT_OF_MEMORY; |
564 | | /* full, blocked */ |
565 | 1.02M | return CURLE_AGAIN; |
566 | 1.02M | } |
567 | | |
568 | 8.89M | return chunk_slurpn(tail, max_len, reader, reader_ctx, pnread); |
569 | 9.91M | } |
570 | | |
/**
 * Read up to `max_len` bytes and append it to the end of the buffer queue.
 * if `max_len` is 0, no limit is imposed and the call behaves exactly
 * the same as `Curl_bufq_slurp()`.
 * Returns the total amount of buf read (may be 0) in `pnread` or error
 * Note that even in case of an error chunks may have been read and
 * the buffer queue will have different length than before.
 */
static CURLcode bufq_slurpn(struct bufq *q, size_t max_len,
                            Curl_bufq_reader *reader, void *reader_ctx,
                            size_t *pnread)
{
  CURLcode result;

  *pnread = 0;
  while(1) {
    size_t n;
    result = Curl_bufq_sipn(q, max_len, reader, reader_ctx, &n);
    if(result) {
      if(!*pnread || result != CURLE_AGAIN) {
        /* blocked on first read or real error, fail */
        return result;
      }
      /* CURLE_AGAIN after earlier progress: report what we got */
      result = CURLE_OK;
      break;
    }
    else if(n == 0) {
      /* eof, result remains CURLE_OK */
      break;
    }
    *pnread += n;
    if(max_len) {
      /* stop once the requested limit has been reached */
      DEBUGASSERT(n <= max_len);
      max_len -= n;
      if(!max_len)
        break;
    }
    /* give up slurping when we get less bytes than we asked for */
    if(q->tail && !chunk_is_full(q->tail))
      break;
  }
  return result;
}
614 | | |
615 | | CURLcode Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader, |
616 | | void *reader_ctx, size_t *pnread) |
617 | 603k | { |
618 | 603k | return bufq_slurpn(q, 0, reader, reader_ctx, pnread); |
619 | 603k | } |