/src/gdal/curl/lib/http_chunks.c
Line | Count | Source (jump to first uncovered line) |
1 | | /*************************************************************************** |
2 | | * _ _ ____ _ |
3 | | * Project ___| | | | _ \| | |
4 | | * / __| | | | |_) | | |
5 | | * | (__| |_| | _ <| |___ |
6 | | * \___|\___/|_| \_\_____| |
7 | | * |
8 | | * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al. |
9 | | * |
10 | | * This software is licensed as described in the file COPYING, which |
11 | | * you should have received as part of this distribution. The terms |
12 | | * are also available at https://curl.se/docs/copyright.html. |
13 | | * |
14 | | * You may opt to use, copy, modify, merge, publish, distribute and/or sell |
15 | | * copies of the Software, and permit persons to whom the Software is |
16 | | * furnished to do so, under the terms of the COPYING file. |
17 | | * |
18 | | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY |
19 | | * KIND, either express or implied. |
20 | | * |
21 | | * SPDX-License-Identifier: curl |
22 | | * |
23 | | ***************************************************************************/ |
24 | | |
25 | | #include "curl_setup.h" |
26 | | |
27 | | #ifndef CURL_DISABLE_HTTP |
28 | | |
29 | | #include "urldata.h" /* it includes http_chunks.h */ |
30 | | #include "curl_printf.h" |
31 | | #include "curl_trc.h" |
32 | | #include "sendf.h" /* for the client write stuff */ |
33 | | #include "curlx/dynbuf.h" |
34 | | #include "content_encoding.h" |
35 | | #include "http.h" |
36 | | #include "multiif.h" |
37 | | #include "curlx/strparse.h" |
38 | | #include "curlx/warnless.h" |
39 | | |
40 | | /* The last #include files should be: */ |
41 | | #include "curl_memory.h" |
42 | | #include "memdebug.h" |
43 | | |
44 | | /* |
45 | | * Chunk format (simplified): |
46 | | * |
47 | | * <HEX SIZE>[ chunk extension ] CRLF |
48 | | * <DATA> CRLF |
49 | | * |
50 | | * Highlights from RFC2616 section 3.6 say: |
51 | | |
52 | | The chunked encoding modifies the body of a message in order to |
53 | | transfer it as a series of chunks, each with its own size indicator, |
54 | | followed by an OPTIONAL trailer containing entity-header fields. This |
55 | | allows dynamically produced content to be transferred along with the |
56 | | information necessary for the recipient to verify that it has |
57 | | received the full message. |
58 | | |
59 | | Chunked-Body = *chunk |
60 | | last-chunk |
61 | | trailer |
62 | | CRLF |
63 | | |
64 | | chunk = chunk-size [ chunk-extension ] CRLF |
65 | | chunk-data CRLF |
66 | | chunk-size = 1*HEX |
67 | | last-chunk = 1*("0") [ chunk-extension ] CRLF |
68 | | |
69 | | chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] ) |
70 | | chunk-ext-name = token |
71 | | chunk-ext-val = token | quoted-string |
72 | | chunk-data = chunk-size(OCTET) |
73 | | trailer = *(entity-header CRLF) |
74 | | |
75 | | The chunk-size field is a string of hex digits indicating the size of |
76 | | the chunk. The chunked encoding is ended by any chunk whose size is |
77 | | zero, followed by the trailer, which is terminated by an empty line. |
78 | | |
79 | | */ |
80 | | |
81 | | void Curl_httpchunk_init(struct Curl_easy *data, struct Curl_chunker *ch, |
82 | | bool ignore_body) |
83 | 16 | { |
84 | 16 | (void)data; |
85 | 16 | ch->hexindex = 0; /* start at 0 */ |
86 | 16 | ch->state = CHUNK_HEX; /* we get hex first! */ |
87 | 16 | ch->last_code = CHUNKE_OK; |
88 | 16 | curlx_dyn_init(&ch->trailer, DYN_H1_TRAILER); |
89 | 16 | ch->ignore_body = ignore_body; |
90 | 16 | } |
91 | | |
92 | | void Curl_httpchunk_reset(struct Curl_easy *data, struct Curl_chunker *ch, |
93 | | bool ignore_body) |
94 | 0 | { |
95 | 0 | (void)data; |
96 | 0 | ch->hexindex = 0; /* start at 0 */ |
97 | 0 | ch->state = CHUNK_HEX; /* we get hex first! */ |
98 | 0 | ch->last_code = CHUNKE_OK; |
99 | 0 | curlx_dyn_reset(&ch->trailer); |
100 | 0 | ch->ignore_body = ignore_body; |
101 | 0 | } |
102 | | |
103 | | void Curl_httpchunk_free(struct Curl_easy *data, struct Curl_chunker *ch) |
104 | 16 | { |
105 | 16 | (void)data; |
106 | 16 | curlx_dyn_free(&ch->trailer); |
107 | 16 | } |
108 | | |
109 | | bool Curl_httpchunk_is_done(struct Curl_easy *data, struct Curl_chunker *ch) |
110 | 0 | { |
111 | 0 | (void)data; |
112 | 0 | return ch->state == CHUNK_DONE; |
113 | 0 | } |
114 | | |
/*
 * Decode up to `blen` bytes of chunked transfer encoding from `buf`,
 * forwarding de-chunked body bytes to `cw_next` (or, when `cw_next` is
 * NULL, to the default client write path via Curl_client_write()).
 * On return, `*pconsumed` holds the number of input bytes processed.
 * Malformed input moves `ch` into CHUNK_FAILED, records the detailed
 * reason in `ch->last_code` and returns CURLE_RECV_ERROR. On reaching
 * the final CRLF, `ch->state` becomes CHUNK_DONE and `ch->datasize` is
 * set to the number of unconsumed bytes left in `buf`.
 */
static CURLcode httpchunk_readwrite(struct Curl_easy *data,
                                    struct Curl_chunker *ch,
                                    struct Curl_cwriter *cw_next,
                                    const char *buf, size_t blen,
                                    size_t *pconsumed)
{
  CURLcode result = CURLE_OK;
  size_t piece;

  *pconsumed = 0; /* nothing's written yet */
  /* first check terminal states that will not progress anywhere */
  if(ch->state == CHUNK_DONE)
    return CURLE_OK;
  if(ch->state == CHUNK_FAILED)
    return CURLE_RECV_ERROR;

  /* the original data is written to the client, but we go on with the
     chunk read process, to properly calculate the content length */
  if(data->set.http_te_skip && !ch->ignore_body) {
    if(cw_next)
      result = Curl_cwriter_write(data, cw_next, CLIENTWRITE_BODY, buf, blen);
    else
      result = Curl_client_write(data, CLIENTWRITE_BODY, buf, blen);
    if(result) {
      ch->state = CHUNK_FAILED;
      ch->last_code = CHUNKE_PASSTHRU_ERROR;
      return result;
    }
  }

  /* state machine: consume input byte-by-byte for the framing parts,
     in bulk for the chunk payload (CHUNK_DATA) */
  while(blen) {
    switch(ch->state) {
    case CHUNK_HEX:
      /* collecting the hex digits of the next chunk's size */
      if(ISXDIGIT(*buf)) {
        if(ch->hexindex >= CHUNK_MAXNUM_LEN) {
          failf(data, "chunk hex-length longer than %d", CHUNK_MAXNUM_LEN);
          ch->state = CHUNK_FAILED;
          ch->last_code = CHUNKE_TOO_LONG_HEX; /* longer than we support */
          return CURLE_RECV_ERROR;
        }
        ch->hexbuffer[ch->hexindex++] = *buf;
        buf++;
        blen--;
        (*pconsumed)++;
      }
      else {
        const char *p;
        if(0 == ch->hexindex) {
          /* This is illegal data, we received junk where we expected
             a hexadecimal digit. */
          failf(data, "chunk hex-length char not a hex digit: 0x%x", *buf);
          ch->state = CHUNK_FAILED;
          ch->last_code = CHUNKE_ILLEGAL_HEX;
          return CURLE_RECV_ERROR;
        }
        /* blen and buf are unmodified */
        ch->hexbuffer[ch->hexindex] = 0;
        p = &ch->hexbuffer[0];
        if(curlx_str_hex(&p, &ch->datasize, CURL_OFF_T_MAX)) {
          failf(data, "invalid chunk size: '%s'", ch->hexbuffer);
          ch->state = CHUNK_FAILED;
          ch->last_code = CHUNKE_ILLEGAL_HEX;
          return CURLE_RECV_ERROR;
        }
        ch->state = CHUNK_LF; /* now wait for the CRLF */
      }
      break;

    case CHUNK_LF:
      /* waiting for the LF after a chunk size; bytes before the LF
         (CR, chunk extensions) are skipped without validation */
      if(*buf == 0x0a) {
        /* we are now expecting data to come, unless size was zero! */
        if(0 == ch->datasize) {
          ch->state = CHUNK_TRAILER; /* now check for trailers */
        }
        else {
          ch->state = CHUNK_DATA;
          CURL_TRC_WRITE(data, "http_chunked, chunk start of %"
                         FMT_OFF_T " bytes", ch->datasize);
        }
      }

      buf++;
      blen--;
      (*pconsumed)++;
      break;

    case CHUNK_DATA:
      /* We expect 'datasize' of data. We have 'blen' right now, it can be
         more or less than 'datasize'. Get the smallest piece.
       */
      piece = blen;
      if(ch->datasize < (curl_off_t)blen)
        piece = curlx_sotouz(ch->datasize);

      /* Write the data portion available */
      if(!data->set.http_te_skip && !ch->ignore_body) {
        if(cw_next)
          result = Curl_cwriter_write(data, cw_next, CLIENTWRITE_BODY,
                                      buf, piece);
        else
          result = Curl_client_write(data, CLIENTWRITE_BODY, buf, piece);
        if(result) {
          ch->state = CHUNK_FAILED;
          ch->last_code = CHUNKE_PASSTHRU_ERROR;
          return result;
        }
      }

      *pconsumed += piece;
      ch->datasize -= piece; /* decrease amount left to expect */
      buf += piece;    /* move read pointer forward */
      blen -= piece;   /* decrease space left in this round */
      CURL_TRC_WRITE(data, "http_chunked, write %zu body bytes, %"
                     FMT_OFF_T " bytes in chunk remain",
                     piece, ch->datasize);

      if(0 == ch->datasize)
        /* end of data this round, we now expect a trailing CRLF */
        ch->state = CHUNK_POSTLF;
      break;

    case CHUNK_POSTLF:
      /* expecting the CRLF that terminates a chunk's payload */
      if(*buf == 0x0a) {
        /* The last one before we go back to hex state and start all over. */
        Curl_httpchunk_reset(data, ch, ch->ignore_body);
      }
      else if(*buf != 0x0d) {
        ch->state = CHUNK_FAILED;
        ch->last_code = CHUNKE_BAD_CHUNK;
        return CURLE_RECV_ERROR;
      }
      buf++;
      blen--;
      (*pconsumed)++;
      break;

    case CHUNK_TRAILER:
      /* accumulating one trailer header line into ch->trailer */
      if((*buf == 0x0d) || (*buf == 0x0a)) {
        char *tr = curlx_dyn_ptr(&ch->trailer);
        /* this is the end of a trailer, but if the trailer was zero bytes
           there was no trailer and we move on */

        if(tr) {
          result = curlx_dyn_addn(&ch->trailer, STRCONST("\x0d\x0a"));
          if(result) {
            ch->state = CHUNK_FAILED;
            ch->last_code = CHUNKE_OUT_OF_MEMORY;
            return result;
          }
          tr = curlx_dyn_ptr(&ch->trailer);
          if(!data->set.http_te_skip) {
            size_t trlen = curlx_dyn_len(&ch->trailer);
            /* trailers are surfaced to the client as headers */
            if(cw_next)
              result = Curl_cwriter_write(data, cw_next,
                                          CLIENTWRITE_HEADER|
                                          CLIENTWRITE_TRAILER,
                                          tr, trlen);
            else
              result = Curl_client_write(data,
                                         CLIENTWRITE_HEADER|
                                         CLIENTWRITE_TRAILER,
                                         tr, trlen);
            if(result) {
              ch->state = CHUNK_FAILED;
              ch->last_code = CHUNKE_PASSTHRU_ERROR;
              return result;
            }
          }
          curlx_dyn_reset(&ch->trailer);
          ch->state = CHUNK_TRAILER_CR;
          if(*buf == 0x0a)
            /* already on the LF */
            break;
        }
        else {
          /* no trailer, we are on the final CRLF pair */
          ch->state = CHUNK_TRAILER_POSTCR;
          break; /* do not advance the pointer */
        }
      }
      else {
        result = curlx_dyn_addn(&ch->trailer, buf, 1);
        if(result) {
          ch->state = CHUNK_FAILED;
          ch->last_code = CHUNKE_OUT_OF_MEMORY;
          return result;
        }
      }
      buf++;
      blen--;
      (*pconsumed)++;
      break;

    case CHUNK_TRAILER_CR:
      /* a CR ended a trailer line; only an LF may follow */
      if(*buf == 0x0a) {
        ch->state = CHUNK_TRAILER_POSTCR;
        buf++;
        blen--;
        (*pconsumed)++;
      }
      else {
        ch->state = CHUNK_FAILED;
        ch->last_code = CHUNKE_BAD_CHUNK;
        return CURLE_RECV_ERROR;
      }
      break;

    case CHUNK_TRAILER_POSTCR:
      /* We enter this state when a CR should arrive so we expect to
         have to first pass a CR before we wait for LF */
      if((*buf != 0x0d) && (*buf != 0x0a)) {
        /* not a CR then it must be another header in the trailer */
        ch->state = CHUNK_TRAILER;
        break;
      }
      if(*buf == 0x0d) {
        /* skip if CR */
        buf++;
        blen--;
        (*pconsumed)++;
      }
      /* now wait for the final LF */
      ch->state = CHUNK_STOP;
      break;

    case CHUNK_STOP:
      /* the single LF closing the whole chunked body */
      if(*buf == 0x0a) {
        blen--;
        (*pconsumed)++;
        /* Record the length of any data left in the end of the buffer
           even if there is no more chunks to read */
        ch->datasize = blen;
        ch->state = CHUNK_DONE;
        CURL_TRC_WRITE(data, "http_chunk, response complete");
        return CURLE_OK;
      }
      else {
        ch->state = CHUNK_FAILED;
        ch->last_code = CHUNKE_BAD_CHUNK;
        CURL_TRC_WRITE(data, "http_chunk error, expected 0x0a, seeing 0x%ux",
                       (unsigned int)*buf);
        return CURLE_RECV_ERROR;
      }
    case CHUNK_DONE:
      return CURLE_OK;

    case CHUNK_FAILED:
      return CURLE_RECV_ERROR;
    }

  }
  return CURLE_OK;
}
369 | | |
370 | | static const char *Curl_chunked_strerror(CHUNKcode code) |
371 | 0 | { |
372 | 0 | switch(code) { |
373 | 0 | default: |
374 | 0 | return "OK"; |
375 | 0 | case CHUNKE_TOO_LONG_HEX: |
376 | 0 | return "Too long hexadecimal number"; |
377 | 0 | case CHUNKE_ILLEGAL_HEX: |
378 | 0 | return "Illegal or missing hexadecimal sequence"; |
379 | 0 | case CHUNKE_BAD_CHUNK: |
380 | 0 | return "Malformed encoding found"; |
381 | 0 | case CHUNKE_PASSTHRU_ERROR: |
382 | 0 | return "Error writing data to client"; |
383 | 0 | case CHUNKE_BAD_ENCODING: |
384 | 0 | return "Bad content-encoding found"; |
385 | 0 | case CHUNKE_OUT_OF_MEMORY: |
386 | 0 | return "Out of memory"; |
387 | 0 | } |
388 | 0 | } |
389 | | |
390 | | CURLcode Curl_httpchunk_read(struct Curl_easy *data, |
391 | | struct Curl_chunker *ch, |
392 | | char *buf, size_t blen, |
393 | | size_t *pconsumed) |
394 | 0 | { |
395 | 0 | return httpchunk_readwrite(data, ch, NULL, buf, blen, pconsumed); |
396 | 0 | } |
397 | | |
/* per-transfer context of the chunked response decoder writer */
struct chunked_writer {
  struct Curl_cwriter super;  /* writer base */
  struct Curl_chunker ch;     /* chunk decoding state */
};
402 | | |
403 | | static CURLcode cw_chunked_init(struct Curl_easy *data, |
404 | | struct Curl_cwriter *writer) |
405 | 16 | { |
406 | 16 | struct chunked_writer *ctx = writer->ctx; |
407 | | |
408 | 16 | data->req.chunk = TRUE; /* chunks coming our way. */ |
409 | 16 | Curl_httpchunk_init(data, &ctx->ch, FALSE); |
410 | 16 | return CURLE_OK; |
411 | 16 | } |
412 | | |
413 | | static void cw_chunked_close(struct Curl_easy *data, |
414 | | struct Curl_cwriter *writer) |
415 | 16 | { |
416 | 16 | struct chunked_writer *ctx = writer->ctx; |
417 | 16 | Curl_httpchunk_free(data, &ctx->ch); |
418 | 16 | } |
419 | | |
/*
 * Client-writer entry point for chunked responses. Non-body writes pass
 * straight to the next writer; body writes are run through the chunk
 * decoder. When the final chunk is seen, the download is flagged done;
 * an EOS with the decoder still mid-chunk is a premature close and
 * yields CURLE_PARTIAL_FILE.
 */
static CURLcode cw_chunked_write(struct Curl_easy *data,
                                 struct Curl_cwriter *writer, int type,
                                 const char *buf, size_t blen)
{
  struct chunked_writer *ctx = writer->ctx;
  CURLcode result;
  size_t consumed;

  /* only body bytes are chunk-encoded; forward everything else */
  if(!(type & CLIENTWRITE_BODY))
    return Curl_cwriter_write(data, writer->next, type, buf, blen);

  consumed = 0;
  result = httpchunk_readwrite(data, &ctx->ch, writer->next, buf, blen,
                               &consumed);

  if(result) {
    /* the decoder recorded the detailed failure in ch.last_code */
    if(CHUNKE_PASSTHRU_ERROR == ctx->ch.last_code) {
      failf(data, "Failed reading the chunked-encoded stream");
    }
    else {
      failf(data, "%s in chunked-encoding",
            Curl_chunked_strerror(ctx->ch.last_code));
    }
    return result;
  }

  blen -= consumed;
  if(CHUNK_DONE == ctx->ch.state) {
    /* chunks read successfully, download is complete */
    data->req.download_done = TRUE;
    if(blen) {
      infof(data, "Leftovers after chunking: %zu bytes", blen);
    }
  }
  else if((type & CLIENTWRITE_EOS) && !data->req.no_body) {
    /* stream ended but the chunked body was not complete */
    failf(data, "transfer closed with outstanding read data remaining");
    return CURLE_PARTIAL_FILE;
  }

  return CURLE_OK;
}
461 | | |
/* HTTP chunked Transfer-Encoding decoder (client writer vtable) */
const struct Curl_cwtype Curl_httpchunk_unencoder = {
  "chunked",                     /* name */
  NULL,                          /* alias */
  cw_chunked_init,               /* do_init */
  cw_chunked_write,              /* do_write */
  cw_chunked_close,              /* do_close */
  sizeof(struct chunked_writer)  /* per-writer context size */
};
471 | | |
/* min/max length of an HTTP chunk that we want to generate: small reads
   are batched up to MINLEN, and no chunk exceeds MAXLEN */
#define CURL_CHUNKED_MINLEN (1024)
#define CURL_CHUNKED_MAXLEN (64 * 1024)
475 | | |
/* per-transfer context of the chunked request-body encoder reader */
struct chunked_reader {
  struct Curl_creader super;  /* reader base */
  struct bufq chunkbuf;       /* already chunk-encoded bytes awaiting read */
  BIT(read_eos);  /* we read an EOS from the next reader */
  BIT(eos);       /* we have returned an EOS */
};
482 | | |
483 | | static CURLcode cr_chunked_init(struct Curl_easy *data, |
484 | | struct Curl_creader *reader) |
485 | 0 | { |
486 | 0 | struct chunked_reader *ctx = reader->ctx; |
487 | 0 | (void)data; |
488 | 0 | Curl_bufq_init2(&ctx->chunkbuf, CURL_CHUNKED_MAXLEN, 2, BUFQ_OPT_SOFT_LIMIT); |
489 | 0 | return CURLE_OK; |
490 | 0 | } |
491 | | |
492 | | static void cr_chunked_close(struct Curl_easy *data, |
493 | | struct Curl_creader *reader) |
494 | 0 | { |
495 | 0 | struct chunked_reader *ctx = reader->ctx; |
496 | 0 | (void)data; |
497 | 0 | Curl_bufq_free(&ctx->chunkbuf); |
498 | 0 | } |
499 | | |
/*
 * Append the terminating "0" chunk to the staging buffer. Without a
 * trailer callback this is the fixed "0\r\n\r\n" sequence. With one, the
 * callback supplies trailer headers; only lines of the form "Name: value"
 * are added (others are skipped with an info message), followed by the
 * final empty line. Returns CURLE_ABORTED_BY_CALLBACK if the callback
 * does not return CURL_TRAILERFUNC_OK.
 */
static CURLcode add_last_chunk(struct Curl_easy *data,
                               struct Curl_creader *reader)
{
  struct chunked_reader *ctx = reader->ctx;
  struct curl_slist *trailers = NULL, *tr;
  CURLcode result;
  size_t n;
  int rc;

  if(!data->set.trailer_callback) {
    CURL_TRC_READ(data, "http_chunk, added last, empty chunk");
    return Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("0\r\n\r\n"), &n);
  }

  result = Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("0\r\n"), &n);
  if(result)
    goto out;

  /* invoke the application's trailer callback to collect headers */
  Curl_set_in_callback(data, TRUE);
  rc = data->set.trailer_callback(&trailers, data->set.trailer_data);
  Curl_set_in_callback(data, FALSE);

  if(rc != CURL_TRAILERFUNC_OK) {
    failf(data, "operation aborted by trailing headers callback");
    result = CURLE_ABORTED_BY_CALLBACK;
    goto out;
  }

  for(tr = trailers; tr; tr = tr->next) {
    /* only add correctly formatted trailers */
    char *ptr = strchr(tr->data, ':');
    if(!ptr || *(ptr + 1) != ' ') {
      infof(data, "Malformatted trailing header, skipping trailer");
      continue;
    }

    result = Curl_bufq_cwrite(&ctx->chunkbuf, tr->data,
                              strlen(tr->data), &n);
    if(!result)
      result = Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("\r\n"), &n);
    if(result)
      goto out;
  }

  /* empty line terminates the trailer section */
  result = Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("\r\n"), &n);

out:
  curl_slist_free_all(trailers);
  CURL_TRC_READ(data, "http_chunk, added last chunk with trailers "
                "from client -> %d", result);
  return result;
}
552 | | |
/*
 * Read raw bytes from the next reader and append one encoded chunk
 * ("<hex size>\r\n<data>\r\n") to the staging buffer. `buf`/`blen` is the
 * caller's output buffer, borrowed as scratch when large enough;
 * otherwise a local CURL_CHUNKED_MINLEN buffer is used so small reads
 * still produce decently sized chunks. When the next reader signals EOS,
 * the terminating chunk (and any trailers) is appended too.
 */
static CURLcode add_chunk(struct Curl_easy *data,
                          struct Curl_creader *reader,
                          char *buf, size_t blen)
{
  struct chunked_reader *ctx = reader->ctx;
  CURLcode result;
  char tmp[CURL_CHUNKED_MINLEN];
  size_t nread;
  bool eos;

  DEBUGASSERT(!ctx->read_eos);
  blen = CURLMIN(blen, CURL_CHUNKED_MAXLEN); /* respect our buffer pref */
  if(blen < sizeof(tmp)) {
    /* small read, make a chunk of decent size */
    buf = tmp;
    blen = sizeof(tmp);
  }
  else {
    /* larger read, make a chunk that will fit when read back */
    blen -= (8 + 2 + 2); /* deduct max overhead, 8 hex + 2*crlf */
  }

  result = Curl_creader_read(data, reader->next, buf, blen, &nread, &eos);
  if(result)
    return result;
  if(eos)
    ctx->read_eos = TRUE;

  if(nread) {
    /* actually got bytes, wrap them into the chunkbuf */
    char hd[11] = "";
    int hdlen;
    size_t n;

    /* chunk header: hex length + CRLF */
    hdlen = msnprintf(hd, sizeof(hd), "%zx\r\n", nread);
    if(hdlen <= 0)
      return CURLE_READ_ERROR;
    /* On a soft-limited bufq, we do not need to check that all was written */
    result = Curl_bufq_cwrite(&ctx->chunkbuf, hd, hdlen, &n);
    if(!result)
      result = Curl_bufq_cwrite(&ctx->chunkbuf, buf, nread, &n);
    if(!result)
      result = Curl_bufq_cwrite(&ctx->chunkbuf, "\r\n", 2, &n);
    CURL_TRC_READ(data, "http_chunk, made chunk of %zu bytes -> %d",
                  nread, result);
    if(result)
      return result;
  }

  if(ctx->read_eos)
    return add_last_chunk(data, reader);
  return CURLE_OK;
}
606 | | |
/*
 * Client-reader entry point: hand out up to `blen` chunk-encoded bytes
 * into `buf`. Refills the staging buffer from the next reader when it
 * runs dry, then drains it. `*peos` becomes TRUE once the final chunk
 * has been fully read out. May return 0 bytes without EOS when the
 * underlying read callbacks are paused.
 */
static CURLcode cr_chunked_read(struct Curl_easy *data,
                                struct Curl_creader *reader,
                                char *buf, size_t blen,
                                size_t *pnread, bool *peos)
{
  struct chunked_reader *ctx = reader->ctx;
  CURLcode result = CURLE_READ_ERROR;

  *pnread = 0;
  *peos = ctx->eos;

  if(!ctx->eos) {
    if(!ctx->read_eos && Curl_bufq_is_empty(&ctx->chunkbuf)) {
      /* Still getting data form the next reader, buffer is empty */
      result = add_chunk(data, reader, buf, blen);
      if(result)
        return result;
    }

    if(!Curl_bufq_is_empty(&ctx->chunkbuf)) {
      result = Curl_bufq_cread(&ctx->chunkbuf, buf, blen, pnread);
      if(!result && ctx->read_eos && Curl_bufq_is_empty(&ctx->chunkbuf)) {
        /* no more data, read all, done. */
        ctx->eos = TRUE;
        *peos = TRUE;
      }
      return result;
    }
  }
  /* We may get here, because we are done or because callbacks paused */
  DEBUGASSERT(ctx->eos || !ctx->read_eos);
  return CURLE_OK;
}
640 | | |
641 | | static curl_off_t cr_chunked_total_length(struct Curl_easy *data, |
642 | | struct Curl_creader *reader) |
643 | 0 | { |
644 | | /* this reader changes length depending on input */ |
645 | 0 | (void)data; |
646 | 0 | (void)reader; |
647 | 0 | return -1; |
648 | 0 | } |
649 | | |
/* HTTP chunked Transfer-Encoding encoder (client reader vtable) */
const struct Curl_crtype Curl_httpchunk_encoder = {
  "chunked",                      /* name */
  cr_chunked_init,                /* do_init */
  cr_chunked_read,                /* do_read */
  cr_chunked_close,               /* do_close */
  Curl_creader_def_needs_rewind,  /* default rewind handling */
  cr_chunked_total_length,        /* total_length (always -1) */
  Curl_creader_def_resume_from,
  Curl_creader_def_rewind,
  Curl_creader_def_unpause,
  Curl_creader_def_is_paused,
  Curl_creader_def_done,
  sizeof(struct chunked_reader)   /* per-reader context size */
};
665 | | |
666 | | CURLcode Curl_httpchunk_add_reader(struct Curl_easy *data) |
667 | 0 | { |
668 | 0 | struct Curl_creader *reader = NULL; |
669 | 0 | CURLcode result; |
670 | |
|
671 | 0 | result = Curl_creader_create(&reader, data, &Curl_httpchunk_encoder, |
672 | 0 | CURL_CR_TRANSFER_ENCODE); |
673 | 0 | if(!result) |
674 | 0 | result = Curl_creader_add(data, reader); |
675 | |
|
676 | 0 | if(result && reader) |
677 | 0 | Curl_creader_free(data, reader); |
678 | 0 | return result; |
679 | 0 | } |
680 | | |
681 | | #endif /* CURL_DISABLE_HTTP */ |