/src/PROJ/curl/lib/http_chunks.c
Line | Count | Source |
1 | | /***************************************************************************
2 | | *                                  _   _ ____  _
3 | | *  Project                     ___| | | |  _ \| |
4 | | *                             / __| | | | |_) | |
5 | | *                            | (__| |_| |  _ <| |___
6 | | *                             \___|\___/|_| \_\_____|
7 | | *
8 | | * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
9 | | *
10 | | * This software is licensed as described in the file COPYING, which
11 | | * you should have received as part of this distribution. The terms
12 | | * are also available at https://curl.se/docs/copyright.html.
13 | | *
14 | | * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 | | * copies of the Software, and permit persons to whom the Software is
16 | | * furnished to do so, under the terms of the COPYING file.
17 | | *
18 | | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 | | * KIND, either express or implied.
20 | | *
21 | | * SPDX-License-Identifier: curl
22 | | *
23 | | ***************************************************************************/
24 | | |
25 | | #include "curl_setup.h" |
26 | | |
27 | | #ifndef CURL_DISABLE_HTTP |
28 | | |
29 | | #include "urldata.h" /* it includes http_chunks.h */ |
30 | | #include "curl_trc.h" |
31 | | #include "sendf.h" /* for the client write stuff */ |
32 | | #include "curlx/dynbuf.h" |
33 | | #include "content_encoding.h" |
34 | | #include "http.h" |
35 | | #include "multiif.h" |
36 | | #include "curlx/strparse.h" |
37 | | #include "curlx/warnless.h" |
38 | | |
39 | | /* The last #include files should be: */ |
40 | | #include "curl_memory.h" |
41 | | #include "memdebug.h" |
42 | | |
43 | | /* |
44 | | * Chunk format (simplified): |
45 | | * |
46 | | * <HEX SIZE>[ chunk extension ] CRLF |
47 | | * <DATA> CRLF |
48 | | * |
49 | | * Highlights from RFC 2616 section 3.6.1 say:
50 | | |
51 | | The chunked encoding modifies the body of a message in order to |
52 | | transfer it as a series of chunks, each with its own size indicator, |
53 | | followed by an OPTIONAL trailer containing entity-header fields. This |
54 | | allows dynamically produced content to be transferred along with the |
55 | | information necessary for the recipient to verify that it has |
56 | | received the full message. |
57 | | |
58 | | Chunked-Body = *chunk |
59 | | last-chunk |
60 | | trailer |
61 | | CRLF |
62 | | |
63 | | chunk = chunk-size [ chunk-extension ] CRLF |
64 | | chunk-data CRLF |
65 | | chunk-size = 1*HEX |
66 | | last-chunk = 1*("0") [ chunk-extension ] CRLF |
67 | | |
68 | | chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] ) |
69 | | chunk-ext-name = token |
70 | | chunk-ext-val = token | quoted-string |
71 | | chunk-data = chunk-size(OCTET) |
72 | | trailer = *(entity-header CRLF) |
73 | | |
74 | | The chunk-size field is a string of hex digits indicating the size of |
75 | | the chunk. The chunked encoding is ended by any chunk whose size is |
76 | | zero, followed by the trailer, which is terminated by an empty line. |
77 | | |
78 | | */ |
79 | | |
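As a concrete illustration of the simplified grammar quoted above, here is a standalone sketch that decodes one complete chunked body held in a single NUL-terminated buffer and prints the reassembled payload. It deliberately avoids curl's internal Curl_chunker/Curl_cwriter machinery; the name demo_dechunk and its shortcuts (chunk extensions skipped, trailers ignored, whole body available at once) are assumptions made for this example only. The real, incremental decoder is httpchunk_readwrite() further down.

/* standalone sketch, not part of curl: decode one complete chunked body.
   The buffer must be NUL terminated since strstr() is used to find CRLF. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int demo_dechunk(const char *buf, size_t blen)
{
  const char *p = buf;
  const char *end = buf + blen;

  while(p < end) {
    char *after;
    unsigned long size = strtoul(p, &after, 16); /* <HEX SIZE> */
    if(after == p)
      return 1;                   /* no hex digits where expected */
    p = strstr(after, "\r\n");    /* skip any chunk extension */
    if(!p)
      return 1;
    p += 2;                       /* step past the CRLF */
    if(!size)
      return 0;                   /* last-chunk; trailers ignored here */
    if(p + size + 2 > end)
      return 1;                   /* truncated chunk-data */
    fwrite(p, 1, size, stdout);   /* chunk-data */
    p += size + 2;                /* the data plus its trailing CRLF */
  }
  return 1;                       /* ran out of input early */
}

int main(void)
{
  static const char body[] = "4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n";
  return demo_dechunk(body, sizeof(body) - 1); /* prints "Wikipedia" */
}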
80 | | void Curl_httpchunk_init(struct Curl_easy *data, struct Curl_chunker *ch, |
81 | | bool ignore_body) |
82 | 0 | { |
83 | 0 | (void)data; |
84 | 0 | ch->hexindex = 0; /* start at 0 */ |
85 | 0 | ch->state = CHUNK_HEX; /* we get hex first! */ |
86 | 0 | ch->last_code = CHUNKE_OK; |
87 | 0 | curlx_dyn_init(&ch->trailer, DYN_H1_TRAILER); |
88 | 0 | ch->ignore_body = ignore_body; |
89 | 0 | } |
90 | | |
91 | | void Curl_httpchunk_reset(struct Curl_easy *data, struct Curl_chunker *ch, |
92 | | bool ignore_body) |
93 | 0 | { |
94 | 0 | (void)data; |
95 | 0 | ch->hexindex = 0; /* start at 0 */ |
96 | 0 | ch->state = CHUNK_HEX; /* we get hex first! */ |
97 | 0 | ch->last_code = CHUNKE_OK; |
98 | 0 | curlx_dyn_reset(&ch->trailer); |
99 | 0 | ch->ignore_body = ignore_body; |
100 | 0 | } |
101 | | |
102 | | void Curl_httpchunk_free(struct Curl_easy *data, struct Curl_chunker *ch) |
103 | 0 | { |
104 | 0 | (void)data; |
105 | 0 | curlx_dyn_free(&ch->trailer); |
106 | 0 | } |
107 | | |
108 | | bool Curl_httpchunk_is_done(struct Curl_easy *data, struct Curl_chunker *ch) |
109 | 0 | { |
110 | 0 | (void)data; |
111 | 0 | return ch->state == CHUNK_DONE; |
112 | 0 | } |
113 | | |
114 | | static CURLcode httpchunk_readwrite(struct Curl_easy *data, |
115 | | struct Curl_chunker *ch, |
116 | | struct Curl_cwriter *cw_next, |
117 | | const char *buf, size_t blen, |
118 | | size_t *pconsumed) |
119 | 0 | { |
120 | 0 | CURLcode result = CURLE_OK; |
121 | 0 | size_t piece; |
122 | |
123 | 0 | *pconsumed = 0; /* nothing's written yet */ |
124 | | /* first check terminal states that will not progress anywhere */ |
125 | 0 | if(ch->state == CHUNK_DONE) |
126 | 0 | return CURLE_OK; |
127 | 0 | if(ch->state == CHUNK_FAILED) |
128 | 0 | return CURLE_RECV_ERROR; |
129 | | |
130 | | /* the original data is written to the client, but we go on with the |
131 | | chunk read process, to properly calculate the content length */ |
132 | 0 | if(data->set.http_te_skip && !ch->ignore_body) { |
133 | 0 | if(cw_next) |
134 | 0 | result = Curl_cwriter_write(data, cw_next, CLIENTWRITE_BODY, buf, blen); |
135 | 0 | else |
136 | 0 | result = Curl_client_write(data, CLIENTWRITE_BODY, buf, blen); |
137 | 0 | if(result) { |
138 | 0 | ch->state = CHUNK_FAILED; |
139 | 0 | ch->last_code = CHUNKE_PASSTHRU_ERROR; |
140 | 0 | return result; |
141 | 0 | } |
142 | 0 | } |
143 | | |
144 | 0 | while(blen) { |
145 | 0 | switch(ch->state) { |
146 | 0 | case CHUNK_HEX: |
147 | 0 | if(ISXDIGIT(*buf)) { |
148 | 0 | if(ch->hexindex >= CHUNK_MAXNUM_LEN) { |
149 | 0 | failf(data, "chunk hex-length longer than %d", CHUNK_MAXNUM_LEN); |
150 | 0 | ch->state = CHUNK_FAILED; |
151 | 0 | ch->last_code = CHUNKE_TOO_LONG_HEX; /* longer than we support */ |
152 | 0 | return CURLE_RECV_ERROR; |
153 | 0 | } |
154 | 0 | ch->hexbuffer[ch->hexindex++] = *buf; |
155 | 0 | buf++; |
156 | 0 | blen--; |
157 | 0 | (*pconsumed)++; |
158 | 0 | } |
159 | 0 | else { |
160 | 0 | const char *p; |
161 | 0 | if(ch->hexindex == 0) { |
162 | | /* This is illegal data, we received junk where we expected |
163 | | a hexadecimal digit. */ |
164 | 0 | failf(data, "chunk hex-length char not a hex digit: 0x%x", *buf); |
165 | 0 | ch->state = CHUNK_FAILED; |
166 | 0 | ch->last_code = CHUNKE_ILLEGAL_HEX; |
167 | 0 | return CURLE_RECV_ERROR; |
168 | 0 | } |
169 | | /* blen and buf are unmodified */ |
170 | 0 | ch->hexbuffer[ch->hexindex] = 0; |
171 | 0 | p = &ch->hexbuffer[0]; |
172 | 0 | if(curlx_str_hex(&p, &ch->datasize, CURL_OFF_T_MAX)) { |
173 | 0 | failf(data, "invalid chunk size: '%s'", ch->hexbuffer); |
174 | 0 | ch->state = CHUNK_FAILED; |
175 | 0 | ch->last_code = CHUNKE_ILLEGAL_HEX; |
176 | 0 | return CURLE_RECV_ERROR; |
177 | 0 | } |
178 | 0 | ch->state = CHUNK_LF; /* now wait for the CRLF */ |
179 | 0 | } |
180 | 0 | break; |
181 | | |
182 | 0 | case CHUNK_LF: |
183 | | /* waiting for the LF after a chunk size */ |
184 | 0 | if(*buf == 0x0a) { |
185 | | /* we are now expecting data to come, unless size was zero! */ |
186 | 0 | if(ch->datasize == 0) { |
187 | 0 | ch->state = CHUNK_TRAILER; /* now check for trailers */ |
188 | 0 | } |
189 | 0 | else { |
190 | 0 | ch->state = CHUNK_DATA; |
191 | 0 | CURL_TRC_WRITE(data, "http_chunked, chunk start of %" |
192 | 0 | FMT_OFF_T " bytes", ch->datasize); |
193 | 0 | } |
194 | 0 | } |
195 | |
196 | 0 | buf++; |
197 | 0 | blen--; |
198 | 0 | (*pconsumed)++; |
199 | 0 | break; |
200 | | |
201 | 0 | case CHUNK_DATA: |
202 | | /* We expect 'datasize' bytes of data. We have 'blen' bytes right now;
203 | | that can be more or less than 'datasize'. Consume the smaller of the two.
204 | | */
205 | 0 | piece = blen; |
206 | 0 | if(ch->datasize < (curl_off_t)blen) |
207 | 0 | piece = curlx_sotouz(ch->datasize); |
208 | | |
209 | | /* Write the data portion available */ |
210 | 0 | if(!data->set.http_te_skip && !ch->ignore_body) { |
211 | 0 | if(cw_next) |
212 | 0 | result = Curl_cwriter_write(data, cw_next, CLIENTWRITE_BODY, |
213 | 0 | buf, piece); |
214 | 0 | else |
215 | 0 | result = Curl_client_write(data, CLIENTWRITE_BODY, buf, piece); |
216 | 0 | if(result) { |
217 | 0 | ch->state = CHUNK_FAILED; |
218 | 0 | ch->last_code = CHUNKE_PASSTHRU_ERROR; |
219 | 0 | return result; |
220 | 0 | } |
221 | 0 | } |
222 | | |
223 | 0 | *pconsumed += piece; |
224 | 0 | ch->datasize -= piece; /* decrease amount left to expect */ |
225 | 0 | buf += piece; /* move read pointer forward */ |
226 | 0 | blen -= piece; /* decrease space left in this round */ |
227 | 0 | CURL_TRC_WRITE(data, "http_chunked, write %zu body bytes, %" |
228 | 0 | FMT_OFF_T " bytes in chunk remain", |
229 | 0 | piece, ch->datasize); |
230 | |
231 | 0 | if(ch->datasize == 0) |
232 | | /* end of data this round, we now expect a trailing CRLF */ |
233 | 0 | ch->state = CHUNK_POSTLF; |
234 | 0 | break; |
235 | | |
236 | 0 | case CHUNK_POSTLF: |
237 | 0 | if(*buf == 0x0a) { |
238 | | /* The last one before we go back to hex state and start all over. */ |
239 | 0 | Curl_httpchunk_reset(data, ch, ch->ignore_body); |
240 | 0 | } |
241 | 0 | else if(*buf != 0x0d) { |
242 | 0 | ch->state = CHUNK_FAILED; |
243 | 0 | ch->last_code = CHUNKE_BAD_CHUNK; |
244 | 0 | return CURLE_RECV_ERROR; |
245 | 0 | } |
246 | 0 | buf++; |
247 | 0 | blen--; |
248 | 0 | (*pconsumed)++; |
249 | 0 | break; |
250 | | |
251 | 0 | case CHUNK_TRAILER: |
252 | 0 | if((*buf == 0x0d) || (*buf == 0x0a)) { |
253 | 0 | char *tr = curlx_dyn_ptr(&ch->trailer); |
254 | | /* this is the end of a trailer, but if the trailer was zero bytes |
255 | | there was no trailer and we move on */ |
256 | |
257 | 0 | if(tr) { |
258 | 0 | result = curlx_dyn_addn(&ch->trailer, STRCONST("\x0d\x0a")); |
259 | 0 | if(result) { |
260 | 0 | ch->state = CHUNK_FAILED; |
261 | 0 | ch->last_code = CHUNKE_OUT_OF_MEMORY; |
262 | 0 | return result; |
263 | 0 | } |
264 | 0 | tr = curlx_dyn_ptr(&ch->trailer); |
265 | 0 | if(!data->set.http_te_skip) { |
266 | 0 | size_t trlen = curlx_dyn_len(&ch->trailer); |
267 | 0 | if(cw_next) |
268 | 0 | result = Curl_cwriter_write(data, cw_next, |
269 | 0 | CLIENTWRITE_HEADER| |
270 | 0 | CLIENTWRITE_TRAILER, |
271 | 0 | tr, trlen); |
272 | 0 | else |
273 | 0 | result = Curl_client_write(data, |
274 | 0 | CLIENTWRITE_HEADER| |
275 | 0 | CLIENTWRITE_TRAILER, |
276 | 0 | tr, trlen); |
277 | 0 | if(result) { |
278 | 0 | ch->state = CHUNK_FAILED; |
279 | 0 | ch->last_code = CHUNKE_PASSTHRU_ERROR; |
280 | 0 | return result; |
281 | 0 | } |
282 | 0 | } |
283 | 0 | curlx_dyn_reset(&ch->trailer); |
284 | 0 | ch->state = CHUNK_TRAILER_CR; |
285 | 0 | if(*buf == 0x0a) |
286 | | /* already on the LF */ |
287 | 0 | break; |
288 | 0 | } |
289 | 0 | else { |
290 | | /* no trailer, we are on the final CRLF pair */ |
291 | 0 | ch->state = CHUNK_TRAILER_POSTCR; |
292 | 0 | break; /* do not advance the pointer */ |
293 | 0 | } |
294 | 0 | } |
295 | 0 | else { |
296 | 0 | result = curlx_dyn_addn(&ch->trailer, buf, 1); |
297 | 0 | if(result) { |
298 | 0 | ch->state = CHUNK_FAILED; |
299 | 0 | ch->last_code = CHUNKE_OUT_OF_MEMORY; |
300 | 0 | return result; |
301 | 0 | } |
302 | 0 | } |
303 | 0 | buf++; |
304 | 0 | blen--; |
305 | 0 | (*pconsumed)++; |
306 | 0 | break; |
307 | | |
308 | 0 | case CHUNK_TRAILER_CR: |
309 | 0 | if(*buf == 0x0a) { |
310 | 0 | ch->state = CHUNK_TRAILER_POSTCR; |
311 | 0 | buf++; |
312 | 0 | blen--; |
313 | 0 | (*pconsumed)++; |
314 | 0 | } |
315 | 0 | else { |
316 | 0 | ch->state = CHUNK_FAILED; |
317 | 0 | ch->last_code = CHUNKE_BAD_CHUNK; |
318 | 0 | return CURLE_RECV_ERROR; |
319 | 0 | } |
320 | 0 | break; |
321 | | |
322 | 0 | case CHUNK_TRAILER_POSTCR: |
323 | | /* We enter this state when a CR should arrive, so we expect to
324 | | pass a CR first and then wait for the LF */
325 | 0 | if((*buf != 0x0d) && (*buf != 0x0a)) { |
326 | | /* neither CR nor LF, so it must be another header in the trailer */
327 | 0 | ch->state = CHUNK_TRAILER; |
328 | 0 | break; |
329 | 0 | } |
330 | 0 | if(*buf == 0x0d) { |
331 | | /* skip if CR */ |
332 | 0 | buf++; |
333 | 0 | blen--; |
334 | 0 | (*pconsumed)++; |
335 | 0 | } |
336 | | /* now wait for the final LF */ |
337 | 0 | ch->state = CHUNK_STOP; |
338 | 0 | break; |
339 | | |
340 | 0 | case CHUNK_STOP: |
341 | 0 | if(*buf == 0x0a) { |
342 | 0 | blen--; |
343 | 0 | (*pconsumed)++; |
344 | | /* Record the length of any data left at the end of the buffer
345 | | even if there are no more chunks to read */
346 | 0 | ch->datasize = blen; |
347 | 0 | ch->state = CHUNK_DONE; |
348 | 0 | CURL_TRC_WRITE(data, "http_chunk, response complete"); |
349 | 0 | return CURLE_OK; |
350 | 0 | } |
351 | 0 | else { |
352 | 0 | ch->state = CHUNK_FAILED; |
353 | 0 | ch->last_code = CHUNKE_BAD_CHUNK; |
354 | 0 | CURL_TRC_WRITE(data, "http_chunk error, expected 0x0a, seeing 0x%02x",
355 | 0 | (unsigned int)*buf); |
356 | 0 | return CURLE_RECV_ERROR; |
357 | 0 | } |
358 | 0 | case CHUNK_DONE: |
359 | 0 | return CURLE_OK; |
360 | | |
361 | 0 | case CHUNK_FAILED: |
362 | 0 | return CURLE_RECV_ERROR; |
363 | 0 | } |
364 | |
365 | 0 | } |
366 | 0 | return CURLE_OK; |
367 | 0 | } |
368 | | |
369 | | static const char *Curl_chunked_strerror(CHUNKcode code) |
370 | 0 | { |
371 | 0 | switch(code) { |
372 | 0 | default: |
373 | 0 | return "OK"; |
374 | 0 | case CHUNKE_TOO_LONG_HEX: |
375 | 0 | return "Too long hexadecimal number"; |
376 | 0 | case CHUNKE_ILLEGAL_HEX: |
377 | 0 | return "Illegal or missing hexadecimal sequence"; |
378 | 0 | case CHUNKE_BAD_CHUNK: |
379 | 0 | return "Malformed encoding found"; |
380 | 0 | case CHUNKE_PASSTHRU_ERROR: |
381 | 0 | return "Error writing data to client"; |
382 | 0 | case CHUNKE_BAD_ENCODING: |
383 | 0 | return "Bad content-encoding found"; |
384 | 0 | case CHUNKE_OUT_OF_MEMORY: |
385 | 0 | return "Out of memory"; |
386 | 0 | } |
387 | 0 | } |
388 | | |
389 | | CURLcode Curl_httpchunk_read(struct Curl_easy *data, |
390 | | struct Curl_chunker *ch, |
391 | | char *buf, size_t blen, |
392 | | size_t *pconsumed) |
393 | 0 | { |
394 | 0 | return httpchunk_readwrite(data, ch, NULL, buf, blen, pconsumed); |
395 | 0 | } |
396 | | |
397 | | struct chunked_writer { |
398 | | struct Curl_cwriter super; |
399 | | struct Curl_chunker ch; |
400 | | }; |
401 | | |
402 | | static CURLcode cw_chunked_init(struct Curl_easy *data, |
403 | | struct Curl_cwriter *writer) |
404 | 0 | { |
405 | 0 | struct chunked_writer *ctx = writer->ctx; |
406 | |
407 | 0 | data->req.chunk = TRUE; /* chunks coming our way. */ |
408 | 0 | Curl_httpchunk_init(data, &ctx->ch, FALSE); |
409 | 0 | return CURLE_OK; |
410 | 0 | } |
411 | | |
412 | | static void cw_chunked_close(struct Curl_easy *data, |
413 | | struct Curl_cwriter *writer) |
414 | 0 | { |
415 | 0 | struct chunked_writer *ctx = writer->ctx; |
416 | 0 | Curl_httpchunk_free(data, &ctx->ch); |
417 | 0 | } |
418 | | |
419 | | static CURLcode cw_chunked_write(struct Curl_easy *data, |
420 | | struct Curl_cwriter *writer, int type, |
421 | | const char *buf, size_t blen) |
422 | 0 | { |
423 | 0 | struct chunked_writer *ctx = writer->ctx; |
424 | 0 | CURLcode result; |
425 | 0 | size_t consumed; |
426 | |
427 | 0 | if(!(type & CLIENTWRITE_BODY)) |
428 | 0 | return Curl_cwriter_write(data, writer->next, type, buf, blen); |
429 | | |
430 | 0 | consumed = 0; |
431 | 0 | result = httpchunk_readwrite(data, &ctx->ch, writer->next, buf, blen, |
432 | 0 | &consumed); |
433 | |
434 | 0 | if(result) { |
435 | 0 | if(CHUNKE_PASSTHRU_ERROR == ctx->ch.last_code) { |
436 | 0 | failf(data, "Failed reading the chunked-encoded stream"); |
437 | 0 | } |
438 | 0 | else { |
439 | 0 | failf(data, "%s in chunked-encoding", |
440 | 0 | Curl_chunked_strerror(ctx->ch.last_code)); |
441 | 0 | } |
442 | 0 | return result; |
443 | 0 | } |
444 | | |
445 | 0 | blen -= consumed; |
446 | 0 | if(CHUNK_DONE == ctx->ch.state) { |
447 | | /* chunks read successfully, download is complete */ |
448 | 0 | data->req.download_done = TRUE; |
449 | 0 | if(blen) { |
450 | 0 | infof(data, "Leftovers after chunking: %zu bytes", blen); |
451 | 0 | } |
452 | 0 | } |
453 | 0 | else if((type & CLIENTWRITE_EOS) && !data->req.no_body) { |
454 | 0 | failf(data, "transfer closed with outstanding read data remaining"); |
455 | 0 | return CURLE_PARTIAL_FILE; |
456 | 0 | } |
457 | | |
458 | 0 | return CURLE_OK; |
459 | 0 | } |
460 | | |
461 | | /* HTTP chunked Transfer-Encoding decoder */ |
462 | | const struct Curl_cwtype Curl_httpchunk_unencoder = { |
463 | | "chunked", |
464 | | NULL, |
465 | | cw_chunked_init, |
466 | | cw_chunked_write, |
467 | | cw_chunked_close, |
468 | | sizeof(struct chunked_writer) |
469 | | }; |
470 | | |
471 | | /* min/max length of the HTTP chunks we want to generate */
472 | | #define CURL_CHUNKED_MINLEN (1024) |
473 | 0 | #define CURL_CHUNKED_MAXLEN (64 * 1024) |
474 | | |
475 | | struct chunked_reader { |
476 | | struct Curl_creader super; |
477 | | struct bufq chunkbuf; |
478 | | BIT(read_eos); /* we read an EOS from the next reader */ |
479 | | BIT(eos); /* we have returned an EOS */ |
480 | | }; |
481 | | |
482 | | static CURLcode cr_chunked_init(struct Curl_easy *data, |
483 | | struct Curl_creader *reader) |
484 | 0 | { |
485 | 0 | struct chunked_reader *ctx = reader->ctx; |
486 | 0 | (void)data; |
487 | 0 | Curl_bufq_init2(&ctx->chunkbuf, CURL_CHUNKED_MAXLEN, 2, BUFQ_OPT_SOFT_LIMIT); |
488 | 0 | return CURLE_OK; |
489 | 0 | } |
490 | | |
491 | | static void cr_chunked_close(struct Curl_easy *data, |
492 | | struct Curl_creader *reader) |
493 | 0 | { |
494 | 0 | struct chunked_reader *ctx = reader->ctx; |
495 | 0 | (void)data; |
496 | 0 | Curl_bufq_free(&ctx->chunkbuf); |
497 | 0 | } |
498 | | |
499 | | static CURLcode add_last_chunk(struct Curl_easy *data, |
500 | | struct Curl_creader *reader) |
501 | 0 | { |
502 | 0 | struct chunked_reader *ctx = reader->ctx; |
503 | 0 | struct curl_slist *trailers = NULL, *tr; |
504 | 0 | CURLcode result; |
505 | 0 | size_t n; |
506 | 0 | int rc; |
507 | |
508 | 0 | if(!data->set.trailer_callback) { |
509 | 0 | CURL_TRC_READ(data, "http_chunk, added last, empty chunk"); |
510 | 0 | return Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("0\r\n\r\n"), &n); |
511 | 0 | } |
512 | | |
513 | 0 | result = Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("0\r\n"), &n); |
514 | 0 | if(result) |
515 | 0 | goto out; |
516 | | |
517 | 0 | Curl_set_in_callback(data, TRUE); |
518 | 0 | rc = data->set.trailer_callback(&trailers, data->set.trailer_data); |
519 | 0 | Curl_set_in_callback(data, FALSE); |
520 | |
521 | 0 | if(rc != CURL_TRAILERFUNC_OK) { |
522 | 0 | failf(data, "operation aborted by trailing headers callback"); |
523 | 0 | result = CURLE_ABORTED_BY_CALLBACK; |
524 | 0 | goto out; |
525 | 0 | } |
526 | | |
527 | 0 | for(tr = trailers; tr; tr = tr->next) { |
528 | | /* only add correctly formatted trailers */ |
529 | 0 | char *ptr = strchr(tr->data, ':'); |
530 | 0 | if(!ptr || *(ptr + 1) != ' ') { |
531 | 0 | infof(data, "Malformatted trailing header, skipping trailer"); |
532 | 0 | continue; |
533 | 0 | } |
534 | | |
535 | 0 | result = Curl_bufq_cwrite(&ctx->chunkbuf, tr->data, |
536 | 0 | strlen(tr->data), &n); |
537 | 0 | if(!result) |
538 | 0 | result = Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("\r\n"), &n); |
539 | 0 | if(result) |
540 | 0 | goto out; |
541 | 0 | } |
542 | | |
543 | 0 | result = Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("\r\n"), &n); |
544 | |
545 | 0 | out: |
546 | 0 | curl_slist_free_all(trailers); |
547 | 0 | CURL_TRC_READ(data, "http_chunk, added last chunk with trailers " |
548 | 0 | "from client -> %d", result); |
549 | 0 | return result; |
550 | 0 | } |
551 | | |
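To make the terminating sequence built by add_last_chunk() concrete: assuming a trailer callback that returns one well-formed "Name: value" header, the bytes appended after the final data chunk would look like the literal below. The header shown is made up for this illustration, not captured from a real transfer.

/* illustration only: last-chunk, one trailer header, terminating empty line */
static const char demo_last_chunk[] =
  "0\r\n"                                       /* last-chunk, size zero */
  "Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\n"  /* trailer header */
  "\r\n";                                       /* empty line ends the body */

Without a trailer callback, add_last_chunk() simply emits "0\r\n\r\n", as its early return above shows.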
552 | | static CURLcode add_chunk(struct Curl_easy *data, |
553 | | struct Curl_creader *reader, |
554 | | char *buf, size_t blen) |
555 | 0 | { |
556 | 0 | struct chunked_reader *ctx = reader->ctx; |
557 | 0 | CURLcode result; |
558 | 0 | char tmp[CURL_CHUNKED_MINLEN]; |
559 | 0 | size_t nread; |
560 | 0 | bool eos; |
561 | |
562 | 0 | DEBUGASSERT(!ctx->read_eos); |
563 | 0 | blen = CURLMIN(blen, CURL_CHUNKED_MAXLEN); /* respect our buffer pref */ |
564 | 0 | if(blen < sizeof(tmp)) { |
565 | | /* small read, make a chunk of decent size */ |
566 | 0 | buf = tmp; |
567 | 0 | blen = sizeof(tmp); |
568 | 0 | } |
569 | 0 | else { |
570 | | /* larger read, make a chunk that will fit when read back */ |
571 | 0 | blen -= (8 + 2 + 2); /* deduct max overhead, 8 hex + 2*crlf */ |
572 | 0 | } |
573 | |
574 | 0 | result = Curl_creader_read(data, reader->next, buf, blen, &nread, &eos); |
575 | 0 | if(result) |
576 | 0 | return result; |
577 | 0 | if(eos) |
578 | 0 | ctx->read_eos = TRUE; |
579 | |
580 | 0 | if(nread) { |
581 | | /* actually got bytes, wrap them into the chunkbuf */ |
582 | 0 | char hd[11] = ""; |
583 | 0 | int hdlen; |
584 | 0 | size_t n; |
585 | |
586 | 0 | hdlen = curl_msnprintf(hd, sizeof(hd), "%zx\r\n", nread); |
587 | 0 | if(hdlen <= 0) |
588 | 0 | return CURLE_READ_ERROR; |
589 | | /* On a soft-limited bufq, we do not need to check that all was written */ |
590 | 0 | result = Curl_bufq_cwrite(&ctx->chunkbuf, hd, hdlen, &n); |
591 | 0 | if(!result) |
592 | 0 | result = Curl_bufq_cwrite(&ctx->chunkbuf, buf, nread, &n); |
593 | 0 | if(!result) |
594 | 0 | result = Curl_bufq_cwrite(&ctx->chunkbuf, "\r\n", 2, &n); |
595 | 0 | CURL_TRC_READ(data, "http_chunk, made chunk of %zu bytes -> %d", |
596 | 0 | nread, result); |
597 | 0 | if(result) |
598 | 0 | return result; |
599 | 0 | } |
600 | | |
601 | 0 | if(ctx->read_eos) |
602 | 0 | return add_last_chunk(data, reader); |
603 | 0 | return CURLE_OK; |
604 | 0 | } |
605 | | |
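The framing that add_chunk() performs, a hex size line, then the payload, then CRLF, can also be shown in isolation. The sketch below uses plain snprintf()/memcpy() instead of curl's bufq; demo_frame_chunk() and its caller are hypothetical and exist only for this illustration. A terminating "0\r\n\r\n" (or "0\r\n" plus trailers and a final CRLF) still has to follow the last data chunk, which is what add_last_chunk() handles.

/* standalone sketch, not part of curl: frame one payload as a single chunk */
#include <stdio.h>
#include <string.h>

/* returns the number of bytes written to 'out', or 0 if it would not fit */
static size_t demo_frame_chunk(char *out, size_t outlen,
                               const char *payload, size_t plen)
{
  char hd[11]; /* up to 8 hex digits + CRLF + NUL, as in add_chunk() */
  int hdlen = snprintf(hd, sizeof(hd), "%zx\r\n", plen);

  if(hdlen <= 0 || (size_t)hdlen >= sizeof(hd) ||
     (size_t)hdlen + plen + 2 > outlen)
    return 0;
  memcpy(out, hd, (size_t)hdlen);         /* <HEX SIZE> CRLF */
  memcpy(out + hdlen, payload, plen);     /* chunk-data */
  memcpy(out + hdlen + plen, "\r\n", 2);  /* trailing CRLF */
  return (size_t)hdlen + plen + 2;
}

int main(void)
{
  char out[64];
  size_t n = demo_frame_chunk(out, sizeof(out), "hello", 5);
  fwrite(out, 1, n, stdout); /* prints "5\r\nhello\r\n" */
  return n ? 0 : 1;
}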
606 | | static CURLcode cr_chunked_read(struct Curl_easy *data, |
607 | | struct Curl_creader *reader, |
608 | | char *buf, size_t blen, |
609 | | size_t *pnread, bool *peos) |
610 | 0 | { |
611 | 0 | struct chunked_reader *ctx = reader->ctx; |
612 | 0 | CURLcode result = CURLE_READ_ERROR; |
613 | |
614 | 0 | *pnread = 0; |
615 | 0 | *peos = ctx->eos; |
616 | |
617 | 0 | if(!ctx->eos) { |
618 | 0 | if(!ctx->read_eos && Curl_bufq_is_empty(&ctx->chunkbuf)) { |
619 | | /* Still getting data from the next reader, buffer is empty */
620 | 0 | result = add_chunk(data, reader, buf, blen); |
621 | 0 | if(result) |
622 | 0 | return result; |
623 | 0 | } |
624 | | |
625 | 0 | if(!Curl_bufq_is_empty(&ctx->chunkbuf)) { |
626 | 0 | result = Curl_bufq_cread(&ctx->chunkbuf, buf, blen, pnread); |
627 | 0 | if(!result && ctx->read_eos && Curl_bufq_is_empty(&ctx->chunkbuf)) { |
628 | | /* no more data, read all, done. */ |
629 | 0 | ctx->eos = TRUE; |
630 | 0 | *peos = TRUE; |
631 | 0 | } |
632 | 0 | return result; |
633 | 0 | } |
634 | 0 | } |
635 | | /* We may get here because we are done or because callbacks paused */
636 | 0 | DEBUGASSERT(ctx->eos || !ctx->read_eos); |
637 | 0 | return CURLE_OK; |
638 | 0 | } |
639 | | |
640 | | static curl_off_t cr_chunked_total_length(struct Curl_easy *data, |
641 | | struct Curl_creader *reader) |
642 | 0 | { |
643 | | /* this reader changes length depending on input */ |
644 | 0 | (void)data; |
645 | 0 | (void)reader; |
646 | 0 | return -1; |
647 | 0 | } |
648 | | |
649 | | /* HTTP chunked Transfer-Encoding encoder */ |
650 | | const struct Curl_crtype Curl_httpchunk_encoder = { |
651 | | "chunked", |
652 | | cr_chunked_init, |
653 | | cr_chunked_read, |
654 | | cr_chunked_close, |
655 | | Curl_creader_def_needs_rewind, |
656 | | cr_chunked_total_length, |
657 | | Curl_creader_def_resume_from, |
658 | | Curl_creader_def_cntrl, |
659 | | Curl_creader_def_is_paused, |
660 | | Curl_creader_def_done, |
661 | | sizeof(struct chunked_reader) |
662 | | }; |
663 | | |
664 | | CURLcode Curl_httpchunk_add_reader(struct Curl_easy *data) |
665 | 0 | { |
666 | 0 | struct Curl_creader *reader = NULL; |
667 | 0 | CURLcode result; |
668 | |
669 | 0 | result = Curl_creader_create(&reader, data, &Curl_httpchunk_encoder, |
670 | 0 | CURL_CR_TRANSFER_ENCODE); |
671 | 0 | if(!result) |
672 | 0 | result = Curl_creader_add(data, reader); |
673 | |
674 | 0 | if(result && reader) |
675 | 0 | Curl_creader_free(data, reader); |
676 | 0 | return result; |
677 | 0 | } |
678 | | |
679 | | #endif /* CURL_DISABLE_HTTP */ |