Line | Count | Source
1 | | /***************************************************************************
2 | |  *                                  _   _ ____  _
3 | |  *  Project                     ___| | | |  _ \| |
4 | |  *                             / __| | | | |_) | |
5 | |  *                            | (__| |_| |  _ <| |___
6 | |  *                             \___|\___/|_| \_\_____|
7 | |  *
8 | |  * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
9 | |  *
10 | |  * This software is licensed as described in the file COPYING, which
11 | |  * you should have received as part of this distribution. The terms
12 | |  * are also available at https://curl.se/docs/copyright.html.
13 | |  *
14 | |  * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 | |  * copies of the Software, and permit persons to whom the Software is
16 | |  * furnished to do so, under the terms of the COPYING file.
17 | |  *
18 | |  * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 | |  * KIND, either express or implied.
20 | |  *
21 | |  * SPDX-License-Identifier: curl
22 | |  *
23 | |  ***************************************************************************/
24 | | |
25 | | #include "curl_setup.h" |
26 | | |
27 | | #ifdef HAVE_NETINET_IN_H |
28 | | #include <netinet/in.h> |
29 | | #endif |
30 | | |
31 | | #ifdef HAVE_LINUX_TCP_H |
32 | | #include <linux/tcp.h> |
33 | | #elif defined(HAVE_NETINET_TCP_H) |
34 | | #include <netinet/tcp.h> |
35 | | #endif |
36 | | |
37 | | #include <curl/curl.h> |
38 | | |
39 | | #include "urldata.h" |
40 | | #include "sendf.h" |
41 | | #include "cfilters.h" |
42 | | #include "connect.h" |
43 | | #include "vtls/vtls.h" |
44 | | #include "vssh/ssh.h" |
45 | | #include "easyif.h" |
46 | | #include "multiif.h" |
47 | | #include "strerror.h" |
48 | | #include "select.h" |
49 | | #include "strdup.h" |
50 | | #include "http2.h" |
51 | | #include "headers.h" |
52 | | #include "ws.h" |
53 | | |
54 | | /* The last 3 #include files should be in this order */ |
55 | | #include "curl_printf.h" |
56 | | #include "curl_memory.h" |
57 | | #include "memdebug.h" |
58 | | |
59 | | #if defined(CURL_DO_LINEEND_CONV) && !defined(CURL_DISABLE_FTP) |
60 | | /* |
61 | | * convert_lineends() changes CRLF (\r\n) end-of-line markers to a single LF |
62 | | * (\n), with special processing for CRLF sequences that are split between two |
63 | | * blocks of data. Remaining bare CRs are changed to LFs. The possibly new
64 | | * size of the data is returned. |
65 | | */ |
66 | | static size_t convert_lineends(struct Curl_easy *data, |
67 | | char *startPtr, size_t size) |
68 | 0 | { |
69 | 0 | char *inPtr, *outPtr; |
70 | | |
71 | | /* sanity check */ |
72 | 0 | if(!startPtr || (size < 1)) { |
73 | 0 | return size; |
74 | 0 | } |
75 | | |
76 | 0 | if(data->state.prev_block_had_trailing_cr) { |
77 | | /* The previous block of incoming data |
78 | | had a trailing CR, which was turned into a LF. */ |
79 | 0 | if(*startPtr == '\n') { |
80 | | /* This block of incoming data starts with the |
81 | | previous block's LF so get rid of it */ |
82 | 0 | memmove(startPtr, startPtr + 1, size-1); |
83 | 0 | size--; |
84 | | /* and it wasn't a bare CR but a CRLF conversion instead */ |
85 | 0 | data->state.crlf_conversions++; |
86 | 0 | } |
87 | 0 | data->state.prev_block_had_trailing_cr = FALSE; /* reset the flag */ |
88 | 0 | } |
89 | | |
90 | | /* find 1st CR, if any */ |
91 | 0 | inPtr = outPtr = memchr(startPtr, '\r', size); |
92 | 0 | if(inPtr) { |
93 | | /* at least one CR, now look for CRLF */ |
94 | 0 | while(inPtr < (startPtr + size-1)) { |
95 | | /* note that it's size-1, so we'll never look past the last byte */ |
96 | 0 | if(memcmp(inPtr, "\r\n", 2) == 0) { |
97 | | /* CRLF found, bump past the CR and copy the NL */ |
98 | 0 | inPtr++; |
99 | 0 | *outPtr = *inPtr; |
100 | | /* keep track of how many CRLFs we converted */ |
101 | 0 | data->state.crlf_conversions++; |
102 | 0 | } |
103 | 0 | else { |
104 | 0 | if(*inPtr == '\r') { |
105 | | /* lone CR, move LF instead */ |
106 | 0 | *outPtr = '\n'; |
107 | 0 | } |
108 | 0 | else { |
109 | | /* not a CRLF nor a CR, just copy whatever it is */ |
110 | 0 | *outPtr = *inPtr; |
111 | 0 | } |
112 | 0 | } |
113 | 0 | outPtr++; |
114 | 0 | inPtr++; |
115 | 0 | } /* end of while loop */ |
116 | |
117 | 0 | if(inPtr < startPtr + size) { |
118 | | /* handle last byte */ |
119 | 0 | if(*inPtr == '\r') { |
120 | | /* deal with a CR at the end of the buffer */ |
121 | 0 | *outPtr = '\n'; /* copy a NL instead */ |
122 | | /* note that a CRLF might be split across two blocks */ |
123 | 0 | data->state.prev_block_had_trailing_cr = TRUE; |
124 | 0 | } |
125 | 0 | else { |
126 | | /* copy last byte */ |
127 | 0 | *outPtr = *inPtr; |
128 | 0 | } |
129 | 0 | outPtr++; |
130 | 0 | } |
131 | 0 | if(outPtr < startPtr + size) |
132 | | /* tidy up by null terminating the now shorter data */ |
133 | 0 | *outPtr = '\0'; |
134 | |
135 | 0 | return (outPtr - startPtr); |
136 | 0 | } |
137 | 0 | return size; |
138 | 0 | } |
139 | | #endif /* CURL_DO_LINEEND_CONV && !CURL_DISABLE_FTP */ |
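
The function above carries split-CRLF state across blocks in data->state.prev_block_had_trailing_cr. Below is a minimal standalone sketch of the same normalization idea, outside curl and without the crlf_conversions bookkeeping; the helper name normalize_eol and the pending_cr flag are illustrative, not curl API.

    #include <stddef.h>

    /* Illustrative only, not curl code: turn CRLF and bare CR into LF in
     * place. 'pending_cr' carries the "previous block ended in CR" state
     * between calls, mirroring prev_block_had_trailing_cr above. */
    static size_t normalize_eol(char *buf, size_t len, int *pending_cr)
    {
      size_t in = 0, out = 0;

      if(*pending_cr) {
        if(len && buf[0] == '\n')
          in++;               /* LF completing a CRLF split across blocks */
        *pending_cr = 0;
      }

      while(in < len) {
        if(buf[in] == '\r') {
          buf[out++] = '\n';  /* CRLF or bare CR becomes a single LF */
          if(in + 1 < len) {
            if(buf[in + 1] == '\n')
              in++;           /* skip the LF half of a CRLF */
          }
          else
            *pending_cr = 1;  /* CR at block end: the CRLF may be split */
        }
        else
          buf[out++] = buf[in];
        in++;
      }
      return out;             /* possibly smaller than len */
    }

Feeding successive blocks through it with one persistent pending_cr flag produces the same output as normalizing the concatenated data in a single pass.
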
140 | | |
141 | | /* |
142 | | * Curl_write() is an internal write function that sends data to the |
143 | | * server. Works with plain sockets, SCP, SSL or Kerberos.
144 | | * |
145 | | * If the write would block (CURLE_AGAIN), we return CURLE_OK and |
146 | | * (*written == 0). Otherwise we return a regular CURLcode value.
147 | | */ |
148 | | CURLcode Curl_write(struct Curl_easy *data, |
149 | | curl_socket_t sockfd, |
150 | | const void *mem, |
151 | | size_t len, |
152 | | ssize_t *written) |
153 | 0 | { |
154 | 0 | ssize_t bytes_written; |
155 | 0 | CURLcode result = CURLE_OK; |
156 | 0 | struct connectdata *conn; |
157 | 0 | int num; |
158 | 0 | DEBUGASSERT(data); |
159 | 0 | DEBUGASSERT(data->conn); |
160 | 0 | conn = data->conn; |
161 | 0 | num = (sockfd != CURL_SOCKET_BAD && sockfd == conn->sock[SECONDARYSOCKET]); |
162 | |
163 | 0 | #ifdef CURLDEBUG |
164 | 0 | { |
165 | | /* Allow debug builds to override this logic to force short sends |
166 | | */ |
167 | 0 | char *p = getenv("CURL_SMALLSENDS"); |
168 | 0 | if(p) { |
169 | 0 | size_t altsize = (size_t)strtoul(p, NULL, 10); |
170 | 0 | if(altsize) |
171 | 0 | len = CURLMIN(len, altsize); |
172 | 0 | } |
173 | 0 | } |
174 | 0 | #endif |
175 | 0 | bytes_written = conn->send[num](data, num, mem, len, &result); |
176 | |
177 | 0 | *written = bytes_written; |
178 | 0 | if(bytes_written >= 0) |
179 | | /* the CURLcode value is ignored unless a negative value was returned */
180 | 0 | return CURLE_OK; |
181 | | |
182 | | /* handle CURLE_AGAIN or a send failure */ |
183 | 0 | switch(result) { |
184 | 0 | case CURLE_AGAIN: |
185 | 0 | *written = 0; |
186 | 0 | return CURLE_OK; |
187 | | |
188 | 0 | case CURLE_OK: |
189 | | /* general send failure */ |
190 | 0 | return CURLE_SEND_ERROR; |
191 | | |
192 | 0 | default: |
193 | | /* we got a specific curlcode, forward it */ |
194 | 0 | return result; |
195 | 0 | } |
196 | 0 | } |
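
Because Curl_write() maps a would-block condition to CURLE_OK with *written set to 0, internal callers loop and retry once the socket is writable again. A sketch of such a caller, assuming this file's includes and types; send_all is a hypothetical helper, not a curl function.

    /* Hypothetical helper: push a whole buffer out through Curl_write(),
     * treating *written == 0 as "would block, wait and retry later". */
    static CURLcode send_all(struct Curl_easy *data, curl_socket_t sockfd,
                             const char *buf, size_t len)
    {
      while(len) {
        ssize_t written = 0;
        CURLcode result = Curl_write(data, sockfd, buf, len, &written);
        if(result)
          return result;      /* hard send failure */
        if(!written)
          break;              /* would block: resume when writable */
        buf += written;
        len -= (size_t)written;
      }
      return CURLE_OK;
    }
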
197 | | |
198 | | static CURLcode pausewrite(struct Curl_easy *data, |
199 | | int type, /* what type of data */ |
200 | | const char *ptr, |
201 | | size_t len) |
202 | 0 | { |
203 | | /* signalled to pause delivery on this connection, but since we have data
204 | | to deliver we need to dup it to save a copy for when delivery is
205 | | enabled again */
206 | 0 | struct SingleRequest *k = &data->req; |
207 | 0 | struct UrlState *s = &data->state; |
208 | 0 | unsigned int i; |
209 | 0 | bool newtype = TRUE; |
210 | |
211 | 0 | Curl_conn_ev_data_pause(data, TRUE); |
212 | |
213 | 0 | if(s->tempcount) { |
214 | 0 | for(i = 0; i< s->tempcount; i++) { |
215 | 0 | if(s->tempwrite[i].type == type) { |
216 | | /* data for this type exists */ |
217 | 0 | newtype = FALSE; |
218 | 0 | break; |
219 | 0 | } |
220 | 0 | } |
221 | 0 | DEBUGASSERT(i < 3); |
222 | 0 | if(i >= 3) |
223 | | /* There are more types to store than what fits: very bad */ |
224 | 0 | return CURLE_OUT_OF_MEMORY; |
225 | 0 | } |
226 | 0 | else |
227 | 0 | i = 0; |
228 | | |
229 | 0 | if(newtype) { |
230 | | /* store this information in the state struct for later use */ |
231 | 0 | Curl_dyn_init(&s->tempwrite[i].b, DYN_PAUSE_BUFFER); |
232 | 0 | s->tempwrite[i].type = type; |
233 | 0 | s->tempcount++; |
234 | 0 | } |
235 | |
236 | 0 | if(Curl_dyn_addn(&s->tempwrite[i].b, (unsigned char *)ptr, len)) |
237 | 0 | return CURLE_OUT_OF_MEMORY; |
238 | | |
239 | | /* mark the connection as RECV paused */ |
240 | 0 | k->keepon |= KEEP_RECV_PAUSE; |
241 | |
242 | 0 | return CURLE_OK; |
243 | 0 | } |
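
pausewrite() is the internal half of the pause machinery: it buffers data that could not be delivered in state.tempwrite[] and raises KEEP_RECV_PAUSE. The public half is a write callback returning CURL_WRITEFUNC_PAUSE and the application later calling curl_easy_pause() to resume. A minimal application-side sketch; struct app_state and its busy flag are illustrative.

    #include <curl/curl.h>

    struct app_state {
      int busy;   /* set while the application cannot accept more data */
    };

    /* Write callback: returning CURL_WRITEFUNC_PAUSE makes libcurl stash
     * later data via pausewrite() until the transfer is unpaused. */
    static size_t write_cb(char *ptr, size_t size, size_t nmemb, void *userp)
    {
      struct app_state *st = userp;
      (void)ptr;
      if(st->busy)
        return CURL_WRITEFUNC_PAUSE;
      return size * nmemb;    /* all bytes consumed */
    }

    /* Later, once the application can take data again. */
    static void app_resume(CURL *easy, struct app_state *st)
    {
      st->busy = 0;
      curl_easy_pause(easy, CURLPAUSE_CONT);   /* resume the transfer */
    }

The callback is installed with CURLOPT_WRITEFUNCTION and CURLOPT_WRITEDATA as usual.
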
244 | | |
245 | | |
246 | | /* chop_write() writes chunks of data not larger than CURL_MAX_WRITE_SIZE via |
247 | | * client write callback(s) and takes care of pause requests from the |
248 | | * callbacks. |
249 | | */ |
250 | | static CURLcode chop_write(struct Curl_easy *data, |
251 | | int type, |
252 | | char *optr, |
253 | | size_t olen) |
254 | 0 | { |
255 | 0 | struct connectdata *conn = data->conn; |
256 | 0 | curl_write_callback writeheader = NULL; |
257 | 0 | curl_write_callback writebody = NULL; |
258 | 0 | char *ptr = optr; |
259 | 0 | size_t len = olen; |
260 | 0 | void *writebody_ptr = data->set.out; |
261 | |
262 | 0 | if(!len) |
263 | 0 | return CURLE_OK; |
264 | | |
265 | | /* If reading is paused, append this data to the already held data for this |
266 | | type. */ |
267 | 0 | if(data->req.keepon & KEEP_RECV_PAUSE) |
268 | 0 | return pausewrite(data, type, ptr, len); |
269 | | |
270 | | /* Determine the callback(s) to use. */ |
271 | 0 | if(type & CLIENTWRITE_BODY) { |
272 | 0 | #ifdef USE_WEBSOCKETS |
273 | 0 | if(conn->handler->protocol & (CURLPROTO_WS|CURLPROTO_WSS)) { |
274 | 0 | writebody = Curl_ws_writecb; |
275 | 0 | writebody_ptr = data; |
276 | 0 | } |
277 | 0 | else |
278 | 0 | #endif |
279 | 0 | writebody = data->set.fwrite_func; |
280 | 0 | } |
281 | 0 | if((type & CLIENTWRITE_HEADER) && |
282 | 0 | (data->set.fwrite_header || data->set.writeheader)) { |
283 | | /* |
284 | | * Write headers to the same callback or to the specially set-up
285 | | * header callback function (added after version 7.7.1). |
286 | | */ |
287 | 0 | writeheader = |
288 | 0 | data->set.fwrite_header? data->set.fwrite_header: data->set.fwrite_func; |
289 | 0 | } |
290 | | |
291 | | /* Chop data, write chunks. */ |
292 | 0 | while(len) { |
293 | 0 | size_t chunklen = len <= CURL_MAX_WRITE_SIZE? len: CURL_MAX_WRITE_SIZE; |
294 | |
295 | 0 | if(writebody) { |
296 | 0 | size_t wrote; |
297 | 0 | Curl_set_in_callback(data, true); |
298 | 0 | wrote = writebody(ptr, 1, chunklen, writebody_ptr); |
299 | 0 | Curl_set_in_callback(data, false); |
300 | |
301 | 0 | if(CURL_WRITEFUNC_PAUSE == wrote) { |
302 | 0 | if(conn->handler->flags & PROTOPT_NONETWORK) { |
303 | | /* Protocols that work without network cannot be paused. This is |
304 | | actually only FILE:// just now, and it can't pause since the |
305 | | transfer isn't done using the "normal" procedure. */ |
306 | 0 | failf(data, "Write callback asked for PAUSE when not supported"); |
307 | 0 | return CURLE_WRITE_ERROR; |
308 | 0 | } |
309 | 0 | return pausewrite(data, type, ptr, len); |
310 | 0 | } |
311 | 0 | if(wrote != chunklen) { |
312 | 0 | failf(data, "Failure writing output to destination"); |
313 | 0 | return CURLE_WRITE_ERROR; |
314 | 0 | } |
315 | 0 | } |
316 | | |
317 | 0 | ptr += chunklen; |
318 | 0 | len -= chunklen; |
319 | 0 | } |
320 | | |
321 | 0 | #ifndef CURL_DISABLE_HTTP |
322 | | /* HTTP header, but not status-line */ |
323 | 0 | if((conn->handler->protocol & PROTO_FAMILY_HTTP) && |
324 | 0 | (type & CLIENTWRITE_HEADER) && !(type & CLIENTWRITE_STATUS) ) { |
325 | 0 | unsigned char htype = (unsigned char) |
326 | 0 | (type & CLIENTWRITE_CONNECT ? CURLH_CONNECT : |
327 | 0 | (type & CLIENTWRITE_1XX ? CURLH_1XX : |
328 | 0 | (type & CLIENTWRITE_TRAILER ? CURLH_TRAILER : |
329 | 0 | CURLH_HEADER))); |
330 | 0 | CURLcode result = Curl_headers_push(data, optr, htype); |
331 | 0 | if(result) |
332 | 0 | return result; |
333 | 0 | } |
334 | 0 | #endif |
335 | | |
336 | 0 | if(writeheader) { |
337 | 0 | size_t wrote; |
338 | |
339 | 0 | Curl_set_in_callback(data, true); |
340 | 0 | wrote = writeheader(optr, 1, olen, data->set.writeheader); |
341 | 0 | Curl_set_in_callback(data, false); |
342 | |
343 | 0 | if(CURL_WRITEFUNC_PAUSE == wrote) |
344 | | /* Pass in the HEADER bit only: if body data was included, it was
345 | | already delivered above and did not trigger the pause, so only
346 | | the header part is saved for later */
347 | 0 | return pausewrite(data, CLIENTWRITE_HEADER | |
348 | 0 | (type & (CLIENTWRITE_STATUS|CLIENTWRITE_CONNECT| |
349 | 0 | CLIENTWRITE_1XX|CLIENTWRITE_TRAILER)), |
350 | 0 | optr, olen); |
351 | 0 | if(wrote != olen) { |
352 | 0 | failf(data, "Failed writing header"); |
353 | 0 | return CURLE_WRITE_ERROR; |
354 | 0 | } |
355 | 0 | } |
356 | | |
357 | 0 | return CURLE_OK; |
358 | 0 | } |
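
chop_write() hands the body callback at most CURL_MAX_WRITE_SIZE bytes per call and treats any return value other than the full chunk, or CURL_WRITEFUNC_PAUSE, as CURLE_WRITE_ERROR. A sketch of a conforming application body callback; writing to a FILE * is only an example target.

    #include <stdio.h>
    #include <curl/curl.h>

    /* Body callback honoring the contract enforced above: each call gets
     * at most CURL_MAX_WRITE_SIZE bytes and must return size * nmemb. */
    static size_t body_cb(char *ptr, size_t size, size_t nmemb, void *userp)
    {
      FILE *out = userp;
      size_t total = size * nmemb;
      if(fwrite(ptr, 1, total, out) != total)
        return 0;             /* short count: make libcurl abort */
      return total;
    }

Returning a short count (here 0) is how a callback reports failure; chop_write() then fails the transfer with CURLE_WRITE_ERROR.
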
359 | | |
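
For HTTP, the Curl_headers_push() call above also records each header in the store behind the public curl_easy_header() API, tagged with the same CURLH_* origin bits computed there. A small application-side sketch of reading one header back after the transfer; the header name is only an example.

    #include <stdio.h>
    #include <curl/curl.h>

    /* Fetch a response header recorded during the transfer. */
    static void show_content_type(CURL *easy)
    {
      struct curl_header *h;
      if(curl_easy_header(easy, "Content-Type", 0, CURLH_HEADER, -1, &h)
         == CURLHE_OK)
        printf("Content-Type: %s\n", h->value);
    }
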
360 | | |
361 | | /* Curl_client_write() sends data to the write callback(s) |
362 | | |
363 | | The bit pattern defines which "streams" to write to: body and/or header.
364 | | The defines are in sendf.h of course.
365 | |
366 | | If CURL_DO_LINEEND_CONV is enabled, line endings in FTP ASCII data are
367 | | converted IN PLACE. This is a problem and should be changed in the
368 | | future to leave the original data alone.
369 | | */ |
370 | | CURLcode Curl_client_write(struct Curl_easy *data, |
371 | | int type, |
372 | | char *ptr, |
373 | | size_t len) |
374 | 0 | { |
375 | 0 | #if !defined(CURL_DISABLE_FTP) && defined(CURL_DO_LINEEND_CONV) |
376 | | /* FTP data may need conversion. */ |
377 | 0 | if((type & CLIENTWRITE_BODY) && |
378 | 0 | (data->conn->handler->protocol & PROTO_FAMILY_FTP) && |
379 | 0 | data->conn->proto.ftpc.transfertype == 'A') { |
380 | | /* convert end-of-line markers */ |
381 | 0 | len = convert_lineends(data, ptr, len); |
382 | 0 | } |
383 | 0 | #endif |
384 | 0 | return chop_write(data, type, ptr, len); |
385 | 0 | } |
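
Inside libcurl, protocol handlers deliver received data through Curl_client_write() with CLIENTWRITE_* bits describing what the bytes are. A hypothetical internal-style sketch, assuming this file's context; deliver(), headerline and body are illustrative names.

    /* Hypothetical helper: deliver one parsed header line and then a
     * block of body bytes. The type bits decide which client callback(s)
     * chop_write() invokes. */
    static CURLcode deliver(struct Curl_easy *data,
                            char *headerline, size_t hlen,
                            char *body, size_t blen)
    {
      CURLcode result = Curl_client_write(data, CLIENTWRITE_HEADER,
                                          headerline, hlen);
      if(!result && blen)
        result = Curl_client_write(data, CLIENTWRITE_BODY, body, blen);
      return result;
    }
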
386 | | |
387 | | /* |
388 | | * Internal read-from-socket function. This is meant to deal with plain |
389 | | * sockets, SSL sockets and Kerberos sockets.
390 | | * |
391 | | * Returns a regular CURLcode value. |
392 | | */ |
393 | | CURLcode Curl_read(struct Curl_easy *data, /* transfer */ |
394 | | curl_socket_t sockfd, /* read from this socket */ |
395 | | char *buf, /* store read data here */ |
396 | | size_t sizerequested, /* max amount to read */ |
397 | | ssize_t *n) /* number of bytes read */
398 | 0 | { |
399 | 0 | CURLcode result = CURLE_RECV_ERROR; |
400 | 0 | ssize_t nread = 0; |
401 | 0 | size_t bytesfromsocket = 0; |
402 | 0 | char *buffertofill = NULL; |
403 | 0 | struct connectdata *conn = data->conn; |
404 | | |
405 | | /* Set 'num' to 0 or 1, depending on which socket was passed in: 1 for
406 | | the secondary socket, otherwise 0. This lets us use the correct SSL
407 | | handle. */
408 | 0 | int num = (sockfd == conn->sock[SECONDARYSOCKET]); |
409 | |
410 | 0 | *n = 0; /* reset amount to zero */ |
411 | |
412 | 0 | bytesfromsocket = CURLMIN(sizerequested, (size_t)data->set.buffer_size); |
413 | 0 | buffertofill = buf; |
414 | |
415 | 0 | nread = conn->recv[num](data, num, buffertofill, bytesfromsocket, &result); |
416 | 0 | if(nread < 0) |
417 | 0 | goto out; |
418 | | |
419 | 0 | *n += nread; |
420 | 0 | result = CURLE_OK; |
421 | 0 | out: |
422 | | /* DEBUGF(infof(data, "Curl_read(handle=%p) -> %d, nread=%ld", |
423 | | data, result, nread)); */ |
424 | 0 | return result; |
425 | 0 | } |
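
Curl_read() clamps the request to data->set.buffer_size and dispatches to conn->recv[num], where num is 1 only for the secondary socket (for example an FTP data connection). Unlike Curl_write(), a would-block condition surfaces here as CURLE_AGAIN from the underlying recv implementation. A sketch of a caller, assuming this file's context; read_once is a hypothetical helper.

    /* Hypothetical helper: one read attempt. On success *got holds the
     * number of bytes placed in buf; CURLE_AGAIN means nothing was
     * available right now. */
    static CURLcode read_once(struct Curl_easy *data, curl_socket_t sockfd,
                              char *buf, size_t buflen, size_t *got)
    {
      ssize_t nread = 0;
      CURLcode result = Curl_read(data, sockfd, buf, buflen, &nread);
      *got = result ? 0 : (size_t)nread;
      return result;
    }
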
426 | | |