/src/PROJ/curl/lib/multi.c
Line | Count | Source |
1 | | /*************************************************************************** |
2 | | * _ _ ____ _ |
3 | | * Project ___| | | | _ \| | |
4 | | * / __| | | | |_) | | |
5 | | * | (__| |_| | _ <| |___ |
6 | | * \___|\___/|_| \_\_____| |
7 | | * |
8 | | * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al. |
9 | | * |
10 | | * This software is licensed as described in the file COPYING, which |
11 | | * you should have received as part of this distribution. The terms |
12 | | * are also available at https://curl.se/docs/copyright.html. |
13 | | * |
14 | | * You may opt to use, copy, modify, merge, publish, distribute and/or sell |
15 | | * copies of the Software, and permit persons to whom the Software is |
16 | | * furnished to do so, under the terms of the COPYING file. |
17 | | * |
18 | | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY |
19 | | * KIND, either express or implied. |
20 | | * |
21 | | * SPDX-License-Identifier: curl |
22 | | * |
23 | | ***************************************************************************/ |
24 | | |
25 | | #include "curl_setup.h" |
26 | | |
27 | | #include <curl/curl.h> |
28 | | |
29 | | #include "urldata.h" |
30 | | #include "transfer.h" |
31 | | #include "url.h" |
32 | | #include "cfilters.h" |
33 | | #include "connect.h" |
34 | | #include "progress.h" |
35 | | #include "easyif.h" |
36 | | #include "share.h" |
37 | | #include "psl.h" |
38 | | #include "multiif.h" |
39 | | #include "multi_ev.h" |
40 | | #include "sendf.h" |
41 | | #include "curlx/timeval.h" |
42 | | #include "http.h" |
43 | | #include "select.h" |
44 | | #include "curlx/warnless.h" |
45 | | #include "curlx/wait.h" |
46 | | #include "speedcheck.h" |
47 | | #include "conncache.h" |
48 | | #include "multihandle.h" |
49 | | #include "sigpipe.h" |
50 | | #include "vtls/vtls.h" |
51 | | #include "vtls/vtls_scache.h" |
52 | | #include "http_proxy.h" |
53 | | #include "http2.h" |
54 | | #include "socketpair.h" |
55 | | #include "socks.h" |
56 | | #include "urlapi-int.h" |
57 | | /* The last 3 #include files should be in this order */ |
58 | | #include "curl_printf.h" |
59 | | #include "curl_memory.h" |
60 | | #include "memdebug.h" |
61 | | |
62 | | /* initial multi->xfers table size for a full multi */ |
63 | 0 | #define CURL_XFER_TABLE_SIZE 512 |
64 | | |
65 | | /* |
66 | | CURL_SOCKET_HASH_TABLE_SIZE should be a prime number. Increasing it from 97 |
67 | | to 911 costs roughly 4 x 814 = 3256 more bytes on a 32-bit machine. Still, |
68 | | every curl handle takes about 6K of memory, so this extra 3K is not significant. |
69 | | */ |
70 | | #ifndef CURL_SOCKET_HASH_TABLE_SIZE |
71 | 0 | #define CURL_SOCKET_HASH_TABLE_SIZE 911 |
72 | | #endif |
73 | | |
74 | | #ifndef CURL_CONNECTION_HASH_SIZE |
75 | 0 | #define CURL_CONNECTION_HASH_SIZE 97 |
76 | | #endif |
77 | | |
78 | | #ifndef CURL_DNS_HASH_SIZE |
79 | 0 | #define CURL_DNS_HASH_SIZE 71 |
80 | | #endif |
81 | | |
82 | | #ifndef CURL_TLS_SESSION_SIZE |
83 | 0 | #define CURL_TLS_SESSION_SIZE 25 |
84 | | #endif |
85 | | |
86 | 0 | #define CURL_MULTI_HANDLE 0x000bab1e |
87 | | |
88 | | #ifdef DEBUGBUILD |
89 | | /* On a debug build, we want to fail hard on multi handles that |
90 | | * are not NULL, but no longer have the MAGIC touch. This gives |
91 | | * us early warning on things only discovered by valgrind otherwise. */ |
92 | | #define GOOD_MULTI_HANDLE(x) \ |
93 | | (((x) && (x)->magic == CURL_MULTI_HANDLE)? TRUE: \ |
94 | | (DEBUGASSERT(!(x)), FALSE)) |
95 | | #else |
96 | | #define GOOD_MULTI_HANDLE(x) \ |
97 | 0 | ((x) && (x)->magic == CURL_MULTI_HANDLE) |
98 | | #endif |
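/* For illustration (a sketch, mirroring how the public entry points further
 * down actually use it): every CURLM * coming in from the outside is checked
 * before anything else is done with it, e.g.
 *
 *   struct Curl_multi *multi = m;
 *   if(!GOOD_MULTI_HANDLE(multi))
 *     return CURLM_BAD_HANDLE;
 */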
99 | | |
100 | | static void move_pending_to_connect(struct Curl_multi *multi, |
101 | | struct Curl_easy *data); |
102 | | static CURLMcode add_next_timeout(struct curltime now, |
103 | | struct Curl_multi *multi, |
104 | | struct Curl_easy *d); |
105 | | static CURLMcode multi_timeout(struct Curl_multi *multi, |
106 | | struct curltime *expire_time, |
107 | | long *timeout_ms); |
108 | | static void process_pending_handles(struct Curl_multi *multi); |
109 | | static void multi_xfer_bufs_free(struct Curl_multi *multi); |
110 | | #ifdef DEBUGBUILD |
111 | | static void multi_xfer_tbl_dump(struct Curl_multi *multi); |
112 | | #endif |
113 | | |
114 | | /* function pointer called once when switching TO a state */ |
115 | | typedef void (*init_multistate_func)(struct Curl_easy *data); |
116 | | |
117 | | /* called in DID state, before PERFORMING state */ |
118 | | static void before_perform(struct Curl_easy *data) |
119 | 0 | { |
120 | 0 | data->req.chunk = FALSE; |
121 | 0 | Curl_pgrsTime(data, TIMER_PRETRANSFER); |
122 | 0 | } |
123 | | |
124 | | static void init_completed(struct Curl_easy *data) |
125 | 0 | { |
126 | | /* this is a completed transfer */ |
127 | | |
128 | | /* Important: reset the conn pointer so that we do not point to memory |
129 | | that could be freed anytime */ |
130 | 0 | Curl_detach_connection(data); |
131 | 0 | Curl_expire_clear(data); /* stop all timers */ |
132 | 0 | } |
133 | | |
134 | | /* always use this function to change state, to make debugging easier */ |
135 | | static void mstate(struct Curl_easy *data, CURLMstate state |
136 | | #ifdef DEBUGBUILD |
137 | | , int lineno |
138 | | #endif |
139 | | ) |
140 | 0 | { |
141 | 0 | CURLMstate oldstate = data->mstate; |
142 | 0 | static const init_multistate_func finit[MSTATE_LAST] = { |
143 | 0 | NULL, /* INIT */ |
144 | 0 | NULL, /* PENDING */ |
145 | 0 | NULL, /* SETUP */ |
146 | 0 | Curl_init_CONNECT, /* CONNECT */ |
147 | 0 | NULL, /* RESOLVING */ |
148 | 0 | NULL, /* CONNECTING */ |
149 | 0 | NULL, /* TUNNELING */ |
150 | 0 | NULL, /* PROTOCONNECT */ |
151 | 0 | NULL, /* PROTOCONNECTING */ |
152 | 0 | NULL, /* DO */ |
153 | 0 | NULL, /* DOING */ |
154 | 0 | NULL, /* DOING_MORE */ |
155 | 0 | before_perform, /* DID */ |
156 | 0 | NULL, /* PERFORMING */ |
157 | 0 | NULL, /* RATELIMITING */ |
158 | 0 | NULL, /* DONE */ |
159 | 0 | init_completed, /* COMPLETED */ |
160 | | NULL /* MSGSENT */ |
161 | 0 | }; |
162 | |
|
163 | 0 | if(oldstate == state) |
164 | | /* do not bother when the new state is the same as the old state */ |
165 | 0 | return; |
166 | | |
167 | | #ifdef DEBUGBUILD |
168 | | CURL_TRC_M(data, "-> [%s] (line %d)", CURL_MSTATE_NAME(state), lineno); |
169 | | #else |
170 | 0 | CURL_TRC_M(data, "-> [%s]", CURL_MSTATE_NAME(state)); |
171 | 0 | #endif |
172 | |
|
173 | 0 | data->mstate = state; |
174 | |
|
175 | 0 | if(state == MSTATE_COMPLETED) { |
176 | | /* changing to COMPLETED means it is in process and needs to go */ |
177 | 0 | DEBUGASSERT(Curl_uint_bset_contains(&data->multi->process, data->mid)); |
178 | 0 | Curl_uint_bset_remove(&data->multi->process, data->mid); |
179 | 0 | Curl_uint_bset_remove(&data->multi->pending, data->mid); /* to be sure */ |
180 | |
|
181 | 0 | if(Curl_uint_bset_empty(&data->multi->process)) { |
182 | | /* free the transfer buffer when we have no more active transfers */ |
183 | 0 | multi_xfer_bufs_free(data->multi); |
184 | 0 | } |
185 | 0 | } |
186 | | |
187 | | /* if this state has an init-function, run it */ |
188 | 0 | if(finit[state]) |
189 | 0 | finit[state](data); |
190 | 0 | } |
191 | | |
192 | | #ifndef DEBUGBUILD |
193 | 0 | #define multistate(x,y) mstate(x,y) |
194 | | #else |
195 | | #define multistate(x,y) mstate(x,y, __LINE__) |
196 | | #endif |
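/* Illustration: in a DEBUGBUILD, multistate(data, MSTATE_INIT) expands to
 * mstate(data, MSTATE_INIT, __LINE__), so the "-> [...] (line N)" trace
 * emitted by mstate() points at the exact call site that switched state. */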
197 | | |
198 | | |
199 | | /* multi->proto_hash destructor. Should never be called as elements |
200 | | * MUST be added with their own destructor */ |
201 | | static void ph_freeentry(void *p) |
202 | 0 | { |
203 | 0 | (void)p; |
204 | | /* Will always be FALSE. Cannot use a 0 assert here since compilers |
205 | | * are not in agreement if they then want a NORETURN attribute or |
206 | | * not. *sigh* */ |
207 | 0 | DEBUGASSERT(p == NULL); |
208 | 0 | } |
209 | | |
210 | | /* |
211 | | * multi_addmsg() |
212 | | * |
213 | | * Called when a transfer is completed. Adds the given msg pointer to |
214 | | * the list kept in the multi handle. |
215 | | */ |
216 | | static void multi_addmsg(struct Curl_multi *multi, struct Curl_message *msg) |
217 | 0 | { |
218 | 0 | Curl_llist_append(&multi->msglist, msg, &msg->list); |
219 | 0 | } |
220 | | |
221 | | struct Curl_multi *Curl_multi_handle(unsigned int xfer_table_size, |
222 | | size_t ev_hashsize, /* event hash */ |
223 | | size_t chashsize, /* connection hash */ |
224 | | size_t dnssize, /* dns hash */ |
225 | | size_t sesssize) /* TLS session cache */ |
226 | 0 | { |
227 | 0 | struct Curl_multi *multi = calloc(1, sizeof(struct Curl_multi)); |
228 | |
|
229 | 0 | if(!multi) |
230 | 0 | return NULL; |
231 | | |
232 | 0 | multi->magic = CURL_MULTI_HANDLE; |
233 | |
|
234 | 0 | Curl_dnscache_init(&multi->dnscache, dnssize); |
235 | 0 | Curl_multi_ev_init(multi, ev_hashsize); |
236 | 0 | Curl_uint_tbl_init(&multi->xfers, NULL); |
237 | 0 | Curl_uint_bset_init(&multi->process); |
238 | 0 | Curl_uint_bset_init(&multi->dirty); |
239 | 0 | Curl_uint_bset_init(&multi->pending); |
240 | 0 | Curl_uint_bset_init(&multi->msgsent); |
241 | 0 | Curl_hash_init(&multi->proto_hash, 23, |
242 | 0 | Curl_hash_str, curlx_str_key_compare, ph_freeentry); |
243 | 0 | Curl_llist_init(&multi->msglist, NULL); |
244 | |
|
245 | 0 | multi->multiplexing = TRUE; |
246 | 0 | multi->max_concurrent_streams = 100; |
247 | 0 | multi->last_timeout_ms = -1; |
248 | |
|
249 | 0 | if(Curl_uint_bset_resize(&multi->process, xfer_table_size) || |
250 | 0 | Curl_uint_bset_resize(&multi->pending, xfer_table_size) || |
251 | 0 | Curl_uint_bset_resize(&multi->dirty, xfer_table_size) || |
252 | 0 | Curl_uint_bset_resize(&multi->msgsent, xfer_table_size) || |
253 | 0 | Curl_uint_tbl_resize(&multi->xfers, xfer_table_size)) |
254 | 0 | goto error; |
255 | | |
256 | 0 | multi->admin = curl_easy_init(); |
257 | 0 | if(!multi->admin) |
258 | 0 | goto error; |
259 | | /* Initialize admin handle to operate inside this multi */ |
260 | 0 | multi->admin->multi = multi; |
261 | 0 | multi->admin->state.internal = TRUE; |
262 | 0 | Curl_llist_init(&multi->admin->state.timeoutlist, NULL); |
263 | | #ifdef DEBUGBUILD |
264 | | if(getenv("CURL_DEBUG")) |
265 | | multi->admin->set.verbose = TRUE; |
266 | | #endif |
267 | 0 | Curl_uint_tbl_add(&multi->xfers, multi->admin, &multi->admin->mid); |
268 | |
|
269 | 0 | if(Curl_cshutdn_init(&multi->cshutdn, multi)) |
270 | 0 | goto error; |
271 | | |
272 | 0 | Curl_cpool_init(&multi->cpool, multi->admin, NULL, chashsize); |
273 | |
|
274 | 0 | #ifdef USE_SSL |
275 | 0 | if(Curl_ssl_scache_create(sesssize, 2, &multi->ssl_scache)) |
276 | 0 | goto error; |
277 | | #else |
278 | | (void)sesssize; |
279 | | #endif |
280 | | |
281 | | #ifdef USE_WINSOCK |
282 | | multi->wsa_event = WSACreateEvent(); |
283 | | if(multi->wsa_event == WSA_INVALID_EVENT) |
284 | | goto error; |
285 | | #elif defined(ENABLE_WAKEUP) |
286 | 0 | if(wakeup_create(multi->wakeup_pair, TRUE) < 0) { |
287 | 0 | multi->wakeup_pair[0] = CURL_SOCKET_BAD; |
288 | 0 | multi->wakeup_pair[1] = CURL_SOCKET_BAD; |
289 | 0 | } |
290 | 0 | #endif |
291 | |
|
292 | 0 | return multi; |
293 | | |
294 | 0 | error: |
295 | |
|
296 | 0 | Curl_multi_ev_cleanup(multi); |
297 | 0 | Curl_hash_destroy(&multi->proto_hash); |
298 | 0 | Curl_dnscache_destroy(&multi->dnscache); |
299 | 0 | Curl_cpool_destroy(&multi->cpool); |
300 | 0 | Curl_cshutdn_destroy(&multi->cshutdn, multi->admin); |
301 | 0 | #ifdef USE_SSL |
302 | 0 | Curl_ssl_scache_destroy(multi->ssl_scache); |
303 | 0 | #endif |
304 | 0 | if(multi->admin) { |
305 | 0 | multi->admin->multi = NULL; |
306 | 0 | Curl_close(&multi->admin); |
307 | 0 | } |
308 | |
|
309 | 0 | Curl_uint_bset_destroy(&multi->process); |
310 | 0 | Curl_uint_bset_destroy(&multi->dirty); |
311 | 0 | Curl_uint_bset_destroy(&multi->pending); |
312 | 0 | Curl_uint_bset_destroy(&multi->msgsent); |
313 | 0 | Curl_uint_tbl_destroy(&multi->xfers); |
314 | |
|
315 | 0 | free(multi); |
316 | 0 | return NULL; |
317 | 0 | } |
318 | | |
319 | | CURLM *curl_multi_init(void) |
320 | 0 | { |
321 | 0 | return Curl_multi_handle(CURL_XFER_TABLE_SIZE, |
322 | 0 | CURL_SOCKET_HASH_TABLE_SIZE, |
323 | 0 | CURL_CONNECTION_HASH_SIZE, |
324 | 0 | CURL_DNS_HASH_SIZE, |
325 | 0 | CURL_TLS_SESSION_SIZE); |
326 | 0 | } |
327 | | |
328 | | #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) |
329 | | static void multi_warn_debug(struct Curl_multi *multi, struct Curl_easy *data) |
330 | | { |
331 | | if(!multi->warned) { |
332 | | infof(data, "!!! WARNING !!!"); |
333 | | infof(data, "This is a debug build of libcurl, " |
334 | | "do not use in production."); |
335 | | multi->warned = TRUE; |
336 | | } |
337 | | } |
338 | | #else |
339 | 0 | #define multi_warn_debug(x,y) Curl_nop_stmt |
340 | | #endif |
341 | | |
342 | | |
343 | | static CURLMcode multi_xfers_add(struct Curl_multi *multi, |
344 | | struct Curl_easy *data) |
345 | 0 | { |
346 | 0 | unsigned int capacity = Curl_uint_tbl_capacity(&multi->xfers); |
347 | 0 | unsigned int new_size = 0; |
348 | | /* Prepare to make this into a CURLMOPT_MAX_TRANSFERS, because some |
349 | | * applications may want to prevent runaway memory use. */ |
350 | | /* UINT_MAX is our "invalid" id, do not let the table grow up to that. */ |
351 | 0 | const unsigned int max_capacity = UINT_MAX - 1; |
352 | |
|
353 | 0 | if(capacity < max_capacity) { |
354 | | /* We want `multi->xfers` to have "sufficient" free rows, so that we do |
355 | | * not have to reuse the `mid` from a just removed easy right away. |
356 | | * Since uint_tbl and uint_bset are quite memory efficient, |
357 | | * regard less than 25% free as insufficient. |
358 | | * (for low capacities, e.g. multi_easy, 4 or less). */ |
359 | 0 | unsigned int used = Curl_uint_tbl_count(&multi->xfers); |
360 | 0 | unsigned int unused = capacity - used; |
361 | 0 | unsigned int min_unused = CURLMAX(capacity >> 2, 4); |
362 | 0 | if(unused <= min_unused) { |
363 | | /* Make sure the uint arithmetic here works on the corner |
364 | | * cases where we are close to max_capacity or UINT_MAX */ |
365 | 0 | if((min_unused >= max_capacity) || |
366 | 0 | ((max_capacity - min_unused) <= capacity) || |
367 | 0 | ((UINT_MAX - min_unused - 63) <= capacity)) { |
368 | 0 | new_size = max_capacity; /* can not be larger than this */ |
369 | 0 | } |
370 | 0 | else { |
371 | | /* make it a multiple of 64, since our bitsets grow by that and |
372 | | * small ones (e.g. multi_easy) grow to at least 64 on the first resize. */ |
373 | 0 | new_size = (((used + min_unused) + 63) / 64) * 64; |
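        /* e.g. used == 100 and min_unused == 32 gives
           (((100 + 32) + 63) / 64) * 64 == 192, the next multiple of 64 */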
374 | 0 | } |
375 | 0 | } |
376 | 0 | } |
377 | |
|
378 | 0 | if(new_size > capacity) { |
379 | | /* Grow the bitsets first. Should one fail, we do not need |
380 | | * to downsize the already resized ones. The sets continue |
381 | | * to work properly when larger than the table, but not |
382 | | * the other way around. */ |
383 | 0 | CURL_TRC_M(data, "increasing xfer table size to %u", new_size); |
384 | 0 | if(Curl_uint_bset_resize(&multi->process, new_size) || |
385 | 0 | Curl_uint_bset_resize(&multi->dirty, new_size) || |
386 | 0 | Curl_uint_bset_resize(&multi->pending, new_size) || |
387 | 0 | Curl_uint_bset_resize(&multi->msgsent, new_size) || |
388 | 0 | Curl_uint_tbl_resize(&multi->xfers, new_size)) |
389 | 0 | return CURLM_OUT_OF_MEMORY; |
390 | 0 | } |
391 | | |
392 | | /* Insert the easy into the table now */ |
393 | 0 | if(!Curl_uint_tbl_add(&multi->xfers, data, &data->mid)) { |
394 | | /* MUST only happen when table is full */ |
395 | 0 | DEBUGASSERT(Curl_uint_tbl_capacity(&multi->xfers) <= |
396 | 0 | Curl_uint_tbl_count(&multi->xfers)); |
397 | 0 | return CURLM_OUT_OF_MEMORY; |
398 | 0 | } |
399 | 0 | return CURLM_OK; |
400 | 0 | } |
401 | | |
402 | | |
403 | | CURLMcode curl_multi_add_handle(CURLM *m, CURL *d) |
404 | 0 | { |
405 | 0 | CURLMcode rc; |
406 | 0 | struct Curl_multi *multi = m; |
407 | 0 | struct Curl_easy *data = d; |
408 | | /* First, make some basic checks that the CURLM handle is a good handle */ |
409 | 0 | if(!GOOD_MULTI_HANDLE(multi)) |
410 | 0 | return CURLM_BAD_HANDLE; |
411 | | |
412 | | /* Verify that we got a somewhat good easy handle too */ |
413 | 0 | if(!GOOD_EASY_HANDLE(data)) |
414 | 0 | return CURLM_BAD_EASY_HANDLE; |
415 | | |
416 | | /* Prevent users from adding the same easy handle more than once and from |
417 | | adding it to more than one multi stack */ |
418 | 0 | if(data->multi) |
419 | 0 | return CURLM_ADDED_ALREADY; |
420 | | |
421 | 0 | if(multi->in_callback) |
422 | 0 | return CURLM_RECURSIVE_API_CALL; |
423 | | |
424 | 0 | if(multi->dead) { |
425 | | /* a "dead" handle cannot get added transfers while any existing easy |
426 | | handles are still alive - but if there are none alive anymore, it is |
427 | | fine to start over and unmark the "deadness" of this handle. |
428 | | That is the case exactly when only the admin handle is still present. */ |
429 | 0 | if((Curl_uint_tbl_count(&multi->xfers) != 1) || |
430 | 0 | !Curl_uint_tbl_contains(&multi->xfers, 0)) |
431 | 0 | return CURLM_ABORTED_BY_CALLBACK; |
432 | 0 | multi->dead = FALSE; |
433 | 0 | Curl_uint_bset_clear(&multi->process); |
434 | 0 | Curl_uint_bset_clear(&multi->dirty); |
435 | 0 | Curl_uint_bset_clear(&multi->pending); |
436 | 0 | Curl_uint_bset_clear(&multi->msgsent); |
437 | 0 | } |
438 | | |
439 | 0 | if(data->multi_easy) { |
440 | | /* if this easy handle was previously used for curl_easy_perform(), there |
441 | | is a private multi handle here that we can kill */ |
442 | 0 | curl_multi_cleanup(data->multi_easy); |
443 | 0 | data->multi_easy = NULL; |
444 | 0 | } |
445 | | |
446 | | /* Insert the easy into the multi->xfers table, assigning it a `mid`. */ |
447 | 0 | if(multi_xfers_add(multi, data)) |
448 | 0 | return CURLM_OUT_OF_MEMORY; |
449 | | |
450 | | /* Initialize timeout list for this handle */ |
451 | 0 | Curl_llist_init(&data->state.timeoutlist, NULL); |
452 | | |
453 | | /* |
454 | | * No failure allowed in this function beyond this point. No modification of |
455 | | * either the easy or the multi handle is allowed before this, except for |
456 | | * growing the multi's connection pool, which will not be undone in this |
457 | | * function no matter what. |
458 | | */ |
459 | 0 | if(data->set.errorbuffer) |
460 | 0 | data->set.errorbuffer[0] = 0; |
461 | |
|
462 | 0 | data->state.os_errno = 0; |
463 | | |
464 | | /* make the Curl_easy refer back to this multi handle - before Curl_expire() |
465 | | is called. */ |
466 | 0 | data->multi = multi; |
467 | | |
468 | | /* set the easy handle */ |
469 | 0 | multistate(data, MSTATE_INIT); |
470 | |
|
471 | | #ifdef USE_LIBPSL |
472 | | /* Pick the PSL to use: the share's if shared, otherwise the multi's. */ |
473 | | if(data->share && (data->share->specifier & (1 << CURL_LOCK_DATA_PSL))) |
474 | | data->psl = &data->share->psl; |
475 | | else |
476 | | data->psl = &multi->psl; |
477 | | #endif |
478 | | |
479 | | /* add the easy handle to the process set */ |
480 | 0 | Curl_uint_bset_add(&multi->process, data->mid); |
481 | 0 | ++multi->xfers_alive; |
482 | |
|
483 | 0 | Curl_cpool_xfer_init(data); |
484 | 0 | multi_warn_debug(multi, data); |
485 | | |
486 | | /* Make sure the new handle will run */ |
487 | 0 | Curl_multi_mark_dirty(data); |
488 | | /* Necessary in event based processing, where dirty handles trigger |
489 | | * a timeout callback invocation. */ |
490 | 0 | rc = Curl_update_timer(multi); |
491 | 0 | if(rc) { |
492 | 0 | data->multi = NULL; /* not anymore */ |
493 | 0 | Curl_uint_tbl_remove(&multi->xfers, data->mid); |
494 | 0 | data->mid = UINT_MAX; |
495 | 0 | return rc; |
496 | 0 | } |
497 | | |
498 | | /* The admin handle only ever has default timeouts set. To improve the |
499 | | state somewhat we clone the timeouts from each added handle so that the |
500 | | admin handle always has the same timeouts as the most recently added |
501 | | easy handle. */ |
502 | 0 | multi->admin->set.timeout = data->set.timeout; |
503 | 0 | multi->admin->set.server_response_timeout = |
504 | 0 | data->set.server_response_timeout; |
505 | 0 | multi->admin->set.no_signal = data->set.no_signal; |
506 | |
|
507 | 0 | CURL_TRC_M(data, "added to multi, mid=%u, running=%u, total=%u", |
508 | 0 | data->mid, Curl_multi_xfers_running(multi), |
509 | 0 | Curl_uint_tbl_count(&multi->xfers)); |
510 | 0 | return CURLM_OK; |
511 | 0 | } |
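/* A minimal application-side sketch (illustrative only, not part of this
 * file) of how a transfer typically ends up in curl_multi_add_handle():
 *
 *   CURLM *m = curl_multi_init();
 *   CURL *h = curl_easy_init();
 *   curl_easy_setopt(h, CURLOPT_URL, "https://example.com/");
 *   curl_multi_add_handle(m, h);
 *   ... drive the transfer with curl_multi_perform()/curl_multi_poll() ...
 *   curl_multi_remove_handle(m, h);
 *   curl_easy_cleanup(h);
 *   curl_multi_cleanup(m);
 */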
512 | | |
513 | | #if 0 |
514 | | /* Debug-function, used like this: |
515 | | * |
516 | | * Curl_hash_print(&multi->sockhash, debug_print_sock_hash); |
517 | | * |
518 | | * Enable the hash print function first by editing hash.c |
519 | | */ |
520 | | static void debug_print_sock_hash(void *p) |
521 | | { |
522 | | struct Curl_sh_entry *sh = (struct Curl_sh_entry *)p; |
523 | | |
524 | | fprintf(stderr, " [readers %u][writers %u]", |
525 | | sh->readers, sh->writers); |
526 | | } |
527 | | #endif |
528 | | |
529 | | struct multi_done_ctx { |
530 | | BIT(premature); |
531 | | }; |
532 | | |
533 | | static void multi_done_locked(struct connectdata *conn, |
534 | | struct Curl_easy *data, |
535 | | void *userdata) |
536 | 0 | { |
537 | 0 | struct multi_done_ctx *mdctx = userdata; |
538 | 0 | #ifndef CURL_DISABLE_VERBOSE_STRINGS |
539 | 0 | const char *host = |
540 | 0 | #ifndef CURL_DISABLE_PROXY |
541 | 0 | conn->bits.socksproxy ? |
542 | 0 | conn->socks_proxy.host.dispname : |
543 | 0 | conn->bits.httpproxy ? conn->http_proxy.host.dispname : |
544 | 0 | #endif |
545 | 0 | conn->bits.conn_to_host ? conn->conn_to_host.dispname : |
546 | 0 | conn->host.dispname; |
547 | 0 | int port = |
548 | 0 | #ifndef CURL_DISABLE_PROXY |
549 | 0 | conn->bits.httpproxy ? conn->http_proxy.port : |
550 | 0 | #endif |
551 | 0 | conn->bits.conn_to_port ? conn->conn_to_port : |
552 | 0 | conn->remote_port; |
553 | 0 | #endif |
554 | |
|
555 | 0 | Curl_detach_connection(data); |
556 | |
|
557 | 0 | CURL_TRC_M(data, "multi_done_locked, in use=%u", |
558 | 0 | Curl_uint_spbset_count(&conn->xfers_attached)); |
559 | 0 | if(CONN_INUSE(conn)) { |
560 | | /* Stop if still used. */ |
561 | 0 | CURL_TRC_M(data, "Connection still in use %u, no more multi_done now!", |
562 | 0 | Curl_uint_spbset_count(&conn->xfers_attached)); |
563 | 0 | return; |
564 | 0 | } |
565 | | |
566 | 0 | data->state.done = TRUE; /* called just now! */ |
567 | 0 | data->state.recent_conn_id = conn->connection_id; |
568 | |
|
569 | 0 | Curl_resolv_unlink(data, &data->state.dns[0]); /* done with this */ |
570 | 0 | Curl_resolv_unlink(data, &data->state.dns[1]); |
571 | 0 | Curl_dnscache_prune(data); |
572 | | |
573 | | /* if data->set.reuse_forbid is TRUE, it means the libcurl client has |
574 | | forced us to close this connection. This is ignored for requests taking |
575 | | place in an NTLM/NEGOTIATE authentication handshake |
576 | | |
577 | | if conn->bits.close is TRUE, it means that the connection should be |
578 | | closed in spite of all our efforts to be nice, due to protocol |
579 | | restrictions in our or the server's end |
580 | | |
581 | | if premature is TRUE, it means this connection was said to be DONE before |
582 | | the entire request operation is complete and thus we cannot know in what |
583 | | state it is in for reuse, so we are forced to close it. In a perfect world |
584 | | we could add code that keeps track of whether we really must close it here, |
585 | | but currently we have no such detailed knowledge. |
586 | | */ |
587 | |
|
588 | 0 | if((data->set.reuse_forbid |
589 | 0 | #ifdef USE_NTLM |
590 | 0 | && !(conn->http_ntlm_state == NTLMSTATE_TYPE2 || |
591 | 0 | conn->proxy_ntlm_state == NTLMSTATE_TYPE2) |
592 | 0 | #endif |
593 | | #ifdef USE_SPNEGO |
594 | | && !(conn->http_negotiate_state == GSS_AUTHRECV || |
595 | | conn->proxy_negotiate_state == GSS_AUTHRECV) |
596 | | #endif |
597 | 0 | ) || conn->bits.close |
598 | 0 | || (mdctx->premature && !Curl_conn_is_multiplex(conn, FIRSTSOCKET))) { |
599 | 0 | #ifndef CURL_DISABLE_VERBOSE_STRINGS |
600 | 0 | CURL_TRC_M(data, "multi_done, terminating conn #%" FMT_OFF_T " to %s:%d, " |
601 | 0 | "forbid=%d, close=%d, premature=%d, conn_multiplex=%d", |
602 | 0 | conn->connection_id, host, port, data->set.reuse_forbid, |
603 | 0 | conn->bits.close, mdctx->premature, |
604 | 0 | Curl_conn_is_multiplex(conn, FIRSTSOCKET)); |
605 | 0 | #endif |
606 | 0 | connclose(conn, "disconnecting"); |
607 | 0 | Curl_conn_terminate(data, conn, mdctx->premature); |
608 | 0 | } |
609 | 0 | else if(!Curl_conn_get_max_concurrent(data, conn, FIRSTSOCKET)) { |
610 | 0 | #ifndef CURL_DISABLE_VERBOSE_STRINGS |
611 | 0 | CURL_TRC_M(data, "multi_done, conn #%" FMT_OFF_T " to %s:%d was shutdown" |
612 | 0 | " by server, not reusing", conn->connection_id, host, port); |
613 | 0 | #endif |
614 | 0 | connclose(conn, "server shutdown"); |
615 | 0 | Curl_conn_terminate(data, conn, mdctx->premature); |
616 | 0 | } |
617 | 0 | else { |
618 | | /* the connection is no longer in use by any transfer */ |
619 | 0 | if(Curl_cpool_conn_now_idle(data, conn)) { |
620 | | /* connection kept in the cpool */ |
621 | 0 | data->state.lastconnect_id = conn->connection_id; |
622 | 0 | #ifndef CURL_DISABLE_VERBOSE_STRINGS |
623 | 0 | infof(data, "Connection #%" FMT_OFF_T " to host %s:%d left intact", |
624 | 0 | conn->connection_id, host, port); |
625 | 0 | #endif |
626 | 0 | } |
627 | 0 | else { |
628 | | /* connection was removed from the cpool and destroyed. */ |
629 | 0 | data->state.lastconnect_id = -1; |
630 | 0 | } |
631 | 0 | } |
632 | 0 | } |
633 | | |
634 | | static CURLcode multi_done(struct Curl_easy *data, |
635 | | CURLcode status, /* an error if this is called |
636 | | after an error was detected */ |
637 | | bool premature) |
638 | 0 | { |
639 | 0 | CURLcode result; |
640 | 0 | struct connectdata *conn = data->conn; |
641 | 0 | struct multi_done_ctx mdctx; |
642 | |
|
643 | 0 | memset(&mdctx, 0, sizeof(mdctx)); |
644 | |
|
645 | 0 | CURL_TRC_M(data, "multi_done: status: %d prem: %d done: %d", |
646 | 0 | (int)status, (int)premature, data->state.done); |
647 | |
|
648 | 0 | if(data->state.done) |
649 | | /* Stop if multi_done() has already been called */ |
650 | 0 | return CURLE_OK; |
651 | | |
652 | | /* Shut down any ongoing async resolver operation. */ |
653 | 0 | Curl_async_shutdown(data); |
654 | | |
655 | | /* Cleanup possible redirect junk */ |
656 | 0 | Curl_safefree(data->req.newurl); |
657 | 0 | Curl_safefree(data->req.location); |
658 | |
|
659 | 0 | switch(status) { |
660 | 0 | case CURLE_ABORTED_BY_CALLBACK: |
661 | 0 | case CURLE_READ_ERROR: |
662 | 0 | case CURLE_WRITE_ERROR: |
663 | | /* When we are aborted due to a callback return code, it basically has to |
664 | | be counted as premature, as there is trouble ahead if we do not. We have |
665 | | many callbacks, and protocols work differently; we could potentially do |
666 | | this in a more fine-grained way in the future. */ |
667 | 0 | premature = TRUE; |
668 | 0 | FALLTHROUGH(); |
669 | 0 | default: |
670 | 0 | break; |
671 | 0 | } |
672 | | |
673 | | /* this calls the protocol-specific function pointer previously set */ |
674 | 0 | if(conn->handler->done && (data->mstate >= MSTATE_PROTOCONNECT)) |
675 | 0 | result = conn->handler->done(data, status, premature); |
676 | 0 | else |
677 | 0 | result = status; |
678 | |
|
679 | 0 | if(CURLE_ABORTED_BY_CALLBACK != result) { |
680 | | /* skip this if we already aborted via a callback, to avoid this |
681 | | triggering yet another callback */ |
682 | 0 | int rc = Curl_pgrsDone(data); |
683 | 0 | if(!result && rc) |
684 | 0 | result = CURLE_ABORTED_BY_CALLBACK; |
685 | 0 | } |
686 | | |
687 | | /* Make sure that transfer client writes are really done now. */ |
688 | 0 | result = Curl_1st_err(result, Curl_xfer_write_done(data, premature)); |
689 | | |
690 | | /* Inform connection filters that this transfer is done */ |
691 | 0 | Curl_conn_ev_data_done(data, premature); |
692 | |
|
693 | 0 | process_pending_handles(data->multi); /* connection / multiplex */ |
694 | |
|
695 | 0 | if(!result) |
696 | 0 | result = Curl_req_done(&data->req, data, premature); |
697 | | |
698 | | /* Under the potential connection pool's share lock, decide what to |
699 | | * do with the transfer's connection. */ |
700 | 0 | mdctx.premature = premature; |
701 | 0 | Curl_cpool_do_locked(data, data->conn, multi_done_locked, &mdctx); |
702 | | |
703 | | /* flush the netrc cache */ |
704 | 0 | Curl_netrc_cleanup(&data->state.netrc); |
705 | 0 | return result; |
706 | 0 | } |
707 | | |
708 | | static void close_connect_only(struct connectdata *conn, |
709 | | struct Curl_easy *data, |
710 | | void *userdata) |
711 | 0 | { |
712 | 0 | (void)userdata; |
713 | 0 | (void)data; |
714 | 0 | if(conn->connect_only) |
715 | 0 | connclose(conn, "Removing connect-only easy handle"); |
716 | 0 | } |
717 | | |
718 | | CURLMcode curl_multi_remove_handle(CURLM *m, CURL *d) |
719 | 0 | { |
720 | 0 | struct Curl_multi *multi = m; |
721 | 0 | struct Curl_easy *data = d; |
722 | 0 | bool premature; |
723 | 0 | struct Curl_llist_node *e; |
724 | 0 | CURLMcode rc; |
725 | 0 | bool removed_timer = FALSE; |
726 | 0 | unsigned int mid; |
727 | | |
728 | | /* First, make some basic checks that the CURLM handle is a good handle */ |
729 | 0 | if(!GOOD_MULTI_HANDLE(multi)) |
730 | 0 | return CURLM_BAD_HANDLE; |
731 | | |
732 | | /* Verify that we got a somewhat good easy handle too */ |
733 | 0 | if(!GOOD_EASY_HANDLE(data)) |
734 | 0 | return CURLM_BAD_EASY_HANDLE; |
735 | | |
736 | | /* Prevent users from trying to remove the same easy handle more than once */ |
737 | 0 | if(!data->multi) |
738 | 0 | return CURLM_OK; /* it is already removed so let's say it is fine! */ |
739 | | |
740 | | /* Prevent users from trying to remove an easy handle from the wrong multi */ |
741 | 0 | if(data->multi != multi) |
742 | 0 | return CURLM_BAD_EASY_HANDLE; |
743 | | |
744 | 0 | if(data->mid == UINT_MAX) { |
745 | 0 | DEBUGASSERT(0); |
746 | 0 | return CURLM_INTERNAL_ERROR; |
747 | 0 | } |
748 | 0 | if(Curl_uint_tbl_get(&multi->xfers, data->mid) != data) { |
749 | 0 | DEBUGASSERT(0); |
750 | 0 | return CURLM_INTERNAL_ERROR; |
751 | 0 | } |
752 | | |
753 | 0 | if(multi->in_callback) |
754 | 0 | return CURLM_RECURSIVE_API_CALL; |
755 | | |
756 | 0 | premature = (data->mstate < MSTATE_COMPLETED); |
757 | | |
758 | | /* If the 'state' is not INIT or COMPLETED, we might need to do something |
759 | | nice to put the easy_handle in a good known state when this returns. */ |
760 | 0 | if(data->conn && |
761 | 0 | data->mstate > MSTATE_DO && |
762 | 0 | data->mstate < MSTATE_COMPLETED) { |
763 | | /* Set connection owner so that the DONE function closes it. We can |
764 | | safely do this here since the connection is killed. */ |
765 | 0 | streamclose(data->conn, "Removed with partial response"); |
766 | 0 | } |
767 | |
|
768 | 0 | if(data->conn) { |
769 | | /* multi_done() clears the association between the easy handle and the |
770 | | connection. |
771 | | |
772 | | Note that this ignores the return code simply because there is |
773 | | nothing really useful to do with it anyway! */ |
774 | 0 | (void)multi_done(data, data->result, premature); |
775 | 0 | } |
776 | | |
777 | | /* The timer must be shut down before data->multi is set to NULL, else the |
778 | | timenode will remain in the splay tree after curl_easy_cleanup is |
779 | | called. Do it after multi_done() in case that sets another expire time! */ |
780 | 0 | removed_timer = Curl_expire_clear(data); |
781 | | |
782 | | /* If in `msgsent`, it was deducted from `multi->xfers_alive` already. */ |
783 | 0 | if(!Curl_uint_bset_contains(&multi->msgsent, data->mid)) |
784 | 0 | --multi->xfers_alive; |
785 | |
|
786 | 0 | Curl_wildcard_dtor(&data->wildcard); |
787 | |
|
788 | 0 | data->mstate = MSTATE_COMPLETED; |
789 | | |
790 | | /* Remove the association between the connection and the handle */ |
791 | 0 | Curl_detach_connection(data); |
792 | | |
793 | | /* Tell event handling that this transfer is definitely going away */ |
794 | 0 | Curl_multi_ev_xfer_done(multi, data); |
795 | |
|
796 | 0 | if(data->set.connect_only && !data->multi_easy) { |
797 | | /* This removes a handle that was part of the multi interface and used |
798 | | CONNECT_ONLY. That connection is now left alive, but since this handle |
799 | | has bits.close set, nothing can use that transfer anymore and it is |
800 | | forbidden from reuse. This easy handle cannot find the connection |
801 | | anymore once removed from the multi handle. |
802 | | |
803 | | Better close the connection here, at once. |
804 | | */ |
805 | 0 | struct connectdata *c; |
806 | 0 | curl_socket_t s; |
807 | 0 | s = Curl_getconnectinfo(data, &c); |
808 | 0 | if((s != CURL_SOCKET_BAD) && c) { |
809 | 0 | Curl_conn_terminate(data, c, TRUE); |
810 | 0 | } |
811 | 0 | } |
812 | |
|
813 | 0 | if(data->state.lastconnect_id != -1) { |
814 | | /* Mark any connect-only connection for closure */ |
815 | 0 | Curl_cpool_do_by_id(data, data->state.lastconnect_id, |
816 | 0 | close_connect_only, NULL); |
817 | 0 | } |
818 | |
|
819 | | #ifdef USE_LIBPSL |
820 | | /* Remove the PSL association. */ |
821 | | if(data->psl == &multi->psl) |
822 | | data->psl = NULL; |
823 | | #endif |
824 | | |
825 | | /* make sure there is no pending message in the queue sent from this easy |
826 | | handle */ |
827 | 0 | for(e = Curl_llist_head(&multi->msglist); e; e = Curl_node_next(e)) { |
828 | 0 | struct Curl_message *msg = Curl_node_elem(e); |
829 | |
|
830 | 0 | if(msg->extmsg.easy_handle == data) { |
831 | 0 | Curl_node_remove(e); |
832 | | /* there can only be one from this specific handle */ |
833 | 0 | break; |
834 | 0 | } |
835 | 0 | } |
836 | | |
837 | | /* clear the association to this multi handle */ |
838 | 0 | mid = data->mid; |
839 | 0 | DEBUGASSERT(Curl_uint_tbl_contains(&multi->xfers, mid)); |
840 | 0 | Curl_uint_tbl_remove(&multi->xfers, mid); |
841 | 0 | Curl_uint_bset_remove(&multi->process, mid); |
842 | 0 | Curl_uint_bset_remove(&multi->dirty, mid); |
843 | 0 | Curl_uint_bset_remove(&multi->pending, mid); |
844 | 0 | Curl_uint_bset_remove(&multi->msgsent, mid); |
845 | 0 | data->multi = NULL; |
846 | 0 | data->mid = UINT_MAX; |
847 | 0 | data->master_mid = UINT_MAX; |
848 | | |
849 | | /* NOTE NOTE NOTE |
850 | | We do not touch the easy handle here! */ |
851 | 0 | process_pending_handles(multi); |
852 | |
|
853 | 0 | if(removed_timer) { |
854 | 0 | rc = Curl_update_timer(multi); |
855 | 0 | if(rc) |
856 | 0 | return rc; |
857 | 0 | } |
858 | | |
859 | 0 | CURL_TRC_M(data, "removed from multi, mid=%u, running=%u, total=%u", |
860 | 0 | mid, Curl_multi_xfers_running(multi), |
861 | 0 | Curl_uint_tbl_count(&multi->xfers)); |
862 | 0 | return CURLM_OK; |
863 | 0 | } |
864 | | |
865 | | /* Return TRUE if the application asked for multiplexing */ |
866 | | bool Curl_multiplex_wanted(const struct Curl_multi *multi) |
867 | 0 | { |
868 | 0 | return multi && multi->multiplexing; |
869 | 0 | } |
870 | | |
871 | | /* |
872 | | * Curl_detach_connection() removes the given transfer from the connection. |
873 | | * |
874 | | * This is the only function that should clear data->conn. This will |
875 | | * occasionally be called with the data->conn pointer already cleared. |
876 | | */ |
877 | | void Curl_detach_connection(struct Curl_easy *data) |
878 | 0 | { |
879 | 0 | struct connectdata *conn = data->conn; |
880 | 0 | if(conn) { |
881 | 0 | Curl_uint_spbset_remove(&conn->xfers_attached, data->mid); |
882 | 0 | if(Curl_uint_spbset_empty(&conn->xfers_attached)) |
883 | 0 | conn->attached_multi = NULL; |
884 | 0 | } |
885 | 0 | data->conn = NULL; |
886 | 0 | } |
887 | | |
888 | | /* |
889 | | * Curl_attach_connection() attaches this transfer to this connection. |
890 | | * |
891 | | * This is the only function that should assign data->conn |
892 | | */ |
893 | | void Curl_attach_connection(struct Curl_easy *data, |
894 | | struct connectdata *conn) |
895 | 0 | { |
896 | 0 | DEBUGASSERT(data); |
897 | 0 | DEBUGASSERT(!data->conn); |
898 | 0 | DEBUGASSERT(conn); |
899 | 0 | data->conn = conn; |
900 | 0 | Curl_uint_spbset_add(&conn->xfers_attached, data->mid); |
901 | | /* all attached transfers must be from the same multi */ |
902 | 0 | if(!conn->attached_multi) |
903 | 0 | conn->attached_multi = data->multi; |
904 | 0 | DEBUGASSERT(conn->attached_multi == data->multi); |
905 | |
|
906 | 0 | if(conn->handler && conn->handler->attach) |
907 | 0 | conn->handler->attach(data, conn); |
908 | 0 | } |
909 | | |
910 | | static int connecting_getsock(struct Curl_easy *data, curl_socket_t *socks) |
911 | 0 | { |
912 | 0 | struct connectdata *conn = data->conn; |
913 | 0 | curl_socket_t sockfd; |
914 | |
|
915 | 0 | if(!conn) |
916 | 0 | return GETSOCK_BLANK; |
917 | 0 | sockfd = Curl_conn_get_socket(data, FIRSTSOCKET); |
918 | 0 | if(sockfd != CURL_SOCKET_BAD) { |
919 | | /* Default is to wait for something from the server */ |
920 | 0 | socks[0] = sockfd; |
921 | 0 | return GETSOCK_READSOCK(0); |
922 | 0 | } |
923 | 0 | return GETSOCK_BLANK; |
924 | 0 | } |
925 | | |
926 | | static int protocol_getsock(struct Curl_easy *data, curl_socket_t *socks) |
927 | 0 | { |
928 | 0 | struct connectdata *conn = data->conn; |
929 | 0 | curl_socket_t sockfd; |
930 | |
|
931 | 0 | if(!conn) |
932 | 0 | return GETSOCK_BLANK; |
933 | 0 | if(conn->handler->proto_getsock) |
934 | 0 | return conn->handler->proto_getsock(data, conn, socks); |
935 | 0 | sockfd = Curl_conn_get_socket(data, FIRSTSOCKET); |
936 | 0 | if(sockfd != CURL_SOCKET_BAD) { |
937 | | /* Default is to wait for something from the server */ |
938 | 0 | socks[0] = sockfd; |
939 | 0 | return GETSOCK_READSOCK(0); |
940 | 0 | } |
941 | 0 | return GETSOCK_BLANK; |
942 | 0 | } |
943 | | |
944 | | static int domore_getsock(struct Curl_easy *data, curl_socket_t *socks) |
945 | 0 | { |
946 | 0 | struct connectdata *conn = data->conn; |
947 | 0 | if(!conn) |
948 | 0 | return GETSOCK_BLANK; |
949 | 0 | if(conn->handler->domore_getsock) |
950 | 0 | return conn->handler->domore_getsock(data, conn, socks); |
951 | 0 | else if(conn->sockfd != CURL_SOCKET_BAD) { |
952 | | /* Default is that we want to send something to the server */ |
953 | 0 | socks[0] = conn->sockfd; |
954 | 0 | return GETSOCK_WRITESOCK(0); |
955 | 0 | } |
956 | 0 | return GETSOCK_BLANK; |
957 | 0 | } |
958 | | |
959 | | static int doing_getsock(struct Curl_easy *data, curl_socket_t *socks) |
960 | 0 | { |
961 | 0 | struct connectdata *conn = data->conn; |
962 | 0 | if(!conn) |
963 | 0 | return GETSOCK_BLANK; |
964 | 0 | if(conn->handler->doing_getsock) |
965 | 0 | return conn->handler->doing_getsock(data, conn, socks); |
966 | 0 | else if(conn->sockfd != CURL_SOCKET_BAD) { |
967 | | /* Default is that we want to send something to the server */ |
968 | 0 | socks[0] = conn->sockfd; |
969 | 0 | return GETSOCK_WRITESOCK(0); |
970 | 0 | } |
971 | 0 | return GETSOCK_BLANK; |
972 | 0 | } |
973 | | |
974 | | static int perform_getsock(struct Curl_easy *data, curl_socket_t *sock) |
975 | 0 | { |
976 | 0 | struct connectdata *conn = data->conn; |
977 | 0 | if(!conn) |
978 | 0 | return GETSOCK_BLANK; |
979 | 0 | else if(conn->handler->perform_getsock) |
980 | 0 | return conn->handler->perform_getsock(data, conn, sock); |
981 | 0 | else { |
982 | | /* Default is to obey the data->req.keepon flags for send/recv */ |
983 | 0 | int bitmap = GETSOCK_BLANK; |
984 | 0 | unsigned sockindex = 0; |
985 | 0 | if(CURL_WANT_RECV(data)) { |
986 | 0 | DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD); |
987 | 0 | bitmap |= GETSOCK_READSOCK(sockindex); |
988 | 0 | sock[sockindex] = conn->sockfd; |
989 | 0 | } |
990 | |
|
991 | 0 | if(Curl_req_want_send(data)) { |
992 | 0 | if((conn->sockfd != conn->writesockfd) || |
993 | 0 | bitmap == GETSOCK_BLANK) { |
994 | | /* only if they are not the same socket and we already have a |
995 | | readable one do we increase the index */ |
996 | 0 | if(bitmap != GETSOCK_BLANK) |
997 | 0 | sockindex++; /* increase index if we need two entries */ |
998 | |
|
999 | 0 | DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD); |
1000 | 0 | sock[sockindex] = conn->writesockfd; |
1001 | 0 | } |
1002 | 0 | bitmap |= GETSOCK_WRITESOCK(sockindex); |
1003 | 0 | } |
1004 | 0 | return bitmap; |
1005 | 0 | } |
1006 | 0 | } |
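/* Illustration of the default branch above: when both receiving and sending
 * are wanted and conn->sockfd != conn->writesockfd, sock[0] is set to
 * conn->sockfd, sock[1] to conn->writesockfd and the returned bitmap is
 * GETSOCK_READSOCK(0) | GETSOCK_WRITESOCK(1). */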
1007 | | |
1008 | | /* Initializes `ps` with the current socket poll actions needed |
1009 | | * for transfer `data`. */ |
1010 | | void Curl_multi_getsock(struct Curl_easy *data, |
1011 | | struct easy_pollset *ps, |
1012 | | const char *caller) |
1013 | 0 | { |
1014 | 0 | bool expect_sockets = TRUE; |
1015 | | |
1016 | | /* If the transfer has no connection, this is fine. Happens when |
1017 | | called via curl_multi_remove_handle() => Curl_multi_ev_assess() => |
1018 | | Curl_multi_getsock(). */ |
1019 | 0 | Curl_pollset_reset(data, ps); |
1020 | 0 | if(!data->conn) |
1021 | 0 | return; |
1022 | | |
1023 | 0 | switch(data->mstate) { |
1024 | 0 | case MSTATE_INIT: |
1025 | 0 | case MSTATE_PENDING: |
1026 | 0 | case MSTATE_SETUP: |
1027 | 0 | case MSTATE_CONNECT: |
1028 | | /* nothing to poll for yet */ |
1029 | 0 | expect_sockets = FALSE; |
1030 | 0 | break; |
1031 | | |
1032 | 0 | case MSTATE_RESOLVING: |
1033 | 0 | Curl_pollset_add_socks(data, ps, Curl_resolv_getsock); |
1034 | | /* connection filters are not involved in this phase. It's ok if we get no |
1035 | | * sockets to wait for. Resolving can wake up from other sources. */ |
1036 | 0 | expect_sockets = FALSE; |
1037 | 0 | break; |
1038 | | |
1039 | 0 | case MSTATE_CONNECTING: |
1040 | 0 | case MSTATE_TUNNELING: |
1041 | 0 | Curl_pollset_add_socks(data, ps, connecting_getsock); |
1042 | 0 | Curl_conn_adjust_pollset(data, data->conn, ps); |
1043 | 0 | break; |
1044 | | |
1045 | 0 | case MSTATE_PROTOCONNECT: |
1046 | 0 | case MSTATE_PROTOCONNECTING: |
1047 | 0 | Curl_pollset_add_socks(data, ps, protocol_getsock); |
1048 | 0 | Curl_conn_adjust_pollset(data, data->conn, ps); |
1049 | 0 | break; |
1050 | | |
1051 | 0 | case MSTATE_DO: |
1052 | 0 | case MSTATE_DOING: |
1053 | 0 | Curl_pollset_add_socks(data, ps, doing_getsock); |
1054 | 0 | Curl_conn_adjust_pollset(data, data->conn, ps); |
1055 | 0 | break; |
1056 | | |
1057 | 0 | case MSTATE_DOING_MORE: |
1058 | 0 | Curl_pollset_add_socks(data, ps, domore_getsock); |
1059 | 0 | Curl_conn_adjust_pollset(data, data->conn, ps); |
1060 | 0 | break; |
1061 | | |
1062 | 0 | case MSTATE_DID: /* same as PERFORMING in regard to polling */ |
1063 | 0 | case MSTATE_PERFORMING: |
1064 | 0 | Curl_pollset_add_socks(data, ps, perform_getsock); |
1065 | 0 | Curl_conn_adjust_pollset(data, data->conn, ps); |
1066 | 0 | break; |
1067 | | |
1068 | 0 | case MSTATE_RATELIMITING: |
1069 | | /* we need to let time pass, ignore socket(s) */ |
1070 | 0 | expect_sockets = FALSE; |
1071 | 0 | break; |
1072 | | |
1073 | 0 | case MSTATE_DONE: |
1074 | 0 | case MSTATE_COMPLETED: |
1075 | 0 | case MSTATE_MSGSENT: |
1076 | | /* nothing more to poll for */ |
1077 | 0 | expect_sockets = FALSE; |
1078 | 0 | break; |
1079 | | |
1080 | 0 | default: |
1081 | 0 | failf(data, "multi_getsock: unexpected multi state %d", data->mstate); |
1082 | 0 | DEBUGASSERT(0); |
1083 | 0 | expect_sockets = FALSE; |
1084 | 0 | break; |
1085 | 0 | } |
1086 | | |
1087 | | |
1088 | | /* Unblocked and waiting to receive with buffered input. |
1089 | | * Make transfer run again at next opportunity. */ |
1090 | 0 | if(!Curl_xfer_is_blocked(data) && |
1091 | 0 | ((Curl_pollset_want_read(data, ps, data->conn->sock[FIRSTSOCKET]) && |
1092 | 0 | Curl_conn_data_pending(data, FIRSTSOCKET)) || |
1093 | 0 | (Curl_pollset_want_read(data, ps, data->conn->sock[SECONDARYSOCKET]) && |
1094 | 0 | Curl_conn_data_pending(data, SECONDARYSOCKET)))) { |
1095 | 0 | CURL_TRC_M(data, "%s pollset[] has POLLIN, but there is still " |
1096 | 0 | "buffered input to consume -> mark as dirty", caller); |
1097 | 0 | Curl_multi_mark_dirty(data); |
1098 | 0 | } |
1099 | |
|
1100 | 0 | switch(ps->num) { |
1101 | 0 | case 0: |
1102 | 0 | CURL_TRC_M(data, "%s pollset[], timeouts=%zu, paused %d/%d (r/w)", |
1103 | 0 | caller, Curl_llist_count(&data->state.timeoutlist), |
1104 | 0 | Curl_xfer_send_is_paused(data), |
1105 | 0 | Curl_xfer_recv_is_paused(data)); |
1106 | 0 | break; |
1107 | 0 | case 1: |
1108 | 0 | CURL_TRC_M(data, "%s pollset[fd=%" FMT_SOCKET_T " %s%s], timeouts=%zu", |
1109 | 0 | caller, ps->sockets[0], |
1110 | 0 | (ps->actions[0] & CURL_POLL_IN) ? "IN" : "", |
1111 | 0 | (ps->actions[0] & CURL_POLL_OUT) ? "OUT" : "", |
1112 | 0 | Curl_llist_count(&data->state.timeoutlist)); |
1113 | 0 | break; |
1114 | 0 | case 2: |
1115 | 0 | CURL_TRC_M(data, "%s pollset[fd=%" FMT_SOCKET_T " %s%s, " |
1116 | 0 | "fd=%" FMT_SOCKET_T " %s%s], timeouts=%zu", |
1117 | 0 | caller, ps->sockets[0], |
1118 | 0 | (ps->actions[0] & CURL_POLL_IN) ? "IN" : "", |
1119 | 0 | (ps->actions[0] & CURL_POLL_OUT) ? "OUT" : "", |
1120 | 0 | ps->sockets[1], |
1121 | 0 | (ps->actions[1] & CURL_POLL_IN) ? "IN" : "", |
1122 | 0 | (ps->actions[1] & CURL_POLL_OUT) ? "OUT" : "", |
1123 | 0 | Curl_llist_count(&data->state.timeoutlist)); |
1124 | 0 | break; |
1125 | 0 | default: |
1126 | 0 | CURL_TRC_M(data, "%s pollset[fds=%u], timeouts=%zu", |
1127 | 0 | caller, ps->num, Curl_llist_count(&data->state.timeoutlist)); |
1128 | 0 | break; |
1129 | 0 | } |
1130 | 0 | if(expect_sockets && !ps->num && data->multi && |
1131 | 0 | !Curl_uint_bset_contains(&data->multi->dirty, data->mid) && |
1132 | 0 | !Curl_llist_count(&data->state.timeoutlist) && |
1133 | 0 | !Curl_cwriter_is_paused(data) && !Curl_creader_is_paused(data) && |
1134 | 0 | Curl_conn_is_ip_connected(data, FIRSTSOCKET)) { |
1135 | | /* We expected sockets for POLL monitoring, but none are set. |
1136 | | * We are not dirty (which would make us run anyway). |
1137 | | * We are not waiting on any timer. |
1138 | | * None of the READ/WRITE directions are paused. |
1139 | | * We are connected to the server on IP level, at least. */ |
1140 | 0 | infof(data, "WARNING: no socket in pollset or timer, transfer may stall!"); |
1141 | 0 | DEBUGASSERT(0); |
1142 | 0 | } |
1143 | 0 | } |
1144 | | |
1145 | | CURLMcode curl_multi_fdset(CURLM *m, |
1146 | | fd_set *read_fd_set, fd_set *write_fd_set, |
1147 | | fd_set *exc_fd_set, int *max_fd) |
1148 | 0 | { |
1149 | | /* Scan through all the easy handles to get the file descriptors set. |
1150 | | Some easy handles may not have connected to the remote host yet, |
1151 | | and then we must make sure that is done. */ |
1152 | 0 | int this_max_fd = -1; |
1153 | 0 | struct Curl_multi *multi = m; |
1154 | 0 | unsigned int i, mid; |
1155 | 0 | (void)exc_fd_set; /* not used */ |
1156 | |
|
1157 | 0 | if(!GOOD_MULTI_HANDLE(multi)) |
1158 | 0 | return CURLM_BAD_HANDLE; |
1159 | | |
1160 | 0 | if(multi->in_callback) |
1161 | 0 | return CURLM_RECURSIVE_API_CALL; |
1162 | | |
1163 | 0 | if(Curl_uint_bset_first(&multi->process, &mid)) { |
1164 | 0 | do { |
1165 | 0 | struct Curl_easy *data = Curl_multi_get_easy(multi, mid); |
1166 | 0 | struct easy_pollset ps; |
1167 | |
|
1168 | 0 | if(!data) { |
1169 | 0 | DEBUGASSERT(0); |
1170 | 0 | continue; |
1171 | 0 | } |
1172 | | |
1173 | 0 | Curl_multi_getsock(data, &ps, "curl_multi_fdset"); |
1174 | 0 | for(i = 0; i < ps.num; i++) { |
1175 | 0 | if(!FDSET_SOCK(ps.sockets[i])) |
1176 | | /* pretend it does not exist */ |
1177 | 0 | continue; |
1178 | | #ifdef __DJGPP__ |
1179 | | #pragma GCC diagnostic push |
1180 | | #pragma GCC diagnostic ignored "-Warith-conversion" |
1181 | | #endif |
1182 | 0 | if(ps.actions[i] & CURL_POLL_IN) |
1183 | 0 | FD_SET(ps.sockets[i], read_fd_set); |
1184 | 0 | if(ps.actions[i] & CURL_POLL_OUT) |
1185 | 0 | FD_SET(ps.sockets[i], write_fd_set); |
1186 | | #ifdef __DJGPP__ |
1187 | | #pragma GCC diagnostic pop |
1188 | | #endif |
1189 | 0 | if((int)ps.sockets[i] > this_max_fd) |
1190 | 0 | this_max_fd = (int)ps.sockets[i]; |
1191 | 0 | } |
1192 | 0 | } |
1193 | 0 | while(Curl_uint_bset_next(&multi->process, mid, &mid)); |
1194 | 0 | } |
1195 | | |
1196 | 0 | Curl_cshutdn_setfds(&multi->cshutdn, multi->admin, |
1197 | 0 | read_fd_set, write_fd_set, &this_max_fd); |
1198 | |
|
1199 | 0 | *max_fd = this_max_fd; |
1200 | |
|
1201 | 0 | return CURLM_OK; |
1202 | 0 | } |
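/* A simplified application-side sketch (illustrative, not part of libcurl)
 * of how the fd_sets filled above are commonly consumed with select():
 *
 *   fd_set r, w, e;
 *   int maxfd = -1, running = 0;
 *   struct timeval tv = {1, 0};
 *   FD_ZERO(&r); FD_ZERO(&w); FD_ZERO(&e);
 *   curl_multi_fdset(m, &r, &w, &e, &maxfd);
 *   if(maxfd >= 0)
 *     select(maxfd + 1, &r, &w, &e, &tv);
 *   curl_multi_perform(m, &running);
 */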
1203 | | |
1204 | | CURLMcode curl_multi_waitfds(CURLM *m, |
1205 | | struct curl_waitfd *ufds, |
1206 | | unsigned int size, |
1207 | | unsigned int *fd_count) |
1208 | 0 | { |
1209 | 0 | struct Curl_waitfds cwfds; |
1210 | 0 | CURLMcode result = CURLM_OK; |
1211 | 0 | struct Curl_multi *multi = m; |
1212 | 0 | unsigned int need = 0, mid; |
1213 | |
|
1214 | 0 | if(!ufds && (size || !fd_count)) |
1215 | 0 | return CURLM_BAD_FUNCTION_ARGUMENT; |
1216 | | |
1217 | 0 | if(!GOOD_MULTI_HANDLE(multi)) |
1218 | 0 | return CURLM_BAD_HANDLE; |
1219 | | |
1220 | 0 | if(multi->in_callback) |
1221 | 0 | return CURLM_RECURSIVE_API_CALL; |
1222 | | |
1223 | 0 | Curl_waitfds_init(&cwfds, ufds, size); |
1224 | 0 | if(Curl_uint_bset_first(&multi->process, &mid)) { |
1225 | 0 | do { |
1226 | 0 | struct Curl_easy *data = Curl_multi_get_easy(multi, mid); |
1227 | 0 | struct easy_pollset ps; |
1228 | 0 | if(!data) { |
1229 | 0 | DEBUGASSERT(0); |
1230 | 0 | Curl_uint_bset_remove(&multi->process, mid); |
1231 | 0 | Curl_uint_bset_remove(&multi->dirty, mid); |
1232 | 0 | continue; |
1233 | 0 | } |
1234 | 0 | Curl_multi_getsock(data, &ps, "curl_multi_waitfds"); |
1235 | 0 | need += Curl_waitfds_add_ps(&cwfds, &ps); |
1236 | 0 | } |
1237 | 0 | while(Curl_uint_bset_next(&multi->process, mid, &mid)); |
1238 | 0 | } |
1239 | | |
1240 | 0 | need += Curl_cshutdn_add_waitfds(&multi->cshutdn, multi->admin, &cwfds); |
1241 | |
|
1242 | 0 | if(need != cwfds.n && ufds) { |
1243 | 0 | result = CURLM_OUT_OF_MEMORY; |
1244 | 0 | } |
1245 | |
|
1246 | 0 | if(fd_count) |
1247 | 0 | *fd_count = need; |
1248 | 0 | return result; |
1249 | 0 | } |
1250 | | |
1251 | | #ifdef USE_WINSOCK |
1252 | | /* Reset FD_WRITE for TCP sockets. Nothing is actually sent. UDP sockets cannot |
1253 | | * be reset this way because an empty datagram would be sent. #9203 |
1254 | | * |
1255 | | * "On Windows the internal state of FD_WRITE as returned from |
1256 | | * WSAEnumNetworkEvents is only reset after successful send()." |
1257 | | */ |
1258 | | static void reset_socket_fdwrite(curl_socket_t s) |
1259 | | { |
1260 | | int t; |
1261 | | int l = (int)sizeof(t); |
1262 | | if(!getsockopt(s, SOL_SOCKET, SO_TYPE, (char *)&t, &l) && t == SOCK_STREAM) |
1263 | | send(s, NULL, 0, 0); |
1264 | | } |
1265 | | #endif |
1266 | | |
1267 | 0 | #define NUM_POLLS_ON_STACK 10 |
1268 | | |
1269 | | static CURLMcode multi_wait(struct Curl_multi *multi, |
1270 | | struct curl_waitfd extra_fds[], |
1271 | | unsigned int extra_nfds, |
1272 | | int timeout_ms, |
1273 | | int *ret, |
1274 | | bool extrawait, /* when no socket, wait */ |
1275 | | bool use_wakeup) |
1276 | 0 | { |
1277 | 0 | size_t i; |
1278 | 0 | struct curltime expire_time; |
1279 | 0 | long timeout_internal; |
1280 | 0 | int retcode = 0; |
1281 | 0 | struct pollfd a_few_on_stack[NUM_POLLS_ON_STACK]; |
1282 | 0 | struct curl_pollfds cpfds; |
1283 | 0 | unsigned int curl_nfds = 0; /* how many pfds are for curl transfers */ |
1284 | 0 | struct Curl_easy *data = NULL; |
1285 | 0 | CURLMcode result = CURLM_OK; |
1286 | 0 | unsigned int mid; |
1287 | |
|
1288 | | #ifdef USE_WINSOCK |
1289 | | WSANETWORKEVENTS wsa_events; |
1290 | | DEBUGASSERT(multi->wsa_event != WSA_INVALID_EVENT); |
1291 | | #endif |
1292 | | #ifndef ENABLE_WAKEUP |
1293 | | (void)use_wakeup; |
1294 | | #endif |
1295 | |
|
1296 | 0 | if(!GOOD_MULTI_HANDLE(multi)) |
1297 | 0 | return CURLM_BAD_HANDLE; |
1298 | | |
1299 | 0 | if(multi->in_callback) |
1300 | 0 | return CURLM_RECURSIVE_API_CALL; |
1301 | | |
1302 | 0 | if(timeout_ms < 0) |
1303 | 0 | return CURLM_BAD_FUNCTION_ARGUMENT; |
1304 | | |
1305 | 0 | Curl_pollfds_init(&cpfds, a_few_on_stack, NUM_POLLS_ON_STACK); |
1306 | | |
1307 | | /* Add the curl handles to our pollfds first */ |
1308 | 0 | if(Curl_uint_bset_first(&multi->process, &mid)) { |
1309 | 0 | do { |
1310 | 0 | struct easy_pollset ps; |
1311 | 0 | data = Curl_multi_get_easy(multi, mid); |
1312 | 0 | if(!data) { |
1313 | 0 | DEBUGASSERT(0); |
1314 | 0 | Curl_uint_bset_remove(&multi->process, mid); |
1315 | 0 | Curl_uint_bset_remove(&multi->dirty, mid); |
1316 | 0 | continue; |
1317 | 0 | } |
1318 | 0 | Curl_multi_getsock(data, &ps, "multi_wait"); |
1319 | 0 | if(Curl_pollfds_add_ps(&cpfds, &ps)) { |
1320 | 0 | result = CURLM_OUT_OF_MEMORY; |
1321 | 0 | goto out; |
1322 | 0 | } |
1323 | 0 | } |
1324 | 0 | while(Curl_uint_bset_next(&multi->process, mid, &mid)); |
1325 | 0 | } |
1326 | | |
1327 | 0 | if(Curl_cshutdn_add_pollfds(&multi->cshutdn, multi->admin, &cpfds)) { |
1328 | 0 | result = CURLM_OUT_OF_MEMORY; |
1329 | 0 | goto out; |
1330 | 0 | } |
1331 | | |
1332 | 0 | curl_nfds = cpfds.n; /* what curl internally uses in cpfds */ |
1333 | | /* Add external file descriptors from the poll-like struct curl_waitfd */ |
1334 | 0 | for(i = 0; i < extra_nfds; i++) { |
1335 | 0 | unsigned short events = 0; |
1336 | 0 | if(extra_fds[i].events & CURL_WAIT_POLLIN) |
1337 | 0 | events |= POLLIN; |
1338 | 0 | if(extra_fds[i].events & CURL_WAIT_POLLPRI) |
1339 | 0 | events |= POLLPRI; |
1340 | 0 | if(extra_fds[i].events & CURL_WAIT_POLLOUT) |
1341 | 0 | events |= POLLOUT; |
1342 | 0 | if(Curl_pollfds_add_sock(&cpfds, extra_fds[i].fd, events)) { |
1343 | 0 | result = CURLM_OUT_OF_MEMORY; |
1344 | 0 | goto out; |
1345 | 0 | } |
1346 | 0 | } |
1347 | | |
1348 | | #ifdef USE_WINSOCK |
1349 | | /* Set the WSA events based on the collected pollfds */ |
1350 | | for(i = 0; i < cpfds.n; i++) { |
1351 | | long mask = 0; |
1352 | | if(cpfds.pfds[i].events & POLLIN) |
1353 | | mask |= FD_READ|FD_ACCEPT|FD_CLOSE; |
1354 | | if(cpfds.pfds[i].events & POLLPRI) |
1355 | | mask |= FD_OOB; |
1356 | | if(cpfds.pfds[i].events & POLLOUT) { |
1357 | | mask |= FD_WRITE|FD_CONNECT|FD_CLOSE; |
1358 | | reset_socket_fdwrite(cpfds.pfds[i].fd); |
1359 | | } |
1360 | | if(mask) { |
1361 | | if(WSAEventSelect(cpfds.pfds[i].fd, multi->wsa_event, mask) != 0) { |
1362 | | result = CURLM_OUT_OF_MEMORY; |
1363 | | goto out; |
1364 | | } |
1365 | | } |
1366 | | } |
1367 | | #endif |
1368 | | |
1369 | 0 | #ifdef ENABLE_WAKEUP |
1370 | 0 | #ifndef USE_WINSOCK |
1371 | 0 | if(use_wakeup && multi->wakeup_pair[0] != CURL_SOCKET_BAD) { |
1372 | 0 | if(Curl_pollfds_add_sock(&cpfds, multi->wakeup_pair[0], POLLIN)) { |
1373 | 0 | result = CURLM_OUT_OF_MEMORY; |
1374 | 0 | goto out; |
1375 | 0 | } |
1376 | 0 | } |
1377 | 0 | #endif |
1378 | 0 | #endif |
1379 | | |
1380 | | /* We check the internal timeout *AFTER* we collected all sockets to |
1381 | | * poll. Collecting the sockets may make protocols and connection |
1382 | | * filters install new timers. |
1383 | | * Use the shorter one of the internal and the caller requested timeout. */ |
1384 | 0 | (void)multi_timeout(multi, &expire_time, &timeout_internal); |
1385 | 0 | if((timeout_internal >= 0) && (timeout_internal < (long)timeout_ms)) |
1386 | 0 | timeout_ms = (int)timeout_internal; |
1387 | |
|
1388 | 0 | if(data) |
1389 | 0 | CURL_TRC_M(data, "multi_wait(fds=%d, timeout=%d) tinternal=%ld", |
1390 | 0 | cpfds.n, timeout_ms, timeout_internal); |
1391 | | #if defined(ENABLE_WAKEUP) && defined(USE_WINSOCK) |
1392 | | if(cpfds.n || use_wakeup) { |
1393 | | #else |
1394 | 0 | if(cpfds.n) { |
1395 | 0 | #endif |
1396 | 0 | int pollrc; |
1397 | | #ifdef USE_WINSOCK |
1398 | | if(cpfds.n) /* just pre-check with Winsock */ |
1399 | | pollrc = Curl_poll(cpfds.pfds, cpfds.n, 0); |
1400 | | else |
1401 | | pollrc = 0; |
1402 | | #else |
1403 | 0 | pollrc = Curl_poll(cpfds.pfds, cpfds.n, timeout_ms); /* wait... */ |
1404 | 0 | #endif |
1405 | 0 | if(pollrc < 0) { |
1406 | 0 | result = CURLM_UNRECOVERABLE_POLL; |
1407 | 0 | goto out; |
1408 | 0 | } |
1409 | | |
1410 | 0 | if(pollrc > 0) { |
1411 | 0 | retcode = pollrc; |
1412 | | #ifdef USE_WINSOCK |
1413 | | } |
1414 | | else { /* now wait... if not ready during the pre-check (pollrc == 0) */ |
1415 | | WSAWaitForMultipleEvents(1, &multi->wsa_event, FALSE, (DWORD)timeout_ms, |
1416 | | FALSE); |
1417 | | } |
1418 | | /* With Winsock, we have to run the following section unconditionally |
1419 | | to call WSAEventSelect(fd, event, 0) on all the sockets */ |
1420 | | { |
1421 | | #endif |
1422 | | /* copy revents results from the poll to the curl_multi_wait poll |
1423 | | struct; the bit values of the actual underlying poll() implementation |
1424 | | may not be the same as the ones in the public libcurl API! */ |
1425 | 0 | for(i = 0; i < extra_nfds; i++) { |
1426 | 0 | unsigned r = (unsigned)cpfds.pfds[curl_nfds + i].revents; |
1427 | 0 | unsigned short mask = 0; |
1428 | | #ifdef USE_WINSOCK |
1429 | | curl_socket_t s = extra_fds[i].fd; |
1430 | | wsa_events.lNetworkEvents = 0; |
1431 | | if(WSAEnumNetworkEvents(s, NULL, &wsa_events) == 0) { |
1432 | | if(wsa_events.lNetworkEvents & (FD_READ|FD_ACCEPT|FD_CLOSE)) |
1433 | | mask |= CURL_WAIT_POLLIN; |
1434 | | if(wsa_events.lNetworkEvents & (FD_WRITE|FD_CONNECT|FD_CLOSE)) |
1435 | | mask |= CURL_WAIT_POLLOUT; |
1436 | | if(wsa_events.lNetworkEvents & FD_OOB) |
1437 | | mask |= CURL_WAIT_POLLPRI; |
1438 | | if(ret && !pollrc && wsa_events.lNetworkEvents) |
1439 | | retcode++; |
1440 | | } |
1441 | | WSAEventSelect(s, multi->wsa_event, 0); |
1442 | | if(!pollrc) { |
1443 | | extra_fds[i].revents = (short)mask; |
1444 | | continue; |
1445 | | } |
1446 | | #endif |
1447 | 0 | if(r & POLLIN) |
1448 | 0 | mask |= CURL_WAIT_POLLIN; |
1449 | 0 | if(r & POLLOUT) |
1450 | 0 | mask |= CURL_WAIT_POLLOUT; |
1451 | 0 | if(r & POLLPRI) |
1452 | 0 | mask |= CURL_WAIT_POLLPRI; |
1453 | 0 | extra_fds[i].revents = (short)mask; |
1454 | 0 | } |
1455 | |
|
1456 | | #ifdef USE_WINSOCK |
1457 | | /* Count up all our own sockets that had activity, |
1458 | | and remove them from the event. */ |
1459 | | for(i = 0; i < curl_nfds; ++i) { |
1460 | | wsa_events.lNetworkEvents = 0; |
1461 | | if(WSAEnumNetworkEvents(cpfds.pfds[i].fd, NULL, &wsa_events) == 0) { |
1462 | | if(ret && !pollrc && wsa_events.lNetworkEvents) |
1463 | | retcode++; |
1464 | | } |
1465 | | WSAEventSelect(cpfds.pfds[i].fd, multi->wsa_event, 0); |
1466 | | } |
1467 | | WSAResetEvent(multi->wsa_event); |
1468 | | #else |
1469 | 0 | #ifdef ENABLE_WAKEUP |
1470 | 0 | if(use_wakeup && multi->wakeup_pair[0] != CURL_SOCKET_BAD) { |
1471 | 0 | if(cpfds.pfds[curl_nfds + extra_nfds].revents & POLLIN) { |
1472 | 0 | char buf[64]; |
1473 | 0 | ssize_t nread; |
1474 | 0 | while(1) { |
1475 | | /* the reading socket is non-blocking; try to read |
1476 | | data from it until it receives an error (except EINTR). |
1477 | | In normal cases it will get EAGAIN or EWOULDBLOCK |
1478 | | when there is no more data, breaking the loop. */ |
1479 | 0 | nread = wakeup_read(multi->wakeup_pair[0], buf, sizeof(buf)); |
1480 | 0 | if(nread <= 0) { |
1481 | 0 | if(nread < 0 && SOCKEINTR == SOCKERRNO) |
1482 | 0 | continue; |
1483 | 0 | break; |
1484 | 0 | } |
1485 | 0 | } |
1486 | | /* do not count the wakeup socket into the returned value */ |
1487 | 0 | retcode--; |
1488 | 0 | } |
1489 | 0 | } |
1490 | 0 | #endif |
1491 | 0 | #endif |
1492 | 0 | } |
1493 | 0 | } |
1494 | | |
1495 | 0 | if(ret) |
1496 | 0 | *ret = retcode; |
1497 | | #if defined(ENABLE_WAKEUP) && defined(USE_WINSOCK) |
1498 | | if(extrawait && !cpfds.n && !use_wakeup) { |
1499 | | #else |
1500 | 0 | if(extrawait && !cpfds.n) { |
1501 | 0 | #endif |
1502 | 0 | long sleep_ms = 0; |
1503 | | |
1504 | | /* Avoid busy-looping when there is nothing particular to wait for */ |
1505 | 0 | if(!curl_multi_timeout(multi, &sleep_ms) && sleep_ms) { |
1506 | 0 | if(sleep_ms > timeout_ms) |
1507 | 0 | sleep_ms = timeout_ms; |
1508 | | /* when there are no easy handles in the multi, this holds a -1 |
1509 | | timeout */ |
1510 | 0 | else if(sleep_ms < 0) |
1511 | 0 | sleep_ms = timeout_ms; |
1512 | 0 | curlx_wait_ms(sleep_ms); |
1513 | 0 | } |
1514 | 0 | } |
1515 | |
|
1516 | 0 | out: |
1517 | 0 | Curl_pollfds_cleanup(&cpfds); |
1518 | 0 | return result; |
1519 | 0 | } |
1520 | | |
1521 | | CURLMcode curl_multi_wait(CURLM *multi, |
1522 | | struct curl_waitfd extra_fds[], |
1523 | | unsigned int extra_nfds, |
1524 | | int timeout_ms, |
1525 | | int *ret) |
1526 | 0 | { |
1527 | 0 | return multi_wait(multi, extra_fds, extra_nfds, timeout_ms, ret, FALSE, |
1528 | 0 | FALSE); |
1529 | 0 | } |
1530 | | |
1531 | | CURLMcode curl_multi_poll(CURLM *multi, |
1532 | | struct curl_waitfd extra_fds[], |
1533 | | unsigned int extra_nfds, |
1534 | | int timeout_ms, |
1535 | | int *ret) |
1536 | 0 | { |
1537 | 0 | return multi_wait(multi, extra_fds, extra_nfds, timeout_ms, ret, TRUE, |
1538 | 0 | TRUE); |
1539 | 0 | } |
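
/* Illustrative sketch (not part of multi.c): waiting on libcurl's sockets
   plus one application-provided socket via curl_multi_poll(). The helper
   name, the 1000 ms timeout and 'app_fd' are assumptions for illustration. */
#if 0
#include <curl/curl.h>

static CURLMcode example_wait_with_extra_fd(CURLM *multi, curl_socket_t app_fd)
{
  struct curl_waitfd extra[1];
  int numfds = 0;
  CURLMcode mc;

  extra[0].fd = app_fd;
  extra[0].events = CURL_WAIT_POLLIN; /* also watch our own socket for input */
  extra[0].revents = 0;

  /* waits up to 1000 ms; returns early on socket activity, an internal
     timeout or a curl_multi_wakeup() call */
  mc = curl_multi_poll(multi, extra, 1, 1000, &numfds);
  if(!mc && (extra[0].revents & CURL_WAIT_POLLIN)) {
    /* the application socket is readable; handle it here */
  }
  return mc;
}
#endif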
1540 | | |
1541 | | CURLMcode curl_multi_wakeup(CURLM *m) |
1542 | 0 | { |
1543 | | /* this function is usually called from another thread, so |
1544 | | it has to be careful only to access parts of the |
1545 | | Curl_multi struct that are constant */ |
1546 | 0 | struct Curl_multi *multi = m; |
1547 | | |
1548 | | /* GOOD_MULTI_HANDLE can be safely called */ |
1549 | 0 | if(!GOOD_MULTI_HANDLE(multi)) |
1550 | 0 | return CURLM_BAD_HANDLE; |
1551 | | |
1552 | 0 | #ifdef ENABLE_WAKEUP |
1553 | | #ifdef USE_WINSOCK |
1554 | | if(WSASetEvent(multi->wsa_event)) |
1555 | | return CURLM_OK; |
1556 | | #else |
1557 | | /* the wakeup_pair variable is only written during init and cleanup, |
1558 | | making it safe to access from another thread after the init part |
1559 | | and before cleanup */ |
1560 | 0 | if(multi->wakeup_pair[1] != CURL_SOCKET_BAD) { |
1561 | 0 | while(1) { |
1562 | 0 | #ifdef USE_EVENTFD |
1563 | | /* eventfd strictly requires an 8-byte buffer when |
1564 | | calling write(2) on it */ |
1565 | 0 | const uint64_t buf[1] = { 1 }; |
1566 | | #else |
1567 | | const char buf[1] = { 1 }; |
1568 | | #endif |
1569 | | /* swrite() is not thread-safe in general, because concurrent calls |
1570 | | can have their messages interleaved, but in this case the content |
1571 | | of the messages does not matter, which makes it ok to call. |
1572 | | |
1573 | | The write socket is set to non-blocking, this way this function |
1574 | | cannot block, making it safe to call even from the same thread |
1575 | | that will call curl_multi_wait(). If swrite() returns that it |
1576 | | would block, it is considered successful because it means that |
1577 | | previous calls to this function will wake up the poll(). */ |
1578 | 0 | if(wakeup_write(multi->wakeup_pair[1], buf, sizeof(buf)) < 0) { |
1579 | 0 | int err = SOCKERRNO; |
1580 | 0 | int return_success; |
1581 | | #ifdef USE_WINSOCK |
1582 | | return_success = SOCKEWOULDBLOCK == err; |
1583 | | #else |
1584 | 0 | if(SOCKEINTR == err) |
1585 | 0 | continue; |
1586 | 0 | return_success = SOCKEWOULDBLOCK == err || EAGAIN == err; |
1587 | 0 | #endif |
1588 | 0 | if(!return_success) |
1589 | 0 | return CURLM_WAKEUP_FAILURE; |
1590 | 0 | } |
1591 | 0 | return CURLM_OK; |
1592 | 0 | } |
1593 | 0 | } |
1594 | 0 | #endif |
1595 | 0 | #endif |
1596 | 0 | return CURLM_WAKEUP_FAILURE; |
1597 | 0 | } |
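
/* Illustrative sketch (not part of multi.c): breaking a blocking
   curl_multi_poll() from a different thread. The helper name is an
   assumption for illustration. */
#if 0
#include <curl/curl.h>

static void example_signal_poller(CURLM *multi)
{
  /* safe to call from another thread; repeated calls before the poller
     wakes up are effectively coalesced into a single wakeup */
  if(curl_multi_wakeup(multi) != CURLM_OK) {
    /* CURLM_WAKEUP_FAILURE: the wakeup machinery is unavailable or the
       internal write failed */
  }
}
#endif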
1598 | | |
1599 | | /* |
1600 | | * multi_ischanged() is called |
1601 | | * |
1602 | | * Returns TRUE/FALSE depending on whether the state changed, which triggers |
1603 | | * a CONNECT_PEND => CONNECT action. |
1604 | | * |
1605 | | * Set 'clear' to TRUE to have it also clear the state variable. |
1606 | | */ |
1607 | | static bool multi_ischanged(struct Curl_multi *multi, bool clear) |
1608 | 0 | { |
1609 | 0 | bool retval = multi->recheckstate; |
1610 | 0 | if(clear) |
1611 | 0 | multi->recheckstate = FALSE; |
1612 | 0 | return retval; |
1613 | 0 | } |
1614 | | |
1615 | | /* |
1616 | | * Curl_multi_connchanged() is called to signal that there is a connection in |
1617 | | * this multi handle that has changed state (multiplexing became possible, the |
1618 | | * number of allowed streams changed or similar), and a subsequent use of this |
1619 | | * multi handle should move CONNECT_PEND handles back to CONNECT to have them |
1620 | | * retry. |
1621 | | */ |
1622 | | void Curl_multi_connchanged(struct Curl_multi *multi) |
1623 | 0 | { |
1624 | 0 | multi->recheckstate = TRUE; |
1625 | 0 | } |
1626 | | |
1627 | | CURLMcode Curl_multi_add_perform(struct Curl_multi *multi, |
1628 | | struct Curl_easy *data, |
1629 | | struct connectdata *conn) |
1630 | 0 | { |
1631 | 0 | CURLMcode rc; |
1632 | |
|
1633 | 0 | if(multi->in_callback) |
1634 | 0 | return CURLM_RECURSIVE_API_CALL; |
1635 | | |
1636 | 0 | rc = curl_multi_add_handle(multi, data); |
1637 | 0 | if(!rc) { |
1638 | 0 | struct SingleRequest *k = &data->req; |
1639 | 0 | CURLcode result; |
1640 | | |
1641 | | /* pass in NULL for 'conn' here since we do not want to init the |
1642 | | connection, only this transfer */ |
1643 | 0 | result = Curl_init_do(data, NULL); |
1644 | 0 | if(result) { |
1645 | 0 | curl_multi_remove_handle(multi, data); |
1646 | 0 | return CURLM_INTERNAL_ERROR; |
1647 | 0 | } |
1648 | | |
1649 | | /* take this handle to the perform state right away */ |
1650 | 0 | multistate(data, MSTATE_PERFORMING); |
1651 | 0 | Curl_attach_connection(data, conn); |
1652 | 0 | k->keepon |= KEEP_RECV; /* setup to receive! */ |
1653 | 0 | } |
1654 | 0 | return rc; |
1655 | 0 | } |
1656 | | |
1657 | | static CURLcode multi_do(struct Curl_easy *data, bool *done) |
1658 | 0 | { |
1659 | 0 | CURLcode result = CURLE_OK; |
1660 | 0 | struct connectdata *conn = data->conn; |
1661 | |
|
1662 | 0 | DEBUGASSERT(conn); |
1663 | 0 | DEBUGASSERT(conn->handler); |
1664 | |
|
1665 | 0 | if(conn->handler->do_it) |
1666 | 0 | result = conn->handler->do_it(data, done); |
1667 | |
|
1668 | 0 | return result; |
1669 | 0 | } |
1670 | | |
1671 | | /* |
1672 | | * multi_do_more() is called during the DO_MORE multi state. It is basically a |
1673 | | * second stage DO state which (wrongly) was introduced to support FTP's |
1674 | | * second connection. |
1675 | | * |
1676 | | * 'complete' can return 0 for incomplete, 1 for done and -1 to go back to the |
1677 | | * DOING state because there is more work to do! |
1678 | | */ |
1679 | | |
1680 | | static CURLcode multi_do_more(struct Curl_easy *data, int *complete) |
1681 | 0 | { |
1682 | 0 | CURLcode result = CURLE_OK; |
1683 | 0 | struct connectdata *conn = data->conn; |
1684 | |
|
1685 | 0 | *complete = 0; |
1686 | |
|
1687 | 0 | if(conn->handler->do_more) |
1688 | 0 | result = conn->handler->do_more(data, complete); |
1689 | |
|
1690 | 0 | return result; |
1691 | 0 | } |
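
/* Illustrative sketch (not part of multi.c): what a protocol handler's
   do_more() callback might look like, showing the '*complete' contract
   described above. second_channel_ready() and needs_more_doing() are
   hypothetical helpers used only for illustration. */
#if 0
static CURLcode example_do_more(struct Curl_easy *data, int *complete)
{
  struct connectdata *conn = data->conn;

  *complete = 0;                    /* 0: second DO phase not finished yet */
  if(!second_channel_ready(conn))   /* hypothetical check */
    return CURLE_OK;                /* stay in DOING_MORE */

  if(needs_more_doing(conn))        /* hypothetical check */
    *complete = -1;                 /* -1: go back to the DOING state */
  else
    *complete = 1;                  /* 1: done, advance towards DID */
  return CURLE_OK;
}
#endif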
1692 | | |
1693 | | /* |
1694 | | * Check whether a timeout occurred, and handle it if it did |
1695 | | */ |
1696 | | static bool multi_handle_timeout(struct Curl_easy *data, |
1697 | | struct curltime *now, |
1698 | | bool *stream_error, |
1699 | | CURLcode *result) |
1700 | 0 | { |
1701 | 0 | bool connect_timeout = data->mstate < MSTATE_DO; |
1702 | 0 | timediff_t timeout_ms = Curl_timeleft(data, now, connect_timeout); |
1703 | 0 | if(timeout_ms < 0) { |
1704 | | /* Handle timed out */ |
1705 | 0 | struct curltime since; |
1706 | 0 | if(connect_timeout) |
1707 | 0 | since = data->progress.t_startsingle; |
1708 | 0 | else |
1709 | 0 | since = data->progress.t_startop; |
1710 | 0 | if(data->mstate == MSTATE_RESOLVING) |
1711 | 0 | failf(data, "Resolving timed out after %" FMT_TIMEDIFF_T |
1712 | 0 | " milliseconds", curlx_timediff(*now, since)); |
1713 | 0 | else if(data->mstate == MSTATE_CONNECTING) |
1714 | 0 | failf(data, "Connection timed out after %" FMT_TIMEDIFF_T |
1715 | 0 | " milliseconds", curlx_timediff(*now, since)); |
1716 | 0 | else { |
1717 | 0 | struct SingleRequest *k = &data->req; |
1718 | 0 | if(k->size != -1) { |
1719 | 0 | failf(data, "Operation timed out after %" FMT_TIMEDIFF_T |
1720 | 0 | " milliseconds with %" FMT_OFF_T " out of %" |
1721 | 0 | FMT_OFF_T " bytes received", |
1722 | 0 | curlx_timediff(*now, since), k->bytecount, k->size); |
1723 | 0 | } |
1724 | 0 | else { |
1725 | 0 | failf(data, "Operation timed out after %" FMT_TIMEDIFF_T |
1726 | 0 | " milliseconds with %" FMT_OFF_T " bytes received", |
1727 | 0 | curlx_timediff(*now, since), k->bytecount); |
1728 | 0 | } |
1729 | 0 | } |
1730 | 0 | *result = CURLE_OPERATION_TIMEDOUT; |
1731 | 0 | if(data->conn) { |
1732 | | /* Force connection closed if the connection has indeed been used */ |
1733 | 0 | if(data->mstate > MSTATE_DO) { |
1734 | 0 | streamclose(data->conn, "Disconnect due to timeout"); |
1735 | 0 | *stream_error = TRUE; |
1736 | 0 | } |
1737 | 0 | (void)multi_done(data, *result, TRUE); |
1738 | 0 | } |
1739 | 0 | return TRUE; |
1740 | 0 | } |
1741 | | |
1742 | 0 | return FALSE; |
1743 | 0 | } |
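
/* Illustrative sketch (not part of multi.c): the two application-visible
   timeouts that feed the Curl_timeleft() check above. The values are
   arbitrary. */
#if 0
#include <curl/curl.h>

static void example_set_timeouts(CURL *easy)
{
  /* limit the connect phase (multi states before DO) to 5 seconds */
  curl_easy_setopt(easy, CURLOPT_CONNECTTIMEOUT_MS, 5000L);
  /* limit the whole operation to 30 seconds; on expiry the transfer fails
     with CURLE_OPERATION_TIMEDOUT as produced above */
  curl_easy_setopt(easy, CURLOPT_TIMEOUT_MS, 30000L);
}
#endif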
1744 | | |
1745 | | /* |
1746 | | * We are doing protocol-specific connecting and this is being called over and |
1747 | | * over from the multi interface until the connection phase is done on |
1748 | | * the protocol layer. |
1749 | | */ |
1750 | | |
1751 | | static CURLcode protocol_connecting(struct Curl_easy *data, bool *done) |
1752 | 0 | { |
1753 | 0 | CURLcode result = CURLE_OK; |
1754 | 0 | struct connectdata *conn = data->conn; |
1755 | |
|
1756 | 0 | if(conn && conn->handler->connecting) { |
1757 | 0 | *done = FALSE; |
1758 | 0 | result = conn->handler->connecting(data, done); |
1759 | 0 | } |
1760 | 0 | else |
1761 | 0 | *done = TRUE; |
1762 | |
|
1763 | 0 | return result; |
1764 | 0 | } |
1765 | | |
1766 | | /* |
1767 | | * We are DOING; this is being called over and over from the multi interface |
1768 | | * until the DOING phase is done on the protocol layer. |
1769 | | */ |
1770 | | |
1771 | | static CURLcode protocol_doing(struct Curl_easy *data, bool *done) |
1772 | 0 | { |
1773 | 0 | CURLcode result = CURLE_OK; |
1774 | 0 | struct connectdata *conn = data->conn; |
1775 | |
|
1776 | 0 | if(conn && conn->handler->doing) { |
1777 | 0 | *done = FALSE; |
1778 | 0 | result = conn->handler->doing(data, done); |
1779 | 0 | } |
1780 | 0 | else |
1781 | 0 | *done = TRUE; |
1782 | |
|
1783 | 0 | return result; |
1784 | 0 | } |
1785 | | |
1786 | | /* |
1787 | | * We have discovered that the TCP connection has been successful, so we can |
1788 | | * now proceed with some action. |
1789 | | * |
1790 | | */ |
1791 | | static CURLcode protocol_connect(struct Curl_easy *data, |
1792 | | bool *protocol_done) |
1793 | 0 | { |
1794 | 0 | CURLcode result = CURLE_OK; |
1795 | 0 | struct connectdata *conn = data->conn; |
1796 | 0 | DEBUGASSERT(conn); |
1797 | 0 | DEBUGASSERT(protocol_done); |
1798 | |
|
1799 | 0 | *protocol_done = FALSE; |
1800 | |
|
1801 | 0 | if(Curl_conn_is_connected(conn, FIRSTSOCKET) |
1802 | 0 | && conn->bits.protoconnstart) { |
1803 | | /* We already are connected, get back. This may happen when the connect |
1804 | | worked fine in the first call, like when we connect to a local server |
1805 | | or proxy. Note that we do not know if the protocol is actually done. |
1806 | | |
1807 | | Unless this protocol does not have any protocol-connect callback, as |
1808 | | then we know we are done. */ |
1809 | 0 | if(!conn->handler->connecting) |
1810 | 0 | *protocol_done = TRUE; |
1811 | |
|
1812 | 0 | return CURLE_OK; |
1813 | 0 | } |
1814 | | |
1815 | 0 | if(!conn->bits.protoconnstart) { |
1816 | 0 | if(conn->handler->connect_it) { |
1817 | | /* is there a protocol-specific connect() procedure? */ |
1818 | | |
1819 | | /* Call the protocol-specific connect function */ |
1820 | 0 | result = conn->handler->connect_it(data, protocol_done); |
1821 | 0 | } |
1822 | 0 | else |
1823 | 0 | *protocol_done = TRUE; |
1824 | | |
1825 | | /* it has started, possibly even completed but that knowledge is not stored |
1826 | | in this bit! */ |
1827 | 0 | if(!result) |
1828 | 0 | conn->bits.protoconnstart = TRUE; |
1829 | 0 | } |
1830 | |
|
1831 | 0 | return result; /* pass back status */ |
1832 | 0 | } |
1833 | | |
1834 | | static void set_in_callback(struct Curl_multi *multi, bool value) |
1835 | 0 | { |
1836 | 0 | multi->in_callback = value; |
1837 | 0 | } |
1838 | | |
1839 | | /* |
1840 | | * posttransfer() is called immediately after a transfer ends |
1841 | | */ |
1842 | | static void multi_posttransfer(struct Curl_easy *data) |
1843 | 0 | { |
1844 | | #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL) |
1845 | | /* restore the signal handler for SIGPIPE before we get back */ |
1846 | | if(!data->set.no_signal) |
1847 | | signal(SIGPIPE, data->state.prev_signal); |
1848 | | #else |
1849 | 0 | (void)data; /* unused parameter */ |
1850 | 0 | #endif |
1851 | 0 | } |
1852 | | |
1853 | | /* |
1854 | | * multi_follow() handles the URL redirect magic. Pass in the 'newurl' string |
1855 | | * as given by the remote server and set up the new URL to request. |
1856 | | * |
1857 | | * This function DOES NOT FREE the given url. |
1858 | | */ |
1859 | | static CURLcode multi_follow(struct Curl_easy *data, |
1860 | | const struct Curl_handler *handler, |
1861 | | const char *newurl, /* the Location: string */ |
1862 | | followtype type) /* see transfer.h */ |
1863 | 0 | { |
1864 | 0 | if(handler && handler->follow) |
1865 | 0 | return handler->follow(data, newurl, type); |
1866 | 0 | return CURLE_TOO_MANY_REDIRECTS; |
1867 | 0 | } |
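
/* Illustrative sketch (not part of multi.c): the easy options that make the
   handler's follow() callback relevant for HTTP(S) transfers. The values are
   arbitrary. */
#if 0
#include <curl/curl.h>

static void example_enable_redirects(CURL *easy)
{
  curl_easy_setopt(easy, CURLOPT_FOLLOWLOCATION, 1L); /* act on Location: */
  curl_easy_setopt(easy, CURLOPT_MAXREDIRS, 10L);     /* cap the chain */
}
#endif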
1868 | | |
1869 | | static CURLMcode state_performing(struct Curl_easy *data, |
1870 | | struct curltime *nowp, |
1871 | | bool *stream_errorp, |
1872 | | CURLcode *resultp) |
1873 | 0 | { |
1874 | 0 | char *newurl = NULL; |
1875 | 0 | bool retry = FALSE; |
1876 | 0 | timediff_t recv_timeout_ms = 0; |
1877 | 0 | timediff_t send_timeout_ms = 0; |
1878 | 0 | CURLMcode rc = CURLM_OK; |
1879 | 0 | CURLcode result = *resultp = CURLE_OK; |
1880 | 0 | *stream_errorp = FALSE; |
1881 | | |
1882 | | /* check if over send speed */ |
1883 | 0 | if(data->set.max_send_speed) |
1884 | 0 | send_timeout_ms = Curl_pgrsLimitWaitTime(&data->progress.ul, |
1885 | 0 | data->set.max_send_speed, |
1886 | 0 | *nowp); |
1887 | | |
1888 | | /* check if over recv speed */ |
1889 | 0 | if(data->set.max_recv_speed) |
1890 | 0 | recv_timeout_ms = Curl_pgrsLimitWaitTime(&data->progress.dl, |
1891 | 0 | data->set.max_recv_speed, |
1892 | 0 | *nowp); |
1893 | |
|
1894 | 0 | if(send_timeout_ms || recv_timeout_ms) { |
1895 | 0 | Curl_ratelimit(data, *nowp); |
1896 | 0 | multistate(data, MSTATE_RATELIMITING); |
1897 | 0 | if(send_timeout_ms >= recv_timeout_ms) |
1898 | 0 | Curl_expire(data, send_timeout_ms, EXPIRE_TOOFAST); |
1899 | 0 | else |
1900 | 0 | Curl_expire(data, recv_timeout_ms, EXPIRE_TOOFAST); |
1901 | 0 | return CURLM_OK; |
1902 | 0 | } |
1903 | | |
1904 | | /* read/write data if it is ready to do so */ |
1905 | 0 | result = Curl_sendrecv(data, nowp); |
1906 | |
|
1907 | 0 | if(data->req.done || (result == CURLE_RECV_ERROR)) { |
1908 | | /* If CURLE_RECV_ERROR happens early enough, we assume it was a race |
1909 | | * condition and the server closed the reused connection exactly when we |
1910 | | * wanted to use it, so figure out if that is indeed the case. |
1911 | | */ |
1912 | 0 | CURLcode ret = Curl_retry_request(data, &newurl); |
1913 | 0 | if(!ret) |
1914 | 0 | retry = !!newurl; |
1915 | 0 | else if(!result) |
1916 | 0 | result = ret; |
1917 | |
|
1918 | 0 | if(retry) { |
1919 | | /* if we are to retry, set the result to OK and consider the |
1920 | | request as done */ |
1921 | 0 | result = CURLE_OK; |
1922 | 0 | data->req.done = TRUE; |
1923 | 0 | } |
1924 | 0 | } |
1925 | 0 | #ifndef CURL_DISABLE_HTTP |
1926 | 0 | else if((CURLE_HTTP2_STREAM == result) && |
1927 | 0 | Curl_h2_http_1_1_error(data)) { |
1928 | 0 | CURLcode ret = Curl_retry_request(data, &newurl); |
1929 | |
|
1930 | 0 | if(!ret) { |
1931 | 0 | infof(data, "Downgrades to HTTP/1.1"); |
1932 | 0 | streamclose(data->conn, "Disconnect HTTP/2 for HTTP/1"); |
1933 | 0 | data->state.http_neg.wanted = CURL_HTTP_V1x; |
1934 | 0 | data->state.http_neg.allowed = CURL_HTTP_V1x; |
1935 | | /* clear the error message bit too as we ignore the one we got */ |
1936 | 0 | data->state.errorbuf = FALSE; |
1937 | 0 | if(!newurl) |
1938 | | /* typically for HTTP_1_1_REQUIRED error on first flight */ |
1939 | 0 | newurl = strdup(data->state.url); |
1940 | | /* if we are to retry, set the result to OK and consider the request |
1941 | | as done */ |
1942 | 0 | retry = TRUE; |
1943 | 0 | result = CURLE_OK; |
1944 | 0 | data->req.done = TRUE; |
1945 | 0 | } |
1946 | 0 | else |
1947 | 0 | result = ret; |
1948 | 0 | } |
1949 | 0 | #endif |
1950 | |
|
1951 | 0 | if(result) { |
1952 | | /* |
1953 | | * The transfer phase returned error, we mark the connection to get closed |
1954 | | * to prevent being reused. This is because we cannot possibly know if the |
1955 | | * connection is in a good shape or not now. Unless it is a protocol which |
1956 | | * uses two "channels" like FTP, as then the error happened in the data |
1957 | | * connection. |
1958 | | */ |
1959 | |
|
1960 | 0 | if(!(data->conn->handler->flags & PROTOPT_DUAL) && |
1961 | 0 | result != CURLE_HTTP2_STREAM) |
1962 | 0 | streamclose(data->conn, "Transfer returned error"); |
1963 | |
|
1964 | 0 | multi_posttransfer(data); |
1965 | 0 | multi_done(data, result, TRUE); |
1966 | 0 | } |
1967 | 0 | else if(data->req.done && !Curl_cwriter_is_paused(data)) { |
1968 | 0 | const struct Curl_handler *handler = data->conn->handler; |
1969 | | |
1970 | | /* call this even if the readwrite function returned error */ |
1971 | 0 | multi_posttransfer(data); |
1972 | | |
1973 | | /* When we follow redirects or are set to retry the connection, we must |
1974 | | go back to the CONNECT state */ |
1975 | 0 | if(data->req.newurl || retry) { |
1976 | 0 | followtype follow = FOLLOW_NONE; |
1977 | 0 | if(!retry) { |
1978 | | /* if the URL is a follow-location and not just a retried request then |
1979 | | figure out the URL here */ |
1980 | 0 | free(newurl); |
1981 | 0 | newurl = data->req.newurl; |
1982 | 0 | data->req.newurl = NULL; |
1983 | 0 | follow = FOLLOW_REDIR; |
1984 | 0 | } |
1985 | 0 | else |
1986 | 0 | follow = FOLLOW_RETRY; |
1987 | 0 | (void)multi_done(data, CURLE_OK, FALSE); |
1988 | | /* multi_done() might return CURLE_GOT_NOTHING */ |
1989 | 0 | result = multi_follow(data, handler, newurl, follow); |
1990 | 0 | if(!result) { |
1991 | 0 | multistate(data, MSTATE_SETUP); |
1992 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
1993 | 0 | } |
1994 | 0 | } |
1995 | 0 | else { |
1996 | | /* after the transfer is done, go DONE */ |
1997 | | |
1998 | | /* but first check to see if we got a location info even though we are |
1999 | | not following redirects */ |
2000 | 0 | if(data->req.location) { |
2001 | 0 | free(newurl); |
2002 | 0 | newurl = data->req.location; |
2003 | 0 | data->req.location = NULL; |
2004 | 0 | result = multi_follow(data, handler, newurl, FOLLOW_FAKE); |
2005 | 0 | if(result) { |
2006 | 0 | *stream_errorp = TRUE; |
2007 | 0 | result = multi_done(data, result, TRUE); |
2008 | 0 | } |
2009 | 0 | } |
2010 | |
|
2011 | 0 | if(!result) { |
2012 | 0 | multistate(data, MSTATE_DONE); |
2013 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2014 | 0 | } |
2015 | 0 | } |
2016 | 0 | } |
2017 | 0 | free(newurl); |
2018 | 0 | *resultp = result; |
2019 | 0 | return rc; |
2020 | 0 | } |
2021 | | |
2022 | | static CURLMcode state_do(struct Curl_easy *data, |
2023 | | bool *stream_errorp, |
2024 | | CURLcode *resultp) |
2025 | 0 | { |
2026 | 0 | CURLMcode rc = CURLM_OK; |
2027 | 0 | CURLcode result = CURLE_OK; |
2028 | 0 | if(data->set.fprereq) { |
2029 | 0 | int prereq_rc; |
2030 | | |
2031 | | /* call the prerequest callback function */ |
2032 | 0 | Curl_set_in_callback(data, TRUE); |
2033 | 0 | prereq_rc = data->set.fprereq(data->set.prereq_userp, |
2034 | 0 | data->info.primary.remote_ip, |
2035 | 0 | data->info.primary.local_ip, |
2036 | 0 | data->info.primary.remote_port, |
2037 | 0 | data->info.primary.local_port); |
2038 | 0 | Curl_set_in_callback(data, FALSE); |
2039 | 0 | if(prereq_rc != CURL_PREREQFUNC_OK) { |
2040 | 0 | failf(data, "operation aborted by pre-request callback"); |
2041 | | /* failure in pre-request callback - do not do any other processing */ |
2042 | 0 | result = CURLE_ABORTED_BY_CALLBACK; |
2043 | 0 | multi_posttransfer(data); |
2044 | 0 | multi_done(data, result, FALSE); |
2045 | 0 | *stream_errorp = TRUE; |
2046 | 0 | goto end; |
2047 | 0 | } |
2048 | 0 | } |
2049 | | |
2050 | 0 | if(data->set.connect_only && !data->set.connect_only_ws) { |
2051 | | /* keep connection open for application to use the socket */ |
2052 | 0 | connkeep(data->conn, "CONNECT_ONLY"); |
2053 | 0 | multistate(data, MSTATE_DONE); |
2054 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2055 | 0 | } |
2056 | 0 | else { |
2057 | 0 | bool dophase_done = FALSE; |
2058 | | /* Perform the protocol's DO action */ |
2059 | 0 | result = multi_do(data, &dophase_done); |
2060 | | |
2061 | | /* When multi_do() returns failure, data->conn might be NULL! */ |
2062 | |
|
2063 | 0 | if(!result) { |
2064 | 0 | if(!dophase_done) { |
2065 | 0 | #ifndef CURL_DISABLE_FTP |
2066 | | /* some steps needed for wildcard matching */ |
2067 | 0 | if(data->state.wildcardmatch) { |
2068 | 0 | struct WildcardData *wc = data->wildcard; |
2069 | 0 | if(wc->state == CURLWC_DONE || wc->state == CURLWC_SKIP) { |
2070 | | /* skip some states if it is important */ |
2071 | 0 | multi_done(data, CURLE_OK, FALSE); |
2072 | | |
2073 | | /* if there is no connection left, skip the DONE state */ |
2074 | 0 | multistate(data, data->conn ? |
2075 | 0 | MSTATE_DONE : MSTATE_COMPLETED); |
2076 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2077 | 0 | goto end; |
2078 | 0 | } |
2079 | 0 | } |
2080 | 0 | #endif |
2081 | | /* DO was not completed in one function call, we must continue |
2082 | | DOING... */ |
2083 | 0 | multistate(data, MSTATE_DOING); |
2084 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2085 | 0 | } |
2086 | | |
2087 | | /* after DO, go DO_DONE... or DO_MORE */ |
2088 | 0 | else if(data->conn->bits.do_more) { |
2089 | | /* we are supposed to do more, but we need to sit down, relax and wait |
2090 | | a little while first */ |
2091 | 0 | multistate(data, MSTATE_DOING_MORE); |
2092 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2093 | 0 | } |
2094 | 0 | else { |
2095 | | /* we are done with the DO, now DID */ |
2096 | 0 | multistate(data, MSTATE_DID); |
2097 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2098 | 0 | } |
2099 | 0 | } |
2100 | 0 | else if((CURLE_SEND_ERROR == result) && |
2101 | 0 | data->conn->bits.reuse) { |
2102 | | /* |
2103 | | * In this situation, a connection that we were trying to use may have |
2104 | | * unexpectedly died. If possible, send the connection back to the |
2105 | | * CONNECT phase so we can try again. |
2106 | | */ |
2107 | 0 | const struct Curl_handler *handler = data->conn->handler; |
2108 | 0 | char *newurl = NULL; |
2109 | 0 | followtype follow = FOLLOW_NONE; |
2110 | 0 | CURLcode drc; |
2111 | |
|
2112 | 0 | drc = Curl_retry_request(data, &newurl); |
2113 | 0 | if(drc) { |
2114 | | /* a failure here pretty much implies an out of memory */ |
2115 | 0 | result = drc; |
2116 | 0 | *stream_errorp = TRUE; |
2117 | 0 | } |
2118 | |
|
2119 | 0 | multi_posttransfer(data); |
2120 | 0 | drc = multi_done(data, result, FALSE); |
2121 | | |
2122 | | /* When set to retry the connection, we must go back to the CONNECT |
2123 | | * state */ |
2124 | 0 | if(newurl) { |
2125 | 0 | if(!drc || (drc == CURLE_SEND_ERROR)) { |
2126 | 0 | follow = FOLLOW_RETRY; |
2127 | 0 | drc = multi_follow(data, handler, newurl, follow); |
2128 | 0 | if(!drc) { |
2129 | 0 | multistate(data, MSTATE_SETUP); |
2130 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2131 | 0 | result = CURLE_OK; |
2132 | 0 | } |
2133 | 0 | else { |
2134 | | /* Follow failed */ |
2135 | 0 | result = drc; |
2136 | 0 | } |
2137 | 0 | } |
2138 | 0 | else { |
2139 | | /* done did not return OK or SEND_ERROR */ |
2140 | 0 | result = drc; |
2141 | 0 | } |
2142 | 0 | } |
2143 | 0 | else { |
2144 | | /* Have error handler disconnect conn if we cannot retry */ |
2145 | 0 | *stream_errorp = TRUE; |
2146 | 0 | } |
2147 | 0 | free(newurl); |
2148 | 0 | } |
2149 | 0 | else { |
2150 | | /* failure detected */ |
2151 | 0 | multi_posttransfer(data); |
2152 | 0 | if(data->conn) |
2153 | 0 | multi_done(data, result, FALSE); |
2154 | 0 | *stream_errorp = TRUE; |
2155 | 0 | } |
2156 | 0 | } |
2157 | 0 | end: |
2158 | 0 | *resultp = result; |
2159 | 0 | return rc; |
2160 | 0 | } |
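
/* Illustrative sketch (not part of multi.c): a pre-request callback as
   invoked from state_do() above, letting the application veto the request
   once the connection is established. The function names are assumptions. */
#if 0
#include <curl/curl.h>
#include <stdio.h>

static int example_prereq(void *clientp, char *conn_primary_ip,
                          char *conn_local_ip, int conn_primary_port,
                          int conn_local_port)
{
  (void)clientp;
  (void)conn_local_ip;
  (void)conn_local_port;
  printf("about to issue the request to %s:%d\n", conn_primary_ip,
         conn_primary_port);
  /* returning CURL_PREREQFUNC_ABORT instead would end the transfer with
     CURLE_ABORTED_BY_CALLBACK, as handled above */
  return CURL_PREREQFUNC_OK;
}

static void example_install_prereq(CURL *easy)
{
  curl_easy_setopt(easy, CURLOPT_PREREQFUNCTION, example_prereq);
  curl_easy_setopt(easy, CURLOPT_PREREQDATA, NULL);
}
#endif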
2161 | | |
2162 | | static CURLMcode state_ratelimiting(struct Curl_easy *data, |
2163 | | struct curltime *nowp, |
2164 | | CURLcode *resultp) |
2165 | 0 | { |
2166 | 0 | CURLcode result = CURLE_OK; |
2167 | 0 | CURLMcode rc = CURLM_OK; |
2168 | 0 | DEBUGASSERT(data->conn); |
2169 | | /* if both rates are within spec, resume transfer */ |
2170 | 0 | if(Curl_pgrsUpdate(data)) |
2171 | 0 | result = CURLE_ABORTED_BY_CALLBACK; |
2172 | 0 | else |
2173 | 0 | result = Curl_speedcheck(data, *nowp); |
2174 | |
|
2175 | 0 | if(result) { |
2176 | 0 | if(!(data->conn->handler->flags & PROTOPT_DUAL) && |
2177 | 0 | result != CURLE_HTTP2_STREAM) |
2178 | 0 | streamclose(data->conn, "Transfer returned error"); |
2179 | |
|
2180 | 0 | multi_posttransfer(data); |
2181 | 0 | multi_done(data, result, TRUE); |
2182 | 0 | } |
2183 | 0 | else { |
2184 | 0 | timediff_t recv_timeout_ms = 0; |
2185 | 0 | timediff_t send_timeout_ms = 0; |
2186 | 0 | if(data->set.max_send_speed) |
2187 | 0 | send_timeout_ms = |
2188 | 0 | Curl_pgrsLimitWaitTime(&data->progress.ul, |
2189 | 0 | data->set.max_send_speed, |
2190 | 0 | *nowp); |
2191 | |
|
2192 | 0 | if(data->set.max_recv_speed) |
2193 | 0 | recv_timeout_ms = |
2194 | 0 | Curl_pgrsLimitWaitTime(&data->progress.dl, |
2195 | 0 | data->set.max_recv_speed, |
2196 | 0 | *nowp); |
2197 | |
|
2198 | 0 | if(!send_timeout_ms && !recv_timeout_ms) { |
2199 | 0 | multistate(data, MSTATE_PERFORMING); |
2200 | 0 | Curl_ratelimit(data, *nowp); |
2201 | | /* start performing again right away */ |
2202 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2203 | 0 | } |
2204 | 0 | else if(send_timeout_ms >= recv_timeout_ms) |
2205 | 0 | Curl_expire(data, send_timeout_ms, EXPIRE_TOOFAST); |
2206 | 0 | else |
2207 | 0 | Curl_expire(data, recv_timeout_ms, EXPIRE_TOOFAST); |
2208 | 0 | } |
2209 | 0 | *resultp = result; |
2210 | 0 | return rc; |
2211 | 0 | } |
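
/* Illustrative sketch (not part of multi.c): the speed caps that make a
   transfer enter the RATELIMITING state handled above. The values are
   arbitrary. */
#if 0
#include <curl/curl.h>

static void example_set_speed_caps(CURL *easy)
{
  curl_off_t recv_cap = (curl_off_t)100 * 1024; /* bytes per second */
  curl_off_t send_cap = (curl_off_t)50 * 1024;

  curl_easy_setopt(easy, CURLOPT_MAX_RECV_SPEED_LARGE, recv_cap);
  curl_easy_setopt(easy, CURLOPT_MAX_SEND_SPEED_LARGE, send_cap);
}
#endif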
2212 | | |
2213 | | static CURLMcode state_resolving(struct Curl_multi *multi, |
2214 | | struct Curl_easy *data, |
2215 | | bool *stream_errorp, |
2216 | | CURLcode *resultp) |
2217 | 0 | { |
2218 | 0 | struct Curl_dns_entry *dns = NULL; |
2219 | 0 | CURLcode result; |
2220 | 0 | CURLMcode rc = CURLM_OK; |
2221 | |
|
2222 | 0 | result = Curl_resolv_check(data, &dns); |
2223 | 0 | CURL_TRC_DNS(data, "Curl_resolv_check() -> %d, %s", |
2224 | 0 | result, dns ? "found" : "missing"); |
2225 | | /* Update sockets here, because the socket(s) may have been closed and the |
2226 | | application thus needs to be told, even if it is likely that the same |
2227 | | socket(s) will again be used further down. If the name has not yet been |
2228 | | resolved, it is likely that new sockets have been opened in an attempt to |
2229 | | contact another resolver. */ |
2230 | 0 | rc = Curl_multi_ev_assess_xfer(multi, data); |
2231 | 0 | if(rc) |
2232 | 0 | return rc; |
2233 | | |
2234 | 0 | if(dns) { |
2235 | 0 | bool connected; |
2236 | | /* Perform the next step in the connection phase, and then move on to the |
2237 | | WAITCONNECT state */ |
2238 | 0 | result = Curl_once_resolved(data, dns, &connected); |
2239 | |
|
2240 | 0 | if(result) |
2241 | | /* if Curl_once_resolved() returns failure, the connection struct is |
2242 | | already freed and gone */ |
2243 | 0 | data->conn = NULL; /* no more connection */ |
2244 | 0 | else { |
2245 | | /* call again please so that we get the next socket setup */ |
2246 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2247 | 0 | if(connected) |
2248 | 0 | multistate(data, MSTATE_PROTOCONNECT); |
2249 | 0 | else { |
2250 | 0 | multistate(data, MSTATE_CONNECTING); |
2251 | 0 | } |
2252 | 0 | } |
2253 | 0 | } |
2254 | |
|
2255 | 0 | if(result) |
2256 | | /* failure detected */ |
2257 | 0 | *stream_errorp = TRUE; |
2258 | |
|
2259 | 0 | *resultp = result; |
2260 | 0 | return rc; |
2261 | 0 | } |
2262 | | |
2263 | | static CURLMcode state_connect(struct Curl_multi *multi, |
2264 | | struct Curl_easy *data, |
2265 | | struct curltime *nowp, |
2266 | | CURLcode *resultp) |
2267 | 0 | { |
2268 | | /* Connect. We want to get a connection identifier filled in. This state can |
2269 | | be entered from SETUP and from PENDING. */ |
2270 | 0 | bool connected; |
2271 | 0 | bool async; |
2272 | 0 | CURLMcode rc = CURLM_OK; |
2273 | 0 | CURLcode result = Curl_connect(data, &async, &connected); |
2274 | 0 | if(CURLE_NO_CONNECTION_AVAILABLE == result) { |
2275 | | /* There was no connection available. We will go to the pending state and |
2276 | | wait for an available connection. */ |
2277 | 0 | multistate(data, MSTATE_PENDING); |
2278 | | /* move from process to pending set */ |
2279 | 0 | Curl_uint_bset_remove(&multi->process, data->mid); |
2280 | 0 | Curl_uint_bset_remove(&multi->dirty, data->mid); |
2281 | 0 | Curl_uint_bset_add(&multi->pending, data->mid); |
2282 | 0 | *resultp = CURLE_OK; |
2283 | 0 | return rc; |
2284 | 0 | } |
2285 | 0 | else |
2286 | 0 | process_pending_handles(data->multi); |
2287 | | |
2288 | 0 | if(!result) { |
2289 | 0 | *nowp = Curl_pgrsTime(data, TIMER_POSTQUEUE); |
2290 | 0 | if(async) |
2291 | | /* We are now waiting for an asynchronous name lookup */ |
2292 | 0 | multistate(data, MSTATE_RESOLVING); |
2293 | 0 | else { |
2294 | | /* after the connect has been sent off, go WAITCONNECT unless the |
2295 | | protocol connect is already done and we can go directly to WAITDO or |
2296 | | DO! */ |
2297 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2298 | |
|
2299 | 0 | if(connected) { |
2300 | 0 | if(!data->conn->bits.reuse && |
2301 | 0 | Curl_conn_is_multiplex(data->conn, FIRSTSOCKET)) { |
2302 | | /* new connection, can multiplex, wake pending handles */ |
2303 | 0 | process_pending_handles(data->multi); |
2304 | 0 | } |
2305 | 0 | multistate(data, MSTATE_PROTOCONNECT); |
2306 | 0 | } |
2307 | 0 | else { |
2308 | 0 | multistate(data, MSTATE_CONNECTING); |
2309 | 0 | } |
2310 | 0 | } |
2311 | 0 | } |
2312 | 0 | *resultp = result; |
2313 | 0 | return rc; |
2314 | 0 | } |
2315 | | |
2316 | | static CURLMcode multi_runsingle(struct Curl_multi *multi, |
2317 | | struct curltime *nowp, |
2318 | | struct Curl_easy *data) |
2319 | 0 | { |
2320 | 0 | struct Curl_message *msg = NULL; |
2321 | 0 | bool connected; |
2322 | 0 | bool protocol_connected = FALSE; |
2323 | 0 | bool dophase_done = FALSE; |
2324 | 0 | CURLMcode rc; |
2325 | 0 | CURLcode result = CURLE_OK; |
2326 | 0 | int control; |
2327 | |
|
2328 | 0 | if(!GOOD_EASY_HANDLE(data)) |
2329 | 0 | return CURLM_BAD_EASY_HANDLE; |
2330 | | |
2331 | 0 | if(multi->dead) { |
2332 | | /* a multi-level callback returned error before, meaning every individual |
2333 | | transfer now has failed */ |
2334 | 0 | result = CURLE_ABORTED_BY_CALLBACK; |
2335 | 0 | multi_posttransfer(data); |
2336 | 0 | multi_done(data, result, FALSE); |
2337 | 0 | multistate(data, MSTATE_COMPLETED); |
2338 | 0 | } |
2339 | |
|
2340 | 0 | multi_warn_debug(multi, data); |
2341 | | |
2342 | | /* transfer runs now, clear the dirty bit. This may be set |
2343 | | * again during processing, triggering a re-run later. */ |
2344 | 0 | Curl_uint_bset_remove(&multi->dirty, data->mid); |
2345 | |
|
2346 | 0 | do { |
2347 | | /* A "stream" here is a logical stream if the protocol can handle that |
2348 | | (HTTP/2), or the full connection for older protocols */ |
2349 | 0 | bool stream_error = FALSE; |
2350 | 0 | rc = CURLM_OK; |
2351 | |
|
2352 | 0 | if(multi_ischanged(multi, TRUE)) { |
2353 | 0 | CURL_TRC_M(data, "multi changed, check CONNECT_PEND queue"); |
2354 | 0 | process_pending_handles(multi); /* multiplexed */ |
2355 | 0 | } |
2356 | |
|
2357 | 0 | if(data->mstate > MSTATE_CONNECT && |
2358 | 0 | data->mstate < MSTATE_COMPLETED) { |
2359 | | /* Make sure we set the connection's current owner */ |
2360 | 0 | DEBUGASSERT(data->conn); |
2361 | 0 | if(!data->conn) |
2362 | 0 | return CURLM_INTERNAL_ERROR; |
2363 | 0 | } |
2364 | | |
2365 | | /* Wait for the connect state as only then is the start time stored, but |
2366 | | we must not check already completed handles */ |
2367 | 0 | if((data->mstate >= MSTATE_CONNECT) && (data->mstate < MSTATE_COMPLETED) && |
2368 | 0 | multi_handle_timeout(data, nowp, &stream_error, &result)) |
2369 | | /* Skip the statemachine and go directly to error handling section. */ |
2370 | 0 | goto statemachine_end; |
2371 | | |
2372 | 0 | switch(data->mstate) { |
2373 | 0 | case MSTATE_INIT: |
2374 | | /* Transitional state. init this transfer. A handle never comes back to |
2375 | | this state. */ |
2376 | 0 | result = Curl_pretransfer(data); |
2377 | 0 | if(result) |
2378 | 0 | break; |
2379 | | |
2380 | | /* after init, go SETUP */ |
2381 | 0 | multistate(data, MSTATE_SETUP); |
2382 | 0 | (void)Curl_pgrsTime(data, TIMER_STARTOP); |
2383 | 0 | FALLTHROUGH(); |
2384 | |
|
2385 | 0 | case MSTATE_SETUP: |
2386 | | /* Transitional state. Setup things for a new transfer. The handle |
2387 | | can come back to this state on a redirect. */ |
2388 | 0 | *nowp = Curl_pgrsTime(data, TIMER_STARTSINGLE); |
2389 | 0 | if(data->set.timeout) |
2390 | 0 | Curl_expire(data, data->set.timeout, EXPIRE_TIMEOUT); |
2391 | 0 | if(data->set.connecttimeout) |
2392 | | /* Since a connection might go to pending and back to CONNECT several |
2393 | | times before it actually takes off, we need to set the timeout once |
2394 | | in SETUP before we enter CONNECT the first time. */ |
2395 | 0 | Curl_expire(data, data->set.connecttimeout, EXPIRE_CONNECTTIMEOUT); |
2396 | |
|
2397 | 0 | multistate(data, MSTATE_CONNECT); |
2398 | 0 | FALLTHROUGH(); |
2399 | |
|
2400 | 0 | case MSTATE_CONNECT: |
2401 | 0 | rc = state_connect(multi, data, nowp, &result); |
2402 | 0 | break; |
2403 | | |
2404 | 0 | case MSTATE_RESOLVING: |
2405 | | /* awaiting an asynch name resolve to complete */ |
2406 | 0 | rc = state_resolving(multi, data, &stream_error, &result); |
2407 | 0 | break; |
2408 | | |
2409 | 0 | #ifndef CURL_DISABLE_HTTP |
2410 | 0 | case MSTATE_TUNNELING: |
2411 | | /* this is HTTP-specific, but sending CONNECT to a proxy is HTTP... */ |
2412 | 0 | DEBUGASSERT(data->conn); |
2413 | 0 | result = Curl_http_connect(data, &protocol_connected); |
2414 | 0 | if(!result) { |
2415 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2416 | | /* initiate protocol connect phase */ |
2417 | 0 | multistate(data, MSTATE_PROTOCONNECT); |
2418 | 0 | } |
2419 | 0 | else |
2420 | 0 | stream_error = TRUE; |
2421 | 0 | break; |
2422 | 0 | #endif |
2423 | | |
2424 | 0 | case MSTATE_CONNECTING: |
2425 | | /* awaiting a completion of an asynch TCP connect */ |
2426 | 0 | DEBUGASSERT(data->conn); |
2427 | 0 | result = Curl_conn_connect(data, FIRSTSOCKET, FALSE, &connected); |
2428 | 0 | if(connected && !result) { |
2429 | 0 | if(!data->conn->bits.reuse && |
2430 | 0 | Curl_conn_is_multiplex(data->conn, FIRSTSOCKET)) { |
2431 | | /* new connection, can multiplex, wake pending handles */ |
2432 | 0 | process_pending_handles(data->multi); |
2433 | 0 | } |
2434 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2435 | 0 | multistate(data, MSTATE_PROTOCONNECT); |
2436 | 0 | } |
2437 | 0 | else if(result) { |
2438 | | /* failure detected */ |
2439 | 0 | multi_posttransfer(data); |
2440 | 0 | multi_done(data, result, TRUE); |
2441 | 0 | stream_error = TRUE; |
2442 | 0 | break; |
2443 | 0 | } |
2444 | 0 | break; |
2445 | | |
2446 | 0 | case MSTATE_PROTOCONNECT: |
2447 | 0 | if(!result && data->conn->bits.reuse) { |
2448 | | /* FTP seems to hang when doing protoconnect on a reused connection. Since |
2449 | | * we handle PROTOCONNECT in general inside the filters, it seems wrong to |
2450 | | * restart this on a reused connection. |
2451 | | */ |
2452 | 0 | multistate(data, MSTATE_DO); |
2453 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2454 | 0 | break; |
2455 | 0 | } |
2456 | 0 | if(!result) |
2457 | 0 | result = protocol_connect(data, &protocol_connected); |
2458 | 0 | if(!result && !protocol_connected) { |
2459 | | /* switch to waiting state */ |
2460 | 0 | multistate(data, MSTATE_PROTOCONNECTING); |
2461 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2462 | 0 | } |
2463 | 0 | else if(!result) { |
2464 | | /* protocol connect has completed, go WAITDO or DO */ |
2465 | 0 | multistate(data, MSTATE_DO); |
2466 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2467 | 0 | } |
2468 | 0 | else { |
2469 | | /* failure detected */ |
2470 | 0 | multi_posttransfer(data); |
2471 | 0 | multi_done(data, result, TRUE); |
2472 | 0 | stream_error = TRUE; |
2473 | 0 | } |
2474 | 0 | break; |
2475 | | |
2476 | 0 | case MSTATE_PROTOCONNECTING: |
2477 | | /* protocol-specific connect phase */ |
2478 | 0 | result = protocol_connecting(data, &protocol_connected); |
2479 | 0 | if(!result && protocol_connected) { |
2480 | | /* after the connect has completed, go WAITDO or DO */ |
2481 | 0 | multistate(data, MSTATE_DO); |
2482 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2483 | 0 | } |
2484 | 0 | else if(result) { |
2485 | | /* failure detected */ |
2486 | 0 | multi_posttransfer(data); |
2487 | 0 | multi_done(data, result, TRUE); |
2488 | 0 | stream_error = TRUE; |
2489 | 0 | } |
2490 | 0 | break; |
2491 | | |
2492 | 0 | case MSTATE_DO: |
2493 | 0 | rc = state_do(data, &stream_error, &result); |
2494 | 0 | break; |
2495 | | |
2496 | 0 | case MSTATE_DOING: |
2497 | | /* we continue DOING until the DO phase is complete */ |
2498 | 0 | DEBUGASSERT(data->conn); |
2499 | 0 | result = protocol_doing(data, &dophase_done); |
2500 | 0 | if(!result) { |
2501 | 0 | if(dophase_done) { |
2502 | | /* after DO, go DO_DONE or DO_MORE */ |
2503 | 0 | multistate(data, data->conn->bits.do_more ? |
2504 | 0 | MSTATE_DOING_MORE : MSTATE_DID); |
2505 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2506 | 0 | } /* dophase_done */ |
2507 | 0 | } |
2508 | 0 | else { |
2509 | | /* failure detected */ |
2510 | 0 | multi_posttransfer(data); |
2511 | 0 | multi_done(data, result, FALSE); |
2512 | 0 | stream_error = TRUE; |
2513 | 0 | } |
2514 | 0 | break; |
2515 | | |
2516 | 0 | case MSTATE_DOING_MORE: |
2517 | | /* |
2518 | | * When we are connected, DOING MORE and then go DID |
2519 | | */ |
2520 | 0 | DEBUGASSERT(data->conn); |
2521 | 0 | result = multi_do_more(data, &control); |
2522 | |
|
2523 | 0 | if(!result) { |
2524 | 0 | if(control) { |
2525 | | /* if positive, advance to DO_DONE |
2526 | | if negative, go back to DOING */ |
2527 | 0 | multistate(data, control == 1 ? |
2528 | 0 | MSTATE_DID : MSTATE_DOING); |
2529 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2530 | 0 | } |
2531 | | /* else |
2532 | | stay in DO_MORE */ |
2533 | 0 | } |
2534 | 0 | else { |
2535 | | /* failure detected */ |
2536 | 0 | multi_posttransfer(data); |
2537 | 0 | multi_done(data, result, FALSE); |
2538 | 0 | stream_error = TRUE; |
2539 | 0 | } |
2540 | 0 | break; |
2541 | | |
2542 | 0 | case MSTATE_DID: |
2543 | 0 | DEBUGASSERT(data->conn); |
2544 | 0 | if(data->conn->bits.multiplex) |
2545 | | /* Check if we can move pending requests to send pipe */ |
2546 | 0 | process_pending_handles(multi); /* multiplexed */ |
2547 | | |
2548 | | /* Only perform the transfer if there is a good socket to work with. |
2549 | | Having both BAD is a signal to skip immediately to DONE */ |
2550 | 0 | if((data->conn->sockfd != CURL_SOCKET_BAD) || |
2551 | 0 | (data->conn->writesockfd != CURL_SOCKET_BAD)) |
2552 | 0 | multistate(data, MSTATE_PERFORMING); |
2553 | 0 | else { |
2554 | 0 | #ifndef CURL_DISABLE_FTP |
2555 | 0 | if(data->state.wildcardmatch && |
2556 | 0 | ((data->conn->handler->flags & PROTOPT_WILDCARD) == 0)) { |
2557 | 0 | data->wildcard->state = CURLWC_DONE; |
2558 | 0 | } |
2559 | 0 | #endif |
2560 | 0 | multistate(data, MSTATE_DONE); |
2561 | 0 | } |
2562 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2563 | 0 | break; |
2564 | | |
2565 | 0 | case MSTATE_RATELIMITING: /* limit-rate exceeded in either direction */ |
2566 | 0 | rc = state_ratelimiting(data, nowp, &result); |
2567 | 0 | break; |
2568 | | |
2569 | 0 | case MSTATE_PERFORMING: |
2570 | 0 | rc = state_performing(data, nowp, &stream_error, &result); |
2571 | 0 | break; |
2572 | | |
2573 | 0 | case MSTATE_DONE: |
2574 | | /* this state is highly transient, so run another loop after this */ |
2575 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2576 | |
|
2577 | 0 | if(data->conn) { |
2578 | 0 | CURLcode res; |
2579 | | |
2580 | | /* post-transfer command */ |
2581 | 0 | res = multi_done(data, result, FALSE); |
2582 | | |
2583 | | /* allow a previously set error code to take precedence */ |
2584 | 0 | if(!result) |
2585 | 0 | result = res; |
2586 | 0 | } |
2587 | |
|
2588 | 0 | #ifndef CURL_DISABLE_FTP |
2589 | 0 | if(data->state.wildcardmatch) { |
2590 | 0 | if(data->wildcard->state != CURLWC_DONE) { |
2591 | | /* if a wildcard is set and we are not ending -> let's start again |
2592 | | with MSTATE_INIT */ |
2593 | 0 | multistate(data, MSTATE_INIT); |
2594 | 0 | break; |
2595 | 0 | } |
2596 | 0 | } |
2597 | 0 | #endif |
2598 | | /* after we have DONE what we are supposed to do, go COMPLETED, and |
2599 | | it does not matter what the multi_done() returned! */ |
2600 | 0 | multistate(data, MSTATE_COMPLETED); |
2601 | 0 | break; |
2602 | | |
2603 | 0 | case MSTATE_COMPLETED: |
2604 | 0 | break; |
2605 | | |
2606 | 0 | case MSTATE_PENDING: |
2607 | 0 | case MSTATE_MSGSENT: |
2608 | | /* handles in these states should NOT be in this list */ |
2609 | 0 | break; |
2610 | | |
2611 | 0 | default: |
2612 | 0 | return CURLM_INTERNAL_ERROR; |
2613 | 0 | } |
2614 | | |
2615 | 0 | if(data->mstate >= MSTATE_CONNECT && |
2616 | 0 | data->mstate < MSTATE_DO && |
2617 | 0 | rc != CURLM_CALL_MULTI_PERFORM && |
2618 | 0 | !multi_ischanged(multi, FALSE)) { |
2619 | | /* We now handle stream timeouts if and only if this will be the last |
2620 | | * loop iteration. We only check this on the last iteration so that, if |
2621 | | * we know we have additional work to do immediately |
2622 | | * (i.e. CURLM_CALL_MULTI_PERFORM == TRUE), we do that before |
2623 | | * declaring the connection timed out, as we may almost have a completed |
2624 | | * connection. */ |
2625 | 0 | multi_handle_timeout(data, nowp, &stream_error, &result); |
2626 | 0 | } |
2627 | |
|
2628 | 0 | statemachine_end: |
2629 | |
|
2630 | 0 | if(data->mstate < MSTATE_COMPLETED) { |
2631 | 0 | if(result) { |
2632 | | /* |
2633 | | * If an error was returned, and we are not in completed state now, |
2634 | | * then we go to completed and consider this transfer aborted. |
2635 | | */ |
2636 | | |
2637 | | /* NOTE: no attempt to disconnect connections must be made |
2638 | | in the case blocks above - cleanup happens only here */ |
2639 | | |
2640 | | /* Check if we can move pending requests to send pipe */ |
2641 | 0 | process_pending_handles(multi); /* connection */ |
2642 | |
|
2643 | 0 | if(data->conn) { |
2644 | 0 | if(stream_error) { |
2645 | | /* Do not attempt to send data over a connection that timed out */ |
2646 | 0 | bool dead_connection = result == CURLE_OPERATION_TIMEDOUT; |
2647 | 0 | struct connectdata *conn = data->conn; |
2648 | | |
2649 | | /* This is where we make sure that the conn pointer is reset. |
2650 | | We do not have to do this in every case block above where a |
2651 | | failure is detected */ |
2652 | 0 | Curl_detach_connection(data); |
2653 | 0 | Curl_conn_terminate(data, conn, dead_connection); |
2654 | 0 | } |
2655 | 0 | } |
2656 | 0 | else if(data->mstate == MSTATE_CONNECT) { |
2657 | | /* Curl_connect() failed */ |
2658 | 0 | multi_posttransfer(data); |
2659 | 0 | Curl_pgrsUpdate_nometer(data); |
2660 | 0 | } |
2661 | |
|
2662 | 0 | multistate(data, MSTATE_COMPLETED); |
2663 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2664 | 0 | } |
2665 | | /* if there is still a connection to use, call the progress function */ |
2666 | 0 | else if(data->conn && Curl_pgrsUpdate(data)) { |
2667 | | /* aborted due to progress callback return code must close the |
2668 | | connection */ |
2669 | 0 | result = CURLE_ABORTED_BY_CALLBACK; |
2670 | 0 | streamclose(data->conn, "Aborted by callback"); |
2671 | | |
2672 | | /* if not yet in DONE state, go there, otherwise COMPLETED */ |
2673 | 0 | multistate(data, (data->mstate < MSTATE_DONE) ? |
2674 | 0 | MSTATE_DONE : MSTATE_COMPLETED); |
2675 | 0 | rc = CURLM_CALL_MULTI_PERFORM; |
2676 | 0 | } |
2677 | 0 | } |
2678 | |
|
2679 | 0 | if(MSTATE_COMPLETED == data->mstate) { |
2680 | 0 | if(data->master_mid != UINT_MAX) { |
2681 | | /* A sub transfer, not for msgsent to application */ |
2682 | 0 | struct Curl_easy *mdata; |
2683 | |
|
2684 | 0 | CURL_TRC_M(data, "sub xfer done for master %u", data->master_mid); |
2685 | 0 | mdata = Curl_multi_get_easy(multi, data->master_mid); |
2686 | 0 | if(mdata) { |
2687 | 0 | if(mdata->sub_xfer_done) |
2688 | 0 | mdata->sub_xfer_done(mdata, data, result); |
2689 | 0 | else |
2690 | 0 | CURL_TRC_M(data, "master easy %u without sub_xfer_done callback.", |
2691 | 0 | data->master_mid); |
2692 | 0 | } |
2693 | 0 | else { |
2694 | 0 | CURL_TRC_M(data, "master easy %u already gone.", data->master_mid); |
2695 | 0 | } |
2696 | 0 | } |
2697 | 0 | else { |
2698 | | /* now fill in the Curl_message with this info */ |
2699 | 0 | msg = &data->msg; |
2700 | |
|
2701 | 0 | msg->extmsg.msg = CURLMSG_DONE; |
2702 | 0 | msg->extmsg.easy_handle = data; |
2703 | 0 | msg->extmsg.data.result = result; |
2704 | |
|
2705 | 0 | multi_addmsg(multi, msg); |
2706 | 0 | DEBUGASSERT(!data->conn); |
2707 | 0 | } |
2708 | 0 | multistate(data, MSTATE_MSGSENT); |
2709 | | |
2710 | | /* remove from the other sets, add to msgsent */ |
2711 | 0 | Curl_uint_bset_remove(&multi->process, data->mid); |
2712 | 0 | Curl_uint_bset_remove(&multi->dirty, data->mid); |
2713 | 0 | Curl_uint_bset_remove(&multi->pending, data->mid); |
2714 | 0 | Curl_uint_bset_add(&multi->msgsent, data->mid); |
2715 | 0 | --multi->xfers_alive; |
2716 | 0 | return CURLM_OK; |
2717 | 0 | } |
2718 | 0 | } while((rc == CURLM_CALL_MULTI_PERFORM) || multi_ischanged(multi, FALSE)); |
2719 | | |
2720 | 0 | data->result = result; |
2721 | 0 | return rc; |
2722 | 0 | } |
2723 | | |
2724 | | |
2725 | | CURLMcode curl_multi_perform(CURLM *m, int *running_handles) |
2726 | 0 | { |
2727 | 0 | CURLMcode returncode = CURLM_OK; |
2728 | 0 | struct Curl_tree *t = NULL; |
2729 | 0 | struct curltime now = curlx_now(); |
2730 | 0 | struct Curl_multi *multi = m; |
2731 | 0 | unsigned int mid; |
2732 | 0 | SIGPIPE_VARIABLE(pipe_st); |
2733 | |
|
2734 | 0 | if(!GOOD_MULTI_HANDLE(multi)) |
2735 | 0 | return CURLM_BAD_HANDLE; |
2736 | | |
2737 | 0 | if(multi->in_callback) |
2738 | 0 | return CURLM_RECURSIVE_API_CALL; |
2739 | | |
2740 | 0 | sigpipe_init(&pipe_st); |
2741 | 0 | if(Curl_uint_bset_first(&multi->process, &mid)) { |
2742 | 0 | CURL_TRC_M(multi->admin, "multi_perform(running=%u)", |
2743 | 0 | Curl_multi_xfers_running(multi)); |
2744 | 0 | do { |
2745 | 0 | struct Curl_easy *data = Curl_multi_get_easy(multi, mid); |
2746 | 0 | CURLMcode result; |
2747 | 0 | if(!data) { |
2748 | 0 | DEBUGASSERT(0); |
2749 | 0 | Curl_uint_bset_remove(&multi->process, mid); |
2750 | 0 | Curl_uint_bset_remove(&multi->dirty, mid); |
2751 | 0 | continue; |
2752 | 0 | } |
2753 | 0 | if(data != multi->admin) { |
2754 | | /* admin handle is processed below */ |
2755 | 0 | sigpipe_apply(data, &pipe_st); |
2756 | 0 | result = multi_runsingle(multi, &now, data); |
2757 | 0 | if(result) |
2758 | 0 | returncode = result; |
2759 | 0 | } |
2760 | 0 | } |
2761 | 0 | while(Curl_uint_bset_next(&multi->process, mid, &mid)); |
2762 | 0 | } |
2763 | | |
2764 | 0 | sigpipe_apply(multi->admin, &pipe_st); |
2765 | 0 | Curl_cshutdn_perform(&multi->cshutdn, multi->admin, CURL_SOCKET_TIMEOUT); |
2766 | 0 | sigpipe_restore(&pipe_st); |
2767 | |
|
2768 | 0 | if(multi_ischanged(m, TRUE)) |
2769 | 0 | process_pending_handles(m); |
2770 | | |
2771 | | /* |
2772 | | * Simply remove all expired timers from the splay since handles are dealt |
2773 | | * with unconditionally by this function and curl_multi_timeout() requires |
2774 | | * that already passed/handled expire times are removed from the splay. |
2775 | | * |
2776 | | * It is important that the 'now' value is set at the entry of this function |
2777 | | * and not to the current time, as time may have ticked a little while since |
2778 | | * then and we would then risk that this loop removes timers that actually |
2779 | | * have not been handled! |
2780 | | */ |
2781 | 0 | do { |
2782 | 0 | multi->timetree = Curl_splaygetbest(now, multi->timetree, &t); |
2783 | 0 | if(t) { |
2784 | | /* the removed handle may have another timeout in the queue */ |
2785 | 0 | struct Curl_easy *data = Curl_splayget(t); |
2786 | 0 | if(data->mstate == MSTATE_PENDING) { |
2787 | 0 | bool stream_unused; |
2788 | 0 | CURLcode result_unused; |
2789 | 0 | if(multi_handle_timeout(data, &now, &stream_unused, &result_unused)) { |
2790 | 0 | infof(data, "PENDING handle timeout"); |
2791 | 0 | move_pending_to_connect(multi, data); |
2792 | 0 | continue; |
2793 | 0 | } |
2794 | 0 | } |
2795 | 0 | (void)add_next_timeout(now, multi, Curl_splayget(t)); |
2796 | 0 | } |
2797 | 0 | } while(t); |
2798 | | |
2799 | 0 | if(running_handles) { |
2800 | 0 | unsigned int running = Curl_multi_xfers_running(multi); |
2801 | 0 | *running_handles = (running < INT_MAX) ? (int)running : INT_MAX; |
2802 | 0 | } |
2803 | |
|
2804 | 0 | if(CURLM_OK >= returncode) |
2805 | 0 | returncode = Curl_update_timer(multi); |
2806 | |
|
2807 | 0 | return returncode; |
2808 | 0 | } |
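
/* Illustrative sketch (not part of multi.c): a minimal loop driving
   transfers with curl_multi_perform(). The easy handles are assumed to have
   been added already; the 1000 ms poll timeout is arbitrary. */
#if 0
#include <curl/curl.h>

static void example_drive(CURLM *multi)
{
  int running = 0;
  do {
    if(curl_multi_perform(multi, &running) != CURLM_OK)
      break;
    /* wait for activity or a timeout before the next perform round */
    if(curl_multi_poll(multi, NULL, 0, 1000, NULL) != CURLM_OK)
      break;
  } while(running);
}
#endif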
2809 | | |
2810 | | CURLMcode curl_multi_cleanup(CURLM *m) |
2811 | 0 | { |
2812 | 0 | struct Curl_multi *multi = m; |
2813 | 0 | if(GOOD_MULTI_HANDLE(multi)) { |
2814 | 0 | void *entry; |
2815 | 0 | unsigned int mid; |
2816 | 0 | if(multi->in_callback) |
2817 | 0 | return CURLM_RECURSIVE_API_CALL; |
2818 | | |
2819 | | /* First remove all remaining easy handles, |
2820 | | * close internal ones. The admin handle is special */ |
2821 | 0 | if(Curl_uint_tbl_first(&multi->xfers, &mid, &entry)) { |
2822 | 0 | do { |
2823 | 0 | struct Curl_easy *data = entry; |
2824 | 0 | if(!GOOD_EASY_HANDLE(data)) |
2825 | 0 | return CURLM_BAD_HANDLE; |
2826 | | |
2827 | | #ifdef DEBUGBUILD |
2828 | | if(mid != data->mid) { |
2829 | | CURL_TRC_M(data, "multi_cleanup: still present with mid=%u, " |
2830 | | "but unexpected data->mid=%u\n", mid, data->mid); |
2831 | | DEBUGASSERT(0); |
2832 | | } |
2833 | | #endif |
2834 | | |
2835 | 0 | if(data == multi->admin) |
2836 | 0 | continue; |
2837 | | |
2838 | 0 | if(!data->state.done && data->conn) |
2839 | | /* if DONE was never called for this handle */ |
2840 | 0 | (void)multi_done(data, CURLE_OK, TRUE); |
2841 | |
|
2842 | 0 | data->multi = NULL; /* clear the association */ |
2843 | 0 | Curl_uint_tbl_remove(&multi->xfers, mid); |
2844 | 0 | data->mid = UINT_MAX; |
2845 | |
|
2846 | | #ifdef USE_LIBPSL |
2847 | | if(data->psl == &multi->psl) |
2848 | | data->psl = NULL; |
2849 | | #endif |
2850 | 0 | if(data->state.internal) |
2851 | 0 | Curl_close(&data); |
2852 | 0 | } |
2853 | 0 | while(Curl_uint_tbl_next(&multi->xfers, mid, &mid, &entry)); |
2854 | 0 | } |
2855 | | |
2856 | 0 | Curl_cpool_destroy(&multi->cpool); |
2857 | 0 | Curl_cshutdn_destroy(&multi->cshutdn, multi->admin); |
2858 | 0 | if(multi->admin) { |
2859 | 0 | CURL_TRC_M(multi->admin, "multi_cleanup, closing admin handle, done"); |
2860 | 0 | multi->admin->multi = NULL; |
2861 | 0 | Curl_uint_tbl_remove(&multi->xfers, multi->admin->mid); |
2862 | 0 | Curl_close(&multi->admin); |
2863 | 0 | } |
2864 | |
|
2865 | 0 | multi->magic = 0; /* not good anymore */ |
2866 | |
|
2867 | 0 | Curl_multi_ev_cleanup(multi); |
2868 | 0 | Curl_hash_destroy(&multi->proto_hash); |
2869 | 0 | Curl_dnscache_destroy(&multi->dnscache); |
2870 | 0 | Curl_psl_destroy(&multi->psl); |
2871 | 0 | #ifdef USE_SSL |
2872 | 0 | Curl_ssl_scache_destroy(multi->ssl_scache); |
2873 | 0 | #endif |
2874 | |
|
2875 | | #ifdef USE_WINSOCK |
2876 | | WSACloseEvent(multi->wsa_event); |
2877 | | #else |
2878 | 0 | #ifdef ENABLE_WAKEUP |
2879 | 0 | wakeup_close(multi->wakeup_pair[0]); |
2880 | | #ifndef USE_EVENTFD |
2881 | | wakeup_close(multi->wakeup_pair[1]); |
2882 | | #endif |
2883 | 0 | #endif |
2884 | 0 | #endif |
2885 | |
|
2886 | 0 | multi_xfer_bufs_free(multi); |
2887 | | #ifdef DEBUGBUILD |
2888 | | if(Curl_uint_tbl_count(&multi->xfers)) { |
2889 | | multi_xfer_tbl_dump(multi); |
2890 | | DEBUGASSERT(0); |
2891 | | } |
2892 | | #endif |
2893 | 0 | Curl_uint_bset_destroy(&multi->process); |
2894 | 0 | Curl_uint_bset_destroy(&multi->dirty); |
2895 | 0 | Curl_uint_bset_destroy(&multi->pending); |
2896 | 0 | Curl_uint_bset_destroy(&multi->msgsent); |
2897 | 0 | Curl_uint_tbl_destroy(&multi->xfers); |
2898 | 0 | free(multi); |
2899 | |
|
2900 | 0 | return CURLM_OK; |
2901 | 0 | } |
2902 | 0 | return CURLM_BAD_HANDLE; |
2903 | 0 | } |
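
/* Illustrative sketch (not part of multi.c): the documented teardown order,
   removing and cleaning up the easy handle before the multi handle. */
#if 0
#include <curl/curl.h>

static void example_teardown(CURLM *multi, CURL *easy)
{
  curl_multi_remove_handle(multi, easy);
  curl_easy_cleanup(easy);
  curl_multi_cleanup(multi);
}
#endif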
2904 | | |
2905 | | /* |
2906 | | * curl_multi_info_read() |
2907 | | * |
2908 | | * This function is the primary way for a multi/multi_socket application to |
2909 | | * figure out if a transfer has ended. We MUST make this function as fast as |
2910 | | * possible as it will be polled frequently and we MUST NOT scan any lists in |
2911 | | * here to figure out things. We must scale fine to thousands of handles and |
2912 | | * beyond. The current design is fully O(1). |
2913 | | */ |
2914 | | |
2915 | | CURLMsg *curl_multi_info_read(CURLM *m, int *msgs_in_queue) |
2916 | 0 | { |
2917 | 0 | struct Curl_message *msg; |
2918 | 0 | struct Curl_multi *multi = m; |
2919 | |
|
2920 | 0 | *msgs_in_queue = 0; /* default to none */ |
2921 | |
|
2922 | 0 | if(GOOD_MULTI_HANDLE(multi) && |
2923 | 0 | !multi->in_callback && |
2924 | 0 | Curl_llist_count(&multi->msglist)) { |
2925 | | /* there is one or more messages in the list */ |
2926 | 0 | struct Curl_llist_node *e; |
2927 | | |
2928 | | /* extract the head of the list to return */ |
2929 | 0 | e = Curl_llist_head(&multi->msglist); |
2930 | |
|
2931 | 0 | msg = Curl_node_elem(e); |
2932 | | |
2933 | | /* remove the extracted entry */ |
2934 | 0 | Curl_node_remove(e); |
2935 | |
|
2936 | 0 | *msgs_in_queue = curlx_uztosi(Curl_llist_count(&multi->msglist)); |
2937 | |
|
2938 | 0 | return &msg->extmsg; |
2939 | 0 | } |
2940 | 0 | return NULL; |
2941 | 0 | } |
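For context, a minimal application-side sketch of draining this message queue after curl_multi_perform() or curl_multi_socket_action() returns; the cleanup policy (removing and closing finished handles) is an assumption of the example, not something this file mandates:

#include <stdio.h>
#include <curl/curl.h>

/* drain all pending completion messages from 'multi' */
static void check_multi_messages(CURLM *multi)
{
  CURLMsg *msg;
  int msgs_left;
  while((msg = curl_multi_info_read(multi, &msgs_left))) {
    if(msg->msg == CURLMSG_DONE) {
      CURL *easy = msg->easy_handle;
      fprintf(stderr, "transfer finished: %s\n",
              curl_easy_strerror(msg->data.result));
      curl_multi_remove_handle(multi, easy);
      curl_easy_cleanup(easy);
    }
  }
}

Since the function only pops the head of an existing list, calling it in a loop like this stays O(1) per returned message.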
2942 | | |
2943 | | |
2944 | | void Curl_multi_will_close(struct Curl_easy *data, curl_socket_t s) |
2945 | 0 | { |
2946 | 0 | if(data) { |
2947 | 0 | struct Curl_multi *multi = data->multi; |
2948 | 0 | if(multi) { |
2949 | 0 | CURL_TRC_M(data, "Curl_multi_will_close fd=%" FMT_SOCKET_T, s); |
2950 | 0 | Curl_multi_ev_socket_done(multi, data, s); |
2951 | 0 | } |
2952 | 0 | } |
2953 | 0 | } |
2954 | | |
2955 | | /* |
2956 | | * add_next_timeout() |
2957 | | * |
2958 | | * Each Curl_easy has a list of timeouts. The add_next_timeout() is called |
2959 | | * when it has just been removed from the splay tree because the timeout has |
2960 | | * expired. This function then advances in the list to pick the next |
2961 | | * timeout to use (skip the already expired ones) and add this node back to |
2962 | | * the splay tree again. |
2963 | | * |
2964 | | * The splay tree only has each sessionhandle as a single node and the nearest |
2965 | | * timeout is used to sort it on. |
2966 | | */ |
2967 | | static CURLMcode add_next_timeout(struct curltime now, |
2968 | | struct Curl_multi *multi, |
2969 | | struct Curl_easy *d) |
2970 | 0 | { |
2971 | 0 | struct curltime *tv = &d->state.expiretime; |
2972 | 0 | struct Curl_llist *list = &d->state.timeoutlist; |
2973 | 0 | struct Curl_llist_node *e; |
2974 | | |
2975 | | /* move over the timeout list for this specific handle and remove all |
2976 | | timeouts that have now passed and store the next pending |
2977 | | timeout in *tv */ |
2978 | 0 | for(e = Curl_llist_head(list); e;) { |
2979 | 0 | struct Curl_llist_node *n = Curl_node_next(e); |
2980 | 0 | struct time_node *node = Curl_node_elem(e); |
2981 | 0 | timediff_t diff = curlx_timediff_us(node->time, now); |
2982 | 0 | if(diff <= 0) |
2983 | | /* remove outdated entry */ |
2984 | 0 | Curl_node_remove(e); |
2985 | 0 | else |
2986 | | /* the list is sorted so get out on the first mismatch */ |
2987 | 0 | break; |
2988 | 0 | e = n; |
2989 | 0 | } |
2990 | 0 | e = Curl_llist_head(list); |
2991 | 0 | if(!e) { |
2992 | | /* clear the expire times within the handles that we remove from the |
2993 | | splay tree */ |
2994 | 0 | tv->tv_sec = 0; |
2995 | 0 | tv->tv_usec = 0; |
2996 | 0 | } |
2997 | 0 | else { |
2998 | 0 | struct time_node *node = Curl_node_elem(e); |
2999 | | /* copy the first entry to 'tv' */ |
3000 | 0 | memcpy(tv, &node->time, sizeof(*tv)); |
3001 | | |
3002 | | /* Insert this node again into the splay. Keep the timer in the list in |
3003 | | case we need to recompute future timers. */ |
3004 | 0 | multi->timetree = Curl_splayinsert(*tv, multi->timetree, |
3005 | 0 | &d->state.timenode); |
3006 | 0 | } |
3007 | 0 | return CURLM_OK; |
3008 | 0 | } |
3009 | | |
3010 | | struct multi_run_ctx { |
3011 | | struct Curl_multi *multi; |
3012 | | struct curltime now; |
3013 | | size_t run_xfers; |
3014 | | SIGPIPE_MEMBER(pipe_st); |
3015 | | bool run_cpool; |
3016 | | }; |
3017 | | |
3018 | | static void multi_mark_expired_as_dirty(struct multi_run_ctx *mrc) |
3019 | 0 | { |
3020 | 0 | struct Curl_multi *multi = mrc->multi; |
3021 | 0 | struct Curl_easy *data = NULL; |
3022 | 0 | struct Curl_tree *t = NULL; |
3023 | | |
3024 | | /* |
3025 | | * The loop following here will go on as long as there are expire-times left |
3026 | | * to process (compared to mrc->now) in the splay and 'data' will be |
3027 | | * re-assigned for every expired handle we deal with. |
3028 | | */ |
3029 | 0 | while(1) { |
3030 | | /* Check if there is one (more) expired timer to deal with! This function |
3031 | | extracts a matching node if there is one */ |
3032 | 0 | multi->timetree = Curl_splaygetbest(mrc->now, multi->timetree, &t); |
3033 | 0 | if(!t) |
3034 | 0 | return; |
3035 | | |
3036 | 0 | data = Curl_splayget(t); /* assign this for next loop */ |
3037 | 0 | if(!data) |
3038 | 0 | continue; |
3039 | | |
3040 | 0 | (void)add_next_timeout(mrc->now, multi, data); |
3041 | 0 | Curl_multi_mark_dirty(data); |
3042 | 0 | } |
3043 | 0 | } |
3044 | | |
3045 | | static CURLMcode multi_run_dirty(struct multi_run_ctx *mrc) |
3046 | 0 | { |
3047 | 0 | struct Curl_multi *multi = mrc->multi; |
3048 | 0 | CURLMcode result = CURLM_OK; |
3049 | 0 | unsigned int mid; |
3050 | |
|
3051 | 0 | if(Curl_uint_bset_first(&multi->dirty, &mid)) { |
3052 | 0 | do { |
3053 | 0 | struct Curl_easy *data = Curl_multi_get_easy(multi, mid); |
3054 | 0 | if(data) { |
3055 | 0 | CURL_TRC_M(data, "multi_run_dirty"); |
3056 | |
|
3057 | 0 | if(data == multi->admin) { |
3058 | 0 | Curl_uint_bset_remove(&multi->dirty, mid); |
3059 | 0 | mrc->run_cpool = TRUE; |
3060 | 0 | continue; |
3061 | 0 | } |
3062 | 0 | else if(!Curl_uint_bset_contains(&multi->process, mid)) { |
3063 | | /* We are no longer processing this transfer */ |
3064 | 0 | Curl_uint_bset_remove(&multi->dirty, mid); |
3065 | 0 | continue; |
3066 | 0 | } |
3067 | | |
3068 | 0 | mrc->run_xfers++; |
3069 | 0 | sigpipe_apply(data, &mrc->pipe_st); |
3070 | | /* runsingle() clears the dirty mid */ |
3071 | 0 | result = multi_runsingle(multi, &mrc->now, data); |
3072 | |
|
3073 | 0 | if(CURLM_OK >= result) { |
3074 | | /* reassess event handling of data */ |
3075 | 0 | result = Curl_multi_ev_assess_xfer(multi, data); |
3076 | 0 | if(result) |
3077 | 0 | goto out; |
3078 | 0 | } |
3079 | 0 | } |
3080 | 0 | else { |
3081 | 0 | CURL_TRC_M(multi->admin, "multi_run_dirty, %u no longer found", mid); |
3082 | 0 | Curl_uint_bset_remove(&multi->dirty, mid); |
3083 | 0 | } |
3084 | 0 | } |
3085 | 0 | while(Curl_uint_bset_next(&multi->dirty, mid, &mid)); |
3086 | 0 | } |
3087 | | |
3088 | 0 | out: |
3089 | 0 | return result; |
3090 | 0 | } |
3091 | | |
3092 | | static CURLMcode multi_socket(struct Curl_multi *multi, |
3093 | | bool checkall, |
3094 | | curl_socket_t s, |
3095 | | int ev_bitmask, |
3096 | | int *running_handles) |
3097 | 0 | { |
3098 | 0 | CURLMcode result = CURLM_OK; |
3099 | 0 | struct multi_run_ctx mrc; |
3100 | |
|
3101 | 0 | (void)ev_bitmask; |
3102 | 0 | memset(&mrc, 0, sizeof(mrc)); |
3103 | 0 | mrc.multi = multi; |
3104 | 0 | mrc.now = curlx_now(); |
3105 | 0 | sigpipe_init(&mrc.pipe_st); |
3106 | |
|
3107 | 0 | if(checkall) { |
3108 | | /* *perform() deals with running_handles on its own */ |
3109 | 0 | result = curl_multi_perform(multi, running_handles); |
3110 | |
|
3111 | 0 | if(result != CURLM_BAD_HANDLE) { |
3112 | | /* Reassess event status of all active transfers */ |
3113 | 0 | result = Curl_multi_ev_assess_xfer_bset(multi, &multi->process); |
3114 | 0 | } |
3115 | 0 | mrc.run_cpool = TRUE; |
3116 | 0 | goto out; |
3117 | 0 | } |
3118 | | |
3119 | 0 | if(s != CURL_SOCKET_TIMEOUT) { |
3120 | | /* Mark all transfers of that socket as dirty */ |
3121 | 0 | Curl_multi_ev_dirty_xfers(multi, s, &mrc.run_cpool); |
3122 | 0 | } |
3123 | 0 | else { |
3124 | | /* Asked to run due to time-out. Clear the 'last_expire_ts' variable to |
3125 | | force Curl_update_timer() to trigger a callback to the app again even |
3126 | | if the same timeout is still the one to run after this call. That |
3127 | | handles the case when the application asks libcurl to run the timeout |
3128 | | prematurely. */ |
3129 | 0 | memset(&multi->last_expire_ts, 0, sizeof(multi->last_expire_ts)); |
3130 | 0 | mrc.run_cpool = TRUE; |
3131 | 0 | } |
3132 | |
|
3133 | 0 | multi_mark_expired_as_dirty(&mrc); |
3134 | 0 | result = multi_run_dirty(&mrc); |
3135 | 0 | if(result) |
3136 | 0 | goto out; |
3137 | | |
3138 | 0 | if(mrc.run_xfers) { |
3139 | | /* Running transfers takes time. With a new timestamp, we might catch |
3140 | | * other expires which are due now. Instead of telling the application |
3141 | | * to set a 0 timeout and call us again, we run them here. |
3142 | | * Do that only once or it might be unfair to transfers on other |
3143 | | * sockets. */ |
3144 | 0 | mrc.now = curlx_now(); |
3145 | 0 | multi_mark_expired_as_dirty(&mrc); |
3146 | 0 | result = multi_run_dirty(&mrc); |
3147 | 0 | } |
3148 | |
|
3149 | 0 | out: |
3150 | 0 | if(mrc.run_cpool) { |
3151 | 0 | sigpipe_apply(multi->admin, &mrc.pipe_st); |
3152 | 0 | Curl_cshutdn_perform(&multi->cshutdn, multi->admin, s); |
3153 | 0 | } |
3154 | 0 | sigpipe_restore(&mrc.pipe_st); |
3155 | |
|
3156 | 0 | if(multi_ischanged(multi, TRUE)) |
3157 | 0 | process_pending_handles(multi); |
3158 | |
|
3159 | 0 | if(running_handles) { |
3160 | 0 | unsigned int running = Curl_multi_xfers_running(multi); |
3161 | 0 | *running_handles = (running < INT_MAX) ? (int)running : INT_MAX; |
3162 | 0 | } |
3163 | |
|
3164 | 0 | if(CURLM_OK >= result) |
3165 | 0 | result = Curl_update_timer(multi); |
3166 | 0 | return result; |
3167 | 0 | } |
3168 | | |
3169 | | #undef curl_multi_setopt |
3170 | | CURLMcode curl_multi_setopt(CURLM *m, |
3171 | | CURLMoption option, ...) |
3172 | 0 | { |
3173 | 0 | CURLMcode res = CURLM_OK; |
3174 | 0 | va_list param; |
3175 | 0 | unsigned long uarg; |
3176 | 0 | struct Curl_multi *multi = m; |
3177 | |
|
3178 | 0 | if(!GOOD_MULTI_HANDLE(multi)) |
3179 | 0 | return CURLM_BAD_HANDLE; |
3180 | | |
3181 | 0 | if(multi->in_callback) |
3182 | 0 | return CURLM_RECURSIVE_API_CALL; |
3183 | | |
3184 | 0 | va_start(param, option); |
3185 | |
|
3186 | 0 | switch(option) { |
3187 | 0 | case CURLMOPT_SOCKETFUNCTION: |
3188 | 0 | multi->socket_cb = va_arg(param, curl_socket_callback); |
3189 | 0 | break; |
3190 | 0 | case CURLMOPT_SOCKETDATA: |
3191 | 0 | multi->socket_userp = va_arg(param, void *); |
3192 | 0 | break; |
3193 | 0 | case CURLMOPT_PUSHFUNCTION: |
3194 | 0 | multi->push_cb = va_arg(param, curl_push_callback); |
3195 | 0 | break; |
3196 | 0 | case CURLMOPT_PUSHDATA: |
3197 | 0 | multi->push_userp = va_arg(param, void *); |
3198 | 0 | break; |
3199 | 0 | case CURLMOPT_PIPELINING: |
3200 | 0 | multi->multiplexing = va_arg(param, long) & CURLPIPE_MULTIPLEX ? 1 : 0; |
3201 | 0 | break; |
3202 | 0 | case CURLMOPT_TIMERFUNCTION: |
3203 | 0 | multi->timer_cb = va_arg(param, curl_multi_timer_callback); |
3204 | 0 | break; |
3205 | 0 | case CURLMOPT_TIMERDATA: |
3206 | 0 | multi->timer_userp = va_arg(param, void *); |
3207 | 0 | break; |
3208 | 0 | case CURLMOPT_MAXCONNECTS: |
3209 | 0 | uarg = va_arg(param, unsigned long); |
3210 | 0 | if(uarg <= UINT_MAX) |
3211 | 0 | multi->maxconnects = (unsigned int)uarg; |
3212 | 0 | break; |
3213 | 0 | case CURLMOPT_MAX_HOST_CONNECTIONS: |
3214 | 0 | multi->max_host_connections = va_arg(param, long); |
3215 | 0 | break; |
3216 | 0 | case CURLMOPT_MAX_TOTAL_CONNECTIONS: |
3217 | 0 | multi->max_total_connections = va_arg(param, long); |
3218 | 0 | break; |
3219 | | /* options formerly used for pipelining */ |
3220 | 0 | case CURLMOPT_MAX_PIPELINE_LENGTH: |
3221 | 0 | break; |
3222 | 0 | case CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE: |
3223 | 0 | break; |
3224 | 0 | case CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE: |
3225 | 0 | break; |
3226 | 0 | case CURLMOPT_PIPELINING_SITE_BL: |
3227 | 0 | break; |
3228 | 0 | case CURLMOPT_PIPELINING_SERVER_BL: |
3229 | 0 | break; |
3230 | 0 | case CURLMOPT_MAX_CONCURRENT_STREAMS: |
3231 | 0 | { |
3232 | 0 | long streams = va_arg(param, long); |
3233 | 0 | if((streams < 1) || (streams > INT_MAX)) |
3234 | 0 | streams = 100; |
3235 | 0 | multi->max_concurrent_streams = (unsigned int)streams; |
3236 | 0 | } |
3237 | 0 | break; |
3238 | 0 | case CURLMOPT_NETWORK_CHANGED: { |
3239 | 0 | long val = va_arg(param, long); |
3240 | 0 | if(val & CURLM_NWCOPT_CLEAR_DNS) { |
3241 | 0 | Curl_dnscache_clear(multi->admin); |
3242 | 0 | } |
3243 | 0 | if(val & CURLM_NWCOPT_CLEAR_CONNS) { |
3244 | 0 | Curl_cpool_nw_changed(multi->admin); |
3245 | 0 | } |
3246 | 0 | break; |
3247 | 0 | } |
3248 | 0 | default: |
3249 | 0 | res = CURLM_UNKNOWN_OPTION; |
3250 | 0 | break; |
3251 | 0 | } |
3252 | 0 | va_end(param); |
3253 | 0 | return res; |
3254 | 0 | } |
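A short sketch of typical option setup for the multi_socket API; app_socket_cb, app_timer_cb and app_ctx are hypothetical names standing in for whatever the application provides:

#include <curl/curl.h>

/* hypothetical callbacks implemented elsewhere by the application */
extern int app_socket_cb(CURL *easy, curl_socket_t s, int what,
                         void *userp, void *socketp);
extern int app_timer_cb(CURLM *multi, long timeout_ms, void *userp);

static CURLMcode setup_multi(CURLM *multi, void *app_ctx)
{
  CURLMcode mc = curl_multi_setopt(multi, CURLMOPT_SOCKETFUNCTION,
                                   app_socket_cb);
  if(!mc)
    mc = curl_multi_setopt(multi, CURLMOPT_SOCKETDATA, app_ctx);
  if(!mc)
    mc = curl_multi_setopt(multi, CURLMOPT_TIMERFUNCTION, app_timer_cb);
  if(!mc)
    mc = curl_multi_setopt(multi, CURLMOPT_TIMERDATA, app_ctx);
  if(!mc)
    mc = curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, 50L);
  return mc;
}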
3255 | | |
3256 | | /* we define curl_multi_socket() in the public multi.h header */ |
3257 | | #undef curl_multi_socket |
3258 | | |
3259 | | CURLMcode curl_multi_socket(CURLM *m, curl_socket_t s, int *running_handles) |
3260 | 0 | { |
3261 | 0 | struct Curl_multi *multi = m; |
3262 | 0 | if(multi->in_callback) |
3263 | 0 | return CURLM_RECURSIVE_API_CALL; |
3264 | 0 | return multi_socket(multi, FALSE, s, 0, running_handles); |
3265 | 0 | } |
3266 | | |
3267 | | CURLMcode curl_multi_socket_action(CURLM *m, curl_socket_t s, |
3268 | | int ev_bitmask, int *running_handles) |
3269 | 0 | { |
3270 | 0 | struct Curl_multi *multi = m; |
3271 | 0 | if(multi->in_callback) |
3272 | 0 | return CURLM_RECURSIVE_API_CALL; |
3273 | 0 | return multi_socket(multi, FALSE, s, ev_bitmask, running_handles); |
3274 | 0 | } |
3275 | | |
3276 | | CURLMcode curl_multi_socket_all(CURLM *m, int *running_handles) |
3277 | 0 | { |
3278 | 0 | struct Curl_multi *multi = m; |
3279 | 0 | if(multi->in_callback) |
3280 | 0 | return CURLM_RECURSIVE_API_CALL; |
3281 | 0 | return multi_socket(multi, TRUE, CURL_SOCKET_BAD, 0, running_handles); |
3282 | 0 | } |
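A minimal sketch of how an event loop feeds socket readiness and timer expiry back into libcurl via curl_multi_socket_action(); the surrounding event loop (epoll, kqueue, libuv, ...) is assumed and not shown:

#include <curl/curl.h>

/* the application's event loop reports that socket 's' is ready */
static CURLMcode on_socket_event(CURLM *multi, curl_socket_t s,
                                 int readable, int writable)
{
  int running = 0;
  int flags = (readable ? CURL_CSELECT_IN : 0) |
              (writable ? CURL_CSELECT_OUT : 0);
  return curl_multi_socket_action(multi, s, flags, &running);
}

/* the timeout set via CURLMOPT_TIMERFUNCTION has fired */
static CURLMcode on_timer_expired(CURLM *multi)
{
  int running = 0;
  return curl_multi_socket_action(multi, CURL_SOCKET_TIMEOUT, 0, &running);
}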
3283 | | |
3284 | | |
3285 | | static bool multi_has_dirties(struct Curl_multi *multi) |
3286 | 0 | { |
3287 | 0 | unsigned int mid; |
3288 | 0 | if(Curl_uint_bset_first(&multi->dirty, &mid)) { |
3289 | 0 | do { |
3290 | 0 | struct Curl_easy *data = Curl_multi_get_easy(multi, mid); |
3291 | 0 | if(data) { |
3292 | 0 | if(Curl_uint_bset_contains(&multi->process, mid)) |
3293 | 0 | return TRUE; |
3294 | | /* We are no longer processing this transfer */ |
3295 | 0 | Curl_uint_bset_remove(&multi->dirty, mid); |
3296 | 0 | } |
3297 | 0 | else { |
3298 | 0 | CURL_TRC_M(multi->admin, "dirty transfer %u no longer found", mid); |
3299 | 0 | Curl_uint_bset_remove(&multi->dirty, mid); |
3300 | 0 | } |
3301 | 0 | } |
3302 | 0 | while(Curl_uint_bset_next(&multi->dirty, mid, &mid)); |
3303 | 0 | } |
3304 | 0 | return FALSE; |
3305 | 0 | } |
3306 | | |
3307 | | static CURLMcode multi_timeout(struct Curl_multi *multi, |
3308 | | struct curltime *expire_time, |
3309 | | long *timeout_ms) |
3310 | 0 | { |
3311 | 0 | static const struct curltime tv_zero = {0, 0}; |
3312 | |
|
3313 | 0 | if(multi->dead) { |
3314 | 0 | *timeout_ms = 0; |
3315 | 0 | return CURLM_OK; |
3316 | 0 | } |
3317 | | |
3318 | 0 | if(multi_has_dirties(multi)) { |
3319 | 0 | *expire_time = curlx_now(); |
3320 | 0 | *timeout_ms = 0; |
3321 | 0 | return CURLM_OK; |
3322 | 0 | } |
3323 | 0 | else if(multi->timetree) { |
3324 | | /* we have a tree of expire times */ |
3325 | 0 | struct curltime now = curlx_now(); |
3326 | | |
3327 | | /* splay the lowest to the bottom */ |
3328 | 0 | multi->timetree = Curl_splay(tv_zero, multi->timetree); |
3329 | | /* this will not return NULL from a non-empty tree, but some compilers |
3330 | | * are not convinced of that. Analyzers are hard. */ |
3331 | 0 | *expire_time = multi->timetree ? multi->timetree->key : tv_zero; |
3332 | | |
3333 | | /* 'multi->timetree' will be non-NULL here but the compilers sometimes |
3334 | | yell at us if we assume so */ |
3335 | 0 | if(multi->timetree && |
3336 | 0 | curlx_timediff_us(multi->timetree->key, now) > 0) { |
3337 | | /* some time left before expiration */ |
3338 | 0 | timediff_t diff = curlx_timediff_ceil(multi->timetree->key, now); |
3339 | | /* this should be safe even on 32-bit archs, as we do not use such |
3340 | | overly long timeouts */ |
3341 | 0 | *timeout_ms = (long)diff; |
3342 | 0 | } |
3343 | 0 | else { |
3344 | 0 | if(multi->timetree) { |
3345 | 0 | struct Curl_easy *data = Curl_splayget(multi->timetree); |
3346 | 0 | CURL_TRC_M(data, "multi_timeout() says this has expired"); |
3347 | 0 | } |
3348 | | /* 0 means immediately */ |
3349 | 0 | *timeout_ms = 0; |
3350 | 0 | } |
3351 | 0 | } |
3352 | 0 | else { |
3353 | 0 | *expire_time = tv_zero; |
3354 | 0 | *timeout_ms = -1; |
3355 | 0 | } |
3356 | | |
3357 | 0 | return CURLM_OK; |
3358 | 0 | } |
3359 | | |
3360 | | CURLMcode curl_multi_timeout(CURLM *m, |
3361 | | long *timeout_ms) |
3362 | 0 | { |
3363 | 0 | struct curltime expire_time; |
3364 | 0 | struct Curl_multi *multi = m; |
3365 | | |
3366 | | /* First, make some basic checks that the CURLM handle is a good handle */ |
3367 | 0 | if(!GOOD_MULTI_HANDLE(multi)) |
3368 | 0 | return CURLM_BAD_HANDLE; |
3369 | | |
3370 | 0 | if(multi->in_callback) |
3371 | 0 | return CURLM_RECURSIVE_API_CALL; |
3372 | | |
3373 | 0 | return multi_timeout(multi, &expire_time, timeout_ms); |
3374 | 0 | } |
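For the select()-driven (non multi_socket) style, curl_multi_timeout() bounds how long the application may sleep. A minimal POSIX sketch, with the -1 "no timeout set" case mapped to an arbitrary one second default:

#include <sys/select.h>
#include <curl/curl.h>

/* one wait/perform cycle bounded by curl_multi_timeout() */
static CURLMcode wait_and_perform(CURLM *multi, int *still_running)
{
  fd_set rd, wr, ex;
  int maxfd = -1;
  long timeout_ms = -1;
  struct timeval tv;

  FD_ZERO(&rd);
  FD_ZERO(&wr);
  FD_ZERO(&ex);
  curl_multi_fdset(multi, &rd, &wr, &ex, &maxfd);
  curl_multi_timeout(multi, &timeout_ms);
  if(timeout_ms < 0)
    timeout_ms = 1000;                /* no timer set yet, pick a default */
  tv.tv_sec = timeout_ms / 1000;
  tv.tv_usec = (timeout_ms % 1000) * 1000;
  if(maxfd >= 0)
    select(maxfd + 1, &rd, &wr, &ex, &tv);
  return curl_multi_perform(multi, still_running);
}

curl_multi_wait() and curl_multi_poll() wrap this pattern and are usually preferable; the sketch only illustrates what the -1/0/N millisecond values mean.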
3375 | | |
3376 | | #define DEBUG_UPDATE_TIMER 0 |
3377 | | |
3378 | | /* |
3379 | | * Tell the application it should update its timers, if it subscribes to the |
3380 | | * update timer callback. |
3381 | | */ |
3382 | | CURLMcode Curl_update_timer(struct Curl_multi *multi) |
3383 | 0 | { |
3384 | 0 | struct curltime expire_ts; |
3385 | 0 | long timeout_ms; |
3386 | 0 | int rc; |
3387 | 0 | bool set_value = FALSE; |
3388 | |
|
3389 | 0 | if(!multi->timer_cb || multi->dead) |
3390 | 0 | return CURLM_OK; |
3391 | 0 | if(multi_timeout(multi, &expire_ts, &timeout_ms)) { |
3392 | 0 | return CURLM_OK; |
3393 | 0 | } |
3394 | | |
3395 | 0 | if(timeout_ms < 0 && multi->last_timeout_ms < 0) { |
3396 | | #if DEBUG_UPDATE_TIMER |
3397 | | fprintf(stderr, "Curl_update_timer(), still no timeout, no change\n"); |
3398 | | #endif |
3399 | 0 | } |
3400 | 0 | else if(timeout_ms < 0) { |
3401 | | /* there is no timeout now but there was one previously */ |
3402 | | #if DEBUG_UPDATE_TIMER |
3403 | | fprintf(stderr, "Curl_update_timer(), remove timeout, " |
3404 | | " last_timeout=%ldms\n", multi->last_timeout_ms); |
3405 | | #endif |
3406 | 0 | timeout_ms = -1; /* normalize */ |
3407 | 0 | set_value = TRUE; |
3408 | 0 | } |
3409 | 0 | else if(multi->last_timeout_ms < 0) { |
3410 | | #if DEBUG_UPDATE_TIMER |
3411 | | fprintf(stderr, "Curl_update_timer(), had no timeout, set now\n"); |
3412 | | #endif |
3413 | 0 | set_value = TRUE; |
3414 | 0 | } |
3415 | 0 | else if(curlx_timediff_us(multi->last_expire_ts, expire_ts)) { |
3416 | | /* We had a timeout before and have one now, the absolute timestamp |
3417 | | * differs. The relative timeout_ms may be the same, but the starting |
3418 | | * point differs. Let the application restart its timer. */ |
3419 | | #if DEBUG_UPDATE_TIMER |
3420 | | fprintf(stderr, "Curl_update_timer(), expire timestamp changed\n"); |
3421 | | #endif |
3422 | 0 | set_value = TRUE; |
3423 | 0 | } |
3424 | 0 | else { |
3425 | | /* We have the same expire time as previously. Our relative 'timeout_ms' |
3426 | | * may be different now, but the application has the timer running |
3427 | | * and we do not need to tell it to start this again. */ |
3428 | | #if DEBUG_UPDATE_TIMER |
3429 | | fprintf(stderr, "Curl_update_timer(), same expire timestamp, no change\n"); |
3430 | | #endif |
3431 | 0 | } |
3432 | |
|
3433 | 0 | if(set_value) { |
3434 | | #if DEBUG_UPDATE_TIMER |
3435 | | fprintf(stderr, "Curl_update_timer(), set timeout %ldms\n", timeout_ms); |
3436 | | #endif |
3437 | 0 | multi->last_expire_ts = expire_ts; |
3438 | 0 | multi->last_timeout_ms = timeout_ms; |
3439 | 0 | set_in_callback(multi, TRUE); |
3440 | 0 | rc = multi->timer_cb(multi, timeout_ms, multi->timer_userp); |
3441 | 0 | set_in_callback(multi, FALSE); |
3442 | 0 | if(rc == -1) { |
3443 | 0 | multi->dead = TRUE; |
3444 | 0 | return CURLM_ABORTED_BY_CALLBACK; |
3445 | 0 | } |
3446 | 0 | } |
3447 | 0 | return CURLM_OK; |
3448 | 0 | } |
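The receiving end of this callback lives in the application. A minimal sketch of a CURLMOPT_TIMERFUNCTION implementation; app_timer_arm and app_timer_disarm are hypothetical wrappers around whatever single-shot timer the application's event loop offers:

#include <curl/curl.h>

extern void app_timer_arm(void *loop, long timeout_ms);   /* hypothetical */
extern void app_timer_disarm(void *loop);                 /* hypothetical */

static int app_timer_cb(CURLM *multi, long timeout_ms, void *userp)
{
  void *loop = userp;
  (void)multi;
  if(timeout_ms < 0)
    app_timer_disarm(loop);             /* -1: no timeout needed right now */
  else
    app_timer_arm(loop, timeout_ms);    /* 0 means: fire as soon as possible */
  return 0;                             /* returning -1 marks the multi dead */
}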
3449 | | |
3450 | | /* |
3451 | | * multi_deltimeout() |
3452 | | * |
3453 | | * Remove a given timestamp from the list of timeouts. |
3454 | | */ |
3455 | | static void |
3456 | | multi_deltimeout(struct Curl_easy *data, expire_id eid) |
3457 | 0 | { |
3458 | 0 | struct Curl_llist_node *e; |
3459 | 0 | struct Curl_llist *timeoutlist = &data->state.timeoutlist; |
3460 | | /* find and remove the specific node from the list */ |
3461 | 0 | for(e = Curl_llist_head(timeoutlist); e; e = Curl_node_next(e)) { |
3462 | 0 | struct time_node *n = Curl_node_elem(e); |
3463 | 0 | if(n->eid == eid) { |
3464 | 0 | Curl_node_remove(e); |
3465 | 0 | return; |
3466 | 0 | } |
3467 | 0 | } |
3468 | 0 | } |
3469 | | |
3470 | | /* |
3471 | | * multi_addtimeout() |
3472 | | * |
3473 | | * Add a timestamp to the list of timeouts. Keep the list sorted so that head |
3474 | | * of list is always the timeout nearest in time. |
3475 | | * |
3476 | | */ |
3477 | | static CURLMcode |
3478 | | multi_addtimeout(struct Curl_easy *data, |
3479 | | struct curltime *stamp, |
3480 | | expire_id eid) |
3481 | 0 | { |
3482 | 0 | struct Curl_llist_node *e; |
3483 | 0 | struct time_node *node; |
3484 | 0 | struct Curl_llist_node *prev = NULL; |
3485 | 0 | size_t n; |
3486 | 0 | struct Curl_llist *timeoutlist = &data->state.timeoutlist; |
3487 | |
|
3488 | 0 | node = &data->state.expires[eid]; |
3489 | | |
3490 | | /* copy the timestamp and id */ |
3491 | 0 | memcpy(&node->time, stamp, sizeof(*stamp)); |
3492 | 0 | node->eid = eid; /* also marks it as in use */ |
3493 | |
|
3494 | 0 | n = Curl_llist_count(timeoutlist); |
3495 | 0 | if(n) { |
3496 | | /* find the correct spot in the list */ |
3497 | 0 | for(e = Curl_llist_head(timeoutlist); e; e = Curl_node_next(e)) { |
3498 | 0 | struct time_node *check = Curl_node_elem(e); |
3499 | 0 | timediff_t diff = curlx_timediff(check->time, node->time); |
3500 | 0 | if(diff > 0) |
3501 | 0 | break; |
3502 | 0 | prev = e; |
3503 | 0 | } |
3504 | |
|
3505 | 0 | } |
3506 | | /* else |
3507 | | this is the first timeout on the list */ |
3508 | |
|
3509 | 0 | Curl_llist_insert_next(timeoutlist, prev, node, &node->list); |
3510 | 0 | return CURLM_OK; |
3511 | 0 | } |
3512 | | |
3513 | | void Curl_expire_ex(struct Curl_easy *data, |
3514 | | const struct curltime *nowp, |
3515 | | timediff_t milli, expire_id id) |
3516 | 0 | { |
3517 | 0 | struct Curl_multi *multi = data->multi; |
3518 | 0 | struct curltime *curr_expire = &data->state.expiretime; |
3519 | 0 | struct curltime set; |
3520 | | |
3521 | | /* this is only interesting while there is still an associated multi struct |
3522 | | remaining! */ |
3523 | 0 | if(!multi) |
3524 | 0 | return; |
3525 | | |
3526 | 0 | DEBUGASSERT(id < EXPIRE_LAST); |
3527 | |
|
3528 | 0 | set = *nowp; |
3529 | 0 | set.tv_sec += (time_t)(milli/1000); /* might be a 64 to 32 bits conversion */ |
3530 | 0 | set.tv_usec += (int)(milli%1000)*1000; |
3531 | |
|
3532 | 0 | if(set.tv_usec >= 1000000) { |
3533 | 0 | set.tv_sec++; |
3534 | 0 | set.tv_usec -= 1000000; |
3535 | 0 | } |
3536 | | |
3537 | | /* Remove any timer with the same id just in case. */ |
3538 | 0 | multi_deltimeout(data, id); |
3539 | | |
3540 | | /* Add it to the timer list. It must stay in the list until it has expired |
3541 | | in case we need to recompute the minimum timer later. */ |
3542 | 0 | multi_addtimeout(data, &set, id); |
3543 | |
|
3544 | 0 | if(curr_expire->tv_sec || curr_expire->tv_usec) { |
3545 | | /* This means that the struct is added as a node in the splay tree. |
3546 | | Compare if the new time is earlier, and only remove-old/add-new if it |
3547 | | is. */ |
3548 | 0 | timediff_t diff = curlx_timediff(set, *curr_expire); |
3549 | 0 | int rc; |
3550 | |
|
3551 | 0 | if(diff > 0) { |
3552 | | /* The current splay tree entry is sooner than this new expiry time. |
3553 | | We do not need to update our splay tree entry. */ |
3554 | 0 | return; |
3555 | 0 | } |
3556 | | |
3557 | | /* Since this is an updated time, we must remove the previous entry from |
3558 | | the splay tree first and then re-add the new value */ |
3559 | 0 | rc = Curl_splayremove(multi->timetree, &data->state.timenode, |
3560 | 0 | &multi->timetree); |
3561 | 0 | if(rc) |
3562 | 0 | infof(data, "Internal error removing splay node = %d", rc); |
3563 | 0 | } |
3564 | | |
3565 | | /* Indicate that we are in the splay tree and insert the new timer expiry |
3566 | | value since it is our local minimum. */ |
3567 | 0 | *curr_expire = set; |
3568 | 0 | Curl_splayset(&data->state.timenode, data); |
3569 | 0 | multi->timetree = Curl_splayinsert(*curr_expire, multi->timetree, |
3570 | 0 | &data->state.timenode); |
3571 | 0 | if(data->id >= 0) |
3572 | 0 | CURL_TRC_M(data, "set expire[%d] in %" FMT_TIMEDIFF_T "ns", |
3573 | 0 | id, curlx_timediff_us(set, *nowp)); |
3574 | 0 | } |
3575 | | |
3576 | | /* |
3577 | | * Curl_expire() |
3578 | | * |
3579 | | * given a number of milliseconds from now to use to set the 'act before |
3580 | | * this'-time for the transfer, to be extracted by curl_multi_timeout() |
3581 | | * |
3582 | | * The timeout will be added to a queue of timeouts if it defines a moment in |
3583 | | * time that is later than the current head of queue. |
3584 | | * |
3585 | | * Expire replaces a former timeout using the same id if already set. |
3586 | | */ |
3587 | | void Curl_expire(struct Curl_easy *data, timediff_t milli, expire_id id) |
3588 | 0 | { |
3589 | 0 | struct curltime now = curlx_now(); |
3590 | 0 | Curl_expire_ex(data, &now, milli, id); |
3591 | 0 | } |
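A minimal internal-side sketch of how code in this library schedules wakeups with Curl_expire(); EXPIRE_TOOFAST and EXPIRE_RUN_NOW are existing expire ids, but the function below is purely illustrative and assumes the internal headers (urldata.h, multiif.h) are available:

/* illustrative only: back off for 50 milliseconds under rate limiting,
   or ask to be run again at the next opportunity */
static void example_schedule(struct Curl_easy *data, bool too_fast)
{
  if(too_fast)
    Curl_expire(data, 50, EXPIRE_TOOFAST);
  else
    Curl_expire(data, 0, EXPIRE_RUN_NOW);
}

Because each id replaces its previous value, callers can keep re-arming the same id without growing the per-handle timeout list.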
3592 | | |
3593 | | /* |
3594 | | * Curl_expire_done() |
3595 | | * |
3596 | | * Removes the expire timer. Marks it as done. |
3597 | | * |
3598 | | */ |
3599 | | void Curl_expire_done(struct Curl_easy *data, expire_id id) |
3600 | 0 | { |
3601 | | /* remove the timer, if there */ |
3602 | 0 | multi_deltimeout(data, id); |
3603 | 0 | } |
3604 | | |
3605 | | /* |
3606 | | * Curl_expire_clear() |
3607 | | * |
3608 | | * Clear ALL timeout values for this handle. |
3609 | | */ |
3610 | | bool Curl_expire_clear(struct Curl_easy *data) |
3611 | 0 | { |
3612 | 0 | struct Curl_multi *multi = data->multi; |
3613 | 0 | struct curltime *nowp = &data->state.expiretime; |
3614 | | |
3615 | | /* this is only interesting while there is still an associated multi struct |
3616 | | remaining! */ |
3617 | 0 | if(!multi) |
3618 | 0 | return FALSE; |
3619 | | |
3620 | 0 | if(nowp->tv_sec || nowp->tv_usec) { |
3621 | | /* Since this is a cleared time, we must remove the previous entry from |
3622 | | the splay tree */ |
3623 | 0 | struct Curl_llist *list = &data->state.timeoutlist; |
3624 | 0 | int rc; |
3625 | |
|
3626 | 0 | rc = Curl_splayremove(multi->timetree, &data->state.timenode, |
3627 | 0 | &multi->timetree); |
3628 | 0 | if(rc) |
3629 | 0 | infof(data, "Internal error clearing splay node = %d", rc); |
3630 | | |
3631 | | /* clear the timeout list too */ |
3632 | 0 | Curl_llist_destroy(list, NULL); |
3633 | |
|
3634 | 0 | CURL_TRC_M(data, "Expire cleared"); |
3635 | 0 | nowp->tv_sec = 0; |
3636 | 0 | nowp->tv_usec = 0; |
3637 | 0 | return TRUE; |
3638 | 0 | } |
3639 | 0 | return FALSE; |
3640 | 0 | } |
3641 | | |
3642 | | CURLMcode curl_multi_assign(CURLM *m, curl_socket_t s, |
3643 | | void *hashp) |
3644 | 0 | { |
3645 | 0 | struct Curl_multi *multi = m; |
3646 | 0 | if(!GOOD_MULTI_HANDLE(multi)) |
3647 | 0 | return CURLM_BAD_HANDLE; |
3648 | | |
3649 | 0 | return Curl_multi_ev_assign(multi, s, hashp); |
3650 | 0 | } |
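A sketch of the intended use from a CURLMOPT_SOCKETFUNCTION callback: attach a per-socket context with curl_multi_assign() so later invocations get it back via 'socketp'. The sock_ctx layout and the assumption that CURLMOPT_SOCKETDATA was set to the multi handle are choices of the example:

#include <stdlib.h>
#include <curl/curl.h>

struct sock_ctx {
  curl_socket_t fd;
  int watched_events;       /* last CURL_POLL_* value seen */
};

static int app_socket_cb(CURL *easy, curl_socket_t s, int what,
                         void *userp, void *socketp)
{
  CURLM *multi = userp;
  struct sock_ctx *ctx = socketp;
  (void)easy;
  if(what == CURL_POLL_REMOVE) {
    free(ctx);
    curl_multi_assign(multi, s, NULL);
  }
  else if(!ctx) {
    ctx = calloc(1, sizeof(*ctx));
    if(!ctx)
      return -1;
    ctx->fd = s;
    ctx->watched_events = what;
    curl_multi_assign(multi, s, ctx);
  }
  else
    ctx->watched_events = what;
  return 0;
}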
3651 | | |
3652 | | static void move_pending_to_connect(struct Curl_multi *multi, |
3653 | | struct Curl_easy *data) |
3654 | 0 | { |
3655 | 0 | DEBUGASSERT(data->mstate == MSTATE_PENDING); |
3656 | | |
3657 | | /* Remove this node from the pending set, add into process set */ |
3658 | 0 | Curl_uint_bset_remove(&multi->pending, data->mid); |
3659 | 0 | Curl_uint_bset_add(&multi->process, data->mid); |
3660 | |
|
3661 | 0 | multistate(data, MSTATE_CONNECT); |
3662 | 0 | Curl_multi_mark_dirty(data); /* make it run */ |
3663 | 0 | } |
3664 | | |
3665 | | /* process_pending_handles() moves a handle from PENDING back into the process |
3666 | | list and changes its state to CONNECT. |
3667 | | |
3668 | | We do not move all transfers because that can be a significant amount. |
3669 | | Since this is tried every now and then, doing too many too often becomes a |
3670 | | performance problem. |
3671 | | |
3672 | | When there is a change for connection limits like max host connections etc, |
3673 | | this likely only allows one new transfer. When there is a pipewait change, |
3674 | | it can potentially allow hundreds of new transfers. |
3675 | | |
3676 | | We could consider an improvement where we store the queue reason and allow |
3677 | | more pipewait rechecks than others. |
3678 | | */ |
3679 | | static void process_pending_handles(struct Curl_multi *multi) |
3680 | 0 | { |
3681 | 0 | unsigned int mid; |
3682 | 0 | if(Curl_uint_bset_first(&multi->pending, &mid)) { |
3683 | 0 | do { |
3684 | 0 | struct Curl_easy *data = Curl_multi_get_easy(multi, mid); |
3685 | 0 | if(data) { |
3686 | 0 | move_pending_to_connect(multi, data); |
3687 | 0 | break; |
3688 | 0 | } |
3689 | | /* transfer no longer known, should not happen */ |
3690 | 0 | Curl_uint_bset_remove(&multi->pending, mid); |
3691 | 0 | DEBUGASSERT(0); |
3692 | 0 | } |
3693 | 0 | while(Curl_uint_bset_next(&multi->pending, mid, &mid)); |
3694 | 0 | } |
3695 | 0 | } |
3696 | | |
3697 | | void Curl_set_in_callback(struct Curl_easy *data, bool value) |
3698 | 0 | { |
3699 | 0 | if(data && data->multi) |
3700 | 0 | data->multi->in_callback = value; |
3701 | 0 | } |
3702 | | |
3703 | | bool Curl_is_in_callback(struct Curl_easy *data) |
3704 | 0 | { |
3705 | 0 | return data && data->multi && data->multi->in_callback; |
3706 | 0 | } |
3707 | | |
3708 | | unsigned int Curl_multi_max_concurrent_streams(struct Curl_multi *multi) |
3709 | 0 | { |
3710 | 0 | DEBUGASSERT(multi); |
3711 | 0 | return multi->max_concurrent_streams; |
3712 | 0 | } |
3713 | | |
3714 | | CURL **curl_multi_get_handles(CURLM *m) |
3715 | 0 | { |
3716 | 0 | struct Curl_multi *multi = m; |
3717 | 0 | void *entry; |
3718 | 0 | unsigned int count = Curl_uint_tbl_count(&multi->xfers); |
3719 | 0 | CURL **a = malloc(sizeof(struct Curl_easy *) * (count + 1)); |
3720 | 0 | if(a) { |
3721 | 0 | unsigned int i = 0, mid; |
3722 | |
|
3723 | 0 | if(Curl_uint_tbl_first(&multi->xfers, &mid, &entry)) { |
3724 | 0 | do { |
3725 | 0 | struct Curl_easy *data = entry; |
3726 | 0 | DEBUGASSERT(i < count); |
3727 | 0 | if(!data->state.internal) |
3728 | 0 | a[i++] = data; |
3729 | 0 | } |
3730 | 0 | while(Curl_uint_tbl_next(&multi->xfers, mid, &mid, &entry)); |
3731 | 0 | } |
3732 | 0 | a[i] = NULL; /* last entry is a NULL */ |
3733 | 0 | } |
3734 | 0 | return a; |
3735 | 0 | } |
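A small usage sketch; per the public documentation the returned array is NULL-terminated and released with curl_free():

#include <curl/curl.h>

/* detach every transfer currently owned by 'multi' */
static void remove_all_transfers(CURLM *multi)
{
  CURL **list = curl_multi_get_handles(multi);
  if(list) {
    int i;
    for(i = 0; list[i]; i++)
      curl_multi_remove_handle(multi, list[i]);
    curl_free(list);
  }
}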
3736 | | |
3737 | | CURLcode Curl_multi_xfer_buf_borrow(struct Curl_easy *data, |
3738 | | char **pbuf, size_t *pbuflen) |
3739 | 0 | { |
3740 | 0 | DEBUGASSERT(data); |
3741 | 0 | DEBUGASSERT(data->multi); |
3742 | 0 | *pbuf = NULL; |
3743 | 0 | *pbuflen = 0; |
3744 | 0 | if(!data->multi) { |
3745 | 0 | failf(data, "transfer has no multi handle"); |
3746 | 0 | return CURLE_FAILED_INIT; |
3747 | 0 | } |
3748 | 0 | if(!data->set.buffer_size) { |
3749 | 0 | failf(data, "transfer buffer size is 0"); |
3750 | 0 | return CURLE_FAILED_INIT; |
3751 | 0 | } |
3752 | 0 | if(data->multi->xfer_buf_borrowed) { |
3753 | 0 | failf(data, "attempt to borrow xfer_buf when already borrowed"); |
3754 | 0 | return CURLE_AGAIN; |
3755 | 0 | } |
3756 | | |
3757 | 0 | if(data->multi->xfer_buf && |
3758 | 0 | data->set.buffer_size > data->multi->xfer_buf_len) { |
3759 | | /* not large enough, get a new one */ |
3760 | 0 | free(data->multi->xfer_buf); |
3761 | 0 | data->multi->xfer_buf = NULL; |
3762 | 0 | data->multi->xfer_buf_len = 0; |
3763 | 0 | } |
3764 | |
|
3765 | 0 | if(!data->multi->xfer_buf) { |
3766 | 0 | data->multi->xfer_buf = malloc((size_t)data->set.buffer_size); |
3767 | 0 | if(!data->multi->xfer_buf) { |
3768 | 0 | failf(data, "could not allocate xfer_buf of %zu bytes", |
3769 | 0 | (size_t)data->set.buffer_size); |
3770 | 0 | return CURLE_OUT_OF_MEMORY; |
3771 | 0 | } |
3772 | 0 | data->multi->xfer_buf_len = data->set.buffer_size; |
3773 | 0 | } |
3774 | | |
3775 | 0 | data->multi->xfer_buf_borrowed = TRUE; |
3776 | 0 | *pbuf = data->multi->xfer_buf; |
3777 | 0 | *pbuflen = data->multi->xfer_buf_len; |
3778 | 0 | return CURLE_OK; |
3779 | 0 | } |
3780 | | |
3781 | | void Curl_multi_xfer_buf_release(struct Curl_easy *data, char *buf) |
3782 | 0 | { |
3783 | 0 | (void)buf; |
3784 | 0 | DEBUGASSERT(data); |
3785 | 0 | DEBUGASSERT(data->multi); |
3786 | 0 | DEBUGASSERT(!buf || data->multi->xfer_buf == buf); |
3787 | 0 | data->multi->xfer_buf_borrowed = FALSE; |
3788 | 0 | } |
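A minimal internal-side sketch of the borrow/release discipline these helpers enforce (one borrower at a time, buffer grown lazily to the transfer's buffer size); it assumes the internal headers and trims all real I/O:

/* illustrative only: borrow the shared download buffer for one step */
static CURLcode example_recv_step(struct Curl_easy *data)
{
  char *buf;
  size_t blen;
  CURLcode result = Curl_multi_xfer_buf_borrow(data, &buf, &blen);
  if(result)
    return result;            /* CURLE_AGAIN if someone else holds it */
  /* ... read up to 'blen' bytes into 'buf' and process them ... */
  Curl_multi_xfer_buf_release(data, buf);
  return CURLE_OK;
}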
3789 | | |
3790 | | CURLcode Curl_multi_xfer_ulbuf_borrow(struct Curl_easy *data, |
3791 | | char **pbuf, size_t *pbuflen) |
3792 | 0 | { |
3793 | 0 | DEBUGASSERT(data); |
3794 | 0 | DEBUGASSERT(data->multi); |
3795 | 0 | *pbuf = NULL; |
3796 | 0 | *pbuflen = 0; |
3797 | 0 | if(!data->multi) { |
3798 | 0 | failf(data, "transfer has no multi handle"); |
3799 | 0 | return CURLE_FAILED_INIT; |
3800 | 0 | } |
3801 | 0 | if(!data->set.upload_buffer_size) { |
3802 | 0 | failf(data, "transfer upload buffer size is 0"); |
3803 | 0 | return CURLE_FAILED_INIT; |
3804 | 0 | } |
3805 | 0 | if(data->multi->xfer_ulbuf_borrowed) { |
3806 | 0 | failf(data, "attempt to borrow xfer_ulbuf when already borrowed"); |
3807 | 0 | return CURLE_AGAIN; |
3808 | 0 | } |
3809 | | |
3810 | 0 | if(data->multi->xfer_ulbuf && |
3811 | 0 | data->set.upload_buffer_size > data->multi->xfer_ulbuf_len) { |
3812 | | /* not large enough, get a new one */ |
3813 | 0 | free(data->multi->xfer_ulbuf); |
3814 | 0 | data->multi->xfer_ulbuf = NULL; |
3815 | 0 | data->multi->xfer_ulbuf_len = 0; |
3816 | 0 | } |
3817 | |
|
3818 | 0 | if(!data->multi->xfer_ulbuf) { |
3819 | 0 | data->multi->xfer_ulbuf = malloc((size_t)data->set.upload_buffer_size); |
3820 | 0 | if(!data->multi->xfer_ulbuf) { |
3821 | 0 | failf(data, "could not allocate xfer_ulbuf of %zu bytes", |
3822 | 0 | (size_t)data->set.upload_buffer_size); |
3823 | 0 | return CURLE_OUT_OF_MEMORY; |
3824 | 0 | } |
3825 | 0 | data->multi->xfer_ulbuf_len = data->set.upload_buffer_size; |
3826 | 0 | } |
3827 | | |
3828 | 0 | data->multi->xfer_ulbuf_borrowed = TRUE; |
3829 | 0 | *pbuf = data->multi->xfer_ulbuf; |
3830 | 0 | *pbuflen = data->multi->xfer_ulbuf_len; |
3831 | 0 | return CURLE_OK; |
3832 | 0 | } |
3833 | | |
3834 | | void Curl_multi_xfer_ulbuf_release(struct Curl_easy *data, char *buf) |
3835 | 0 | { |
3836 | 0 | (void)buf; |
3837 | 0 | DEBUGASSERT(data); |
3838 | 0 | DEBUGASSERT(data->multi); |
3839 | 0 | DEBUGASSERT(!buf || data->multi->xfer_ulbuf == buf); |
3840 | 0 | data->multi->xfer_ulbuf_borrowed = FALSE; |
3841 | 0 | } |
3842 | | |
3843 | | CURLcode Curl_multi_xfer_sockbuf_borrow(struct Curl_easy *data, |
3844 | | size_t blen, char **pbuf) |
3845 | 0 | { |
3846 | 0 | DEBUGASSERT(data); |
3847 | 0 | DEBUGASSERT(data->multi); |
3848 | 0 | *pbuf = NULL; |
3849 | 0 | if(!data->multi) { |
3850 | 0 | failf(data, "transfer has no multi handle"); |
3851 | 0 | return CURLE_FAILED_INIT; |
3852 | 0 | } |
3853 | 0 | if(data->multi->xfer_sockbuf_borrowed) { |
3854 | 0 | failf(data, "attempt to borrow xfer_sockbuf when already borrowed"); |
3855 | 0 | return CURLE_AGAIN; |
3856 | 0 | } |
3857 | | |
3858 | 0 | if(data->multi->xfer_sockbuf && blen > data->multi->xfer_sockbuf_len) { |
3859 | | /* not large enough, get a new one */ |
3860 | 0 | free(data->multi->xfer_sockbuf); |
3861 | 0 | data->multi->xfer_sockbuf = NULL; |
3862 | 0 | data->multi->xfer_sockbuf_len = 0; |
3863 | 0 | } |
3864 | |
|
3865 | 0 | if(!data->multi->xfer_sockbuf) { |
3866 | 0 | data->multi->xfer_sockbuf = malloc(blen); |
3867 | 0 | if(!data->multi->xfer_sockbuf) { |
3868 | 0 | failf(data, "could not allocate xfer_sockbuf of %zu bytes", blen); |
3869 | 0 | return CURLE_OUT_OF_MEMORY; |
3870 | 0 | } |
3871 | 0 | data->multi->xfer_sockbuf_len = blen; |
3872 | 0 | } |
3873 | | |
3874 | 0 | data->multi->xfer_sockbuf_borrowed = TRUE; |
3875 | 0 | *pbuf = data->multi->xfer_sockbuf; |
3876 | 0 | return CURLE_OK; |
3877 | 0 | } |
3878 | | |
3879 | | void Curl_multi_xfer_sockbuf_release(struct Curl_easy *data, char *buf) |
3880 | 0 | { |
3881 | 0 | (void)buf; |
3882 | 0 | DEBUGASSERT(data); |
3883 | 0 | DEBUGASSERT(data->multi); |
3884 | 0 | DEBUGASSERT(!buf || data->multi->xfer_sockbuf == buf); |
3885 | 0 | data->multi->xfer_sockbuf_borrowed = FALSE; |
3886 | 0 | } |
3887 | | |
3888 | | static void multi_xfer_bufs_free(struct Curl_multi *multi) |
3889 | 0 | { |
3890 | 0 | DEBUGASSERT(multi); |
3891 | 0 | Curl_safefree(multi->xfer_buf); |
3892 | 0 | multi->xfer_buf_len = 0; |
3893 | 0 | multi->xfer_buf_borrowed = FALSE; |
3894 | 0 | Curl_safefree(multi->xfer_ulbuf); |
3895 | 0 | multi->xfer_ulbuf_len = 0; |
3896 | 0 | multi->xfer_ulbuf_borrowed = FALSE; |
3897 | 0 | Curl_safefree(multi->xfer_sockbuf); |
3898 | 0 | multi->xfer_sockbuf_len = 0; |
3899 | 0 | multi->xfer_sockbuf_borrowed = FALSE; |
3900 | 0 | } |
3901 | | |
3902 | | struct Curl_easy *Curl_multi_get_easy(struct Curl_multi *multi, |
3903 | | unsigned int mid) |
3904 | 0 | { |
3905 | 0 | struct Curl_easy *data = mid ? Curl_uint_tbl_get(&multi->xfers, mid) : NULL; |
3906 | 0 | if(data && GOOD_EASY_HANDLE(data)) |
3907 | 0 | return data; |
3908 | 0 | CURL_TRC_M(multi->admin, "invalid easy handle in xfer table for mid=%u", |
3909 | 0 | mid); |
3910 | 0 | Curl_uint_tbl_remove(&multi->xfers, mid); |
3911 | 0 | return NULL; |
3912 | 0 | } |
3913 | | |
3914 | | unsigned int Curl_multi_xfers_running(struct Curl_multi *multi) |
3915 | 0 | { |
3916 | 0 | return multi->xfers_alive; |
3917 | 0 | } |
3918 | | |
3919 | | void Curl_multi_mark_dirty(struct Curl_easy *data) |
3920 | 0 | { |
3921 | 0 | if(data->multi && data->mid != UINT_MAX) |
3922 | 0 | Curl_uint_bset_add(&data->multi->dirty, data->mid); |
3923 | 0 | } |
3924 | | |
3925 | | #ifdef DEBUGBUILD |
3926 | | static void multi_xfer_dump(struct Curl_multi *multi, unsigned int mid, |
3927 | | void *entry) |
3928 | | { |
3929 | | struct Curl_easy *data = entry; |
3930 | | |
3931 | | (void)multi; |
3932 | | if(!data) { |
3933 | | fprintf(stderr, "mid=%u, entry=NULL, bug in xfer table?\n", mid); |
3934 | | } |
3935 | | else { |
3936 | | fprintf(stderr, "mid=%u, magic=%s, p=%p, id=%" FMT_OFF_T ", url=%s\n", |
3937 | | mid, (data->magic == CURLEASY_MAGIC_NUMBER) ? "GOOD" : "BAD!", |
3938 | | (void *)data, data->id, data->state.url); |
3939 | | } |
3940 | | } |
3941 | | |
3942 | | static void multi_xfer_tbl_dump(struct Curl_multi *multi) |
3943 | | { |
3944 | | unsigned int mid; |
3945 | | void *entry; |
3946 | | fprintf(stderr, "=== multi xfer table (count=%u, capacity=%u)\n", |
3947 | | Curl_uint_tbl_count(&multi->xfers), |
3948 | | Curl_uint_tbl_capacity(&multi->xfers)); |
3949 | | if(Curl_uint_tbl_first(&multi->xfers, &mid, &entry)) { |
3950 | | multi_xfer_dump(multi, mid, entry); |
3951 | | while(Curl_uint_tbl_next(&multi->xfers, mid, &mid, &entry)) |
3952 | | multi_xfer_dump(multi, mid, entry); |
3953 | | } |
3954 | | fprintf(stderr, "===\n"); |
3955 | | fflush(stderr); |
3956 | | } |
3957 | | #endif /* DEBUGBUILD */ |