/src/tor/src/core/mainloop/mainloop.c
Line | Count | Source |
1 | | /* Copyright (c) 2001 Matej Pfajfar. |
2 | | * Copyright (c) 2001-2004, Roger Dingledine. |
3 | | * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson. |
4 | | * Copyright (c) 2007-2021, The Tor Project, Inc. */ |
5 | | /* See LICENSE for licensing information */ |
6 | | |
7 | | /** |
8 | | * \file mainloop.c |
9 | | * \brief Toplevel module. Handles signals, multiplexes between |
10 | | * connections, implements main loop, and drives scheduled events. |
11 | | * |
12 | | * For the main loop itself, see run_main_loop_once(). It invokes the rest of |
13 | | * Tor mostly through Libevent callbacks. Libevent callbacks can happen when |
14 | | * a timer elapses, a signal is received, a socket is ready to read or write, |
15 | | * or an event is manually activated. |
16 | | * |
17 | | * Most events in Tor are driven from these callbacks: |
18 | | * <ul> |
19 | | * <li>conn_read_callback() and conn_write_callback() here, which are |
20 | | * invoked when a socket is ready to read or write respectively. |
21 | | * <li>signal_callback(), which handles incoming signals. |
22 | | * </ul> |
23 | | * Other events are used for specific purposes, or for building more complex |
24 | | * control structures. If you search for usage of tor_event_new(), you |
25 | | * will find all the events that we construct in Tor. |
26 | | * |
27 | | * Tor has numerous housekeeping operations that need to happen |
28 | | * regularly. They are handled in different ways: |
29 | | * <ul> |
30 | | * <li>The most frequent operations are handled after every read or write |
31 | | * event, at the end of connection_handle_read() and |
32 | | * connection_handle_write(). |
33 | | * |
34 | | * <li>The next most frequent operations happen after each invocation of the |
35 | | * main loop, in run_main_loop_once(). |
36 | | * |
37 | | * <li>Once per second, we run all of the operations listed in |
38 | | * second_elapsed_callback(), and in its child, run_scheduled_events(). |
39 | | * |
42 | | * <li>More infrequent operations take place based on the periodic event |
43 | | * driver in periodic.c. These are stored in the periodic_events[] |
44 | | * table. |
45 | | * </ul> |
46 | | * |
47 | | **/ |
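The structure described above is ordinary Libevent usage. As a rough, self-contained sketch of the same shape (illustrative only, not Tor code; demo_second_cb and demo_sigint_cb are hypothetical names), a loop that multiplexes a once-per-second timer and a signal event looks like this:

#include <event2/event.h>
#include <signal.h>
#include <stdio.h>

static void
demo_second_cb(evutil_socket_t fd, short what, void *arg)
{
  (void)fd; (void)what; (void)arg;
  puts("one second elapsed; periodic housekeeping would run here");
}

static void
demo_sigint_cb(evutil_socket_t sig, short what, void *arg)
{
  (void)sig; (void)what;
  /* Ask the loop to exit once the current callbacks have finished. */
  event_base_loopexit((struct event_base *)arg, NULL);
}

int
main(void)
{
  struct event_base *base = event_base_new();
  struct timeval one_sec = { 1, 0 };

  /* Persistent timer, in the spirit of second_elapsed_callback(). */
  struct event *tick = event_new(base, -1, EV_PERSIST, demo_second_cb, NULL);
  event_add(tick, &one_sec);

  /* Signal event, in the spirit of signal_callback(). */
  struct event *sigev = evsignal_new(base, SIGINT, demo_sigint_cb, base);
  event_add(sigev, NULL);

  event_base_dispatch(base); /* run callbacks until loopexit is requested */

  event_free(tick);
  event_free(sigev);
  event_base_free(base);
  return 0;
}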
48 | | |
49 | | #define MAINLOOP_PRIVATE |
50 | | #include "core/or/or.h" |
51 | | |
52 | | #include "app/config/config.h" |
53 | | #include "app/config/statefile.h" |
54 | | #include "app/main/ntmain.h" |
55 | | #include "core/mainloop/connection.h" |
56 | | #include "core/mainloop/cpuworker.h" |
57 | | #include "core/mainloop/mainloop.h" |
58 | | #include "core/mainloop/netstatus.h" |
59 | | #include "core/mainloop/periodic.h" |
60 | | #include "core/or/channel.h" |
61 | | #include "core/or/channelpadding.h" |
62 | | #include "core/or/channeltls.h" |
63 | | #include "core/or/circuitbuild.h" |
64 | | #include "core/or/circuitlist.h" |
65 | | #include "core/or/circuituse.h" |
66 | | #include "core/or/connection_edge.h" |
67 | | #include "core/or/connection_or.h" |
68 | | #include "core/or/dos.h" |
69 | | #include "core/or/status.h" |
70 | | #include "feature/client/addressmap.h" |
71 | | #include "feature/client/bridges.h" |
72 | | #include "feature/client/dnsserv.h" |
73 | | #include "feature/client/entrynodes.h" |
74 | | #include "feature/client/proxymode.h" |
75 | | #include "feature/client/transports.h" |
76 | | #include "feature/control/control.h" |
77 | | #include "feature/control/control_events.h" |
78 | | #include "feature/dirauth/authmode.h" |
79 | | #include "feature/dircache/consdiffmgr.h" |
80 | | #include "feature/dirclient/dirclient_modes.h" |
81 | | #include "feature/dircommon/directory.h" |
82 | | #include "feature/hibernate/hibernate.h" |
83 | | #include "feature/hs/hs_cache.h" |
84 | | #include "feature/hs/hs_client.h" |
85 | | #include "feature/hs/hs_service.h" |
86 | | #include "feature/nodelist/microdesc.h" |
87 | | #include "feature/nodelist/networkstatus.h" |
88 | | #include "feature/nodelist/nodelist.h" |
89 | | #include "feature/nodelist/routerlist.h" |
90 | | #include "feature/relay/dns.h" |
91 | | #include "feature/relay/routerkeys.h" |
92 | | #include "feature/relay/routermode.h" |
93 | | #include "feature/relay/selftest.h" |
94 | | #include "feature/stats/geoip_stats.h" |
95 | | #include "feature/stats/predict_ports.h" |
96 | | #include "feature/stats/connstats.h" |
97 | | #include "feature/stats/rephist.h" |
98 | | #include "lib/buf/buffers.h" |
99 | | #include "lib/crypt_ops/crypto_rand.h" |
100 | | #include "lib/err/backtrace.h" |
101 | | #include "lib/tls/buffers_tls.h" |
102 | | |
103 | | #include "lib/net/buffers_net.h" |
104 | | #include "lib/evloop/compat_libevent.h" |
105 | | |
106 | | #include <event2/event.h> |
107 | | |
108 | | #include "core/or/cell_st.h" |
109 | | #include "core/or/entry_connection_st.h" |
110 | | #include "feature/nodelist/networkstatus_st.h" |
111 | | #include "core/or/or_connection_st.h" |
112 | | #include "app/config/or_state_st.h" |
113 | | #include "feature/nodelist/routerinfo_st.h" |
114 | | #include "core/or/socks_request_st.h" |
115 | | |
116 | | #ifdef HAVE_UNISTD_H |
117 | | #include <unistd.h> |
118 | | #endif |
119 | | |
120 | | #ifdef HAVE_SYSTEMD |
121 | | # if defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__) |
122 | | /* Systemd's use of gcc's __INCLUDE_LEVEL__ extension macro appears to confuse |
123 | | * Coverity. Here's a kludge to unconfuse it. |
124 | | */ |
125 | | # define __INCLUDE_LEVEL__ 2 |
126 | | #endif /* defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__) */ |
127 | | #include <systemd/sd-daemon.h> |
128 | | #endif /* defined(HAVE_SYSTEMD) */ |
129 | | |
130 | | /* Token bucket for all traffic. */ |
131 | | token_bucket_rw_t global_bucket; |
132 | | |
133 | | /* Token bucket for relayed traffic. */ |
134 | | token_bucket_rw_t global_relayed_bucket; |
135 | | |
136 | | /* XXX we might want to keep stats about global_relayed_*_bucket too. Or not. */ |
137 | | /** How many bytes have we read since we started the process? */ |
138 | | static uint64_t stats_n_bytes_read = 0; |
139 | | /** How many bytes have we written since we started the process? */ |
140 | | static uint64_t stats_n_bytes_written = 0; |
141 | | /** What time did this process start up? */ |
142 | | time_t time_of_process_start = 0; |
143 | | /** How many seconds have we been running? */ |
144 | | static long stats_n_seconds_working = 0; |
145 | | /** How many times have we returned from the main loop successfully? */ |
146 | | static uint64_t stats_n_main_loop_successes = 0; |
147 | | /** How many times have we received an error from the main loop? */ |
148 | | static uint64_t stats_n_main_loop_errors = 0; |
149 | | /** How many times have we returned from the main loop with no events. */ |
150 | | static uint64_t stats_n_main_loop_idle = 0; |
151 | | |
152 | | /** How often will we honor SIGNEWNYM requests? */ |
153 | 0 | #define MAX_SIGNEWNYM_RATE 10 |
154 | | /** When did we last process a SIGNEWNYM request? */ |
155 | | static time_t time_of_last_signewnym = 0; |
156 | | /** Is there a signewnym request we're currently waiting to handle? */ |
157 | | static int signewnym_is_pending = 0; |
158 | | /** Mainloop event for the deferred signewnym call. */ |
159 | | static mainloop_event_t *handle_deferred_signewnym_ev = NULL; |
160 | | /** How many times have we called newnym? */ |
161 | | static unsigned newnym_epoch = 0; |
162 | | |
163 | | /** Smartlist of all open connections. */ |
164 | | STATIC smartlist_t *connection_array = NULL; |
165 | | /** List of connections that have been marked for close and need to be freed |
166 | | * and removed from connection_array. */ |
167 | | static smartlist_t *closeable_connection_lst = NULL; |
168 | | /** List of linked connections that are currently reading data into their |
169 | | * inbuf from their partner's outbuf. */ |
170 | | static smartlist_t *active_linked_connection_lst = NULL; |
171 | | /** Flag: Set to true iff we entered the current libevent main loop via |
172 | | * <b>loop_once</b>. If so, there's no need to trigger a loopexit in order |
173 | | * to handle linked connections. */ |
174 | | static int called_loop_once = 0; |
175 | | /** Flag: if true, it's time to shut down, so the main loop should exit as |
176 | | * soon as possible. |
177 | | */ |
178 | | static int main_loop_should_exit = 0; |
179 | | /** The return value that the main loop should yield when it exits, if |
180 | | * main_loop_should_exit is true. |
181 | | */ |
182 | | static int main_loop_exit_value = 0; |
183 | | |
184 | | /** We set this to 1 when we've opened a circuit, so we can print a log |
185 | | * entry to inform the user that Tor is working. We set it to 0 when |
186 | | * we think the fact that we once opened a circuit doesn't mean we can do so |
187 | | * any longer (a big time jump happened, we notice our directory is |
188 | | * heinously out-of-date, etc.). |
189 | | */ |
190 | | static int can_complete_circuits = 0; |
191 | | |
192 | | /** How often do we check for router descriptors that we should download |
193 | | * when we have too little directory info? */ |
194 | 0 | #define GREEDY_DESCRIPTOR_RETRY_INTERVAL (10) |
195 | | /** How often do we check for router descriptors that we should download |
196 | | * when we have enough directory info? */ |
197 | 0 | #define LAZY_DESCRIPTOR_RETRY_INTERVAL (60) |
198 | | |
199 | | static int conn_close_if_marked(int i); |
200 | | static void connection_start_reading_from_linked_conn(connection_t *conn); |
201 | | static int connection_should_read_from_linked_conn(connection_t *conn); |
202 | | static void conn_read_callback(evutil_socket_t fd, short event, void *_conn); |
203 | | static void conn_write_callback(evutil_socket_t fd, short event, void *_conn); |
204 | | static void shutdown_did_not_work_callback(evutil_socket_t fd, short event, |
205 | | void *arg) ATTR_NORETURN; |
206 | | |
207 | | /**************************************************************************** |
208 | | * |
209 | | * This section contains accessors and other methods on the connection_array |
210 | | * variables (which are global within this file and unavailable outside it). |
211 | | * |
212 | | ****************************************************************************/ |
213 | | |
214 | | /** Return 1 if we have successfully built a circuit, and nothing has changed |
215 | | * to make us think that maybe we can't. |
216 | | */ |
217 | | int |
218 | | have_completed_a_circuit(void) |
219 | 0 | { |
220 | 0 | return can_complete_circuits; |
221 | 0 | } |
222 | | |
223 | | /** Note that we have successfully built a circuit, so that reachability |
224 | | * testing and introduction points and so on may be attempted. */ |
225 | | void |
226 | | note_that_we_completed_a_circuit(void) |
227 | 0 | { |
228 | 0 | can_complete_circuits = 1; |
229 | 0 | } |
230 | | |
231 | | /** Note that something has happened (like a clock jump, or DisableNetwork) to |
232 | | * make us think that maybe we can't complete circuits. */ |
233 | | void |
234 | | note_that_we_maybe_cant_complete_circuits(void) |
235 | 0 | { |
236 | 0 | can_complete_circuits = 0; |
237 | 0 | } |
238 | | |
239 | | /** Add <b>conn</b> to the array of connections that we can poll on. The |
240 | | * connection's socket must be set; the connection starts out |
241 | | * non-reading and non-writing. |
242 | | */ |
243 | | int |
244 | | connection_add_impl(connection_t *conn, int is_connecting) |
245 | 0 | { |
246 | 0 | tor_assert(conn); |
247 | 0 | tor_assert(SOCKET_OK(conn->s) || |
248 | 0 | conn->linked || |
249 | 0 | (conn->type == CONN_TYPE_AP && |
250 | 0 | TO_EDGE_CONN(conn)->is_dns_request)); |
251 | | |
252 | 0 | tor_assert(conn->conn_array_index == -1); /* can only connection_add once */ |
253 | 0 | conn->conn_array_index = smartlist_len(connection_array); |
254 | 0 | smartlist_add(connection_array, conn); |
255 | |
256 | 0 | (void) is_connecting; |
257 | |
258 | 0 | if (SOCKET_OK(conn->s) || conn->linked) { |
259 | 0 | conn->read_event = tor_event_new(tor_libevent_get_base(), |
260 | 0 | conn->s, EV_READ|EV_PERSIST, conn_read_callback, conn); |
261 | 0 | conn->write_event = tor_event_new(tor_libevent_get_base(), |
262 | 0 | conn->s, EV_WRITE|EV_PERSIST, conn_write_callback, conn); |
263 | | /* XXXX CHECK FOR NULL RETURN! */ |
264 | 0 | } |
265 | |
266 | 0 | log_debug(LD_NET,"new conn type %s, socket %d, address %s, n_conns %d.", |
267 | 0 | conn_type_to_string(conn->type), (int)conn->s, conn->address, |
268 | 0 | smartlist_len(connection_array)); |
269 | |
270 | 0 | return 0; |
271 | 0 | } |
272 | | |
273 | | /** Tell libevent that we don't care about <b>conn</b> any more. */ |
274 | | void |
275 | | connection_unregister_events(connection_t *conn) |
276 | 0 | { |
277 | 0 | tor_event_free(conn->read_event); |
278 | 0 | tor_event_free(conn->write_event); |
279 | 0 | if (conn->type == CONN_TYPE_AP_DNS_LISTENER) { |
280 | 0 | dnsserv_close_listener(conn); |
281 | 0 | } |
282 | 0 | } |
283 | | |
284 | | /** Remove the connection from the global list, and remove the |
285 | | * corresponding poll entry. Calling this function will shift the last |
286 | | * connection (if any) into the position occupied by conn. |
287 | | */ |
288 | | int |
289 | | connection_remove(connection_t *conn) |
290 | 0 | { |
291 | 0 | int current_index; |
292 | 0 | connection_t *tmp; |
293 | |
294 | 0 | tor_assert(conn); |
295 | | |
296 | 0 | log_debug(LD_NET,"removing socket %d (type %s), n_conns now %d", |
297 | 0 | (int)conn->s, conn_type_to_string(conn->type), |
298 | 0 | smartlist_len(connection_array)); |
299 | |
300 | 0 | if (conn->type == CONN_TYPE_AP && conn->socket_family == AF_UNIX) { |
301 | 0 | log_info(LD_NET, "Closing SOCKS Unix socket connection"); |
302 | 0 | } |
303 | |
304 | 0 | control_event_conn_bandwidth(conn); |
305 | |
306 | 0 | tor_assert(conn->conn_array_index >= 0); |
307 | 0 | current_index = conn->conn_array_index; |
308 | 0 | connection_unregister_events(conn); /* This is redundant, but cheap. */ |
309 | 0 | if (current_index == smartlist_len(connection_array)-1) { /* at the end */ |
310 | 0 | smartlist_del(connection_array, current_index); |
311 | 0 | return 0; |
312 | 0 | } |
313 | | |
314 | | /* replace this one with the one at the end */ |
315 | 0 | smartlist_del(connection_array, current_index); |
316 | 0 | tmp = smartlist_get(connection_array, current_index); |
317 | 0 | tmp->conn_array_index = current_index; |
318 | |
319 | 0 | return 0; |
320 | 0 | } |
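The swap-with-last step above is what makes removal O(1), at the price of reordering the array; it is also why the moved connection's conn_array_index must be patched afterward. The same idiom in miniature (hypothetical array API, not Tor's smartlist):

/* Remove array[idx] in O(1) by moving the last element into its slot.
 * Order is not preserved, so each element must track its own index. */
static void
swap_remove(void **array, int *count, int idx)
{
  array[idx] = array[*count - 1]; /* the last element fills the hole... */
  --*count;                       /* ...and the array shrinks by one */
  /* The caller must now update the moved element's stored index to idx. */
}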
321 | | |
322 | | /** If <b>conn</b> is an edge conn, remove it from the list |
323 | | * of conns on this circuit. If it's not an edge conn, |
324 | | * flush and send destroys for all circuits on this conn. |
325 | | * |
326 | | * Remove it from connection_array (if applicable) and |
327 | | * from closeable_connection_list. |
328 | | * |
329 | | * Then free it. |
330 | | */ |
331 | | static void |
332 | | connection_unlink(connection_t *conn) |
333 | 0 | { |
334 | 0 | connection_about_to_close_connection(conn); |
335 | 0 | if (conn->conn_array_index >= 0) { |
336 | 0 | connection_remove(conn); |
337 | 0 | } |
338 | 0 | if (conn->linked_conn) { |
339 | 0 | conn->linked_conn->linked_conn = NULL; |
340 | 0 | if (! conn->linked_conn->marked_for_close && |
341 | 0 | conn->linked_conn->reading_from_linked_conn) |
342 | 0 | connection_start_reading(conn->linked_conn); |
343 | 0 | conn->linked_conn = NULL; |
344 | 0 | } |
345 | 0 | smartlist_remove(closeable_connection_lst, conn); |
346 | 0 | smartlist_remove(active_linked_connection_lst, conn); |
347 | 0 | if (conn->type == CONN_TYPE_EXIT) { |
348 | 0 | assert_connection_edge_not_dns_pending(TO_EDGE_CONN(conn)); |
349 | 0 | } |
350 | 0 | if (conn->type == CONN_TYPE_OR) { |
351 | 0 | if (!tor_digest_is_zero(TO_OR_CONN(conn)->identity_digest)) |
352 | 0 | connection_or_clear_identity(TO_OR_CONN(conn)); |
353 | | /* connection_unlink() can only get called if the connection |
354 | | * was already on the closeable list, and it got there by |
355 | | * connection_mark_for_close(), which was called from |
356 | | * connection_or_close_normally() or |
357 | | * connection_or_close_for_error(), so the channel should |
358 | | * already be in CHANNEL_STATE_CLOSING, and then the |
359 | | * connection_about_to_close_connection() goes to |
360 | | * connection_or_about_to_close(), which calls channel_closed() |
361 | | * to notify the channel_t layer, and closes the channel, so |
362 | | * nothing more to do here to deal with the channel associated |
363 | | * with an orconn. |
364 | | */ |
365 | 0 | } |
366 | 0 | connection_free(conn); |
367 | 0 | } |
368 | | |
369 | | /** Event that invokes schedule_active_linked_connections_cb. */ |
370 | | static mainloop_event_t *schedule_active_linked_connections_event = NULL; |
371 | | |
372 | | /** |
373 | | * Callback: used to activate read events for all linked connections, so |
374 | | * libevent knows to call their read callbacks. This callback runs as a |
375 | | * postloop event, so that the events _it_ activates don't happen until |
376 | | * Libevent has a chance to check for other events. |
377 | | */ |
378 | | static void |
379 | | schedule_active_linked_connections_cb(mainloop_event_t *event, void *arg) |
380 | 0 | { |
381 | 0 | (void)event; |
382 | 0 | (void)arg; |
383 | | |
384 | | /* All active linked conns should get their read events activated, |
385 | | * so that libevent knows to run their callbacks. */ |
386 | 0 | SMARTLIST_FOREACH(active_linked_connection_lst, connection_t *, conn, |
387 | 0 | event_active(conn->read_event, EV_READ, 1)); |
388 | | |
389 | | /* Reactivate the event if we still have connections in the active list. |
390 | | * |
391 | | * A linked connection doesn't get woken up by I/O but rather artificially |
392 | | * by this event callback. It has directory data spooled in it and it is |
393 | | * sent incrementally by small chunks unless spool_eagerly is true. For that |
394 | | * to happen, we need to induce the activation of the read event so it can |
395 | | * be flushed. */ |
396 | 0 | if (smartlist_len(active_linked_connection_lst)) { |
397 | 0 | mainloop_event_activate(schedule_active_linked_connections_event); |
398 | 0 | } |
399 | 0 | } |
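Re-activating the event from its own callback is what keeps linked connections draining without starving other events; because this is a postloop event, Libevent services everything else between passes. A bare-Libevent sketch of the same self-rescheduling shape (hypothetical names; Tor's mainloop_event_t wrapper is what adds the postloop behavior):

#include <event2/event.h>
#include <stdbool.h>

static struct event *pump_ev; /* created once: event_new(base, -1, 0, pump_cb, NULL) */
static bool work_pending;     /* hypothetical "more to drain" flag */

static void
pump_cb(evutil_socket_t fd, short what, void *arg)
{
  (void)fd; (void)what; (void)arg;
  /* ... drain one chunk of work here, updating work_pending ... */
  if (work_pending)
    event_active(pump_ev, EV_READ, 1); /* queue another pass of this event */
}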
400 | | |
401 | | /** Initialize the global connection list, closeable connection list, |
402 | | * and active connection list. */ |
403 | | void |
404 | | tor_init_connection_lists(void) |
405 | 0 | { |
406 | 0 | if (!connection_array) |
407 | 0 | connection_array = smartlist_new(); |
408 | 0 | if (!closeable_connection_lst) |
409 | 0 | closeable_connection_lst = smartlist_new(); |
410 | 0 | if (!active_linked_connection_lst) |
411 | 0 | active_linked_connection_lst = smartlist_new(); |
412 | 0 | } |
413 | | |
414 | | /** Schedule <b>conn</b> to be closed. **/ |
415 | | void |
416 | | add_connection_to_closeable_list(connection_t *conn) |
417 | 0 | { |
418 | 0 | tor_assert(!smartlist_contains(closeable_connection_lst, conn)); |
419 | 0 | tor_assert(conn->marked_for_close); |
420 | 0 | assert_connection_ok(conn, time(NULL)); |
421 | 0 | smartlist_add(closeable_connection_lst, conn); |
422 | 0 | mainloop_schedule_postloop_cleanup(); |
423 | 0 | } |
424 | | |
425 | | /** Return 1 if conn is on the closeable list, else return 0. */ |
426 | | int |
427 | | connection_is_on_closeable_list(connection_t *conn) |
428 | 0 | { |
429 | 0 | return smartlist_contains(closeable_connection_lst, conn); |
430 | 0 | } |
431 | | |
432 | | /** Return true iff conn is in the current poll array. */ |
433 | | int |
434 | | connection_in_array(connection_t *conn) |
435 | 0 | { |
436 | 0 | return smartlist_contains(connection_array, conn); |
437 | 0 | } |
438 | | |
439 | | /** Return the global smartlist of all connections. The returned list |
440 | | * must not be modified. |
441 | | */ |
442 | | MOCK_IMPL(smartlist_t *, |
443 | | get_connection_array, (void)) |
444 | 0 | { |
445 | 0 | if (!connection_array) |
446 | 0 | connection_array = smartlist_new(); |
447 | 0 | return connection_array; |
448 | 0 | } |
449 | | |
450 | | /** |
451 | | * Return the amount of network traffic read, in bytes, over the life of this |
452 | | * process. |
453 | | */ |
454 | | MOCK_IMPL(uint64_t, |
455 | | get_bytes_read,(void)) |
456 | 0 | { |
457 | 0 | return stats_n_bytes_read; |
458 | 0 | } |
459 | | |
460 | | /** |
461 | | * Return the amount of network traffic written, in bytes, over the life of this |
462 | | * process. |
463 | | */ |
464 | | MOCK_IMPL(uint64_t, |
465 | | get_bytes_written,(void)) |
466 | 0 | { |
467 | 0 | return stats_n_bytes_written; |
468 | 0 | } |
469 | | |
470 | | /** |
471 | | * Increment the amount of network traffic read and written, over the life of |
472 | | * this process. |
473 | | */ |
474 | | void |
475 | | stats_increment_bytes_read_and_written(uint64_t r, uint64_t w) |
476 | 0 | { |
477 | 0 | stats_n_bytes_read += r; |
478 | 0 | stats_n_bytes_written += w; |
479 | 0 | } |
480 | | |
481 | | /** Set the event mask on <b>conn</b> to <b>events</b>. (The event |
482 | | * mask is a bitmask whose bits are READ_EVENT and WRITE_EVENT) |
483 | | */ |
484 | | void |
485 | | connection_watch_events(connection_t *conn, watchable_events_t events) |
486 | 0 | { |
487 | 0 | if (events & READ_EVENT) |
488 | 0 | connection_start_reading(conn); |
489 | 0 | else |
490 | 0 | connection_stop_reading(conn); |
491 | |
492 | 0 | if (events & WRITE_EVENT) |
493 | 0 | connection_start_writing(conn); |
494 | 0 | else |
495 | 0 | connection_stop_writing(conn); |
496 | 0 | } |
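Hypothetical usage, since the mask is simply the OR of the two bits (assumes a connection_t *conn in scope):

connection_watch_events(conn, READ_EVENT | WRITE_EVENT); /* watch both directions */
connection_watch_events(conn, READ_EVENT);               /* reads only */
connection_watch_events(conn, 0);                        /* watch nothing */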
497 | | |
498 | | /** Return true iff <b>conn</b> is listening for read events. */ |
499 | | int |
500 | | connection_is_reading(const connection_t *conn) |
501 | 0 | { |
502 | 0 | tor_assert(conn); |
503 | | |
504 | 0 | return conn->reading_from_linked_conn || |
505 | 0 | (conn->read_event && event_pending(conn->read_event, EV_READ, NULL)); |
506 | 0 | } |
507 | | |
508 | | /** Reset our main loop counters. */ |
509 | | void |
510 | | reset_main_loop_counters(void) |
511 | 0 | { |
512 | 0 | stats_n_main_loop_successes = 0; |
513 | 0 | stats_n_main_loop_errors = 0; |
514 | 0 | stats_n_main_loop_idle = 0; |
515 | 0 | } |
516 | | |
517 | | /** Increment the main loop success counter. */ |
518 | | static void |
519 | | increment_main_loop_success_count(void) |
520 | 0 | { |
521 | 0 | ++stats_n_main_loop_successes; |
522 | 0 | } |
523 | | |
524 | | /** Get the main loop success counter. */ |
525 | | uint64_t |
526 | | get_main_loop_success_count(void) |
527 | 0 | { |
528 | 0 | return stats_n_main_loop_successes; |
529 | 0 | } |
530 | | |
531 | | /** Increment the main loop error counter. */ |
532 | | static void |
533 | | increment_main_loop_error_count(void) |
534 | 0 | { |
535 | 0 | ++stats_n_main_loop_errors; |
536 | 0 | } |
537 | | |
538 | | /** Get the main loop error counter. */ |
539 | | uint64_t |
540 | | get_main_loop_error_count(void) |
541 | 0 | { |
542 | 0 | return stats_n_main_loop_errors; |
543 | 0 | } |
544 | | |
545 | | /** Increment the main loop idle counter. */ |
546 | | static void |
547 | | increment_main_loop_idle_count(void) |
548 | 0 | { |
549 | 0 | ++stats_n_main_loop_idle; |
550 | 0 | } |
551 | | |
552 | | /** Get the main loop idle counter. */ |
553 | | uint64_t |
554 | | get_main_loop_idle_count(void) |
555 | 0 | { |
556 | 0 | return stats_n_main_loop_idle; |
557 | 0 | } |
558 | | |
559 | | /** Check whether <b>conn</b> is correct in having (or not having) a |
560 | | * read/write event (passed in <b>ev</b>). On success, return 0. On failure, |
561 | | * log a warning and return -1. */ |
562 | | static int |
563 | | connection_check_event(connection_t *conn, struct event *ev) |
564 | 0 | { |
565 | 0 | int bad; |
566 | |
567 | 0 | if (conn->type == CONN_TYPE_AP && TO_EDGE_CONN(conn)->is_dns_request) { |
568 | | /* DNS requests which we launch through the dnsserv.c module do not have |
569 | | * any underlying socket or any underlying linked connection, so they |
570 | | * shouldn't have any attached events either. |
571 | | */ |
572 | 0 | bad = ev != NULL; |
573 | 0 | } else { |
574 | | /* Everything else should have an underlying socket, or a linked |
575 | | * connection (which is also tracked with a read_event/write_event pair). |
576 | | */ |
577 | 0 | bad = ev == NULL; |
578 | 0 | } |
579 | |
580 | 0 | if (bad) { |
581 | 0 | log_warn(LD_BUG, "Event missing on connection %p [%s;%s]. " |
582 | 0 | "socket=%d. linked=%d. " |
583 | 0 | "is_dns_request=%d. Marked_for_close=%s:%d", |
584 | 0 | conn, |
585 | 0 | conn_type_to_string(conn->type), |
586 | 0 | conn_state_to_string(conn->type, conn->state), |
587 | 0 | (int)conn->s, (int)conn->linked, |
588 | 0 | (conn->type == CONN_TYPE_AP && |
589 | 0 | TO_EDGE_CONN(conn)->is_dns_request), |
590 | 0 | conn->marked_for_close_file ? conn->marked_for_close_file : "-", |
591 | 0 | conn->marked_for_close |
592 | 0 | ); |
593 | 0 | log_backtrace(LOG_WARN, LD_BUG, "Backtrace attached."); |
594 | 0 | return -1; |
595 | 0 | } |
596 | 0 | return 0; |
597 | 0 | } |
598 | | |
599 | | /** Tell the main loop to stop notifying <b>conn</b> of any read events. */ |
600 | | MOCK_IMPL(void, |
601 | | connection_stop_reading,(connection_t *conn)) |
602 | 0 | { |
603 | 0 | tor_assert(conn); |
604 | | |
605 | 0 | if (connection_check_event(conn, conn->read_event) < 0) { |
606 | 0 | return; |
607 | 0 | } |
608 | | |
609 | 0 | if (conn->linked) { |
610 | 0 | conn->reading_from_linked_conn = 0; |
611 | 0 | connection_stop_reading_from_linked_conn(conn); |
612 | 0 | } else { |
613 | 0 | if (event_del(conn->read_event)) |
614 | 0 | log_warn(LD_NET, "Error from libevent setting read event state for %d " |
615 | 0 | "to unwatched: %s", |
616 | 0 | (int)conn->s, |
617 | 0 | tor_socket_strerror(tor_socket_errno(conn->s))); |
618 | 0 | } |
619 | 0 | } |
620 | | |
621 | | /** Tell the main loop to start notifying <b>conn</b> of any read events. */ |
622 | | MOCK_IMPL(void, |
623 | | connection_start_reading,(connection_t *conn)) |
624 | 0 | { |
625 | 0 | tor_assert(conn); |
626 | | |
627 | 0 | if (connection_check_event(conn, conn->read_event) < 0) { |
628 | 0 | return; |
629 | 0 | } |
630 | | |
631 | 0 | if (conn->linked) { |
632 | 0 | conn->reading_from_linked_conn = 1; |
633 | 0 | if (connection_should_read_from_linked_conn(conn)) |
634 | 0 | connection_start_reading_from_linked_conn(conn); |
635 | 0 | } else { |
636 | 0 | if (CONN_IS_EDGE(conn) && TO_EDGE_CONN(conn)->xoff_received) { |
637 | | /* We should not get called here if we're waiting for an XON, but |
638 | | * belt-and-suspenders */ |
639 | 0 | log_info(LD_NET, |
640 | 0 | "Request to start reading on an edgeconn blocked with XOFF"); |
641 | 0 | return; |
642 | 0 | } |
643 | 0 | if (event_add(conn->read_event, NULL)) |
644 | 0 | log_warn(LD_NET, "Error from libevent setting read event state for %d " |
645 | 0 | "to watched: %s", |
646 | 0 | (int)conn->s, |
647 | 0 | tor_socket_strerror(tor_socket_errno(conn->s))); |
648 | | |
649 | | /* Process the inbuf if it is not empty because the only way to empty it is |
650 | | * through a read event or a SENDME, which might not come if the package |
651 | | * window is proper or if the application has nothing more for us to read. |
652 | | * |
653 | | * If this is not done here, we risk having data lingering in the inbuf |
654 | | * forever. */ |
655 | 0 | if (conn->inbuf && buf_datalen(conn->inbuf) > 0) { |
656 | 0 | connection_process_inbuf(conn, 1); |
657 | 0 | } |
658 | 0 | } |
659 | 0 | } |
660 | | |
661 | | /** Return true iff <b>conn</b> is listening for write events. */ |
662 | | int |
663 | | connection_is_writing(connection_t *conn) |
664 | 0 | { |
665 | 0 | tor_assert(conn); |
666 | | |
667 | 0 | return conn->writing_to_linked_conn || |
668 | 0 | (conn->write_event && event_pending(conn->write_event, EV_WRITE, NULL)); |
669 | 0 | } |
670 | | |
671 | | /** Tell the main loop to stop notifying <b>conn</b> of any write events. */ |
672 | | MOCK_IMPL(void, |
673 | | connection_stop_writing,(connection_t *conn)) |
674 | 0 | { |
675 | 0 | tor_assert(conn); |
676 | | |
677 | 0 | if (connection_check_event(conn, conn->write_event) < 0) { |
678 | 0 | return; |
679 | 0 | } |
680 | | |
681 | 0 | if (conn->linked) { |
682 | 0 | conn->writing_to_linked_conn = 0; |
683 | 0 | if (conn->linked_conn) |
684 | 0 | connection_stop_reading_from_linked_conn(conn->linked_conn); |
685 | 0 | } else { |
686 | 0 | if (event_del(conn->write_event)) |
687 | 0 | log_warn(LD_NET, "Error from libevent setting write event state for %d " |
688 | 0 | "to unwatched: %s", |
689 | 0 | (int)conn->s, |
690 | 0 | tor_socket_strerror(tor_socket_errno(conn->s))); |
691 | 0 | } |
692 | 0 | } |
693 | | |
694 | | /** Tell the main loop to start notifying <b>conn</b> of any write events. */ |
695 | | MOCK_IMPL(void, |
696 | | connection_start_writing,(connection_t *conn)) |
697 | 0 | { |
698 | 0 | tor_assert(conn); |
699 | | |
700 | 0 | if (connection_check_event(conn, conn->write_event) < 0) { |
701 | 0 | return; |
702 | 0 | } |
703 | | |
704 | 0 | if (conn->linked) { |
705 | 0 | conn->writing_to_linked_conn = 1; |
706 | 0 | if (conn->linked_conn && |
707 | 0 | connection_should_read_from_linked_conn(conn->linked_conn)) |
708 | 0 | connection_start_reading_from_linked_conn(conn->linked_conn); |
709 | 0 | } else { |
710 | 0 | if (event_add(conn->write_event, NULL)) |
711 | 0 | log_warn(LD_NET, "Error from libevent setting write event state for %d " |
712 | 0 | "to watched: %s", |
713 | 0 | (int)conn->s, |
714 | 0 | tor_socket_strerror(tor_socket_errno(conn->s))); |
715 | 0 | } |
716 | 0 | } |
717 | | |
718 | | /** Return true iff <b>conn</b> is a linked conn, and reading from the conn |
719 | | * linked to it would be good and feasible. (Reading is "feasible" if the |
720 | | * other conn exists and has data in its outbuf, and is "good" if we have our |
721 | | * reading_from_linked_conn flag set and the other conn has its |
722 | | * writing_to_linked_conn flag set.)*/ |
723 | | static int |
724 | | connection_should_read_from_linked_conn(connection_t *conn) |
725 | 0 | { |
726 | 0 | if (conn->linked && conn->reading_from_linked_conn) { |
727 | 0 | if (! conn->linked_conn || |
728 | 0 | (conn->linked_conn->writing_to_linked_conn && |
729 | 0 | buf_datalen(conn->linked_conn->outbuf))) |
730 | 0 | return 1; |
731 | 0 | } |
732 | 0 | return 0; |
733 | 0 | } |
734 | | |
735 | | /** Event to run 'shutdown did not work callback'. */ |
736 | | static struct event *shutdown_did_not_work_event = NULL; |
737 | | |
738 | | /** Failsafe measure that should never actually be necessary: If |
739 | | * tor_shutdown_event_loop_and_exit() somehow doesn't successfully exit the |
740 | | * event loop, then this callback will kill Tor with an assertion failure |
741 | | * ten seconds later. |
742 | | */ |
743 | | static void |
744 | | shutdown_did_not_work_callback(evutil_socket_t fd, short event, void *arg) |
745 | 0 | { |
746 | | // LCOV_EXCL_START |
747 | 0 | (void) fd; |
748 | 0 | (void) event; |
749 | 0 | (void) arg; |
750 | 0 | tor_assert_unreached(); |
751 | | // LCOV_EXCL_STOP |
752 | 0 | } |
753 | | |
754 | | #ifdef ENABLE_RESTART_DEBUGGING |
755 | | static struct event *tor_shutdown_event_loop_for_restart_event = NULL; |
756 | | static void |
757 | | tor_shutdown_event_loop_for_restart_cb( |
758 | | evutil_socket_t fd, short event, void *arg) |
759 | | { |
760 | | (void)fd; |
761 | | (void)event; |
762 | | (void)arg; |
763 | | tor_event_free(tor_shutdown_event_loop_for_restart_event); |
764 | | tor_shutdown_event_loop_and_exit(0); |
765 | | } |
766 | | #endif /* defined(ENABLE_RESTART_DEBUGGING) */ |
767 | | |
768 | | /** |
769 | | * After finishing the current callback (if any), shut down the main loop, |
770 | | * clean up the process, and exit with <b>exitcode</b>. |
771 | | */ |
772 | | void |
773 | | tor_shutdown_event_loop_and_exit(int exitcode) |
774 | 0 | { |
775 | 0 | if (main_loop_should_exit) |
776 | 0 | return; /* Ignore multiple calls to this function. */ |
777 | | |
778 | 0 | main_loop_should_exit = 1; |
779 | 0 | main_loop_exit_value = exitcode; |
780 | |
|
781 | 0 | if (! tor_libevent_is_initialized()) { |
782 | 0 | return; /* No event loop to shut down. */ |
783 | 0 | } |
784 | | |
785 | | /* Die with an assertion failure in ten seconds, if for some reason we don't |
786 | | * exit normally. */ |
787 | | /* XXXX We should consider removing this code if it's never used. */ |
788 | 0 | struct timeval ten_seconds = { 10, 0 }; |
789 | 0 | shutdown_did_not_work_event = tor_evtimer_new( |
790 | 0 | tor_libevent_get_base(), |
791 | 0 | shutdown_did_not_work_callback, NULL); |
792 | 0 | event_add(shutdown_did_not_work_event, &ten_seconds); |
793 | | |
794 | | /* Unlike exit_loop_after_delay(), exit_loop_after_callback |
795 | | * prevents other callbacks from running. */ |
796 | 0 | tor_libevent_exit_loop_after_callback(tor_libevent_get_base()); |
797 | 0 | } |
798 | | |
799 | | /** Return true iff tor_shutdown_event_loop_and_exit() has been called. */ |
800 | | int |
801 | | tor_event_loop_shutdown_is_pending(void) |
802 | 0 | { |
803 | 0 | return main_loop_should_exit; |
804 | 0 | } |
805 | | |
806 | | /** Helper: Tell the main loop to begin reading bytes into <b>conn</b> from |
807 | | * its linked connection, if it is not doing so already. Called by |
808 | | * connection_start_reading and connection_start_writing as appropriate. */ |
809 | | static void |
810 | | connection_start_reading_from_linked_conn(connection_t *conn) |
811 | 0 | { |
812 | 0 | tor_assert(conn); |
813 | 0 | tor_assert(conn->linked == 1); |
814 | | |
815 | 0 | if (!conn->active_on_link) { |
816 | 0 | conn->active_on_link = 1; |
817 | 0 | smartlist_add(active_linked_connection_lst, conn); |
818 | 0 | mainloop_event_activate(schedule_active_linked_connections_event); |
819 | 0 | } else { |
820 | 0 | tor_assert(smartlist_contains(active_linked_connection_lst, conn)); |
821 | 0 | } |
822 | 0 | } |
823 | | |
824 | | /** Tell the main loop to stop reading bytes into <b>conn</b> from its linked |
825 | | * connection, if it is currently doing so. Called by connection_stop_reading, |
826 | | * connection_stop_writing, and connection_read. */ |
827 | | void |
828 | | connection_stop_reading_from_linked_conn(connection_t *conn) |
829 | 0 | { |
830 | 0 | tor_assert(conn); |
831 | 0 | tor_assert(conn->linked == 1); |
832 | | |
833 | 0 | if (conn->active_on_link) { |
834 | 0 | conn->active_on_link = 0; |
835 | | /* FFFF We could keep an index here so we can smartlist_del |
836 | | * cleanly. On the other hand, this doesn't show up on profiles, |
837 | | * so let's leave it alone for now. */ |
838 | 0 | smartlist_remove(active_linked_connection_lst, conn); |
839 | 0 | } else { |
840 | 0 | tor_assert(!smartlist_contains(active_linked_connection_lst, conn)); |
841 | 0 | } |
842 | 0 | } |
843 | | |
844 | | /** Close all connections that have been scheduled to get closed. */ |
845 | | STATIC void |
846 | | close_closeable_connections(void) |
847 | 0 | { |
848 | 0 | int i; |
849 | 0 | for (i = 0; i < smartlist_len(closeable_connection_lst); ) { |
850 | 0 | connection_t *conn = smartlist_get(closeable_connection_lst, i); |
851 | 0 | if (conn->conn_array_index < 0) { |
852 | 0 | connection_unlink(conn); /* blow it away right now */ |
853 | 0 | } else { |
854 | 0 | if (!conn_close_if_marked(conn->conn_array_index)) |
855 | 0 | ++i; |
856 | 0 | } |
857 | 0 | } |
858 | 0 | } |
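Note the loop shape: i advances only when nothing was closed, because conn_close_if_marked() ends up calling connection_remove(), which moves a different connection into slot i. The same filter-in-place idiom in miniature (hypothetical list API):

for (i = 0; i < list_len(lst); ) {
  if (should_close(list_get(lst, i)))
    list_swap_remove(lst, i); /* slot i now holds a new, unexamined element */
  else
    ++i;                      /* advance only past elements we keep */
}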
859 | | |
860 | | /** Count moribund connections for the OOS handler */ |
861 | | MOCK_IMPL(int, |
862 | | connection_count_moribund, (void)) |
863 | 0 | { |
864 | 0 | int moribund = 0; |
865 | | |
866 | | /* |
867 | | * Count things we'll try to kill when close_closeable_connections() |
868 | | * runs next. |
869 | | */ |
870 | 0 | SMARTLIST_FOREACH_BEGIN(closeable_connection_lst, connection_t *, conn) { |
871 | 0 | if (SOCKET_OK(conn->s) && connection_is_moribund(conn)) ++moribund; |
872 | 0 | } SMARTLIST_FOREACH_END(conn); |
873 | |
874 | 0 | return moribund; |
875 | 0 | } |
876 | | |
877 | | /** Libevent callback: this gets invoked when (connection_t*)<b>conn</b> has |
878 | | * some data to read. */ |
879 | | static void |
880 | | conn_read_callback(evutil_socket_t fd, short event, void *_conn) |
881 | 0 | { |
882 | 0 | connection_t *conn = _conn; |
883 | 0 | (void)fd; |
884 | 0 | (void)event; |
885 | |
|
886 | 0 | log_debug(LD_NET,"socket %d wants to read.",(int)conn->s); |
887 | | |
888 | | /* assert_connection_ok(conn, time(NULL)); */ |
889 | | |
890 | | /* Handle marked for close connections early */ |
891 | 0 | if (conn->marked_for_close && connection_is_reading(conn)) { |
892 | | /* Libevent says we can read, but we are marked for close so we will never |
893 | | * try to read again. We will try to close the connection below inside of |
894 | | * close_closeable_connections(), but let's make sure not to cause Libevent |
895 | | * to spin on conn_read_callback() while we wait for the socket to let us |
896 | | * flush to it.*/ |
897 | 0 | connection_stop_reading(conn); |
898 | 0 | } |
899 | |
900 | 0 | if (connection_handle_read(conn) < 0) { |
901 | 0 | if (!conn->marked_for_close) { |
902 | 0 | #ifndef _WIN32 |
903 | 0 | log_warn(LD_BUG,"Unhandled error on read for %s connection " |
904 | 0 | "(fd %d); removing", |
905 | 0 | conn_type_to_string(conn->type), (int)conn->s); |
906 | 0 | tor_fragile_assert(); |
907 | 0 | #endif /* !defined(_WIN32) */ |
908 | 0 | if (CONN_IS_EDGE(conn)) |
909 | 0 | connection_edge_end_errno(TO_EDGE_CONN(conn)); |
910 | 0 | connection_mark_for_close(conn); |
911 | 0 | } |
912 | 0 | } |
913 | 0 | assert_connection_ok(conn, time(NULL)); |
914 | |
915 | 0 | if (smartlist_len(closeable_connection_lst)) |
916 | 0 | close_closeable_connections(); |
917 | 0 | } |
918 | | |
919 | | /** Libevent callback: this gets invoked when (connection_t*)<b>conn</b> has |
920 | | * some data to write. */ |
921 | | static void |
922 | | conn_write_callback(evutil_socket_t fd, short events, void *_conn) |
923 | 0 | { |
924 | 0 | connection_t *conn = _conn; |
925 | 0 | (void)fd; |
926 | 0 | (void)events; |
927 | |
928 | 0 | LOG_FN_CONN(conn, (LOG_DEBUG, LD_NET, "socket %d wants to write.", |
929 | 0 | (int)conn->s)); |
930 | | |
931 | | /* assert_connection_ok(conn, time(NULL)); */ |
932 | | |
933 | 0 | if (connection_handle_write(conn, 0) < 0) { |
934 | 0 | if (!conn->marked_for_close) { |
935 | | /* this connection is broken. remove it. */ |
936 | 0 | log_fn(LOG_WARN,LD_BUG, |
937 | 0 | "unhandled error on write for %s connection (fd %d); removing", |
938 | 0 | conn_type_to_string(conn->type), (int)conn->s); |
939 | 0 | tor_fragile_assert(); |
940 | 0 | if (CONN_IS_EDGE(conn)) { |
941 | | /* otherwise we cry wolf about duplicate close */ |
942 | 0 | edge_connection_t *edge_conn = TO_EDGE_CONN(conn); |
943 | 0 | if (!edge_conn->end_reason) |
944 | 0 | edge_conn->end_reason = END_STREAM_REASON_INTERNAL; |
945 | 0 | edge_conn->edge_has_sent_end = 1; |
946 | 0 | } |
947 | 0 | connection_close_immediate(conn); /* So we don't try to flush. */ |
948 | 0 | connection_mark_for_close(conn); |
949 | 0 | } |
950 | 0 | } |
951 | 0 | assert_connection_ok(conn, time(NULL)); |
952 | |
953 | 0 | if (smartlist_len(closeable_connection_lst)) |
954 | 0 | close_closeable_connections(); |
955 | 0 | } |
956 | | |
957 | | /** If the connection at connection_array[i] is marked for close, then: |
958 | | * - If it has data that it wants to flush, try to flush it. |
959 | | * - If it _still_ has data to flush, and conn->hold_open_until_flushed is |
960 | | * true, then leave the connection open and return. |
961 | | * - Otherwise, remove the connection from connection_array and from |
962 | | * all other lists, close it, and free it. |
963 | | * Returns 1 if the connection was closed, 0 otherwise. |
964 | | */ |
965 | | static int |
966 | | conn_close_if_marked(int i) |
967 | 0 | { |
968 | 0 | connection_t *conn; |
969 | 0 | int retval; |
970 | 0 | time_t now; |
971 | |
972 | 0 | conn = smartlist_get(connection_array, i); |
973 | 0 | if (!conn->marked_for_close) |
974 | 0 | return 0; /* nothing to see here, move along */ |
975 | 0 | now = time(NULL); |
976 | 0 | assert_connection_ok(conn, now); |
977 | |
978 | 0 | log_debug(LD_NET,"Cleaning up connection (fd "TOR_SOCKET_T_FORMAT").", |
979 | 0 | conn->s); |
980 | | |
981 | | /* If the connection we are about to close was trying to connect to |
982 | | a proxy server and failed, the client won't be able to use that |
983 | | proxy. We should warn the user about this. */ |
984 | 0 | if (conn->proxy_state == PROXY_INFANT) |
985 | 0 | log_failed_proxy_connection(conn); |
986 | |
987 | 0 | if ((SOCKET_OK(conn->s) || conn->linked_conn) && |
988 | 0 | connection_wants_to_flush(conn)) { |
989 | | /* s == -1 means it's an incomplete edge connection, or that the socket |
990 | | * has already been closed as unflushable. */ |
991 | 0 | ssize_t sz = connection_bucket_write_limit(conn, now); |
992 | 0 | if (!conn->hold_open_until_flushed) |
993 | 0 | log_info(LD_NET, |
994 | 0 | "Conn (addr %s, fd %d, type %s, state %d) marked, but wants " |
995 | 0 | "to flush %"TOR_PRIuSZ" bytes. (Marked at %s:%d)", |
996 | 0 | escaped_safe_str_client(conn->address), |
997 | 0 | (int)conn->s, conn_type_to_string(conn->type), conn->state, |
998 | 0 | connection_get_outbuf_len(conn), |
999 | 0 | conn->marked_for_close_file, conn->marked_for_close); |
1000 | 0 | if (conn->linked_conn) { |
1001 | 0 | retval = (int) buf_move_all(conn->linked_conn->inbuf, conn->outbuf); |
1002 | 0 | if (retval >= 0) { |
1003 | | /* The linked conn will notice that it has data when it notices that |
1004 | | * we're gone. */ |
1005 | 0 | connection_start_reading_from_linked_conn(conn->linked_conn); |
1006 | 0 | } |
1007 | 0 | log_debug(LD_GENERAL, "Flushed last %d bytes from a linked conn; " |
1008 | 0 | "%d left; wants-to-flush==%d", retval, |
1009 | 0 | (int)connection_get_outbuf_len(conn), |
1010 | 0 | connection_wants_to_flush(conn)); |
1011 | 0 | } else if (connection_speaks_cells(conn)) { |
1012 | 0 | if (conn->state == OR_CONN_STATE_OPEN) { |
1013 | 0 | retval = buf_flush_to_tls(conn->outbuf, TO_OR_CONN(conn)->tls, sz); |
1014 | 0 | } else |
1015 | 0 | retval = -1; /* never flush non-open broken tls connections */ |
1016 | 0 | } else { |
1017 | 0 | retval = buf_flush_to_socket(conn->outbuf, conn->s, sz); |
1018 | 0 | } |
1019 | 0 | if (retval >= 0 && /* Technically, we could survive things like |
1020 | | TLS_WANT_WRITE here. But don't bother for now. */ |
1021 | 0 | conn->hold_open_until_flushed && connection_wants_to_flush(conn)) { |
1022 | 0 | if (retval > 0) { |
1023 | 0 | LOG_FN_CONN(conn, (LOG_INFO,LD_NET, |
1024 | 0 | "Holding conn (fd %d) open for more flushing.", |
1025 | 0 | (int)conn->s)); |
1026 | 0 | conn->timestamp_last_write_allowed = now; /* reset so we can flush |
1027 | | * more */ |
1028 | 0 | } else if (sz == 0) { |
1029 | | /* Also, retval==0. If we get here, we didn't want to write anything |
1030 | | * (because of rate-limiting) and we didn't. */ |
1031 | | |
1032 | | /* Connection must flush before closing, but it's being rate-limited. |
1033 | | * Let's remove from Libevent, and mark it as blocked on bandwidth |
1034 | | * so it will be re-added on next token bucket refill. Prevents |
1035 | | * busy Libevent loops where we keep ending up here and returning |
1036 | | * 0 until we are no longer blocked on bandwidth. |
1037 | | */ |
1038 | 0 | connection_consider_empty_write_buckets(conn); |
1039 | | /* Make sure that consider_empty_buckets really disabled the |
1040 | | * connection: */ |
1041 | 0 | if (BUG(connection_is_writing(conn))) { |
1042 | 0 | connection_write_bw_exhausted(conn, true); |
1043 | 0 | } |
1044 | | |
1045 | | /* The connection is being held due to write rate limit and thus will |
1046 | | * flush its data later. We need to stop reading because this |
1047 | | * connection is about to be closed once flushed. It should not |
1048 | | * process anything more coming in at this stage. */ |
1049 | 0 | connection_stop_reading(conn); |
1050 | 0 | } |
1051 | 0 | return 0; |
1052 | 0 | } |
1053 | 0 | if (connection_wants_to_flush(conn)) { |
1054 | 0 | log_fn(LOG_INFO, LD_NET, "We stalled too much while trying to write %d " |
1055 | 0 | "bytes to address %s. If this happens a lot, either " |
1056 | 0 | "something is wrong with your network connection, or " |
1057 | 0 | "something is wrong with theirs. " |
1058 | 0 | "(fd %d, type %s, state %d, marked at %s:%d).", |
1059 | 0 | (int)connection_get_outbuf_len(conn), |
1060 | 0 | escaped_safe_str_client(conn->address), |
1061 | 0 | (int)conn->s, conn_type_to_string(conn->type), conn->state, |
1062 | 0 | conn->marked_for_close_file, |
1063 | 0 | conn->marked_for_close); |
1064 | 0 | } |
1065 | 0 | } |
1066 | | |
1067 | 0 | connection_unlink(conn); /* unlink, remove, free */ |
1068 | 0 | return 1; |
1069 | 0 | } |
1070 | | |
1071 | | /** Implementation for directory_all_unreachable. This is done in a callback, |
1072 | | * since otherwise it would complicate Tor's control-flow graph beyond all |
1073 | | * reason. |
1074 | | */ |
1075 | | static void |
1076 | | directory_all_unreachable_cb(mainloop_event_t *event, void *arg) |
1077 | 0 | { |
1078 | 0 | (void)event; |
1079 | 0 | (void)arg; |
1080 | |
1081 | 0 | connection_t *conn; |
1082 | |
1083 | 0 | while ((conn = connection_get_by_type_state(CONN_TYPE_AP, |
1084 | 0 | AP_CONN_STATE_CIRCUIT_WAIT))) { |
1085 | 0 | entry_connection_t *entry_conn = TO_ENTRY_CONN(conn); |
1086 | 0 | log_notice(LD_NET, |
1087 | 0 | "Is your network connection down? " |
1088 | 0 | "Failing connection to '%s:%d'.", |
1089 | 0 | safe_str_client(entry_conn->socks_request->address), |
1090 | 0 | entry_conn->socks_request->port); |
1091 | 0 | connection_mark_unattached_ap(entry_conn, |
1092 | 0 | END_STREAM_REASON_NET_UNREACHABLE); |
1093 | 0 | } |
1094 | 0 | control_event_general_error("DIR_ALL_UNREACHABLE"); |
1095 | 0 | } |
1096 | | |
1097 | | static mainloop_event_t *directory_all_unreachable_cb_event = NULL; |
1098 | | |
1099 | | /** We've just tried every dirserver we know about, and none of |
1100 | | * them were reachable. Assume the network is down. Change state |
1101 | | * so next time an application connection arrives we'll delay it |
1102 | | * and try another directory fetch. Kill off all the circuit_wait |
1103 | | * streams that are waiting now, since they will all timeout anyway. |
1104 | | * streams that are waiting now, since they will all time out anyway. |
1105 | | void |
1106 | | directory_all_unreachable(time_t now) |
1107 | 0 | { |
1108 | 0 | (void)now; |
1109 | |
1110 | 0 | reset_uptime(); /* reset it */ |
1111 | |
1112 | 0 | if (!directory_all_unreachable_cb_event) { |
1113 | 0 | directory_all_unreachable_cb_event = |
1114 | 0 | mainloop_event_new(directory_all_unreachable_cb, NULL); |
1115 | 0 | tor_assert(directory_all_unreachable_cb_event); |
1116 | 0 | } |
1117 | | |
1118 | 0 | mainloop_event_activate(directory_all_unreachable_cb_event); |
1119 | 0 | } |
1120 | | |
1121 | | /** This function is called whenever we successfully pull down some new |
1122 | | * network statuses or server descriptors. */ |
1123 | | void |
1124 | | directory_info_has_arrived(time_t now, int from_cache, int suppress_logs) |
1125 | 0 | { |
1126 | 0 | const or_options_t *options = get_options(); |
1127 | | |
1128 | | /* if we have enough dir info, then update our guard status with |
1129 | | * whatever we just learned. */ |
1130 | 0 | int invalidate_circs = guards_update_all(); |
1131 | |
1132 | 0 | if (invalidate_circs) { |
1133 | 0 | circuit_mark_all_unused_circs(); |
1134 | 0 | circuit_mark_all_dirty_circs_as_unusable(); |
1135 | 0 | } |
1136 | |
1137 | 0 | if (!router_have_minimum_dir_info()) { |
1138 | 0 | int quiet = suppress_logs || from_cache || |
1139 | 0 | dirclient_too_idle_to_fetch_descriptors(options, now); |
1140 | 0 | tor_log(quiet ? LOG_INFO : LOG_NOTICE, LD_DIR, |
1141 | 0 | "I learned some more directory information, but not enough to " |
1142 | 0 | "build a circuit: %s", get_dir_info_status_string()); |
1143 | 0 | update_all_descriptor_downloads(now); |
1144 | 0 | return; |
1145 | 0 | } else { |
1146 | 0 | if (dirclient_fetches_from_authorities(options)) { |
1147 | 0 | update_all_descriptor_downloads(now); |
1148 | 0 | } |
1149 | | |
1150 | | /* Don't even bother trying to get extrainfo until the rest of our |
1151 | | * directory info is up-to-date */ |
1152 | 0 | if (options->DownloadExtraInfo) |
1153 | 0 | update_extrainfo_downloads(now); |
1154 | 0 | } |
1155 | | |
1156 | 0 | if (server_mode(options) && !net_is_disabled() && !from_cache && |
1157 | 0 | (have_completed_a_circuit() || !any_predicted_circuits(now))) |
1158 | 0 | router_do_reachability_checks(); |
1159 | 0 | } |
1160 | | |
1161 | | /** Perform regular maintenance tasks for a single connection. This |
1162 | | * function gets run once per second per connection by run_scheduled_events. |
1163 | | */ |
1164 | | static void |
1165 | | run_connection_housekeeping(int i, time_t now) |
1166 | 0 | { |
1167 | 0 | cell_t cell; |
1168 | 0 | connection_t *conn = smartlist_get(connection_array, i); |
1169 | 0 | const or_options_t *options = get_options(); |
1170 | 0 | or_connection_t *or_conn; |
1171 | 0 | channel_t *chan = NULL; |
1172 | 0 | int have_any_circuits; |
1173 | 0 | int past_keepalive = |
1174 | 0 | now >= conn->timestamp_last_write_allowed + options->KeepalivePeriod; |
1175 | |
1176 | 0 | if (conn->outbuf && !connection_get_outbuf_len(conn) && |
1177 | 0 | conn->type == CONN_TYPE_OR) |
1178 | 0 | TO_OR_CONN(conn)->timestamp_lastempty = now; |
1179 | |
1180 | 0 | if (conn->marked_for_close) { |
1181 | | /* nothing to do here */ |
1182 | 0 | return; |
1183 | 0 | } |
1184 | | |
1185 | | /* Expire any directory connections that haven't been active (sent |
1186 | | * if a server or received if a client) for 5 min */ |
1187 | 0 | if (conn->type == CONN_TYPE_DIR && |
1188 | 0 | ((DIR_CONN_IS_SERVER(conn) && |
1189 | 0 | conn->timestamp_last_write_allowed |
1190 | 0 | + options->TestingDirConnectionMaxStall < now) || |
1191 | 0 | (!DIR_CONN_IS_SERVER(conn) && |
1192 | 0 | conn->timestamp_last_read_allowed |
1193 | 0 | + options->TestingDirConnectionMaxStall < now))) { |
1194 | 0 | log_info(LD_DIR,"Expiring wedged directory conn (fd %d, purpose %d)", |
1195 | 0 | (int)conn->s, conn->purpose); |
1196 | | /* This check is temporary; it's to let us know whether we should consider |
1197 | | * parsing partial serverdesc responses. */ |
1198 | 0 | if (conn->purpose == DIR_PURPOSE_FETCH_SERVERDESC && |
1199 | 0 | connection_get_inbuf_len(conn) >= 1024) { |
1200 | 0 | log_info(LD_DIR,"Trying to extract information from wedged server desc " |
1201 | 0 | "download."); |
1202 | 0 | connection_dir_reached_eof(TO_DIR_CONN(conn)); |
1203 | 0 | } else { |
1204 | 0 | connection_mark_for_close(conn); |
1205 | 0 | } |
1206 | 0 | return; |
1207 | 0 | } |
1208 | | |
1209 | 0 | if (!connection_speaks_cells(conn)) |
1210 | 0 | return; /* we're all done here, the rest is just for OR conns */ |
1211 | | |
1212 | | /* If we haven't flushed to an OR connection for a while, then either nuke |
1213 | | the connection or send a keepalive, depending. */ |
1214 | | |
1215 | 0 | or_conn = TO_OR_CONN(conn); |
1216 | 0 | tor_assert(conn->outbuf); |
1217 | | |
1218 | 0 | chan = TLS_CHAN_TO_BASE(or_conn->chan); |
1219 | 0 | tor_assert(chan); |
1220 | | |
1221 | 0 | if (channel_num_circuits(chan) != 0) { |
1222 | 0 | have_any_circuits = 1; |
1223 | 0 | chan->timestamp_last_had_circuits = now; |
1224 | 0 | } else { |
1225 | 0 | have_any_circuits = 0; |
1226 | 0 | } |
1227 | |
1228 | 0 | if (channel_is_bad_for_new_circs(TLS_CHAN_TO_BASE(or_conn->chan)) && |
1229 | 0 | ! have_any_circuits) { |
1230 | | /* It's bad for new circuits, and has no unmarked circuits on it: |
1231 | | * mark it now. */ |
1232 | 0 | log_info(LD_OR, |
1233 | 0 | "Expiring non-used OR connection to fd %d (%s:%d) [Too old].", |
1234 | 0 | (int)conn->s, fmt_and_decorate_addr(&conn->addr), conn->port); |
1235 | 0 | if (conn->state == OR_CONN_STATE_CONNECTING) |
1236 | 0 | connection_or_connect_failed(TO_OR_CONN(conn), |
1237 | 0 | END_OR_CONN_REASON_TIMEOUT, |
1238 | 0 | "Tor gave up on the connection"); |
1239 | 0 | connection_or_close_normally(TO_OR_CONN(conn), 1); |
1240 | 0 | } else if (!connection_state_is_open(conn)) { |
1241 | 0 | if (past_keepalive) { |
1242 | | /* We never managed to actually get this connection open and happy. */ |
1243 | 0 | log_info(LD_OR,"Expiring non-open OR connection to fd %d (%s:%d).", |
1244 | 0 | (int)conn->s, fmt_and_decorate_addr(&conn->addr), conn->port); |
1245 | 0 | connection_or_close_normally(TO_OR_CONN(conn), 0); |
1246 | 0 | } |
1247 | 0 | } else if (we_are_hibernating() && |
1248 | 0 | ! have_any_circuits && |
1249 | 0 | !connection_get_outbuf_len(conn)) { |
1250 | | /* We're hibernating or shutting down, there are no circuits, and nothing |
1251 | | * to flush. */ |
1252 | 0 | log_info(LD_OR,"Expiring non-used OR connection to fd %d (%s:%d) " |
1253 | 0 | "[Hibernating or exiting].", |
1254 | 0 | (int)conn->s, fmt_and_decorate_addr(&conn->addr), conn->port); |
1255 | 0 | connection_or_close_normally(TO_OR_CONN(conn), 1); |
1256 | 0 | } else if (!have_any_circuits && |
1257 | 0 | now - or_conn->idle_timeout >= |
1258 | 0 | chan->timestamp_last_had_circuits) { |
1259 | 0 | log_info(LD_OR,"Expiring non-used OR connection %"PRIu64" to fd %d " |
1260 | 0 | "(%s:%d) [no circuits for %d; timeout %d; %scanonical].", |
1261 | 0 | (chan->global_identifier), |
1262 | 0 | (int)conn->s, fmt_and_decorate_addr(&conn->addr), conn->port, |
1263 | 0 | (int)(now - chan->timestamp_last_had_circuits), |
1264 | 0 | or_conn->idle_timeout, |
1265 | 0 | or_conn->is_canonical ? "" : "non"); |
1266 | 0 | connection_or_close_normally(TO_OR_CONN(conn), 0); |
1267 | 0 | } else if ( |
1268 | 0 | now >= or_conn->timestamp_lastempty + options->KeepalivePeriod*10 && |
1269 | 0 | now >= |
1270 | 0 | conn->timestamp_last_write_allowed + options->KeepalivePeriod*10) { |
1271 | 0 | log_fn(LOG_PROTOCOL_WARN,LD_PROTOCOL, |
1272 | 0 | "Expiring stuck OR connection to fd %d (%s:%d). (%d bytes to " |
1273 | 0 | "flush; %d seconds since last write)", |
1274 | 0 | (int)conn->s, safe_str(fmt_and_decorate_addr(&conn->addr)), |
1275 | 0 | conn->port, (int)connection_get_outbuf_len(conn), |
1276 | 0 | (int)(now-conn->timestamp_last_write_allowed)); |
1277 | 0 | connection_or_close_normally(TO_OR_CONN(conn), 0); |
1278 | 0 | } else if (past_keepalive && !connection_get_outbuf_len(conn)) { |
1279 | | /* send a padding cell */ |
1280 | 0 | log_fn(LOG_DEBUG,LD_OR,"Sending keepalive to (%s:%d)", |
1281 | 0 | fmt_and_decorate_addr(&conn->addr), conn->port); |
1282 | 0 | memset(&cell,0,sizeof(cell_t)); |
1283 | 0 | cell.command = CELL_PADDING; |
1284 | 0 | connection_or_write_cell_to_buf(&cell, or_conn); |
1285 | 0 | } else { |
1286 | 0 | channelpadding_decide_to_pad_channel(chan); |
1287 | 0 | } |
1288 | 0 | } |
1289 | | |
1290 | | /** Honor a NEWNYM request: make future requests unlinkable to past |
1291 | | * requests. */ |
1292 | | static void |
1293 | | signewnym_impl(time_t now) |
1294 | 0 | { |
1295 | 0 | const or_options_t *options = get_options(); |
1296 | 0 | if (!proxy_mode(options)) { |
1297 | 0 | log_info(LD_CONTROL, "Ignoring SIGNAL NEWNYM because client functionality " |
1298 | 0 | "is disabled."); |
1299 | 0 | return; |
1300 | 0 | } |
1301 | | |
1302 | 0 | circuit_mark_all_dirty_circs_as_unusable(); |
1303 | 0 | addressmap_clear_transient(); |
1304 | 0 | hs_client_purge_state(); |
1305 | 0 | purge_vanguards_lite(); |
1306 | 0 | time_of_last_signewnym = now; |
1307 | 0 | signewnym_is_pending = 0; |
1308 | |
1309 | 0 | ++newnym_epoch; |
1310 | |
1311 | 0 | control_event_signal(SIGNEWNYM); |
1312 | 0 | } |
1313 | | |
1314 | | /** Callback: run a deferred signewnym. */ |
1315 | | static void |
1316 | | handle_deferred_signewnym_cb(mainloop_event_t *event, void *arg) |
1317 | 0 | { |
1318 | 0 | (void)event; |
1319 | 0 | (void)arg; |
1320 | 0 | log_info(LD_CONTROL, "Honoring delayed NEWNYM request"); |
1321 | 0 | do_signewnym(time(NULL)); |
1322 | 0 | } |
1323 | | |
1324 | | /** Either perform a signewnym or schedule one, depending on rate limiting. */ |
1325 | | void |
1326 | | do_signewnym(time_t now) |
1327 | 0 | { |
1328 | 0 | if (time_of_last_signewnym + MAX_SIGNEWNYM_RATE > now) { |
1329 | 0 | const time_t delay_sec = |
1330 | 0 | time_of_last_signewnym + MAX_SIGNEWNYM_RATE - now; |
1331 | 0 | if (! signewnym_is_pending) { |
1332 | 0 | signewnym_is_pending = 1; |
1333 | 0 | if (!handle_deferred_signewnym_ev) { |
1334 | 0 | handle_deferred_signewnym_ev = |
1335 | 0 | mainloop_event_postloop_new(handle_deferred_signewnym_cb, NULL); |
1336 | 0 | } |
1337 | 0 | const struct timeval delay_tv = { delay_sec, 0 }; |
1338 | 0 | mainloop_event_schedule(handle_deferred_signewnym_ev, &delay_tv); |
1339 | 0 | } |
1340 | 0 | log_notice(LD_CONTROL, |
1341 | 0 | "Rate limiting NEWNYM request: delaying by %d second(s)", |
1342 | 0 | (int)(delay_sec)); |
1343 | 0 | } else { |
1344 | 0 | signewnym_impl(now); |
1345 | 0 | } |
1346 | 0 | } |
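
The arithmetic above implements a simple minimum-spacing rate limit: if the last NEWNYM ran less than MAX_SIGNEWNYM_RATE seconds ago, the new request is deferred by exactly the remaining interval. A minimal standalone sketch of that computation; the value 10 for MAX_SIGNEWNYM_RATE is an assumption for illustration (the real constant is defined elsewhere in this file):

#include <stdio.h>
#include <time.h>

#define MAX_SIGNEWNYM_RATE 10 /* assumed spacing in seconds, for illustration */

static time_t time_of_last_request = 0;

/* Return 0 if a request may run now, else the number of seconds to
 * defer it by, mirroring the arithmetic in do_signewnym(). */
static time_t
rate_limit_delay(time_t now)
{
  if (time_of_last_request + MAX_SIGNEWNYM_RATE > now)
    return time_of_last_request + MAX_SIGNEWNYM_RATE - now;
  return 0;
}

int
main(void)
{
  time_t now = time(NULL);
  time_of_last_request = now - 3; /* last request ran 3 seconds ago */
  /* Prints 7: three of the ten seconds have already elapsed. */
  printf("delay by %ld second(s)\n", (long)rate_limit_delay(now));
  return 0;
}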
1347 | | |
1348 | | /** Return the number of times that signewnym has been called. */ |
1349 | | unsigned |
1350 | | get_signewnym_epoch(void) |
1351 | 0 | { |
1352 | 0 | return newnym_epoch; |
1353 | 0 | } |
1354 | | |
1355 | | /** True iff we have initialized all the members of <b>periodic_events</b>. |
1356 | | * Used to prevent double-initialization. */ |
1357 | | static int periodic_events_initialized = 0; |
1358 | | |
1359 | | /* Declare all the timer callback functions... */ |
1360 | | #ifndef COCCI |
1361 | | #undef CALLBACK |
1362 | | #define CALLBACK(name) \ |
1363 | | static int name ## _callback(time_t, const or_options_t *) |
1364 | | |
1365 | | CALLBACK(add_entropy); |
1366 | | CALLBACK(check_expired_networkstatus); |
1367 | | CALLBACK(clean_caches); |
1368 | | CALLBACK(clean_consdiffmgr); |
1369 | | CALLBACK(fetch_networkstatus); |
1370 | | CALLBACK(heartbeat); |
1371 | | CALLBACK(hs_service); |
1372 | | CALLBACK(launch_descriptor_fetches); |
1373 | | CALLBACK(prune_old_routers); |
1374 | | CALLBACK(record_bridge_stats); |
1375 | | CALLBACK(rend_cache_failure_clean); |
1376 | | CALLBACK(reset_padding_counts); |
1377 | | CALLBACK(retry_listeners); |
1378 | | CALLBACK(rotate_x509_certificate); |
1379 | | CALLBACK(save_state); |
1380 | | CALLBACK(write_stats_file); |
1381 | | CALLBACK(control_per_second_events); |
1382 | | CALLBACK(second_elapsed); |
1383 | | CALLBACK(manage_vglite); |
1384 | | |
1385 | | #undef CALLBACK |
1386 | | |
1387 | | /* Now we declare an array of periodic_event_item_t for each periodic event */ |
1388 | | #define CALLBACK(name, r, f) \ |
1389 | | PERIODIC_EVENT(name, PERIODIC_EVENT_ROLE_ ## r, f) |
1390 | | #define FL(name) (PERIODIC_EVENT_FLAG_ ## name) |
1391 | | #endif /* !defined(COCCI) */ |
1392 | | |
1393 | | STATIC periodic_event_item_t mainloop_periodic_events[] = { |
1394 | | |
1395 | | /* Everyone needs to run these. They need to have very long timeouts for |
1396 | | * that to be safe. */ |
1397 | | CALLBACK(add_entropy, ALL, 0), |
1398 | | CALLBACK(heartbeat, ALL, 0), |
1399 | | CALLBACK(reset_padding_counts, ALL, 0), |
1400 | | |
1401 | | /* This is a legacy catch-all callback that runs once per second if |
1402 | | * we are online and active. */ |
1403 | | CALLBACK(second_elapsed, NET_PARTICIPANT, |
1404 | | FL(RUN_ON_DISABLE)), |
1405 | | |
1406 | | /* Update vanguards-lite once per hour, if we have networking */ |
1407 | | CALLBACK(manage_vglite, NET_PARTICIPANT, FL(NEED_NET)), |
1408 | | |
1409 | | /* XXXX Do we have a reason to do this on a callback? Does it do any good at |
1410 | | * all? For now, if we're dormant, we can let our listeners decay. */ |
1411 | | CALLBACK(retry_listeners, NET_PARTICIPANT, FL(NEED_NET)), |
1412 | | |
1413 | | /* We need to do these if we're participating in the Tor network. */ |
1414 | | CALLBACK(check_expired_networkstatus, NET_PARTICIPANT, 0), |
1415 | | CALLBACK(fetch_networkstatus, NET_PARTICIPANT, 0), |
1416 | | CALLBACK(launch_descriptor_fetches, NET_PARTICIPANT, FL(NEED_NET)), |
1417 | | CALLBACK(rotate_x509_certificate, NET_PARTICIPANT, 0), |
1418 | | CALLBACK(check_network_participation, NET_PARTICIPANT, 0), |
1419 | | |
1420 | | /* We need to do these if we're participating in the Tor network, and |
1421 | | * immediately before we stop. */ |
1422 | | CALLBACK(clean_caches, NET_PARTICIPANT, FL(RUN_ON_DISABLE)), |
1423 | | CALLBACK(save_state, NET_PARTICIPANT, FL(RUN_ON_DISABLE)), |
1424 | | CALLBACK(write_stats_file, NET_PARTICIPANT, FL(RUN_ON_DISABLE)), |
1425 | | CALLBACK(prune_old_routers, NET_PARTICIPANT, FL(RUN_ON_DISABLE)), |
1426 | | |
1427 | | /* Hidden Service service only. */ |
1428 | | CALLBACK(hs_service, HS_SERVICE, FL(NEED_NET)), // XXXX break this down more |
1429 | | |
1430 | | /* Bridge only. */ |
1431 | | CALLBACK(record_bridge_stats, BRIDGE, 0), |
1432 | | |
1433 | | /* Client only. */ |
1434 | | /* XXXX this could be restricted to CLIENT+NET_PARTICIPANT */ |
1435 | | CALLBACK(rend_cache_failure_clean, NET_PARTICIPANT, FL(RUN_ON_DISABLE)), |
1436 | | |
1437 | | /* Directory server only. */ |
1438 | | CALLBACK(clean_consdiffmgr, DIRSERVER, 0), |
1439 | | |
1440 | | /* Controller with per-second events only. */ |
1441 | | CALLBACK(control_per_second_events, CONTROLEV, 0), |
1442 | | |
1443 | | END_OF_PERIODIC_EVENTS |
1444 | | }; |
1445 | | #ifndef COCCI |
1446 | | #undef CALLBACK |
1447 | | #undef FL |
1448 | | #endif |
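
For illustration, wiring a new event into this machinery takes three steps: a forward declaration via the first CALLBACK() macro, a table row via the second, and the callback definition itself, whose return value is the number of seconds until its next run (or PERIODIC_EVENT_NO_UPDATE to leave the schedule alone). A sketch using a made-up frob_widgets event; the name, role, flags, and interval are all hypothetical:

/* 1. Forward-declare it with the declaration-form CALLBACK() macro:
 *      CALLBACK(frob_widgets);  // expands to a frob_widgets_callback prototype
 *
 * 2. Add a table row before END_OF_PERIODIC_EVENTS:
 *      CALLBACK(frob_widgets, NET_PARTICIPANT, FL(NEED_NET)),
 *
 * 3. Define the callback (hypothetical example): */
static int
frob_widgets_callback(time_t now, const or_options_t *options)
{
  (void)now;
  (void)options;
  /* ... do the periodic work here ... */
  return 60 * 60; /* ask to run again in an hour */
}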
1449 | | |
1450 | | /* These are pointers to members of mainloop_periodic_events[] that are used
1451 | | * to implement particular callbacks. We keep them separate here so that we
1452 | | * can access them by name. We also keep them inside the table itself, so
1453 | | * that we can implement "reset all timers" in a reasonable way. */
1454 | | static periodic_event_item_t *fetch_networkstatus_event=NULL; |
1455 | | static periodic_event_item_t *launch_descriptor_fetches_event=NULL; |
1456 | | static periodic_event_item_t *check_dns_honesty_event=NULL; |
1457 | | static periodic_event_item_t *save_state_event=NULL; |
1458 | | static periodic_event_item_t *prune_old_routers_event=NULL; |
1459 | | |
1460 | | /** Reset all the periodic events so we'll do all our actions again as if we |
1461 | | * just started up. |
1462 | | * Useful if our clock just moved back a long time from the future, |
1463 | | * so we don't wait until that future arrives again before acting. |
1464 | | */ |
1465 | | void |
1466 | | reset_all_main_loop_timers(void) |
1467 | 0 | { |
1468 | 0 | periodic_events_reset_all(); |
1469 | 0 | } |
1470 | | |
1471 | | /** Return a bitmask of the roles this tor instance is configured for using |
1472 | | * the given options. */ |
1473 | | STATIC int |
1474 | | get_my_roles(const or_options_t *options) |
1475 | 0 | { |
1476 | 0 | tor_assert(options); |
1477 | | |
1478 | 0 | int roles = PERIODIC_EVENT_ROLE_ALL; |
1479 | 0 | int is_bridge = options->BridgeRelay; |
1480 | 0 | int is_relay = server_mode(options); |
1481 | 0 | int is_dirauth = authdir_mode_v3(options); |
1482 | 0 | int is_bridgeauth = authdir_mode_bridge(options); |
1483 | 0 | int is_hidden_service = !!hs_service_get_num_services(); |
1484 | 0 | int is_dirserver = dir_server_mode(options); |
1485 | 0 | int sending_control_events = control_any_per_second_event_enabled(); |
1486 | | |
1487 | | /* We also consider tor to have the role of a client if the ControlPort is
1488 | | * set, because a lot of things can be done over the control port that
1489 | | * require tor to have its basic functionality available. */
1490 | 0 | int is_client = options_any_client_port_set(options) || |
1491 | 0 | options->ControlPort_set || |
1492 | 0 | options->OwningControllerFD != UINT64_MAX; |
1493 | |
1494 | 0 | int is_net_participant = is_participating_on_network() || |
1495 | 0 | is_relay || is_hidden_service; |
1496 | |
1497 | 0 | if (is_bridge) roles |= PERIODIC_EVENT_ROLE_BRIDGE; |
1498 | 0 | if (is_client) roles |= PERIODIC_EVENT_ROLE_CLIENT; |
1499 | 0 | if (is_relay) roles |= PERIODIC_EVENT_ROLE_RELAY; |
1500 | 0 | if (is_dirauth) roles |= PERIODIC_EVENT_ROLE_DIRAUTH; |
1501 | 0 | if (is_bridgeauth) roles |= PERIODIC_EVENT_ROLE_BRIDGEAUTH; |
1502 | 0 | if (is_hidden_service) roles |= PERIODIC_EVENT_ROLE_HS_SERVICE; |
1503 | 0 | if (is_dirserver) roles |= PERIODIC_EVENT_ROLE_DIRSERVER; |
1504 | 0 | if (is_net_participant) roles |= PERIODIC_EVENT_ROLE_NET_PARTICIPANT; |
1505 | 0 | if (sending_control_events) roles |= PERIODIC_EVENT_ROLE_CONTROLEV; |
1506 | |
1507 | 0 | return roles; |
1508 | 0 | } |
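
The result is an ordinary bitmask, so callers can test for any combination of roles with a bitwise AND. A self-contained sketch of the same build-and-test pattern; the ROLE_* constants here are stand-ins, not the real PERIODIC_EVENT_ROLE_* values:

#include <stdio.h>

/* Stand-in role bits, for illustration only. */
#define ROLE_ALL    (1u << 0)
#define ROLE_CLIENT (1u << 1)
#define ROLE_RELAY  (1u << 2)

int
main(void)
{
  unsigned roles = ROLE_ALL;        /* everyone gets the baseline role */
  int is_client = 1, is_relay = 0;
  if (is_client) roles |= ROLE_CLIENT;
  if (is_relay)  roles |= ROLE_RELAY;
  printf("client events: %s\n", (roles & ROLE_CLIENT) ? "on" : "off");
  printf("relay events:  %s\n", (roles & ROLE_RELAY)  ? "on" : "off");
  return 0;
}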
1509 | | |
1510 | | /** Event to run initialize_periodic_events_cb */ |
1511 | | static struct event *initialize_periodic_events_event = NULL; |
1512 | | |
1513 | | /** Helper, run one second after setup: |
1514 | | * Initializes all members of periodic_events and starts them running. |
1515 | | * |
1516 | | * (We do this one second after setup for backward-compatibility reasons; |
1517 | | * it might not actually be necessary.) */ |
1518 | | static void |
1519 | | initialize_periodic_events_cb(evutil_socket_t fd, short events, void *data) |
1520 | 0 | { |
1521 | 0 | (void) fd; |
1522 | 0 | (void) events; |
1523 | 0 | (void) data; |
1524 | |
1525 | 0 | tor_event_free(initialize_periodic_events_event); |
1526 | |
1527 | 0 | rescan_periodic_events(get_options()); |
1528 | 0 | } |
1529 | | |
1530 | | /** Set up all the members of mainloop_periodic_events[], and configure them |
1531 | | * all to be launched from a callback. */ |
1532 | | void |
1533 | | initialize_periodic_events(void) |
1534 | 0 | { |
1535 | 0 | if (periodic_events_initialized) |
1536 | 0 | return; |
1537 | | |
1538 | 0 | periodic_events_initialized = 1; |
1539 | |
1540 | 0 | for (int i = 0; mainloop_periodic_events[i].name; ++i) { |
1541 | 0 | periodic_events_register(&mainloop_periodic_events[i]); |
1542 | 0 | } |
1543 | | |
1544 | | /* Set up all periodic events. We'll launch them by roles. */ |
1545 | |
1546 | 0 | #ifndef COCCI |
1547 | 0 | #define NAMED_CALLBACK(name) \ |
1548 | 0 | STMT_BEGIN name ## _event = periodic_events_find( #name ); STMT_END |
1549 | 0 | #endif |
1550 | |
1551 | 0 | NAMED_CALLBACK(prune_old_routers); |
1552 | 0 | NAMED_CALLBACK(fetch_networkstatus); |
1553 | 0 | NAMED_CALLBACK(launch_descriptor_fetches); |
1554 | 0 | NAMED_CALLBACK(check_dns_honesty); |
1555 | 0 | NAMED_CALLBACK(save_state); |
1556 | 0 | } |
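
Expanded by hand, each NAMED_CALLBACK() line is just a by-name lookup wrapped in Tor's usual do/while(0) statement macros; the first one above becomes roughly:

/* NAMED_CALLBACK(prune_old_routers); expands (after # stringizing) to: */
do {
  prune_old_routers_event = periodic_events_find("prune_old_routers");
} while (0);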
1557 | | |
1558 | | STATIC void |
1559 | | teardown_periodic_events(void) |
1560 | 0 | { |
1561 | 0 | periodic_events_disconnect_all(); |
1562 | 0 | fetch_networkstatus_event = NULL; |
1563 | 0 | launch_descriptor_fetches_event = NULL; |
1564 | 0 | check_dns_honesty_event = NULL; |
1565 | 0 | save_state_event = NULL; |
1566 | 0 | prune_old_routers_event = NULL; |
1567 | 0 | periodic_events_initialized = 0; |
1568 | 0 | } |
1569 | | |
1570 | | static mainloop_event_t *rescan_periodic_events_ev = NULL; |
1571 | | |
1572 | | /** Callback: rescan the periodic event list. */ |
1573 | | static void |
1574 | | rescan_periodic_events_cb(mainloop_event_t *event, void *arg) |
1575 | 0 | { |
1576 | 0 | (void)event; |
1577 | 0 | (void)arg; |
1578 | 0 | rescan_periodic_events(get_options()); |
1579 | 0 | } |
1580 | | |
1581 | | /** |
1582 | | * Schedule an event that will rescan which periodic events should run. |
1583 | | **/ |
1584 | | MOCK_IMPL(void, |
1585 | | schedule_rescan_periodic_events,(void)) |
1586 | 0 | { |
1587 | 0 | if (!rescan_periodic_events_ev) { |
1588 | 0 | rescan_periodic_events_ev = |
1589 | 0 | mainloop_event_new(rescan_periodic_events_cb, NULL); |
1590 | 0 | } |
1591 | 0 | mainloop_event_activate(rescan_periodic_events_ev); |
1592 | 0 | } |
1593 | | |
1594 | | /** Do a pass over all our periodic events: disable those we no longer need,
1595 | | * and enable those we now need, using the given options. */
1596 | | void |
1597 | | rescan_periodic_events(const or_options_t *options) |
1598 | 0 | { |
1599 | 0 | tor_assert(options); |
1600 | | |
1601 | 0 | periodic_events_rescan_by_roles(get_my_roles(options), net_is_disabled()); |
1602 | 0 | } |
1603 | | |
1604 | | /* We just got new options set globally; see if we need to enable or disable
1605 | | * periodic events. */
1606 | | void |
1607 | | periodic_events_on_new_options(const or_options_t *options) |
1608 | 0 | { |
1609 | 0 | rescan_periodic_events(options); |
1610 | 0 | } |
1611 | | |
1612 | | /** |
1613 | | * Update our schedule so that we'll check whether we need to fetch directory |
1614 | | * info immediately. |
1615 | | */ |
1616 | | void |
1617 | | reschedule_directory_downloads(void) |
1618 | 0 | { |
1619 | 0 | tor_assert(fetch_networkstatus_event); |
1620 | 0 | tor_assert(launch_descriptor_fetches_event); |
1621 | | |
1622 | 0 | periodic_event_reschedule(fetch_networkstatus_event); |
1623 | 0 | periodic_event_reschedule(launch_descriptor_fetches_event); |
1624 | 0 | } |
1625 | | |
1626 | | /** Mainloop callback: clean up circuits, channels, and connections |
1627 | | * that are pending close. */ |
1628 | | static void |
1629 | | postloop_cleanup_cb(mainloop_event_t *ev, void *arg) |
1630 | 0 | { |
1631 | 0 | (void)ev; |
1632 | 0 | (void)arg; |
1633 | 0 | circuit_close_all_marked(); |
1634 | 0 | close_closeable_connections(); |
1635 | 0 | channel_run_cleanup(); |
1636 | 0 | channel_listener_run_cleanup(); |
1637 | 0 | } |
1638 | | |
1639 | | /** Event to run postloop_cleanup_cb */ |
1640 | | static mainloop_event_t *postloop_cleanup_ev=NULL; |
1641 | | |
1642 | | /** Schedule a post-loop event to clean up marked channels, connections, and |
1643 | | * circuits. */ |
1644 | | void |
1645 | | mainloop_schedule_postloop_cleanup(void) |
1646 | 0 | { |
1647 | 0 | if (PREDICT_UNLIKELY(postloop_cleanup_ev == NULL)) { |
1648 | | // (It's possible that we can get here if we decide to close a connection |
1649 | | // in the earliest stages of our configuration, before we create events.) |
1650 | 0 | return; |
1651 | 0 | } |
1652 | 0 | mainloop_event_activate(postloop_cleanup_ev); |
1653 | 0 | } |
1654 | | |
1655 | | /** Event to run 'scheduled_shutdown_cb' */ |
1656 | | static mainloop_event_t *scheduled_shutdown_ev=NULL; |
1657 | | |
1658 | | /** Callback: run a scheduled shutdown */ |
1659 | | static void |
1660 | | scheduled_shutdown_cb(mainloop_event_t *ev, void *arg) |
1661 | 0 | { |
1662 | 0 | (void)ev; |
1663 | 0 | (void)arg; |
1664 | 0 | log_notice(LD_GENERAL, "Clean shutdown finished. Exiting."); |
1665 | 0 | tor_shutdown_event_loop_and_exit(0); |
1666 | 0 | } |
1667 | | |
1668 | | /** Schedule the mainloop to exit after <b>delay_sec</b> seconds. */ |
1669 | | void |
1670 | | mainloop_schedule_shutdown(int delay_sec) |
1671 | 0 | { |
1672 | 0 | const struct timeval delay_tv = { delay_sec, 0 }; |
1673 | 0 | if (! scheduled_shutdown_ev) { |
1674 | 0 | scheduled_shutdown_ev = mainloop_event_new(scheduled_shutdown_cb, NULL); |
1675 | 0 | } |
1676 | 0 | mainloop_event_schedule(scheduled_shutdown_ev, &delay_tv); |
1677 | 0 | } |
1678 | | |
1679 | | /** |
1680 | | * Update vanguards-lite layer2 nodes, once every 15 minutes |
1681 | | */ |
1682 | | static int |
1683 | | manage_vglite_callback(time_t now, const or_options_t *options) |
1684 | 0 | { |
1685 | 0 | (void)now; |
1686 | 0 | (void)options; |
1687 | 0 | #define VANGUARDS_LITE_INTERVAL (15*60) |
1688 | |
|
1689 | 0 | maintain_layer2_guards(); |
1690 | |
|
1691 | 0 | return VANGUARDS_LITE_INTERVAL; |
1692 | 0 | } |
1693 | | |
1694 | | /** Perform regular maintenance tasks. This function gets run once per |
1695 | | * second. |
1696 | | */ |
1697 | | static int |
1698 | | second_elapsed_callback(time_t now, const or_options_t *options) |
1699 | 0 | { |
1700 | | /* 0. See if our bandwidth limits are exhausted and we should hibernate |
1701 | | * |
1702 | | * Note: we have redundant mechanisms to handle the case where it's
1703 | | * time to wake up from hibernation, or where a scheduled shutdown
1704 | | * is due to run; this callback handles those cases too.
1705 | | */ |
1706 | 0 | consider_hibernation(now); |
1707 | | |
1708 | | /* Maybe enough time elapsed for us to reconsider a circuit. */ |
1709 | 0 | circuit_upgrade_circuits_from_guard_wait(); |
1710 | |
1711 | 0 | if (options->UseBridges && !net_is_disabled()) { |
1712 | | /* Note: this check uses net_is_disabled(), not should_delay_dir_fetches() |
1713 | | * -- the latter is only for fetching consensus-derived directory info. */ |
1714 | | // TODO: client |
1715 | | // Also, schedule this rather than probing 1x / sec |
1716 | 0 | fetch_bridge_descriptors(options, now); |
1717 | 0 | } |
1718 | |
1719 | 0 | if (accounting_is_enabled(options)) { |
1720 | | // TODO: refactor or rewrite? |
1721 | 0 | accounting_run_housekeeping(now); |
1722 | 0 | } |
1723 | | |
1724 | | /* 3a. Every second, we examine pending circuits and prune the |
1725 | | * ones which have been pending for more than a few seconds. |
1726 | | * We do this before step 4, so it can try building more if |
1727 | | * it's not comfortable with the number of available circuits. |
1728 | | */ |
1729 | | /* (If our circuit build timeout can ever become lower than a second (which |
1730 | | * it can't, currently), we should do this more often.) */ |
1731 | | // TODO: All expire stuff can become NET_PARTICIPANT, RUN_ON_DISABLE |
1732 | 0 | circuit_expire_building(); |
1733 | 0 | circuit_expire_waiting_for_better_guard(); |
1734 | | |
1735 | | /* 3b. Also look at pending streams and prune the ones that 'began' |
1736 | | * a long time ago but haven't gotten a 'connected' yet. |
1737 | | * Do this before step 4, so we can put them back into pending |
1738 | | * state to be picked up by the new circuit. |
1739 | | */ |
1740 | 0 | connection_ap_expire_beginning(); |
1741 | | |
1742 | | /* 3c. And expire connections that we've held open for too long. |
1743 | | */ |
1744 | 0 | connection_expire_held_open(); |
1745 | | |
1746 | | /* 4. Every second, we try a new circuit if there are no valid |
1747 | | * circuits. Every NewCircuitPeriod seconds, we expire circuits |
1748 | | * that became dirty more than MaxCircuitDirtiness seconds ago, |
1749 | | * and we make a new circ if there are no clean circuits. |
1750 | | */ |
1751 | 0 | const int have_dir_info = router_have_minimum_dir_info(); |
1752 | 0 | if (have_dir_info && !net_is_disabled()) { |
1753 | 0 | circuit_build_needed_circs(now); |
1754 | 0 | } else { |
1755 | 0 | circuit_expire_old_circs_as_needed(now); |
1756 | 0 | } |
1757 | | |
1758 | | /* 5. We do housekeeping for each connection... */ |
1759 | 0 | channel_update_bad_for_new_circs(NULL, 0); |
1760 | 0 | int i; |
1761 | 0 | for (i=0;i<smartlist_len(connection_array);i++) { |
1762 | 0 | run_connection_housekeeping(i, now); |
1763 | 0 | } |
1764 | | |
1765 | | /* Run again in a second. */ |
1766 | 0 | return 1; |
1767 | 0 | } |
1768 | | |
1769 | | /** |
1770 | | * Periodic callback: Every {LAZY,GREEDY}_DESCRIPTOR_RETRY_INTERVAL, |
1771 | | * see about fetching descriptors, microdescriptors, and extrainfo |
1772 | | * documents. |
1773 | | */ |
1774 | | static int |
1775 | | launch_descriptor_fetches_callback(time_t now, const or_options_t *options) |
1776 | 0 | { |
1777 | 0 | if (should_delay_dir_fetches(options, NULL)) |
1778 | 0 | return PERIODIC_EVENT_NO_UPDATE; |
1779 | | |
1780 | 0 | update_all_descriptor_downloads(now); |
1781 | 0 | update_extrainfo_downloads(now); |
1782 | 0 | if (router_have_minimum_dir_info()) |
1783 | 0 | return LAZY_DESCRIPTOR_RETRY_INTERVAL; |
1784 | 0 | else |
1785 | 0 | return GREEDY_DESCRIPTOR_RETRY_INTERVAL; |
1786 | 0 | } |
1787 | | |
1788 | | /** |
1789 | | * Periodic event: Rotate our X.509 certificates and TLS keys once every |
1790 | | * MAX_SSL_KEY_LIFETIME_INTERNAL. |
1791 | | */ |
1792 | | static int |
1793 | | rotate_x509_certificate_callback(time_t now, const or_options_t *options) |
1794 | 0 | { |
1795 | 0 | static int first = 1; |
1796 | 0 | (void)now; |
1797 | 0 | (void)options; |
1798 | 0 | if (first) { |
1799 | 0 | first = 0; |
1800 | 0 | return MAX_SSL_KEY_LIFETIME_INTERNAL; |
1801 | 0 | } |
1802 | | |
1803 | | /* 1b. Every MAX_SSL_KEY_LIFETIME_INTERNAL seconds, we change our |
1804 | | * TLS context. */ |
1805 | 0 | log_info(LD_GENERAL,"Rotating tls context."); |
1806 | 0 | if (router_initialize_tls_context() < 0) { |
1807 | 0 | log_err(LD_BUG, "Error reinitializing TLS context"); |
1808 | 0 | tor_assert_unreached(); |
1809 | 0 | } |
1810 | 0 | if (generate_ed_link_cert(options, now, 1)) { |
1811 | 0 | log_err(LD_OR, "Unable to update Ed25519->TLS link certificate for " |
1812 | 0 | "new TLS context."); |
1813 | 0 | tor_assert_unreached(); |
1814 | 0 | } |
1815 | | |
1816 | | /* We also make sure to rotate the TLS connections themselves if they've |
1817 | | * been up for too long -- but that's done via is_bad_for_new_circs in |
1818 | | * run_connection_housekeeping() above. */ |
1819 | 0 | return MAX_SSL_KEY_LIFETIME_INTERNAL; |
1820 | 0 | } |
1821 | | |
1822 | | /** |
1823 | | * Periodic callback: once an hour, grab some more entropy from the |
1824 | | * kernel and feed it to our CSPRNG. |
1825 | | **/ |
1826 | | static int |
1827 | | add_entropy_callback(time_t now, const or_options_t *options) |
1828 | 0 | { |
1829 | 0 | (void)now; |
1830 | 0 | (void)options; |
1831 | | /* We already seeded once, so don't die on failure. */ |
1832 | 0 | if (crypto_seed_rng() < 0) { |
1833 | 0 | log_warn(LD_GENERAL, "Tried to re-seed RNG, but failed. We already " |
1834 | 0 | "seeded once, though, so we won't exit here."); |
1835 | 0 | } |
1836 | | |
1837 | | /** How often do we add more entropy to OpenSSL's RNG pool? */ |
1838 | 0 | #define ENTROPY_INTERVAL (60*60) |
1839 | 0 | return ENTROPY_INTERVAL; |
1840 | 0 | } |
1841 | | |
1842 | | /** Periodic callback: if there has been no network usage in a while, |
1843 | | * enter a dormant state. */ |
1844 | | STATIC int |
1845 | | check_network_participation_callback(time_t now, const or_options_t *options) |
1846 | 0 | { |
1847 | | /* If we're a server, we can't become dormant. */ |
1848 | 0 | if (server_mode(options)) { |
1849 | 0 | goto found_activity; |
1850 | 0 | } |
1851 | | |
1852 | | /* If we aren't allowed to become dormant, then participation doesn't |
1853 | | matter */ |
1854 | 0 | if (! options->DormantTimeoutEnabled) { |
1855 | 0 | goto found_activity; |
1856 | 0 | } |
1857 | | |
1858 | | /* If we're running an onion service, we can't become dormant. */ |
1859 | | /* XXXX this would be nice to change, so that we can be dormant with a |
1860 | | * service. */ |
1861 | 0 | if (hs_service_get_num_services()) { |
1862 | 0 | goto found_activity; |
1863 | 0 | } |
1864 | | |
1865 | | /* If we have any currently open entry streams other than "linked" |
1866 | | * connections used for directory requests, those count as user activity. |
1867 | | */ |
1868 | 0 | if (options->DormantTimeoutDisabledByIdleStreams) { |
1869 | 0 | if (connection_get_by_type_nonlinked(CONN_TYPE_AP) != NULL) { |
1870 | 0 | goto found_activity; |
1871 | 0 | } |
1872 | 0 | } |
1873 | | |
1874 | | /* XXXX Make this configurable? */ |
1875 | | /** How often do we check whether we have had network activity? */ |
1876 | 0 | #define CHECK_PARTICIPATION_INTERVAL (5*60) |
1877 | | |
1878 | | /* Become dormant if there has been no user activity in a long time. |
1879 | | * (The funny checks below are in order to prevent overflow.) */ |
1880 | 0 | time_t time_since_last_activity = 0; |
1881 | 0 | if (get_last_user_activity_time() < now) |
1882 | 0 | time_since_last_activity = now - get_last_user_activity_time(); |
1883 | 0 | if (time_since_last_activity >= options->DormantClientTimeout) { |
1884 | 0 | log_notice(LD_GENERAL, "No user activity in a long time: becoming" |
1885 | 0 | " dormant."); |
1886 | 0 | set_network_participation(false); |
1887 | 0 | rescan_periodic_events(options); |
1888 | 0 | } |
1889 | |
1890 | 0 | return CHECK_PARTICIPATION_INTERVAL; |
1891 | | |
1892 | 0 | found_activity: |
1893 | 0 | note_user_activity(now); |
1894 | 0 | return CHECK_PARTICIPATION_INTERVAL; |
1895 | 0 | } |
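
The "funny checks" guard against a wall clock that has stepped backwards: subtracting a last-activity time that lies in the future would produce a negative interval (or an enormous one, under unsigned arithmetic). A standalone sketch of the same guard:

#include <stdio.h>
#include <time.h>

/* Compute an idle interval that can never go negative, mirroring the
 * guard in check_network_participation_callback(). */
static time_t
safe_idle_interval(time_t now, time_t last_activity)
{
  time_t idle = 0;
  if (last_activity < now)    /* only trust a forward-moving clock */
    idle = now - last_activity;
  return idle;                /* 0 if the clock stepped backwards */
}

int
main(void)
{
  time_t now = 1000000;
  printf("%ld\n", (long)safe_idle_interval(now, now - 600)); /* 600 */
  printf("%ld\n", (long)safe_idle_interval(now, now + 600)); /* 0 */
  return 0;
}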
1896 | | |
1897 | | /** |
1898 | | * Periodic callback: If our consensus is too old, recalculate whether |
1899 | | * we can actually use it. |
1900 | | */ |
1901 | | static int |
1902 | | check_expired_networkstatus_callback(time_t now, const or_options_t *options) |
1903 | 0 | { |
1904 | 0 | (void)options; |
1905 | | /* Check whether our networkstatus has expired. */ |
1906 | 0 | networkstatus_t *ns = networkstatus_get_latest_consensus(); |
1907 | | /* Use reasonably live consensuses until they are no longer reasonably live. |
1908 | | */ |
1909 | 0 | if (ns && !networkstatus_consensus_reasonably_live(ns, now) && |
1910 | 0 | router_have_minimum_dir_info()) { |
1911 | 0 | router_dir_info_changed(); |
1912 | 0 | } |
1913 | 0 | #define CHECK_EXPIRED_NS_INTERVAL (2*60) |
1914 | 0 | return CHECK_EXPIRED_NS_INTERVAL; |
1915 | 0 | } |
1916 | | |
1917 | | /** |
1918 | | * Scheduled callback: Save the state file to disk if appropriate. |
1919 | | */ |
1920 | | static int |
1921 | | save_state_callback(time_t now, const or_options_t *options) |
1922 | 0 | { |
1923 | 0 | (void) options; |
1924 | 0 | (void) or_state_save(now); // only saves if appropriate |
1925 | 0 | const time_t next_write = get_or_state()->next_write; |
1926 | 0 | if (next_write == TIME_MAX) { |
1927 | 0 | return 86400; |
1928 | 0 | } |
1929 | 0 | return safe_timer_diff(now, next_write); |
1930 | 0 | } |
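
The callback reschedules itself for whenever the state next wants writing, falling back to a day when nothing is pending (next_write == TIME_MAX). A simplified standalone sketch; safe_diff() below is a stand-in for Tor's safe_timer_diff(), whose exact clamping is defined elsewhere:

#include <stdio.h>
#include <time.h>

/* Stand-in for safe_timer_diff(): clamp the wait into a sane range. */
static int
safe_diff(time_t now, time_t next)
{
  if (next <= now)
    return 1;       /* overdue: run again almost immediately */
  if (next - now > 86400)
    return 86400;   /* never sleep longer than a day */
  return (int)(next - now);
}

int
main(void)
{
  time_t now = time(NULL);
  time_t next_write = now + 600;  /* state wants saving in ten minutes */
  printf("reschedule in %d seconds\n", safe_diff(now, next_write));
  return 0;
}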
1931 | | |
1932 | | /** Reschedule the event for saving the state file. |
1933 | | * |
1934 | | * Run this when the state becomes dirty. */ |
1935 | | void |
1936 | | reschedule_or_state_save(void) |
1937 | 0 | { |
1938 | 0 | if (save_state_event == NULL) { |
1939 | | /* This can happen early on during startup. */ |
1940 | 0 | return; |
1941 | 0 | } |
1942 | 0 | periodic_event_reschedule(save_state_event); |
1943 | 0 | } |
1944 | | |
1945 | | /** |
1946 | | * Periodic callback: Write statistics to disk if appropriate. |
1947 | | */ |
1948 | | static int |
1949 | | write_stats_file_callback(time_t now, const or_options_t *options) |
1950 | 0 | { |
1951 | | /* 1g. Check whether we should write statistics to disk. |
1952 | | */ |
1953 | 0 | #define CHECK_WRITE_STATS_INTERVAL (60*60) |
1954 | 0 | time_t next_time_to_write_stats_files = now + CHECK_WRITE_STATS_INTERVAL; |
1955 | 0 | if (options->CellStatistics) { |
1956 | 0 | time_t next_write = |
1957 | 0 | rep_hist_buffer_stats_write(now); |
1958 | 0 | if (next_write && next_write < next_time_to_write_stats_files) |
1959 | 0 | next_time_to_write_stats_files = next_write; |
1960 | 0 | } |
1961 | 0 | if (options->DirReqStatistics) { |
1962 | 0 | time_t next_write = geoip_dirreq_stats_write(now); |
1963 | 0 | if (next_write && next_write < next_time_to_write_stats_files) |
1964 | 0 | next_time_to_write_stats_files = next_write; |
1965 | 0 | } |
1966 | 0 | if (options->EntryStatistics) { |
1967 | 0 | time_t next_write = geoip_entry_stats_write(now); |
1968 | 0 | if (next_write && next_write < next_time_to_write_stats_files) |
1969 | 0 | next_time_to_write_stats_files = next_write; |
1970 | 0 | } |
1971 | 0 | if (options->HiddenServiceStatistics) { |
1972 | 0 | time_t next_write = rep_hist_hs_stats_write(now, false); |
1973 | 0 | if (next_write && next_write < next_time_to_write_stats_files) |
1974 | 0 | next_time_to_write_stats_files = next_write; |
1975 | |
1976 | 0 | next_write = rep_hist_hs_stats_write(now, true); |
1977 | 0 | if (next_write && next_write < next_time_to_write_stats_files) |
1978 | 0 | next_time_to_write_stats_files = next_write; |
1979 | 0 | } |
1980 | 0 | if (options->ExitPortStatistics) { |
1981 | 0 | time_t next_write = rep_hist_exit_stats_write(now); |
1982 | 0 | if (next_write && next_write < next_time_to_write_stats_files) |
1983 | 0 | next_time_to_write_stats_files = next_write; |
1984 | 0 | } |
1985 | 0 | if (options->ConnDirectionStatistics) { |
1986 | 0 | time_t next_write = conn_stats_save(now); |
1987 | 0 | if (next_write && next_write < next_time_to_write_stats_files) |
1988 | 0 | next_time_to_write_stats_files = next_write; |
1989 | 0 | } |
1990 | 0 | if (options->BridgeAuthoritativeDir) { |
1991 | 0 | time_t next_write = rep_hist_desc_stats_write(now); |
1992 | 0 | if (next_write && next_write < next_time_to_write_stats_files) |
1993 | 0 | next_time_to_write_stats_files = next_write; |
1994 | 0 | } |
1995 | |
1996 | 0 | return safe_timer_diff(now, next_time_to_write_stats_files); |
1997 | 0 | } |
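
Every enabled subsystem reports the time it next wants to write; the callback keeps the earliest of those and sleeps until then. All the if-blocks above follow one "minimum of pending deadlines" pattern, sketched here with hypothetical deadlines:

#include <stdio.h>
#include <time.h>

/* Keep the earliest pending deadline, treating 0 as "nothing pending",
 * just as the repeated blocks in write_stats_file_callback() do. */
static void
track_earliest(time_t next_write, time_t *earliest)
{
  if (next_write && next_write < *earliest)
    *earliest = next_write;
}

int
main(void)
{
  time_t now = 1000;
  time_t earliest = now + 60*60;          /* default: check again in an hour */
  track_earliest(now + 3600, &earliest);  /* e.g. cell stats */
  track_earliest(now + 1200, &earliest);  /* e.g. dirreq stats */
  track_earliest(0, &earliest);           /* a subsystem with nothing pending */
  printf("next write in %ld seconds\n", (long)(earliest - now)); /* 1200 */
  return 0;
}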
1998 | | |
1999 | | static int |
2000 | | reset_padding_counts_callback(time_t now, const or_options_t *options) |
2001 | 0 | { |
2002 | 0 | if (options->PaddingStatistics) { |
2003 | 0 | rep_hist_prep_published_padding_counts(now); |
2004 | 0 | } |
2005 | |
2006 | 0 | rep_hist_reset_padding_counts(); |
2007 | 0 | return REPHIST_CELL_PADDING_COUNTS_INTERVAL; |
2008 | 0 | } |
2009 | | |
2010 | | static int should_init_bridge_stats = 1; |
2011 | | |
2012 | | /** |
2013 | | * Periodic callback: Write bridge statistics to disk if appropriate. |
2014 | | */ |
2015 | | static int |
2016 | | record_bridge_stats_callback(time_t now, const or_options_t *options) |
2017 | 0 | { |
2018 | | /* 1h. Check whether we should write bridge statistics to disk. |
2019 | | */ |
2020 | 0 | if (should_record_bridge_info(options)) { |
2021 | 0 | if (should_init_bridge_stats) { |
2022 | | /* (Re-)initialize bridge statistics. */ |
2023 | 0 | geoip_bridge_stats_init(now); |
2024 | 0 | should_init_bridge_stats = 0; |
2025 | 0 | return WRITE_STATS_INTERVAL; |
2026 | 0 | } else { |
2027 | | /* Possibly write bridge statistics to disk and ask when to write |
2028 | | * them next time. */ |
2029 | 0 | time_t next = geoip_bridge_stats_write(now); |
2030 | 0 | return safe_timer_diff(now, next); |
2031 | 0 | } |
2032 | 0 | } else if (!should_init_bridge_stats) { |
2033 | | /* Bridge mode was turned off. Ensure that stats are re-initialized |
2034 | | * next time bridge mode is turned on. */ |
2035 | 0 | should_init_bridge_stats = 1; |
2036 | 0 | } |
2037 | 0 | return PERIODIC_EVENT_NO_UPDATE; |
2038 | 0 | } |
2039 | | |
2040 | | /** |
2041 | | * Periodic callback: Clean in-memory caches every once in a while |
2042 | | */ |
2043 | | static int |
2044 | | clean_caches_callback(time_t now, const or_options_t *options) |
2045 | 0 | { |
2046 | | /* Remove old information from rephist and the rend cache. */ |
2047 | 0 | rep_history_clean(now - options->RephistTrackTime); |
2048 | 0 | hs_cache_clean_as_client(now); |
2049 | 0 | hs_cache_clean_as_dir(now); |
2050 | 0 | microdesc_cache_rebuild(NULL, 0); |
2051 | 0 | #define CLEAN_CACHES_INTERVAL (30*60) |
2052 | 0 | return CLEAN_CACHES_INTERVAL; |
2053 | 0 | } |
2054 | | |
2055 | | /** |
2056 | | * Periodic callback: Clean the cache of failed hidden service lookups |
2057 | | * frequently. |
2058 | | */ |
2059 | | static int |
2060 | | rend_cache_failure_clean_callback(time_t now, const or_options_t *options) |
2061 | 0 | { |
2062 | 0 | (void)options; |
2063 | | /* We don't keep entries that are more than five minutes old, so we clean
2064 | | * the cache as frequently as we can: we want the client to wait as little
2065 | | * as possible, for reachability reasons. */
2066 | 0 | hs_cache_client_intro_state_clean(now); |
2067 | 0 | return 30; |
2068 | 0 | } |
2069 | | |
2070 | | /** |
2071 | | * Periodic callback: prune routerlist of old information about Tor network. |
2072 | | */ |
2073 | | static int |
2074 | | prune_old_routers_callback(time_t now, const or_options_t *options) |
2075 | 0 | { |
2076 | 0 | #define ROUTERLIST_PRUNING_INTERVAL (60*60) // 1 hour. |
2077 | 0 | (void)now; |
2078 | 0 | (void)options; |
2079 | |
2080 | 0 | if (!net_is_disabled()) { |
2081 | | /* If any networkstatus documents are no longer recent, we need to |
2082 | | * update all the descriptors' running status. */ |
2083 | | /* Remove dead routers. */ |
2084 | 0 | log_debug(LD_GENERAL, "Pruning routerlist..."); |
2085 | 0 | routerlist_remove_old_routers(); |
2086 | 0 | } |
2087 | |
2088 | 0 | return ROUTERLIST_PRUNING_INTERVAL; |
2089 | 0 | } |
2090 | | |
2091 | | /** |
2092 | | * Periodic event: once a minute (or every second if TestingTorNetwork, or
2093 | | * during client bootstrap), check whether we want to download any
2094 | | * networkstatus documents. */ |
2095 | | static int |
2096 | | fetch_networkstatus_callback(time_t now, const or_options_t *options) |
2097 | 0 | { |
2098 | | /* How often do we check whether we should download network status |
2099 | | * documents? */ |
2100 | 0 | const int we_are_bootstrapping = networkstatus_consensus_is_bootstrapping( |
2101 | 0 | now); |
2102 | 0 | const int prefer_mirrors = !dirclient_fetches_from_authorities( |
2103 | 0 | get_options()); |
2104 | 0 | int networkstatus_dl_check_interval = 60; |
2105 | | /* check more often when testing, or when bootstrapping from mirrors |
2106 | | * (connection limits prevent too many connections being made) */ |
2107 | 0 | if (options->TestingTorNetwork |
2108 | 0 | || (we_are_bootstrapping && prefer_mirrors)) { |
2109 | 0 | networkstatus_dl_check_interval = 1; |
2110 | 0 | } |
2111 | |
2112 | 0 | if (should_delay_dir_fetches(options, NULL)) |
2113 | 0 | return PERIODIC_EVENT_NO_UPDATE; |
2114 | | |
2115 | 0 | update_networkstatus_downloads(now); |
2116 | 0 | return networkstatus_dl_check_interval; |
2117 | 0 | } |
2118 | | |
2119 | | /** |
2120 | | * Periodic callback: Every 60 seconds, we relaunch listeners if any died. */ |
2121 | | static int |
2122 | | retry_listeners_callback(time_t now, const or_options_t *options) |
2123 | 0 | { |
2124 | 0 | (void)now; |
2125 | 0 | (void)options; |
2126 | 0 | if (!net_is_disabled()) { |
2127 | 0 | retry_all_listeners(NULL, 0); |
2128 | 0 | return 60; |
2129 | 0 | } |
2130 | 0 | return PERIODIC_EVENT_NO_UPDATE; |
2131 | 0 | } |
2132 | | |
2133 | | static int heartbeat_callback_first_time = 1; |
2134 | | |
2135 | | /** |
2136 | | * Periodic callback: write the heartbeat message in the logs. |
2137 | | * |
2138 | | * If writing the heartbeat message to the logs fails for some reason, retry |
2139 | | * again after <b>MIN_HEARTBEAT_PERIOD</b> seconds. |
2140 | | */ |
2141 | | static int |
2142 | | heartbeat_callback(time_t now, const or_options_t *options) |
2143 | 0 | { |
2144 | | /* Check if heartbeat is disabled */ |
2145 | 0 | if (!options->HeartbeatPeriod) { |
2146 | 0 | return PERIODIC_EVENT_NO_UPDATE; |
2147 | 0 | } |
2148 | | |
2149 | | /* Skip the first one. */ |
2150 | 0 | if (heartbeat_callback_first_time) { |
2151 | 0 | heartbeat_callback_first_time = 0; |
2152 | 0 | return options->HeartbeatPeriod; |
2153 | 0 | } |
2154 | | |
2155 | | /* Write the heartbeat message */ |
2156 | 0 | if (log_heartbeat(now) == 0) { |
2157 | 0 | return options->HeartbeatPeriod; |
2158 | 0 | } else { |
2159 | | /* If we couldn't write the heartbeat log message, try again in the minimum |
2160 | | * interval of time. */ |
2161 | 0 | return MIN_HEARTBEAT_PERIOD; |
2162 | 0 | } |
2163 | 0 | } |
2164 | | |
2165 | 0 | #define CDM_CLEAN_CALLBACK_INTERVAL 600 |
2166 | | static int |
2167 | | clean_consdiffmgr_callback(time_t now, const or_options_t *options) |
2168 | 0 | { |
2169 | 0 | (void)now; |
2170 | 0 | if (dir_server_mode(options)) { |
2171 | 0 | consdiffmgr_cleanup(); |
2172 | 0 | } |
2173 | 0 | return CDM_CLEAN_CALLBACK_INTERVAL; |
2174 | 0 | } |
2175 | | |
2176 | | /* |
2177 | | * Periodic callback: Run scheduled events for HS service. This is called |
2178 | | * every second. |
2179 | | */ |
2180 | | static int |
2181 | | hs_service_callback(time_t now, const or_options_t *options) |
2182 | 0 | { |
2183 | 0 | (void) options; |
2184 | | |
2185 | | /* We need to at least be able to build circuits, and to actually have
2186 | | * a working network. */
2187 | 0 | if (!have_completed_a_circuit() || net_is_disabled() || |
2188 | 0 | !networkstatus_get_reasonably_live_consensus(now, |
2189 | 0 | usable_consensus_flavor())) { |
2190 | 0 | goto end; |
2191 | 0 | } |
2192 | | |
2193 | 0 | hs_service_run_scheduled_events(now); |
2194 | |
2195 | 0 | end: |
2196 | | /* Every 1 second. */ |
2197 | 0 | return 1; |
2198 | 0 | } |
2199 | | |
2200 | | /* |
2201 | | * Periodic callback: Send once-per-second events to the controller(s). |
2202 | | * This is called every second. |
2203 | | */ |
2204 | | static int |
2205 | | control_per_second_events_callback(time_t now, const or_options_t *options) |
2206 | 0 | { |
2207 | 0 | (void) options; |
2208 | 0 | (void) now; |
2209 | |
2210 | 0 | control_per_second_events(); |
2211 | |
2212 | 0 | return 1; |
2213 | 0 | } |
2214 | | |
2215 | | /** Last time that update_current_time was called. */ |
2216 | | static time_t current_second = 0; |
2217 | | /** Last time that update_current_time updated current_second. */ |
2218 | | static monotime_coarse_t current_second_last_changed; |
2219 | | |
2220 | | /** |
2221 | | * Set the current time to "now", which should be the value returned by |
2222 | | * time(). Check for clock jumps and track the total number of seconds we |
2223 | | * have been running. |
2224 | | */ |
2225 | | void |
2226 | | update_current_time(time_t now) |
2227 | 0 | { |
2228 | 0 | if (PREDICT_LIKELY(now == current_second)) { |
2229 | | /* We call this function a lot. Most frequently, the current second |
2230 | | * will not have changed, so we just return. */ |
2231 | 0 | return; |
2232 | 0 | } |
2233 | | |
2234 | 0 | const time_t seconds_elapsed = current_second ? (now - current_second) : 0; |
2235 | | |
2236 | | /* Check the wall clock against the monotonic clock, so we can |
2237 | | * better tell idleness from clock jumps and/or other shenanigans. */ |
2238 | 0 | monotime_coarse_t last_updated; |
2239 | 0 | memcpy(&last_updated, ¤t_second_last_changed, sizeof(last_updated)); |
2240 | 0 | monotime_coarse_get(¤t_second_last_changed); |
2241 | | |
2242 | | /** How much clock jumping means that we should adjust our idea of when |
2243 | | * to go dormant? */ |
2244 | 0 | #define NUM_JUMPED_SECONDS_BEFORE_NETSTATUS_UPDATE 20 |
2245 | | |
2246 | | /* Don't go dormant early or late just because we jumped in time. */ |
2247 | 0 | if (ABS(seconds_elapsed) >= NUM_JUMPED_SECONDS_BEFORE_NETSTATUS_UPDATE) { |
2248 | 0 | if (is_participating_on_network()) { |
2249 | 0 | netstatus_note_clock_jumped(seconds_elapsed); |
2250 | 0 | } |
2251 | 0 | } |
2252 | | |
2253 | | /** How much clock jumping do we tolerate? */ |
2254 | 0 | #define NUM_JUMPED_SECONDS_BEFORE_WARN 100 |
2255 | | |
2256 | | /** How much idleness do we tolerate? */ |
2257 | 0 | #define NUM_IDLE_SECONDS_BEFORE_WARN 3600 |
2258 | |
2259 | 0 | if (seconds_elapsed < -NUM_JUMPED_SECONDS_BEFORE_WARN) { |
2260 | | // moving back in time is always a bad sign. |
2261 | 0 | circuit_note_clock_jumped(seconds_elapsed, false); |
2262 | |
2263 | 0 | } else if (seconds_elapsed >= NUM_JUMPED_SECONDS_BEFORE_WARN) { |
2264 | | /* Compare the monotonic clock to the result of time(). */ |
2265 | 0 | const int32_t monotime_msec_passed = |
2266 | 0 | monotime_coarse_diff_msec32(&last_updated, |
2267 | 0 | ¤t_second_last_changed); |
2268 | 0 | const int monotime_sec_passed = monotime_msec_passed / 1000; |
2269 | 0 | const int discrepancy = monotime_sec_passed - (int)seconds_elapsed; |
2270 | | /* If the monotonic clock deviates from time(NULL), we have a couple of |
2271 | | * possibilities. On some systems, this means we have been suspended or |
2272 | | * sleeping. Everywhere, it can mean that the wall-clock time has |
2273 | | * been changed -- for example, with settimeofday(). |
2274 | | * |
2275 | | * On the other hand, if the monotonic time matches with the wall-clock |
2276 | | * time, we've probably just been idle for a while, with no events firing. |
2277 | | * We tolerate much more of that.
2278 | | */ |
2279 | 0 | const bool clock_jumped = abs(discrepancy) > 2; |
2280 | |
2281 | 0 | if (clock_jumped || seconds_elapsed >= NUM_IDLE_SECONDS_BEFORE_WARN) { |
2282 | 0 | circuit_note_clock_jumped(seconds_elapsed, ! clock_jumped); |
2283 | 0 | } |
2284 | 0 | } else if (seconds_elapsed > 0) { |
2285 | 0 | stats_n_seconds_working += seconds_elapsed; |
2286 | 0 | } |
2287 | |
2288 | 0 | update_approx_time(now); |
2289 | 0 | current_second = now; |
2290 | 0 | } |
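
Concretely: if time() says 150 seconds passed while the coarse monotonic clock also advanced about 150 seconds, the discrepancy is near zero and we were merely idle; if the monotonic clock advanced only 2 seconds, the wall clock must have been changed. A standalone sketch of that classification (the 2-second tolerance matches the test above; the separate idle-warning threshold is handled in the real function):

#include <stdio.h>
#include <stdlib.h>

/* Classify a gap between wall-clock readings, mirroring the discrepancy
 * test in update_current_time(): a monotonic clock that disagrees with
 * time() by more than 2 seconds indicates a clock jump or a suspend,
 * not mere idleness. */
static const char *
classify_gap(int wall_seconds, int monotonic_seconds)
{
  int discrepancy = monotonic_seconds - wall_seconds;
  return (abs(discrepancy) > 2) ? "clock jump" : "idle";
}

int
main(void)
{
  printf("%s\n", classify_gap(150, 149)); /* idle: the clocks agree */
  printf("%s\n", classify_gap(150, 2));   /* clock jump: they do not */
  return 0;
}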
2291 | | |
2292 | | #ifdef HAVE_SYSTEMD_209 |
2293 | | static periodic_timer_t *systemd_watchdog_timer = NULL; |
2294 | | |
2295 | | /** Libevent callback: invoked to reset systemd watchdog. */ |
2296 | | static void |
2297 | | systemd_watchdog_callback(periodic_timer_t *timer, void *arg) |
2298 | | { |
2299 | | (void)timer; |
2300 | | (void)arg; |
2301 | | sd_notify(0, "WATCHDOG=1"); |
2302 | | } |
2303 | | #endif /* defined(HAVE_SYSTEMD_209) */ |
2304 | | |
2305 | 0 | #define UPTIME_CUTOFF_FOR_NEW_BANDWIDTH_TEST (6*60*60) |
2306 | | |
2307 | | /** Called when our IP address seems to have changed. <b>on_client_conn</b> |
2308 | | * should be true if: |
2309 | | * - we detected a change in our interface address, using an outbound |
2310 | | * connection, and therefore |
2311 | | * - our client TLS keys need to be rotated. |
2312 | | * Otherwise, it should be false, and: |
2313 | | * - we detected a change in our published address |
2314 | | * (using some other method), and therefore |
2315 | | * - the published addresses in our descriptor need to change. |
2316 | | */ |
2317 | | void |
2318 | | ip_address_changed(int on_client_conn) |
2319 | 0 | { |
2320 | 0 | const or_options_t *options = get_options(); |
2321 | 0 | int server = server_mode(options); |
2322 | |
|
2323 | 0 | if (on_client_conn) { |
2324 | 0 | if (! server) { |
2325 | | /* Okay, change our keys. */ |
2326 | 0 | if (init_keys_client() < 0) |
2327 | 0 | log_warn(LD_GENERAL, "Unable to rotate keys after IP change!"); |
2328 | 0 | } |
2329 | 0 | } else { |
2330 | 0 | if (server) { |
2331 | 0 | if (get_uptime() > UPTIME_CUTOFF_FOR_NEW_BANDWIDTH_TEST) |
2332 | 0 | reset_bandwidth_test(); |
2333 | 0 | reset_uptime(); |
2334 | 0 | router_reset_reachability(); |
2335 | | /* All relays include their IP addresses as their ORPort addresses in |
2336 | | * their descriptor. |
2337 | | * Exit relays also incorporate interface addresses in their exit |
2338 | | * policies, when ExitPolicyRejectLocalInterfaces is set. */ |
2339 | 0 | mark_my_descriptor_dirty("IP address changed"); |
2340 | 0 | } |
2341 | 0 | } |
2342 | |
2343 | 0 | dns_servers_relaunch_checks(); |
2344 | 0 | } |
2345 | | |
2346 | | /** Forget what we've learned about the correctness of our DNS servers, and |
2347 | | * start learning again. */ |
2348 | | void |
2349 | | dns_servers_relaunch_checks(void) |
2350 | 0 | { |
2351 | 0 | if (server_mode(get_options())) { |
2352 | 0 | dns_reset_correctness_checks(); |
2353 | 0 | if (check_dns_honesty_event) { |
2354 | 0 | periodic_event_reschedule(check_dns_honesty_event); |
2355 | 0 | } |
2356 | 0 | } |
2357 | 0 | } |
2358 | | |
2359 | | /** Initialize some mainloop_event_t objects that we require. */ |
2360 | | void |
2361 | | initialize_mainloop_events(void) |
2362 | 0 | { |
2363 | 0 | if (!schedule_active_linked_connections_event) { |
2364 | 0 | schedule_active_linked_connections_event = |
2365 | 0 | mainloop_event_postloop_new(schedule_active_linked_connections_cb, NULL); |
2366 | 0 | } |
2367 | 0 | if (!postloop_cleanup_ev) { |
2368 | 0 | postloop_cleanup_ev = |
2369 | 0 | mainloop_event_postloop_new(postloop_cleanup_cb, NULL); |
2370 | 0 | } |
2371 | 0 | } |
2372 | | |
2373 | | /** Tor main loop. */ |
2374 | | int |
2375 | | do_main_loop(void) |
2376 | 0 | { |
2377 | | /* initialize the periodic events first, so that code that depends on the |
2378 | | * events being present does not assert. |
2379 | | */ |
2380 | 0 | tor_assert(periodic_events_initialized); |
2381 | 0 | initialize_mainloop_events(); |
2382 | |
2383 | 0 | periodic_events_connect_all(); |
2384 | |
2385 | 0 | struct timeval one_second = { 1, 0 }; |
2386 | 0 | initialize_periodic_events_event = tor_evtimer_new( |
2387 | 0 | tor_libevent_get_base(), |
2388 | 0 | initialize_periodic_events_cb, NULL); |
2389 | 0 | event_add(initialize_periodic_events_event, &one_second); |
2390 | |
2391 | | #ifdef HAVE_SYSTEMD_209 |
2392 | | uint64_t watchdog_delay; |
2393 | | /* set up systemd watchdog notification. */ |
2394 | | if (sd_watchdog_enabled(1, &watchdog_delay) > 0) { |
2395 | | if (! systemd_watchdog_timer) { |
2396 | | struct timeval watchdog; |
2397 | | /* The manager will "act on" us if we don't send them a notification |
2398 | | * every 'watchdog_delay' microseconds. So, send notifications twice |
2399 | | * that often. */ |
2400 | | watchdog_delay /= 2; |
2401 | | watchdog.tv_sec = watchdog_delay / 1000000; |
2402 | | watchdog.tv_usec = watchdog_delay % 1000000; |
2403 | | |
2404 | | systemd_watchdog_timer = periodic_timer_new(tor_libevent_get_base(), |
2405 | | &watchdog, |
2406 | | systemd_watchdog_callback, |
2407 | | NULL); |
2408 | | tor_assert(systemd_watchdog_timer); |
2409 | | } |
2410 | | } |
2411 | | #endif /* defined(HAVE_SYSTEMD_209) */ |
2412 | | #ifdef ENABLE_RESTART_DEBUGGING |
2413 | | { |
2414 | | static int first_time = 1; |
2415 | | |
2416 | | if (first_time && getenv("TOR_DEBUG_RESTART")) { |
2417 | | first_time = 0; |
2418 | | const char *sec_str = getenv("TOR_DEBUG_RESTART_AFTER_SECONDS"); |
2419 | | long sec; |
2420 | | int sec_ok=0; |
2421 | | if (sec_str && |
2422 | | (sec = tor_parse_long(sec_str, 10, 0, INT_MAX, &sec_ok, NULL)) && |
2423 | | sec_ok) { |
2424 | | /* Okay, we parsed the seconds. */ |
2425 | | } else { |
2426 | | sec = 5; |
2427 | | } |
2428 | | struct timeval restart_after = { (time_t) sec, 0 }; |
2429 | | tor_shutdown_event_loop_for_restart_event = |
2430 | | tor_evtimer_new(tor_libevent_get_base(), |
2431 | | tor_shutdown_event_loop_for_restart_cb, NULL); |
2432 | | event_add(tor_shutdown_event_loop_for_restart_event, &restart_after); |
2433 | | } |
2434 | | } |
2435 | | #endif /* defined(ENABLE_RESTART_DEBUGGING) */ |
2436 | |
2437 | 0 | return run_main_loop_until_done(); |
2438 | 0 | } |
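
The watchdog setup above pets the dog at half the deadline the service manager advertises. For example, assuming WatchdogSec=30 in the service unit, sd_watchdog_enabled() reports 30,000,000 microseconds, and we send WATCHDOG=1 every 15 seconds. A standalone sketch of just the interval computation:

#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

int
main(void)
{
  uint64_t watchdog_delay = 30000000; /* e.g. WatchdogSec=30, in microseconds */
  watchdog_delay /= 2;                /* notify twice per deadline */
  struct timeval watchdog;
  watchdog.tv_sec = watchdog_delay / 1000000;
  watchdog.tv_usec = watchdog_delay % 1000000;
  /* Prints "pet the watchdog every 15.000000 s". */
  printf("pet the watchdog every %ld.%06ld s\n",
         (long)watchdog.tv_sec, (long)watchdog.tv_usec);
  return 0;
}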
2439 | | |
2440 | | #ifndef _WIN32 |
2441 | | /** Rate-limiter for EINVAL-type libevent warnings. */ |
2442 | | static ratelim_t libevent_error_ratelim = RATELIM_INIT(10); |
2443 | | #endif |
2444 | | |
2445 | | /** |
2446 | | * Run the main loop a single time. Return 0 for "exit", -1 for "exit with
2447 | | * error", and 1 for "run this again." |
2448 | | */ |
2449 | | static int |
2450 | | run_main_loop_once(void) |
2451 | 0 | { |
2452 | 0 | int loop_result; |
2453 | |
2454 | 0 | if (nt_service_is_stopping()) |
2455 | 0 | return 0; |
2456 | | |
2457 | 0 | if (main_loop_should_exit) |
2458 | 0 | return 0; |
2459 | | |
2460 | 0 | #ifndef _WIN32 |
2461 | | /* Make it easier to tell whether libevent failure is our fault or not. */ |
2462 | 0 | errno = 0; |
2463 | 0 | #endif |
2464 | |
2465 | 0 | if (get_options()->MainloopStats) { |
2466 | | /* We always enforce that EVLOOP_ONCE is passed to event_base_loop() if we |
2467 | | * are collecting main loop statistics. */ |
2468 | 0 | called_loop_once = 1; |
2469 | 0 | } else { |
2470 | 0 | called_loop_once = 0; |
2471 | 0 | } |
2472 | | |
2473 | | /* Make sure we know (about) what time it is. */ |
2474 | 0 | update_approx_time(time(NULL)); |
2475 | | |
2476 | | /* Here it is: the main loop. Here we tell Libevent to poll until we have |
2477 | | * an event, or the second ends, or until we have some active linked |
2478 | | * connections to trigger events for. Libevent will wait till one |
2479 | | * of these happens, then run all the appropriate callbacks. */ |
2480 | 0 | loop_result = tor_libevent_run_event_loop(tor_libevent_get_base(), |
2481 | 0 | called_loop_once); |
2482 | |
2483 | 0 | if (get_options()->MainloopStats) { |
2484 | | /* Update our main loop counters. */ |
2485 | 0 | if (loop_result == 0) { |
2486 | | // The call was successful. |
2487 | 0 | increment_main_loop_success_count(); |
2488 | 0 | } else if (loop_result == -1) { |
2489 | | // The call was erroneous. |
2490 | 0 | increment_main_loop_error_count(); |
2491 | 0 | } else if (loop_result == 1) { |
2492 | | // The call didn't have any active or pending events |
2493 | | // to handle. |
2494 | 0 | increment_main_loop_idle_count(); |
2495 | 0 | } |
2496 | 0 | } |
2497 | | |
2498 | | /* Oh, the loop failed. That might be an error that we need to |
2499 | | * catch, but more likely, it's just an interrupted poll() call or something, |
2500 | | * and we should try again. */ |
2501 | 0 | if (loop_result < 0) { |
2502 | 0 | int e = tor_socket_errno(-1); |
2503 | | /* let the program survive things like ^z */ |
2504 | 0 | if (e != EINTR && !ERRNO_IS_EINPROGRESS(e)) { |
2505 | 0 | log_err(LD_NET,"libevent call with %s failed: %s [%d]", |
2506 | 0 | tor_libevent_get_method(), tor_socket_strerror(e), e); |
2507 | 0 | return -1; |
2508 | 0 | #ifndef _WIN32 |
2509 | 0 | } else if (e == EINVAL) { |
2510 | 0 | log_fn_ratelim(&libevent_error_ratelim, LOG_WARN, LD_NET, |
2511 | 0 | "EINVAL from libevent: should you upgrade libevent?"); |
2512 | 0 | if (libevent_error_ratelim.n_calls_since_last_time > 8) { |
2513 | 0 | log_err(LD_NET, "Too many libevent errors, too fast: dying"); |
2514 | 0 | return -1; |
2515 | 0 | } |
2516 | 0 | #endif /* !defined(_WIN32) */ |
2517 | 0 | } else { |
2518 | 0 | tor_assert_nonfatal_once(! ERRNO_IS_EINPROGRESS(e)); |
2519 | 0 | log_debug(LD_NET,"libevent call interrupted."); |
2520 | | /* You can't trust the results of this poll(). Go back to the |
2521 | | * top of the big for loop. */ |
2522 | 0 | return 1; |
2523 | 0 | } |
2524 | 0 | } |
2525 | | |
2526 | 0 | if (main_loop_should_exit) |
2527 | 0 | return 0; |
2528 | | |
2529 | 0 | return 1; |
2530 | 0 | } |
2531 | | |
2532 | | /** Run the run_main_loop_once() function until it declares itself done, |
2533 | | * and return its final return value. |
2534 | | * |
2535 | | * Shadow won't invoke this function, so don't fill it up with things. |
2536 | | */ |
2537 | | STATIC int |
2538 | | run_main_loop_until_done(void) |
2539 | 0 | { |
2540 | 0 | int loop_result = 1; |
2541 | |
2542 | 0 | main_loop_should_exit = 0; |
2543 | 0 | main_loop_exit_value = 0; |
2544 | |
2545 | 0 | do { |
2546 | 0 | loop_result = run_main_loop_once(); |
2547 | 0 | } while (loop_result == 1); |
2548 | |
2549 | 0 | if (main_loop_should_exit) |
2550 | 0 | return main_loop_exit_value; |
2551 | 0 | else |
2552 | 0 | return loop_result; |
2553 | 0 | } |
2554 | | |
2555 | | /** Returns Tor's uptime. */ |
2556 | | MOCK_IMPL(long, |
2557 | | get_uptime,(void)) |
2558 | 0 | { |
2559 | 0 | return stats_n_seconds_working; |
2560 | 0 | } |
2561 | | |
2562 | | /** Reset Tor's uptime. */ |
2563 | | MOCK_IMPL(void, |
2564 | | reset_uptime,(void)) |
2565 | 0 | { |
2566 | 0 | stats_n_seconds_working = 0; |
2567 | 0 | } |
2568 | | |
2569 | | void |
2570 | | tor_mainloop_free_all(void) |
2571 | 0 | { |
2572 | 0 | smartlist_free(connection_array); |
2573 | 0 | smartlist_free(closeable_connection_lst); |
2574 | 0 | smartlist_free(active_linked_connection_lst); |
2575 | 0 | teardown_periodic_events(); |
2576 | 0 | tor_event_free(shutdown_did_not_work_event); |
2577 | 0 | tor_event_free(initialize_periodic_events_event); |
2578 | 0 | mainloop_event_free(directory_all_unreachable_cb_event); |
2579 | 0 | mainloop_event_free(schedule_active_linked_connections_event); |
2580 | 0 | mainloop_event_free(postloop_cleanup_ev); |
2581 | 0 | mainloop_event_free(handle_deferred_signewnym_ev); |
2582 | 0 | mainloop_event_free(scheduled_shutdown_ev); |
2583 | 0 | mainloop_event_free(rescan_periodic_events_ev); |
2584 | |
2585 | | #ifdef HAVE_SYSTEMD_209 |
2586 | | periodic_timer_free(systemd_watchdog_timer); |
2587 | | #endif |
2588 | |
2589 | 0 | stats_n_bytes_read = stats_n_bytes_written = 0; |
2590 | |
2591 | 0 | memset(&global_bucket, 0, sizeof(global_bucket)); |
2592 | 0 | memset(&global_relayed_bucket, 0, sizeof(global_relayed_bucket)); |
2593 | 0 | time_of_process_start = 0; |
2594 | 0 | time_of_last_signewnym = 0; |
2595 | 0 | signewnym_is_pending = 0; |
2596 | 0 | newnym_epoch = 0; |
2597 | 0 | called_loop_once = 0; |
2598 | 0 | main_loop_should_exit = 0; |
2599 | 0 | main_loop_exit_value = 0; |
2600 | 0 | can_complete_circuits = 0; |
2601 | 0 | quiet_level = 0; |
2602 | 0 | should_init_bridge_stats = 1; |
2603 | 0 | heartbeat_callback_first_time = 1; |
2604 | 0 | current_second = 0; |
2605 | 0 | memset(¤t_second_last_changed, 0, |
2606 | 0 | sizeof(current_second_last_changed)); |
2607 | 0 | } |