/src/tor/src/feature/dircache/dirserv.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (c) 2001-2004, Roger Dingledine. |
2 | | * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson. |
3 | | * Copyright (c) 2007-2021, The Tor Project, Inc. */ |
4 | | /* See LICENSE for licensing information */ |
5 | | |
6 | | #include "core/or/or.h" |
7 | | |
8 | | #include "app/config/config.h" |
9 | | #include "core/mainloop/connection.h" |
10 | | #include "feature/dircache/conscache.h" |
11 | | #include "feature/dircache/consdiffmgr.h" |
12 | | #include "feature/dircommon/directory.h" |
13 | | #include "feature/dircache/dirserv.h" |
14 | | #include "feature/nodelist/microdesc.h" |
15 | | #include "feature/nodelist/routerlist.h" |
16 | | #include "feature/relay/router.h" |
17 | | #include "feature/relay/routermode.h" |
18 | | #include "feature/stats/predict_ports.h" |
19 | | |
20 | | #include "feature/dircache/cached_dir_st.h" |
21 | | #include "feature/dircommon/dir_connection_st.h" |
22 | | #include "feature/nodelist/extrainfo_st.h" |
23 | | #include "feature/nodelist/microdesc_st.h" |
24 | | #include "feature/nodelist/routerinfo_st.h" |
25 | | #include "feature/nodelist/routerlist_st.h" |
26 | | |
27 | | #include "lib/compress/compress.h" |
28 | | |
29 | | /** |
30 | | * \file dirserv.c |
31 | | * \brief Directory server core implementation. Manages directory |
32 | | * contents and generates directory documents. |
33 | | * |
34 | | * This module implements most of directory cache functionality, and some of |
35 | | * the directory authority functionality. The directory.c module delegates |
36 | | * here in order to handle incoming requests from clients, via |
37 | | * connection_dirserv_flushed_some() and its kin. In order to save RAM, this |
38 | | * module is responsible for spooling directory objects (in whole or in part) |
39 | | * onto buf_t instances, and then closing the dir_connection_t once the |
40 | | * objects are totally flushed. |
41 | | * |
42 | | * The directory.c module also delegates here for handling descriptor uploads |
43 | | * via dirserv_add_multiple_descriptors(). |
44 | | * |
45 | | * Additionally, this module handles some aspects of voting, including: |
46 | | * deciding how to vote on individual flags (based on decisions reached in |
47 | | * rephist.c), of formatting routerstatus lines, and deciding what relays to |
48 | | * include in an authority's vote. (TODO: Those functions could profitably be |
49 | | * split off. They only live in this file because historically they were |
50 | | * shared among the v1, v2, and v3 directory code.) |
51 | | */ |
52 | | |
53 | | static void clear_cached_dir(cached_dir_t *d); |
54 | | static const signed_descriptor_t *get_signed_descriptor_by_fp( |
55 | | const uint8_t *fp, |
56 | | int extrainfo); |
57 | | |
58 | | static int spooled_resource_lookup_body(const spooled_resource_t *spooled, |
59 | | int conn_is_encrypted, |
60 | | const uint8_t **body_out, |
61 | | size_t *size_out, |
62 | | time_t *published_out); |
63 | | static cached_dir_t *spooled_resource_lookup_cached_dir( |
64 | | const spooled_resource_t *spooled, |
65 | | time_t *published_out); |
66 | | static cached_dir_t *lookup_cached_dir_by_fp(const uint8_t *fp); |
67 | | |
68 | | /********************************************************************/ |
69 | | |
70 | | /* A set of functions to answer questions about how we'd like to behave |
71 | | * as a directory mirror */ |
72 | | |
73 | | /** Return true iff we want to serve certificates for authorities |
74 | | * that we don't acknowledge as authorities ourself. |
75 | | * Use we_want_to_fetch_unknown_auth_certs to check if we want to fetch |
76 | | * and keep these certificates. |
77 | | */ |
78 | | int |
79 | | directory_caches_unknown_auth_certs(const or_options_t *options) |
80 | 0 | { |
81 | 0 | return dir_server_mode(options) || options->BridgeRelay; |
82 | 0 | } |
83 | | |
84 | | /** Return 1 if we want to fetch and serve descriptors, networkstatuses, etc |
85 | | * Else return 0. |
86 | | * Check options->DirPort_set and directory_permits_begindir_requests() |
87 | | * to see if we are willing to serve these directory documents to others via |
88 | | * the DirPort and begindir-over-ORPort, respectively. |
89 | | * |
90 | | * To check if we should fetch documents, use we_want_to_fetch_flavor and |
91 | | * we_want_to_fetch_unknown_auth_certs instead of this function. |
92 | | */ |
93 | | int |
94 | | directory_caches_dir_info(const or_options_t *options) |
95 | 0 | { |
96 | 0 | if (options->BridgeRelay || dir_server_mode(options)) |
97 | 0 | return 1; |
98 | 0 | if (!server_mode(options) || !advertised_server_mode()) |
99 | 0 | return 0; |
100 | | /* We need an up-to-date view of network info if we're going to try to |
101 | | * block exit attempts from unknown relays. */ |
102 | 0 | return ! router_my_exit_policy_is_reject_star() && |
103 | 0 | should_refuse_unknown_exits(options); |
104 | 0 | } |
105 | | |
106 | | /** Return 1 if we want to allow remote clients to ask us directory |
107 | | * requests via the "begin_dir" interface, which doesn't require |
108 | | * having any separate port open. */ |
109 | | int |
110 | | directory_permits_begindir_requests(const or_options_t *options) |
111 | 0 | { |
112 | 0 | return options->BridgeRelay != 0 || dir_server_mode(options); |
113 | 0 | } |
114 | | |
115 | | /********************************************************************/ |
116 | | |
117 | | /** Map from flavor name to the cached_dir_t for the v3 consensuses that we're |
118 | | * currently serving. */ |
119 | | static strmap_t *cached_consensuses = NULL; |
120 | | |
121 | | /** Decrement the reference count on <b>d</b>, and free it if it no longer has |
122 | | * any references. */ |
123 | | void |
124 | | cached_dir_decref(cached_dir_t *d) |
125 | 0 | { |
126 | 0 | if (!d || --d->refcnt > 0) |
127 | 0 | return; |
128 | 0 | clear_cached_dir(d); |
129 | 0 | tor_free(d); |
130 | 0 | } |
131 | | |
132 | | /** Allocate and return a new cached_dir_t containing the string <b>s</b>, |
133 | | * published at <b>published</b>. */ |
134 | | cached_dir_t * |
135 | | new_cached_dir(char *s, time_t published) |
136 | 0 | { |
137 | 0 | cached_dir_t *d = tor_malloc_zero(sizeof(cached_dir_t)); |
138 | 0 | d->refcnt = 1; |
139 | 0 | d->dir = s; |
140 | 0 | d->dir_len = strlen(s); |
141 | 0 | d->published = published; |
142 | 0 | if (tor_compress(&(d->dir_compressed), &(d->dir_compressed_len), |
143 | 0 | d->dir, d->dir_len, ZLIB_METHOD)) { |
144 | 0 | log_warn(LD_BUG, "Error compressing directory"); |
145 | 0 | } |
146 | 0 | return d; |
147 | 0 | } |
148 | | |
/** Remove all storage held in <b>d</b>, but do not free <b>d</b> itself. */
static void
clear_cached_dir(cached_dir_t *d)
{
  tor_free(d->dir);
  tor_free(d->dir_compressed);
  /* Zero the whole struct so stale refcnt/length/digest values can't be
   * misused if the caller reuses the memory. */
  memset(d, 0, sizeof(cached_dir_t));
}
157 | | |
158 | | /** Free all storage held by the cached_dir_t in <b>d</b>. */ |
159 | | static void |
160 | | free_cached_dir_(void *_d) |
161 | 0 | { |
162 | 0 | cached_dir_t *d; |
163 | 0 | if (!_d) |
164 | 0 | return; |
165 | | |
166 | 0 | d = (cached_dir_t *)_d; |
167 | 0 | cached_dir_decref(d); |
168 | 0 | } |
169 | | |
170 | | /** Replace the v3 consensus networkstatus of type <b>flavor_name</b> that |
171 | | * we're serving with <b>networkstatus</b>, published at <b>published</b>. No |
172 | | * validation is performed. */ |
173 | | void |
174 | | dirserv_set_cached_consensus_networkstatus(const char *networkstatus, |
175 | | size_t networkstatus_len, |
176 | | const char *flavor_name, |
177 | | const common_digests_t *digests, |
178 | | const uint8_t *sha3_as_signed, |
179 | | time_t published) |
180 | 0 | { |
181 | 0 | cached_dir_t *new_networkstatus; |
182 | 0 | cached_dir_t *old_networkstatus; |
183 | 0 | if (!cached_consensuses) |
184 | 0 | cached_consensuses = strmap_new(); |
185 | |
|
186 | 0 | new_networkstatus = |
187 | 0 | new_cached_dir(tor_memdup_nulterm(networkstatus, networkstatus_len), |
188 | 0 | published); |
189 | 0 | memcpy(&new_networkstatus->digests, digests, sizeof(common_digests_t)); |
190 | 0 | memcpy(&new_networkstatus->digest_sha3_as_signed, sha3_as_signed, |
191 | 0 | DIGEST256_LEN); |
192 | 0 | old_networkstatus = strmap_set(cached_consensuses, flavor_name, |
193 | 0 | new_networkstatus); |
194 | 0 | if (old_networkstatus) |
195 | 0 | cached_dir_decref(old_networkstatus); |
196 | 0 | } |
197 | | |
198 | | /** Return the latest downloaded consensus networkstatus in encoded, signed, |
199 | | * optionally compressed format, suitable for sending to clients. */ |
200 | | MOCK_IMPL(cached_dir_t *, |
201 | | dirserv_get_consensus,(const char *flavor_name)) |
202 | 0 | { |
203 | 0 | if (!cached_consensuses) |
204 | 0 | return NULL; |
205 | 0 | return strmap_get(cached_consensuses, flavor_name); |
206 | 0 | } |
207 | | |
208 | | /** As dir_split_resource_into_fingerprints, but instead fills |
209 | | * <b>spool_out</b> with a list of spoolable_resource_t for the resource |
210 | | * identified through <b>source</b>. */ |
211 | | int |
212 | | dir_split_resource_into_spoolable(const char *resource, |
213 | | dir_spool_source_t source, |
214 | | smartlist_t *spool_out, |
215 | | int *compressed_out, |
216 | | int flags) |
217 | 0 | { |
218 | 0 | smartlist_t *fingerprints = smartlist_new(); |
219 | |
|
220 | 0 | tor_assert(flags & (DSR_HEX|DSR_BASE64)); |
221 | 0 | const size_t digest_len = |
222 | 0 | (flags & DSR_DIGEST256) ? DIGEST256_LEN : DIGEST_LEN; |
223 | |
|
224 | 0 | int r = dir_split_resource_into_fingerprints(resource, fingerprints, |
225 | 0 | compressed_out, flags); |
226 | | /* This is not a very efficient implementation XXXX */ |
227 | 0 | SMARTLIST_FOREACH_BEGIN(fingerprints, uint8_t *, digest) { |
228 | 0 | spooled_resource_t *spooled = |
229 | 0 | spooled_resource_new(source, digest, digest_len); |
230 | 0 | if (spooled) |
231 | 0 | smartlist_add(spool_out, spooled); |
232 | 0 | tor_free(digest); |
233 | 0 | } SMARTLIST_FOREACH_END(digest); |
234 | |
|
235 | 0 | smartlist_free(fingerprints); |
236 | 0 | return r; |
237 | 0 | } |
238 | | |
/** As dirserv_get_routerdescs(), but instead of getting signed_descriptor_t
 * pointers, adds copies of digests to fps_out, and doesn't use the
 * /tor/server/ prefix. For a /d/ request, adds descriptor digests; for other
 * requests, adds identity digests.
 *
 * On success, fills <b>spool_out</b> and returns 0. On failure, sets
 * *<b>msg_out</b> to a static error string and returns -1.
 */
int
dirserv_get_routerdesc_spool(smartlist_t *spool_out,
                             const char *key,
                             dir_spool_source_t source,
                             int conn_is_encrypted,
                             const char **msg_out)
{
  *msg_out = NULL;

  if (!strcmp(key, "all")) {
    /* Spool every router we know about, by identity digest. */
    const routerlist_t *rl = router_get_routerlist();
    SMARTLIST_FOREACH_BEGIN(rl->routers, const routerinfo_t *, r) {
      spooled_resource_t *spooled;
      spooled = spooled_resource_new(source,
                              (const uint8_t *)r->cache_info.identity_digest,
                              DIGEST_LEN);
      /* Treat "all" requests as if they were unencrypted: this overwrites
       * the parameter so the filtering pass below drops anything that may
       * not be sent in the clear, even on an encrypted connection. */
      conn_is_encrypted = 0;
      smartlist_add(spool_out, spooled);
    } SMARTLIST_FOREACH_END(r);
  } else if (!strcmp(key, "authority")) {
    /* Just our own descriptor, if we have one. */
    const routerinfo_t *ri = router_get_my_routerinfo();
    if (ri)
      smartlist_add(spool_out,
                    spooled_resource_new(source,
                             (const uint8_t *)ri->cache_info.identity_digest,
                             DIGEST_LEN));
  } else if (!strcmpstart(key, "d/")) {
    /* "/d/" prefix: the remainder is a list of descriptor digests. */
    key += strlen("d/");
    dir_split_resource_into_spoolable(key, source, spool_out, NULL,
                                      DSR_HEX|DSR_SORT_UNIQ);
  } else if (!strcmpstart(key, "fp/")) {
    /* "/fp/" prefix: the remainder is a list of identity fingerprints. */
    key += strlen("fp/");
    dir_split_resource_into_spoolable(key, source, spool_out, NULL,
                                      DSR_HEX|DSR_SORT_UNIQ);
  } else {
    *msg_out = "Not found";
    return -1;
  }

  if (! conn_is_encrypted) {
    /* Remove anything that insists it not be sent unencrypted. */
    SMARTLIST_FOREACH_BEGIN(spool_out, spooled_resource_t *, spooled) {
      const uint8_t *body = NULL;
      size_t bodylen = 0;
      int r = spooled_resource_lookup_body(spooled, conn_is_encrypted,
                                           &body, &bodylen, NULL);
      if (r < 0 || body == NULL || bodylen == 0) {
        SMARTLIST_DEL_CURRENT(spool_out, spooled);
        spooled_resource_free(spooled);
      }
    } SMARTLIST_FOREACH_END(spooled);
  }

  if (!smartlist_len(spool_out)) {
    *msg_out = "Servers unavailable";
    return -1;
  }
  return 0;
}
304 | | |
305 | | /* ========== |
306 | | * Spooling code. |
307 | | * ========== */ |
308 | | |
309 | | spooled_resource_t * |
310 | | spooled_resource_new(dir_spool_source_t source, |
311 | | const uint8_t *digest, size_t digestlen) |
312 | 0 | { |
313 | 0 | spooled_resource_t *spooled = tor_malloc_zero(sizeof(spooled_resource_t)); |
314 | 0 | spooled->spool_source = source; |
315 | 0 | switch (source) { |
316 | 0 | case DIR_SPOOL_NETWORKSTATUS: |
317 | 0 | spooled->spool_eagerly = 0; |
318 | 0 | break; |
319 | 0 | case DIR_SPOOL_SERVER_BY_DIGEST: |
320 | 0 | case DIR_SPOOL_SERVER_BY_FP: |
321 | 0 | case DIR_SPOOL_EXTRA_BY_DIGEST: |
322 | 0 | case DIR_SPOOL_EXTRA_BY_FP: |
323 | 0 | case DIR_SPOOL_MICRODESC: |
324 | 0 | default: |
325 | 0 | spooled->spool_eagerly = 1; |
326 | 0 | break; |
327 | 0 | case DIR_SPOOL_CONSENSUS_CACHE_ENTRY: |
328 | 0 | tor_assert_unreached(); |
329 | 0 | break; |
330 | 0 | } |
331 | 0 | tor_assert(digestlen <= sizeof(spooled->digest)); |
332 | 0 | if (digest) |
333 | 0 | memcpy(spooled->digest, digest, digestlen); |
334 | 0 | return spooled; |
335 | 0 | } |
336 | | |
337 | | /** |
338 | | * Create a new spooled_resource_t to spool the contents of <b>entry</b> to |
339 | | * the user. Return the spooled object on success, or NULL on failure (which |
340 | | * is probably caused by a failure to map the body of the item from disk). |
341 | | * |
342 | | * Adds a reference to entry's reference counter. |
343 | | */ |
344 | | spooled_resource_t * |
345 | | spooled_resource_new_from_cache_entry(consensus_cache_entry_t *entry) |
346 | 0 | { |
347 | 0 | spooled_resource_t *spooled = tor_malloc_zero(sizeof(spooled_resource_t)); |
348 | 0 | spooled->spool_source = DIR_SPOOL_CONSENSUS_CACHE_ENTRY; |
349 | 0 | spooled->spool_eagerly = 0; |
350 | 0 | consensus_cache_entry_incref(entry); |
351 | 0 | spooled->consensus_cache_entry = entry; |
352 | |
|
353 | 0 | int r = consensus_cache_entry_get_body(entry, |
354 | 0 | &spooled->cce_body, |
355 | 0 | &spooled->cce_len); |
356 | 0 | if (r == 0) { |
357 | 0 | return spooled; |
358 | 0 | } else { |
359 | 0 | spooled_resource_free(spooled); |
360 | 0 | return NULL; |
361 | 0 | } |
362 | 0 | } |
363 | | |
364 | | /** Release all storage held by <b>spooled</b>. */ |
365 | | void |
366 | | spooled_resource_free_(spooled_resource_t *spooled) |
367 | 0 | { |
368 | 0 | if (spooled == NULL) |
369 | 0 | return; |
370 | | |
371 | 0 | if (spooled->cached_dir_ref) { |
372 | 0 | cached_dir_decref(spooled->cached_dir_ref); |
373 | 0 | } |
374 | |
|
375 | 0 | if (spooled->consensus_cache_entry) { |
376 | 0 | consensus_cache_entry_decref(spooled->consensus_cache_entry); |
377 | 0 | } |
378 | |
|
379 | 0 | tor_free(spooled); |
380 | 0 | } |
381 | | |
382 | | /** When spooling data from a cached_dir_t object, we always add |
383 | | * at least this much. */ |
384 | | #define DIRSERV_CACHED_DIR_CHUNK_SIZE 8192 |
385 | | |
386 | | /** Return an compression ratio for compressing objects from <b>source</b>. |
387 | | */ |
388 | | static double |
389 | | estimate_compression_ratio(dir_spool_source_t source) |
390 | 0 | { |
391 | | /* We should put in better estimates here, depending on the number of |
392 | | objects and their type */ |
393 | 0 | (void) source; |
394 | 0 | return 0.5; |
395 | 0 | } |
396 | | |
/** Return an estimated number of bytes needed for transmitting the
 * resource in <b>spooled</b> on <b>conn</b>.
 *
 * Returns 0 if the underlying object is absent.
 *
 * As a convenient side-effect, set *<b>published_out</b> (if non-NULL) to
 * the resource's publication time.
 */
static size_t
spooled_resource_estimate_size(const spooled_resource_t *spooled,
                               dir_connection_t *conn,
                               int compressed,
                               time_t *published_out)
{
  if (spooled->spool_eagerly) {
    /* Eager resources: look up the body now and scale by an estimated
     * compression ratio if the reply will be compressed. */
    const uint8_t *body = NULL;
    size_t bodylen = 0;
    int r = spooled_resource_lookup_body(spooled,
                                         connection_dir_is_encrypted(conn),
                                         &body, &bodylen,
                                         published_out);
    if (r == -1 || body == NULL || bodylen == 0)
      return 0;
    if (compressed) {
      double ratio = estimate_compression_ratio(spooled->spool_source);
      bodylen = (size_t)(bodylen * ratio);
    }
    return bodylen;
  } else {
    cached_dir_t *cached;
    if (spooled->consensus_cache_entry) {
      /* Consensus cache entries: report the mapped length; "published" is
       * the consensus valid-after time. */
      if (published_out) {
        consensus_cache_entry_get_valid_after(
          spooled->consensus_cache_entry, published_out);
      }

      return spooled->cce_len;
    }
    /* cached_dir_t-backed resources: use a materialized reference if we
     * already hold one, else look it up (without taking a reference). */
    if (spooled->cached_dir_ref) {
      cached = spooled->cached_dir_ref;
    } else {
      cached = spooled_resource_lookup_cached_dir(spooled,
                                                  published_out);
    }
    if (cached == NULL) {
      return 0;
    }
    size_t result = compressed ? cached->dir_compressed_len : cached->dir_len;
    return result;
  }
}
446 | | |
/** Return code for spooled_resource_flush_some */
typedef enum {
  SRFS_ERR = -1, /**< Error; the connection should be closed. */
  SRFS_MORE = 0, /**< More bytes remain to flush from this resource. */
  SRFS_DONE      /**< This resource is completely flushed. */
} spooled_resource_flush_status_t;
453 | | |
/** Flush some or all of the bytes from <b>spooled</b> onto <b>conn</b>.
 * Return SRFS_ERR on error, SRFS_MORE if there are more bytes to flush from
 * this spooled resource, or SRFS_DONE if we are done flushing this spooled
 * resource.
 */
static spooled_resource_flush_status_t
spooled_resource_flush_some(spooled_resource_t *spooled,
                            dir_connection_t *conn)
{
  if (spooled->spool_eagerly) {
    /* Spool_eagerly resources are sent all-at-once. */
    const uint8_t *body = NULL;
    size_t bodylen = 0;
    int r = spooled_resource_lookup_body(spooled,
                                         connection_dir_is_encrypted(conn),
                                         &body, &bodylen, NULL);
    if (r == -1 || body == NULL || bodylen == 0) {
      /* Absent objects count as "done". */
      return SRFS_DONE;
    }

    connection_dir_buf_add((const char*)body, bodylen, conn, 0);

    return SRFS_DONE;
  } else {
    cached_dir_t *cached = spooled->cached_dir_ref;
    consensus_cache_entry_t *cce = spooled->consensus_cache_entry;
    if (cached == NULL && cce == NULL) {
      /* The cached_dir_t hasn't been materialized yet. So let's look it up. */
      cached = spooled->cached_dir_ref =
        spooled_resource_lookup_cached_dir(spooled, NULL);
      if (!cached) {
        /* Absent objects count as done. */
        return SRFS_DONE;
      }
      /* Take a reference for the duration of the spooling; released when
       * the spooled_resource_t itself is freed. */
      ++cached->refcnt;
      tor_assert_nonfatal(spooled->cached_dir_offset == 0);
    }

    if (BUG(!cached && !cce))
      return SRFS_DONE;

    /* Pick the backing buffer: the precompressed cached_dir_t body, or the
     * mapped consensus-cache entry. */
    int64_t total_len;
    const char *ptr;
    if (cached) {
      total_len = cached->dir_compressed_len;
      ptr = cached->dir_compressed;
    } else {
      total_len = spooled->cce_len;
      ptr = (const char *)spooled->cce_body;
    }
    /* How many bytes left to flush? */
    int64_t remaining;
    remaining = total_len - spooled->cached_dir_offset;
    if (BUG(remaining < 0))
      return SRFS_ERR;
    /* Send at most one chunk per call so we never overfill the outbuf. */
    ssize_t bytes = (ssize_t) MIN(DIRSERV_CACHED_DIR_CHUNK_SIZE, remaining);

    connection_dir_buf_add(ptr + spooled->cached_dir_offset,
                           bytes, conn, 0);

    spooled->cached_dir_offset += bytes;
    if (spooled->cached_dir_offset >= (off_t)total_len) {
      return SRFS_DONE;
    } else {
      return SRFS_MORE;
    }
  }
}
523 | | |
524 | | /** Helper: find the cached_dir_t for a spooled_resource_t, for |
525 | | * sending it to <b>conn</b>. Set *<b>published_out</b>, if provided, |
526 | | * to the published time of the cached_dir_t. |
527 | | * |
528 | | * DOES NOT increase the reference count on the result. Callers must do that |
529 | | * themselves if they mean to hang on to it. |
530 | | */ |
531 | | static cached_dir_t * |
532 | | spooled_resource_lookup_cached_dir(const spooled_resource_t *spooled, |
533 | | time_t *published_out) |
534 | 0 | { |
535 | 0 | tor_assert(spooled->spool_eagerly == 0); |
536 | 0 | cached_dir_t *d = lookup_cached_dir_by_fp(spooled->digest); |
537 | 0 | if (d != NULL) { |
538 | 0 | if (published_out) |
539 | 0 | *published_out = d->published; |
540 | 0 | } |
541 | 0 | return d; |
542 | 0 | } |
543 | | |
/** Helper: Look up the body for an eagerly-served spooled_resource. If
 * <b>conn_is_encrypted</b> is false, don't look up any resource that
 * shouldn't be sent over an unencrypted connection. On success, set
 * <b>body_out</b>, <b>size_out</b>, and <b>published_out</b> to refer
 * to the resource's body, size, and publication date, and return 0.
 * On failure return -1. */
static int
spooled_resource_lookup_body(const spooled_resource_t *spooled,
                             int conn_is_encrypted,
                             const uint8_t **body_out,
                             size_t *size_out,
                             time_t *published_out)
{
  tor_assert(spooled->spool_eagerly == 1);

  const signed_descriptor_t *sd = NULL;

  switch (spooled->spool_source) {
    case DIR_SPOOL_EXTRA_BY_FP: {
      /* Identity fingerprint -> that relay's extra-info document. */
      sd = get_signed_descriptor_by_fp(spooled->digest, 1);
      break;
    }
    case DIR_SPOOL_SERVER_BY_FP: {
      /* Identity fingerprint -> that relay's server descriptor. */
      sd = get_signed_descriptor_by_fp(spooled->digest, 0);
      break;
    }
    case DIR_SPOOL_SERVER_BY_DIGEST: {
      /* Descriptor digest -> server descriptor. */
      sd = router_get_by_descriptor_digest((const char *)spooled->digest);
      break;
    }
    case DIR_SPOOL_EXTRA_BY_DIGEST: {
      /* Descriptor digest -> extra-info document. */
      sd = extrainfo_get_by_descriptor_digest((const char *)spooled->digest);
      break;
    }
    case DIR_SPOOL_MICRODESC: {
      /* Microdescriptors have no signed_descriptor_t; handle them fully
       * here and return early. They have no meaningful publication time,
       * so report TIME_MAX (i.e., "never expires"). */
      microdesc_t *md = microdesc_cache_lookup_by_digest256(
                                   get_microdesc_cache(),
                                   (const char *)spooled->digest);
      if (! md || ! md->body) {
        return -1;
      }
      *body_out = (const uint8_t *)md->body;
      *size_out = md->bodylen;
      if (published_out)
        *published_out = TIME_MAX;
      return 0;
    }
    case DIR_SPOOL_NETWORKSTATUS:
    case DIR_SPOOL_CONSENSUS_CACHE_ENTRY:
    default:
      /* These sources are not spooled eagerly, so reaching here is a bug. */
      /* LCOV_EXCL_START */
      tor_assert_nonfatal_unreached();
      return -1;
      /* LCOV_EXCL_STOP */
  }

  /* If we get here, then we tried to set "sd" to a signed_descriptor_t. */

  if (sd == NULL) {
    return -1;
  }
  if (sd->send_unencrypted == 0 && ! conn_is_encrypted) {
    /* we did this check once before (so we could have an accurate size
     * estimate and maybe send a 404 if somebody asked for only bridges on
     * a connection), but we need to do it again in case a previously
     * unknown bridge descriptor has shown up between then and now. */
    return -1;
  }
  *body_out = (const uint8_t *) signed_descriptor_get_body(sd);
  *size_out = sd->signed_descriptor_len;
  if (published_out)
    *published_out = sd->published_on;
  return 0;
}
618 | | |
619 | | /** Given a fingerprint <b>fp</b> which is either set if we're looking for a |
620 | | * v2 status, or zeroes if we're looking for a v3 status, or a NUL-padded |
621 | | * flavor name if we want a flavored v3 status, return a pointer to the |
622 | | * appropriate cached dir object, or NULL if there isn't one available. */ |
623 | | static cached_dir_t * |
624 | | lookup_cached_dir_by_fp(const uint8_t *fp) |
625 | 0 | { |
626 | 0 | cached_dir_t *d = NULL; |
627 | 0 | if (tor_digest_is_zero((const char *)fp) && cached_consensuses) { |
628 | 0 | d = strmap_get(cached_consensuses, "ns"); |
629 | 0 | } else if (memchr(fp, '\0', DIGEST_LEN) && cached_consensuses) { |
630 | | /* this here interface is a nasty hack: we're shoving a flavor into |
631 | | * a digest field. */ |
632 | 0 | d = strmap_get(cached_consensuses, (const char *)fp); |
633 | 0 | } |
634 | 0 | return d; |
635 | 0 | } |
636 | | |
637 | | /** Try to guess the number of bytes that will be needed to send the |
638 | | * spooled objects for <b>conn</b>'s outgoing spool. In the process, |
639 | | * remove every element of the spool that refers to an absent object, or |
640 | | * which was published earlier than <b>cutoff</b>. Set *<b>size_out</b> |
641 | | * to the number of bytes, and *<b>n_expired_out</b> to the number of |
642 | | * objects removed for being too old. */ |
643 | | void |
644 | | dirserv_spool_remove_missing_and_guess_size(dir_connection_t *conn, |
645 | | time_t cutoff, |
646 | | int compression, |
647 | | size_t *size_out, |
648 | | int *n_expired_out) |
649 | 0 | { |
650 | 0 | if (BUG(!conn)) |
651 | 0 | return; |
652 | | |
653 | 0 | smartlist_t *spool = conn->spool; |
654 | 0 | if (!spool) { |
655 | 0 | if (size_out) |
656 | 0 | *size_out = 0; |
657 | 0 | if (n_expired_out) |
658 | 0 | *n_expired_out = 0; |
659 | 0 | return; |
660 | 0 | } |
661 | 0 | int n_expired = 0; |
662 | 0 | uint64_t total = 0; |
663 | 0 | SMARTLIST_FOREACH_BEGIN(spool, spooled_resource_t *, spooled) { |
664 | 0 | time_t published = TIME_MAX; |
665 | 0 | size_t sz = spooled_resource_estimate_size(spooled, conn, |
666 | 0 | compression, &published); |
667 | 0 | if (published < cutoff) { |
668 | 0 | ++n_expired; |
669 | 0 | SMARTLIST_DEL_CURRENT(spool, spooled); |
670 | 0 | spooled_resource_free(spooled); |
671 | 0 | } else if (sz == 0) { |
672 | 0 | SMARTLIST_DEL_CURRENT(spool, spooled); |
673 | 0 | spooled_resource_free(spooled); |
674 | 0 | } else { |
675 | 0 | total += sz; |
676 | 0 | } |
677 | 0 | } SMARTLIST_FOREACH_END(spooled); |
678 | |
|
679 | 0 | if (size_out) { |
680 | 0 | *size_out = (total > SIZE_MAX) ? SIZE_MAX : (size_t)total; |
681 | 0 | } |
682 | 0 | if (n_expired_out) |
683 | 0 | *n_expired_out = n_expired; |
684 | 0 | } |
685 | | |
/** Helper: used to sort a connection's spool by digest. */
static int
dirserv_spool_sort_comparison_(const void **a_, const void **b_)
{
  const spooled_resource_t *a = *a_;
  const spooled_resource_t *b = *b_;
  /* Compare the full digest buffers byte-wise; fast_memcmp is not
   * data-independent, which is fine since digests here aren't secret. */
  return fast_memcmp(a->digest, b->digest, sizeof(a->digest));
}
694 | | |
695 | | /** Sort all the entries in <b>conn</b> by digest. */ |
696 | | void |
697 | | dirserv_spool_sort(dir_connection_t *conn) |
698 | 0 | { |
699 | 0 | if (conn->spool == NULL) |
700 | 0 | return; |
701 | 0 | smartlist_sort(conn->spool, dirserv_spool_sort_comparison_); |
702 | 0 | } |
703 | | |
704 | | /** Return the cache-info for identity fingerprint <b>fp</b>, or |
705 | | * its extra-info document if <b>extrainfo</b> is true. Return |
706 | | * NULL if not found or if the descriptor is older than |
707 | | * <b>publish_cutoff</b>. */ |
708 | | static const signed_descriptor_t * |
709 | | get_signed_descriptor_by_fp(const uint8_t *fp, int extrainfo) |
710 | 0 | { |
711 | 0 | if (router_digest_is_me((const char *)fp)) { |
712 | 0 | if (extrainfo) |
713 | 0 | return &(router_get_my_extrainfo()->cache_info); |
714 | 0 | else |
715 | 0 | return &(router_get_my_routerinfo()->cache_info); |
716 | 0 | } else { |
717 | 0 | const routerinfo_t *ri = router_get_by_id_digest((const char *)fp); |
718 | 0 | if (ri) { |
719 | 0 | if (extrainfo) |
720 | 0 | return extrainfo_get_by_descriptor_digest( |
721 | 0 | ri->cache_info.extra_info_digest); |
722 | 0 | else |
723 | 0 | return &ri->cache_info; |
724 | 0 | } |
725 | 0 | } |
726 | 0 | return NULL; |
727 | 0 | } |
728 | | |
729 | | /** When we're spooling data onto our outbuf, add more whenever we dip |
730 | | * below this threshold. */ |
731 | 0 | #define DIRSERV_BUFFER_MIN 16384 |
732 | | |
/**
 * Called whenever we have flushed some directory data in state
 * SERVER_WRITING, or whenever we want to fill the buffer with initial
 * directory data (so that subsequent writes will occur, and trigger this
 * function again.)
 *
 * Return 0 on success, and -1 on failure.
 */
int
connection_dirserv_flushed_some(dir_connection_t *conn)
{
  tor_assert(conn->base_.state == DIR_CONN_STATE_SERVER_WRITING);
  if (conn->spool == NULL)
    return 0;

  /* Keep feeding the outbuf from the back of the spool until it holds at
   * least DIRSERV_BUFFER_MIN bytes or the spool is empty. */
  while (connection_get_outbuf_len(TO_CONN(conn)) < DIRSERV_BUFFER_MIN &&
         smartlist_len(conn->spool)) {
    spooled_resource_t *spooled =
      smartlist_get(conn->spool, smartlist_len(conn->spool)-1);
    spooled_resource_flush_status_t status;
    status = spooled_resource_flush_some(spooled, conn);
    if (status == SRFS_ERR) {
      return -1;
    } else if (status == SRFS_MORE) {
      /* This resource still has bytes pending; come back next flush. */
      return 0;
    }
    tor_assert(status == SRFS_DONE);

    /* If we're here, we're done flushing this resource. */
    tor_assert(smartlist_pop_last(conn->spool) == spooled);
    spooled_resource_free(spooled);
  }

  if (smartlist_len(conn->spool) > 0) {
    /* We're still spooling something. */
    return 0;
  }

  /* If we get here, we're done. */
  smartlist_free(conn->spool);
  conn->spool = NULL;
  if (conn->compress_state) {
    /* Flush the compression state: there could be more bytes pending in there,
     * and we don't want to omit bytes. */
    connection_buf_add_compress("", 0, conn, 1);
    tor_compress_free(conn->compress_state);
    conn->compress_state = NULL;
  }
  return 0;
}
783 | | |
784 | | /** Remove every element from <b>conn</b>'s outgoing spool, and delete |
785 | | * the spool. */ |
786 | | void |
787 | | dir_conn_clear_spool(dir_connection_t *conn) |
788 | 0 | { |
789 | 0 | if (!conn || ! conn->spool) |
790 | 0 | return; |
791 | 0 | SMARTLIST_FOREACH(conn->spool, spooled_resource_t *, s, |
792 | 0 | spooled_resource_free(s)); |
793 | 0 | smartlist_free(conn->spool); |
794 | 0 | conn->spool = NULL; |
795 | 0 | } |
796 | | |
/** Release all storage used by the directory server. */
void
dirserv_free_all(void)
{
  /* free_cached_dir_ drops one reference on every stored cached_dir_t. */
  strmap_free(cached_consensuses, free_cached_dir_);
  cached_consensuses = NULL;
}