Line | Count | Source
1 | | /* |
2 | | * name-hash.c |
3 | | * |
4 | | * Hashing names in the index state |
5 | | * |
6 | | * Copyright (C) 2008 Linus Torvalds |
7 | | */ |
8 | | #include "cache.h" |
9 | | #include "thread-utils.h" |
10 | | #include "trace2.h" |
11 | | #include "sparse-index.h" |
12 | | |
13 | | struct dir_entry { |
14 | | struct hashmap_entry ent; |
15 | | struct dir_entry *parent; |
16 | | int nr; |
17 | | unsigned int namelen; |
18 | | char name[FLEX_ARRAY]; |
19 | | }; |
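
The `name[FLEX_ARRAY]` member is Git's portable spelling of a C99 flexible array member: the struct header and the pathname bytes live in a single allocation, created by the FLEX_ALLOC_MEM() calls seen below in hash_dir_entry() and hash_dir_entry_with_parent_and_prefix(). A minimal sketch of the pattern (plain C99; flex_alloc() is an illustrative helper, not Git's macro):

    #include <stdlib.h>
    #include <string.h>

    struct flex_demo {
            size_t len;
            char name[];    /* C99 flexible array member, a la FLEX_ARRAY */
    };

    /* Roughly what FLEX_ALLOC_MEM(d, name, buf, len) amounts to: one
     * zeroed allocation covering the header, the name bytes, and a
     * terminating NUL. */
    static struct flex_demo *flex_alloc(const char *buf, size_t len)
    {
            struct flex_demo *d = calloc(1, sizeof(*d) + len + 1);
            if (!d)
                    abort();
            memcpy(d->name, buf, len);
            d->len = len;
            return d;
    }

A single allocation keeps the entry compact and lets remove_dir_entry() release it with one free().
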
20 | | |
21 | | static int dir_entry_cmp(const void *cmp_data UNUSED, |
22 | | const struct hashmap_entry *eptr, |
23 | | const struct hashmap_entry *entry_or_key, |
24 | | const void *keydata) |
25 | 0 | { |
26 | 0 | const struct dir_entry *e1, *e2; |
27 | 0 | const char *name = keydata; |
28 | |
29 | 0 | e1 = container_of(eptr, const struct dir_entry, ent); |
30 | 0 | e2 = container_of(entry_or_key, const struct dir_entry, ent); |
31 | |
32 | 0 | return e1->namelen != e2->namelen || strncasecmp(e1->name, |
33 | 0 | name ? name : e2->name, e1->namelen); |
34 | 0 | } |
35 | | |
36 | | static struct dir_entry *find_dir_entry__hash(struct index_state *istate, |
37 | | const char *name, unsigned int namelen, unsigned int hash) |
38 | 0 | { |
39 | 0 | struct dir_entry key; |
40 | 0 | hashmap_entry_init(&key.ent, hash); |
41 | 0 | key.namelen = namelen; |
42 | 0 | return hashmap_get_entry(&istate->dir_hash, &key, ent, name); |
43 | 0 | } |
44 | | |
45 | | static struct dir_entry *find_dir_entry(struct index_state *istate, |
46 | | const char *name, unsigned int namelen) |
47 | 0 | { |
48 | 0 | return find_dir_entry__hash(istate, name, namelen, memihash(name, namelen)); |
49 | 0 | } |
50 | | |
51 | | static struct dir_entry *hash_dir_entry(struct index_state *istate, |
52 | | struct cache_entry *ce, int namelen) |
53 | 0 | { |
54 | | /* |
55 | | * Throw each directory component in the hash for quick lookup |
56 | | * during a git status. Directory components are stored without their |
57 | | * closing slash. Despite submodules being a directory, they never |
58 | | * reach this point, because they are stored |
59 | | * in index_state.name_hash (as ordinary cache_entries). |
60 | | */ |
61 | 0 | struct dir_entry *dir; |
62 | | |
63 | | /* get length of parent directory */ |
64 | 0 | while (namelen > 0 && !is_dir_sep(ce->name[namelen - 1])) |
65 | 0 | namelen--; |
66 | 0 | if (namelen <= 0) |
67 | 0 | return NULL; |
68 | 0 | namelen--; |
69 | | |
70 | | /* lookup existing entry for that directory */ |
71 | 0 | dir = find_dir_entry(istate, ce->name, namelen); |
72 | 0 | if (!dir) { |
73 | | /* not found, create it and add to hash table */ |
74 | 0 | FLEX_ALLOC_MEM(dir, name, ce->name, namelen); |
75 | 0 | hashmap_entry_init(&dir->ent, memihash(ce->name, namelen)); |
76 | 0 | dir->namelen = namelen; |
77 | 0 | hashmap_add(&istate->dir_hash, &dir->ent); |
78 | | |
79 | | /* recursively add missing parent directories */ |
80 | 0 | dir->parent = hash_dir_entry(istate, ce, namelen); |
81 | 0 | } |
82 | 0 | return dir; |
83 | 0 | } |
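
The backward scan in hash_dir_entry() strips the final path component and its closing slash before each lookup, and the recursive call repeats that until the root is reached, so adding "a/b/c" registers the directories "a/b" and then "a". A standalone sketch of that walk, iterative for brevity and with '/' standing in for is_dir_sep() (which on some platforms also accepts '\\'):

    #include <stdio.h>
    #include <string.h>

    /* Given a path and its length, return the length of the parent
     * directory (without its closing slash), or -1 at the root. */
    static int parent_dir_len(const char *name, int namelen)
    {
            while (namelen > 0 && name[namelen - 1] != '/')
                    namelen--;              /* drop the last component */
            if (namelen <= 0)
                    return -1;              /* no parent directory */
            return namelen - 1;             /* drop the closing slash */
    }

    int main(void)
    {
            const char *path = "a/b/c";
            int len = (int)strlen(path);

            while ((len = parent_dir_len(path, len)) >= 0)
                    printf("%.*s\n", len, path);
            return 0;                       /* prints "a/b", then "a" */
    }
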
84 | | |
85 | | static void add_dir_entry(struct index_state *istate, struct cache_entry *ce) |
86 | 0 | { |
87 | | /* Add reference to the directory entry (and parents if 0). */ |
88 | 0 | struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce)); |
89 | 0 | while (dir && !(dir->nr++)) |
90 | 0 | dir = dir->parent; |
91 | 0 | } |
92 | | |
93 | | static void remove_dir_entry(struct index_state *istate, struct cache_entry *ce) |
94 | 0 | { |
95 | | /* |
96 | | * Release reference to the directory entry. If 0, remove and continue |
97 | | * with parent directory. |
98 | | */ |
99 | 0 | struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce)); |
100 | 0 | while (dir && !(--dir->nr)) { |
101 | 0 | struct dir_entry *parent = dir->parent; |
102 | 0 | hashmap_remove(&istate->dir_hash, &dir->ent, NULL); |
103 | 0 | free(dir); |
104 | 0 | dir = parent; |
105 | 0 | } |
106 | 0 | } |
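
Note the asymmetry between the two loops above: add_dir_entry() tests !(dir->nr++) (post-increment), so it keeps climbing only while counts go from 0 to 1, while remove_dir_entry() tests !(--dir->nr) (pre-decrement), climbing only while counts drop from 1 to 0. Each directory is therefore counted once per live child. For example, after adding only "a/b/c", both "a/b" and "a" hold nr == 1; removing "a/b/c" frees the whole chain in one pass.
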
107 | | |
108 | | static void hash_index_entry(struct index_state *istate, struct cache_entry *ce) |
109 | 11.3k | { |
110 | 11.3k | if (ce->ce_flags & CE_HASHED) |
111 | 0 | return; |
112 | 11.3k | ce->ce_flags |= CE_HASHED; |
113 | | |
114 | 11.3k | if (!S_ISSPARSEDIR(ce->ce_mode)) { |
115 | 11.3k | hashmap_entry_init(&ce->ent, memihash(ce->name, ce_namelen(ce))); |
116 | 11.3k | hashmap_add(&istate->name_hash, &ce->ent); |
117 | 11.3k | } |
118 | | |
119 | 11.3k | if (ignore_case) |
120 | 0 | add_dir_entry(istate, ce); |
121 | 11.3k | } |
122 | | |
123 | | static int cache_entry_cmp(const void *cmp_data UNUSED, |
124 | | const struct hashmap_entry *eptr, |
125 | | const struct hashmap_entry *entry_or_key, |
126 | | const void *remove) |
127 | 31.1k | { |
128 | 31.1k | const struct cache_entry *ce1, *ce2; |
129 | | |
130 | 31.1k | ce1 = container_of(eptr, const struct cache_entry, ent); |
131 | 31.1k | ce2 = container_of(entry_or_key, const struct cache_entry, ent); |
132 | | |
133 | | /* |
134 | | * For remove_name_hash, find the exact entry (pointer equality); for |
135 | | * index_file_exists, find all entries with matching hash code and |
136 | | * decide whether the entry matches in same_name. |
137 | | */ |
138 | 31.1k | return remove ? !(ce1 == ce2) : 0; |
139 | 31.1k | } |
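
The keydata parameter is what lets one comparator serve both callers: remove_name_hash() below passes the cache_entry itself as keydata, so only the pointer-identical entry matches and gets removed, whereas index_file_exists() passes NULL, accepting every entry on the hash chain as a candidate and filtering with same_name() as it iterates.
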
140 | | |
141 | | static int lazy_try_threaded = 1; |
142 | | static int lazy_nr_dir_threads; |
143 | | |
144 | | /* |
145 | | * Set a minimum number of cache_entries that we will handle per |
146 | | * thread and use that to decide how many threads to run (up to |
147 | | * the number on the system). |
148 | | * |
149 | | * For guidance setting the lower per-thread bound, see: |
150 | | * t/helper/test-lazy-init-name-hash --analyze |
151 | | */ |
152 | 0 | #define LAZY_THREAD_COST (2000) |
153 | | |
154 | | /* |
155 | | * We use n mutexes to guard n partitions of the "istate->dir_hash" |
156 | | * hashtable. Since "find" and "insert" operations will hash to a |
157 | | * particular bucket and modify/search a single chain, we can say |
158 | | * that "all chains mod n" are guarded by the same mutex -- rather |
159 | | * than having a single mutex to guard the entire table. (This does |
160 | | * require that we disable "rehashing" on the hashtable.) |
161 | | * |
162 | | * So, a larger value here decreases the probability of a collision |
163 | | * and the time that each thread must wait for the mutex. |
164 | | */ |
165 | 0 | #define LAZY_MAX_MUTEX (32) |
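
This is classic lock striping. A minimal self-contained sketch of the scheme (illustrative names; a power-of-two bucket function stands in for Git's hashmap_bucket()):

    #include <pthread.h>

    #define N_STRIPES 32

    static pthread_mutex_t stripes[N_STRIPES];

    static void stripes_init(void)
    {
            int i;
            for (i = 0; i < N_STRIPES; i++)
                    pthread_mutex_init(&stripes[i], NULL);
    }

    /* Stand-in for hashmap_bucket(): with rehashing disabled, the
     * table size is fixed, so hash -> bucket -> stripe never moves. */
    static unsigned int bucket_of(unsigned int hash, unsigned int tablesize)
    {
            return hash & (tablesize - 1);  /* tablesize: power of two */
    }

    static void stripe_lock(unsigned int hash, unsigned int tablesize)
    {
            pthread_mutex_lock(&stripes[bucket_of(hash, tablesize) % N_STRIPES]);
    }

    static void stripe_unlock(unsigned int hash, unsigned int tablesize)
    {
            pthread_mutex_unlock(&stripes[bucket_of(hash, tablesize) % N_STRIPES]);
    }

Two threads block each other only when their buckets collide modulo 32, which is why threaded_lazy_init_name_hash() below disables item counting (and hence rehashing) for the duration of the parallel phase.
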
166 | | |
167 | | static pthread_mutex_t *lazy_dir_mutex_array; |
168 | | |
169 | | /* |
170 | | * An array of lazy_entry items is used by the n threads in |
171 | | * the directory parse (first) phase to (lock-free) store the |
172 | | * intermediate results. These values are then referenced by |
173 | | * the 2 threads in the second phase. |
174 | | */ |
175 | | struct lazy_entry { |
176 | | struct dir_entry *dir; |
177 | | unsigned int hash_dir; |
178 | | unsigned int hash_name; |
179 | | }; |
180 | | |
181 | | /* |
182 | | * Decide if we want to use threads (if available) to load |
183 | | * the hash tables. We set "lazy_nr_dir_threads" to zero when |
184 | | * it is not worth it. |
185 | | */ |
186 | | static int lookup_lazy_params(struct index_state *istate) |
187 | 1.28k | { |
188 | 1.28k | int nr_cpus; |
189 | | |
190 | 1.28k | lazy_nr_dir_threads = 0; |
191 | | |
192 | 1.28k | if (!lazy_try_threaded) |
193 | 0 | return 0; |
194 | | |
195 | | /* |
196 | | * If we are respecting case, just use the original |
197 | | * code to build the "istate->name_hash". We don't |
198 | | * need the complexity here. |
199 | | */ |
200 | 1.28k | if (!ignore_case) |
201 | 1.28k | return 0; |
202 | | |
203 | 0 | nr_cpus = online_cpus(); |
204 | 0 | if (nr_cpus < 2) |
205 | 0 | return 0; |
206 | | |
207 | 0 | if (istate->cache_nr < 2 * LAZY_THREAD_COST) |
208 | 0 | return 0; |
209 | | |
210 | 0 | if (istate->cache_nr < nr_cpus * LAZY_THREAD_COST) |
211 | 0 | nr_cpus = istate->cache_nr / LAZY_THREAD_COST; |
212 | 0 | lazy_nr_dir_threads = nr_cpus; |
213 | 0 | return lazy_nr_dir_threads; |
214 | 0 | } |
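
A worked example of the heuristic: with LAZY_THREAD_COST at 2000, an index of 5,000 entries on an 8-CPU machine clears the 2 * LAZY_THREAD_COST floor (5,000 >= 4,000) but not the nr_cpus * LAZY_THREAD_COST bar (5,000 < 16,000), so the thread count is clamped to 5000 / 2000 = 2; an index of 100,000 entries would use all 8 threads.
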
215 | | |
216 | | /* |
217 | | * Initialize n mutexes for use when searching and inserting |
218 | | * into "istate->dir_hash". All "dir" threads are trying |
219 | | * to insert partial pathnames into the hash as they iterate |
220 | | * over their portions of the index, so lock contention is |
221 | | * high. |
222 | | * |
223 | | * However, the hashmap is going to put items into bucket |
224 | | * chains based on their hash values. Use that to create n |
225 | | * mutexes and lock on mutex[bucket(hash) % n]. This will |
226 | | * decrease the collision rate by (hopefully) a factor of n. |
227 | | */ |
228 | | static void init_dir_mutex(void) |
229 | 0 | { |
230 | 0 | int j; |
231 | |
232 | 0 | CALLOC_ARRAY(lazy_dir_mutex_array, LAZY_MAX_MUTEX); |
233 | |
234 | 0 | for (j = 0; j < LAZY_MAX_MUTEX; j++) |
235 | 0 | init_recursive_mutex(&lazy_dir_mutex_array[j]); |
236 | 0 | } |
237 | | |
238 | | static void cleanup_dir_mutex(void) |
239 | 0 | { |
240 | 0 | int j; |
241 | |
242 | 0 | for (j = 0; j < LAZY_MAX_MUTEX; j++) |
243 | 0 | pthread_mutex_destroy(&lazy_dir_mutex_array[j]); |
244 | |
245 | 0 | free(lazy_dir_mutex_array); |
246 | 0 | } |
247 | | |
248 | | static void lock_dir_mutex(int j) |
249 | 0 | { |
250 | 0 | pthread_mutex_lock(&lazy_dir_mutex_array[j]); |
251 | 0 | } |
252 | | |
253 | | static void unlock_dir_mutex(int j) |
254 | 0 | { |
255 | 0 | pthread_mutex_unlock(&lazy_dir_mutex_array[j]); |
256 | 0 | } |
257 | | |
258 | | static inline int compute_dir_lock_nr( |
259 | | const struct hashmap *map, |
260 | | unsigned int hash) |
261 | 0 | { |
262 | 0 | return hashmap_bucket(map, hash) % LAZY_MAX_MUTEX; |
263 | 0 | } |
264 | | |
265 | | static struct dir_entry *hash_dir_entry_with_parent_and_prefix( |
266 | | struct index_state *istate, |
267 | | struct dir_entry *parent, |
268 | | struct strbuf *prefix) |
269 | 0 | { |
270 | 0 | struct dir_entry *dir; |
271 | 0 | unsigned int hash; |
272 | 0 | int lock_nr; |
273 | | |
274 | | /* |
275 | | * Either we have a parent directory and path with slash(es) |
276 | | * or the directory is an immediate child of the root directory. |
277 | | */ |
278 | 0 | assert((parent != NULL) ^ (strchr(prefix->buf, '/') == NULL)); |
279 | | |
280 | 0 | if (parent) |
281 | 0 | hash = memihash_cont(parent->ent.hash, |
282 | 0 | prefix->buf + parent->namelen, |
283 | 0 | prefix->len - parent->namelen); |
284 | 0 | else |
285 | 0 | hash = memihash(prefix->buf, prefix->len); |
286 | |
287 | 0 | lock_nr = compute_dir_lock_nr(&istate->dir_hash, hash); |
288 | 0 | lock_dir_mutex(lock_nr); |
289 | |
290 | 0 | dir = find_dir_entry__hash(istate, prefix->buf, prefix->len, hash); |
291 | 0 | if (!dir) { |
292 | 0 | FLEX_ALLOC_MEM(dir, name, prefix->buf, prefix->len); |
293 | 0 | hashmap_entry_init(&dir->ent, hash); |
294 | 0 | dir->namelen = prefix->len; |
295 | 0 | dir->parent = parent; |
296 | 0 | hashmap_add(&istate->dir_hash, &dir->ent); |
297 | |
298 | 0 | if (parent) { |
299 | 0 | unlock_dir_mutex(lock_nr); |
300 | | |
301 | | /* All I really need here is an InterlockedIncrement(&(parent->nr)) */ |
302 | 0 | lock_nr = compute_dir_lock_nr(&istate->dir_hash, parent->ent.hash); |
303 | 0 | lock_dir_mutex(lock_nr); |
304 | 0 | parent->nr++; |
305 | 0 | } |
306 | 0 | } |
307 | |
308 | 0 | unlock_dir_mutex(lock_nr); |
309 | |
310 | 0 | return dir; |
311 | 0 | } |
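
The memihash_cont() call above is why each dir_entry stores its hash: a child can continue the case-insensitive hash from its parent's saved value instead of rehashing the shared prefix. That works because the hash is a byte-at-a-time rolling function whose state after a prefix is itself a valid seed. A self-contained sketch (the FNV-1 constants and the case folding are assumptions here, illustrating the continuation property rather than reproducing Git's memihash()):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    #define FNV32_BASE  0x811c9dc5u
    #define FNV32_PRIME 0x01000193u

    /* Rolling case-folding hash: the return value can be fed back in
     * as `seed` to keep hashing, like memihash()/memihash_cont(). */
    static unsigned int ihash_cont(unsigned int seed, const char *buf, size_t len)
    {
            unsigned int hash = seed;
            while (len--)
                    hash = (hash * FNV32_PRIME) ^
                           (unsigned char)tolower((unsigned char)*buf++);
            return hash;
    }

    int main(void)
    {
            const char *path = "dir/file";
            unsigned int whole  = ihash_cont(FNV32_BASE, path, strlen(path));
            unsigned int parent = ihash_cont(FNV32_BASE, path, 3); /* "dir" */
            unsigned int cont   = ihash_cont(parent, path + 3, strlen(path) - 3);

            printf("%s\n", whole == cont ? "equal" : "different"); /* equal */
            return 0;
    }
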
312 | | |
313 | | /* |
314 | | * handle_range_1() and handle_range_dir() are derived from |
315 | | * clear_ce_flags_1() and clear_ce_flags_dir() in unpack-trees.c |
316 | | * and handle the iteration over the entire array of index entries. |
317 | | * They use recursion for adjacent entries in the same parent |
318 | | * directory. |
319 | | */ |
320 | | static int handle_range_1( |
321 | | struct index_state *istate, |
322 | | int k_start, |
323 | | int k_end, |
324 | | struct dir_entry *parent, |
325 | | struct strbuf *prefix, |
326 | | struct lazy_entry *lazy_entries); |
327 | | |
328 | | static int handle_range_dir( |
329 | | struct index_state *istate, |
330 | | int k_start, |
331 | | int k_end, |
332 | | struct dir_entry *parent, |
333 | | struct strbuf *prefix, |
334 | | struct lazy_entry *lazy_entries, |
335 | | struct dir_entry **dir_new_out) |
336 | 0 | { |
337 | 0 | int rc, k; |
338 | 0 | int input_prefix_len = prefix->len; |
339 | 0 | struct dir_entry *dir_new; |
340 | |
341 | 0 | dir_new = hash_dir_entry_with_parent_and_prefix(istate, parent, prefix); |
342 | |
343 | 0 | strbuf_addch(prefix, '/'); |
344 | | |
345 | | /* |
346 | | * Scan forward in the index array for index entries having the same |
347 | | * path prefix (that are also in this directory). |
348 | | */ |
349 | 0 | if (k_start + 1 >= k_end) |
350 | 0 | k = k_end; |
351 | 0 | else if (strncmp(istate->cache[k_start + 1]->name, prefix->buf, prefix->len) > 0) |
352 | 0 | k = k_start + 1; |
353 | 0 | else if (strncmp(istate->cache[k_end - 1]->name, prefix->buf, prefix->len) == 0) |
354 | 0 | k = k_end; |
355 | 0 | else { |
356 | 0 | int begin = k_start; |
357 | 0 | int end = k_end; |
358 | 0 | assert(begin >= 0); |
359 | 0 | while (begin < end) { |
360 | 0 | int mid = begin + ((end - begin) >> 1); |
361 | 0 | int cmp = strncmp(istate->cache[mid]->name, prefix->buf, prefix->len); |
362 | 0 | if (cmp == 0) /* mid has same prefix; look in second part */ |
363 | 0 | begin = mid + 1; |
364 | 0 | else if (cmp > 0) /* mid is past group; look in first part */ |
365 | 0 | end = mid; |
366 | 0 | else |
367 | 0 | die("cache entry out of order"); |
368 | 0 | } |
369 | 0 | k = begin; |
370 | 0 | } |
371 | | |
372 | | /* |
373 | | * Recurse and process what we can of this subset [k_start, k). |
374 | | */ |
375 | 0 | rc = handle_range_1(istate, k_start, k, dir_new, prefix, lazy_entries); |
376 | |
377 | 0 | strbuf_setlen(prefix, input_prefix_len); |
378 | |
379 | 0 | *dir_new_out = dir_new; |
380 | 0 | return rc; |
381 | 0 | } |
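
The three-way probe above (next entry already outside the group, last entry still inside, otherwise binary search) exploits the fact that the index is sorted, so all entries sharing the "dir/" prefix are contiguous. A standalone version of the group-end search (a sorted array of C strings stands in for istate->cache):

    #include <stdio.h>
    #include <string.h>

    /* In a sorted array where entries from k_start onward may share
     * `prefix`, return the index of the first entry past the group;
     * -1 signals an out-of-order array (die() in the original). */
    static int find_group_end(const char **names, int k_start, int k_end,
                              const char *prefix, size_t prefix_len)
    {
            int begin = k_start, end = k_end;

            while (begin < end) {
                    int mid = begin + ((end - begin) >> 1);
                    int cmp = strncmp(names[mid], prefix, prefix_len);
                    if (cmp == 0)           /* still in group: go right */
                            begin = mid + 1;
                    else if (cmp > 0)       /* past the group: go left */
                            end = mid;
                    else
                            return -1;
            }
            return begin;
    }

    int main(void)
    {
            const char *names[] = { "dir/a", "dir/b", "dir/c", "other" };

            /* entries 0..2 share "dir/"; the first non-member is 3 */
            printf("%d\n", find_group_end(names, 0, 4, "dir/", 4));
            return 0;
    }
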
382 | | |
383 | | static int handle_range_1( |
384 | | struct index_state *istate, |
385 | | int k_start, |
386 | | int k_end, |
387 | | struct dir_entry *parent, |
388 | | struct strbuf *prefix, |
389 | | struct lazy_entry *lazy_entries) |
390 | 0 | { |
391 | 0 | int input_prefix_len = prefix->len; |
392 | 0 | int k = k_start; |
393 | |
394 | 0 | while (k < k_end) { |
395 | 0 | struct cache_entry *ce_k = istate->cache[k]; |
396 | 0 | const char *name, *slash; |
397 | |
398 | 0 | if (prefix->len && strncmp(ce_k->name, prefix->buf, prefix->len)) |
399 | 0 | break; |
400 | | |
401 | 0 | name = ce_k->name + prefix->len; |
402 | 0 | slash = strchr(name, '/'); |
403 | |
404 | 0 | if (slash) { |
405 | 0 | int len = slash - name; |
406 | 0 | int processed; |
407 | 0 | struct dir_entry *dir_new; |
408 | |
409 | 0 | strbuf_add(prefix, name, len); |
410 | 0 | processed = handle_range_dir(istate, k, k_end, parent, prefix, lazy_entries, &dir_new); |
411 | 0 | if (processed) { |
412 | 0 | k += processed; |
413 | 0 | strbuf_setlen(prefix, input_prefix_len); |
414 | 0 | continue; |
415 | 0 | } |
416 | | |
417 | 0 | strbuf_addch(prefix, '/'); |
418 | 0 | processed = handle_range_1(istate, k, k_end, dir_new, prefix, lazy_entries); |
419 | 0 | k += processed; |
420 | 0 | strbuf_setlen(prefix, input_prefix_len); |
421 | 0 | continue; |
422 | 0 | } |
423 | | |
424 | | /* |
425 | | * It is too expensive to take a lock to insert "ce_k" |
426 | | * into "istate->name_hash" and increment the ref-count |
427 | | * on the "parent" dir. So we defer actually updating |
428 | | * permanent data structures until phase 2 (where we |
429 | | * can change the locking requirements) and simply |
430 | | * accumulate our current results into the lazy_entries |
431 | | * data array.
432 | | * |
433 | | * We do not need to lock the lazy_entries array because |
434 | | * we have exclusive access to the cells in the range |
435 | | * [k_start,k_end) that this thread was given. |
436 | | */ |
437 | 0 | lazy_entries[k].dir = parent; |
438 | 0 | if (parent) { |
439 | 0 | lazy_entries[k].hash_name = memihash_cont( |
440 | 0 | parent->ent.hash, |
441 | 0 | ce_k->name + parent->namelen, |
442 | 0 | ce_namelen(ce_k) - parent->namelen); |
443 | 0 | lazy_entries[k].hash_dir = parent->ent.hash; |
444 | 0 | } else { |
445 | 0 | lazy_entries[k].hash_name = memihash(ce_k->name, ce_namelen(ce_k)); |
446 | 0 | } |
447 | |
448 | 0 | k++; |
449 | 0 | } |
450 | |
451 | 0 | return k - k_start; |
452 | 0 | } |
453 | | |
454 | | struct lazy_dir_thread_data { |
455 | | pthread_t pthread; |
456 | | struct index_state *istate; |
457 | | struct lazy_entry *lazy_entries; |
458 | | int k_start; |
459 | | int k_end; |
460 | | }; |
461 | | |
462 | | static void *lazy_dir_thread_proc(void *_data) |
463 | 0 | { |
464 | 0 | struct lazy_dir_thread_data *d = _data; |
465 | 0 | struct strbuf prefix = STRBUF_INIT; |
466 | 0 | handle_range_1(d->istate, d->k_start, d->k_end, NULL, &prefix, d->lazy_entries); |
467 | 0 | strbuf_release(&prefix); |
468 | 0 | return NULL; |
469 | 0 | } |
470 | | |
471 | | struct lazy_name_thread_data { |
472 | | pthread_t pthread; |
473 | | struct index_state *istate; |
474 | | struct lazy_entry *lazy_entries; |
475 | | }; |
476 | | |
477 | | static void *lazy_name_thread_proc(void *_data) |
478 | 0 | { |
479 | 0 | struct lazy_name_thread_data *d = _data; |
480 | 0 | int k; |
481 | |
482 | 0 | for (k = 0; k < d->istate->cache_nr; k++) { |
483 | 0 | struct cache_entry *ce_k = d->istate->cache[k]; |
484 | 0 | ce_k->ce_flags |= CE_HASHED; |
485 | 0 | hashmap_entry_init(&ce_k->ent, d->lazy_entries[k].hash_name); |
486 | 0 | hashmap_add(&d->istate->name_hash, &ce_k->ent); |
487 | 0 | } |
488 | |
489 | 0 | return NULL; |
490 | 0 | } |
491 | | |
492 | | static inline void lazy_update_dir_ref_counts( |
493 | | struct index_state *istate, |
494 | | struct lazy_entry *lazy_entries) |
495 | 0 | { |
496 | 0 | int k; |
497 | |
498 | 0 | for (k = 0; k < istate->cache_nr; k++) { |
499 | 0 | if (lazy_entries[k].dir) |
500 | 0 | lazy_entries[k].dir->nr++; |
501 | 0 | } |
502 | 0 | } |
503 | | |
504 | | static void threaded_lazy_init_name_hash( |
505 | | struct index_state *istate) |
506 | 0 | { |
507 | 0 | int err; |
508 | 0 | int nr_each; |
509 | 0 | int k_start; |
510 | 0 | int t; |
511 | 0 | struct lazy_entry *lazy_entries; |
512 | 0 | struct lazy_dir_thread_data *td_dir; |
513 | 0 | struct lazy_name_thread_data *td_name; |
514 | |
515 | 0 | if (!HAVE_THREADS) |
516 | 0 | return; |
517 | | |
518 | 0 | k_start = 0; |
519 | 0 | nr_each = DIV_ROUND_UP(istate->cache_nr, lazy_nr_dir_threads); |
520 | |
521 | 0 | CALLOC_ARRAY(lazy_entries, istate->cache_nr); |
522 | 0 | CALLOC_ARRAY(td_dir, lazy_nr_dir_threads); |
523 | 0 | CALLOC_ARRAY(td_name, 1); |
524 | |
525 | 0 | init_dir_mutex(); |
526 | | |
527 | | /* |
528 | | * Phase 1: |
529 | | * Build "istate->dir_hash" using n "dir" threads (and a read-only index). |
530 | | */ |
531 | 0 | for (t = 0; t < lazy_nr_dir_threads; t++) { |
532 | 0 | struct lazy_dir_thread_data *td_dir_t = td_dir + t; |
533 | 0 | td_dir_t->istate = istate; |
534 | 0 | td_dir_t->lazy_entries = lazy_entries; |
535 | 0 | td_dir_t->k_start = k_start; |
536 | 0 | k_start += nr_each; |
537 | 0 | if (k_start > istate->cache_nr) |
538 | 0 | k_start = istate->cache_nr; |
539 | 0 | td_dir_t->k_end = k_start; |
540 | 0 | err = pthread_create(&td_dir_t->pthread, NULL, lazy_dir_thread_proc, td_dir_t); |
541 | 0 | if (err) |
542 | 0 | die(_("unable to create lazy_dir thread: %s"), strerror(err)); |
543 | 0 | } |
544 | 0 | for (t = 0; t < lazy_nr_dir_threads; t++) { |
545 | 0 | struct lazy_dir_thread_data *td_dir_t = td_dir + t; |
546 | 0 | if (pthread_join(td_dir_t->pthread, NULL)) |
547 | 0 | die("unable to join lazy_dir_thread"); |
548 | 0 | } |
549 | | |
550 | | /* |
551 | | * Phase 2: |
552 | | * Iterate over all index entries and add them to the "istate->name_hash" |
553 | | * using a single "name" background thread. |
554 | | * (Testing showed it wasn't worth running more than 1 thread for this.) |
555 | | * |
556 | | * Meanwhile, finish updating the parent directory ref-counts for each |
557 | | * index entry using the current thread. (This step is very fast and |
558 | | * doesn't need threading.) |
559 | | */ |
560 | 0 | td_name->istate = istate; |
561 | 0 | td_name->lazy_entries = lazy_entries; |
562 | 0 | err = pthread_create(&td_name->pthread, NULL, lazy_name_thread_proc, td_name); |
563 | 0 | if (err) |
564 | 0 | die(_("unable to create lazy_name thread: %s"), strerror(err)); |
565 | | |
566 | 0 | lazy_update_dir_ref_counts(istate, lazy_entries); |
567 | |
568 | 0 | err = pthread_join(td_name->pthread, NULL); |
569 | 0 | if (err) |
570 | 0 | die(_("unable to join lazy_name thread: %s"), strerror(err)); |
571 | | |
572 | 0 | cleanup_dir_mutex(); |
573 | |
574 | 0 | free(td_name); |
575 | 0 | free(td_dir); |
576 | 0 | free(lazy_entries); |
577 | 0 | } |
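
The slicing above hands each dir thread a contiguous range of roughly nr_each entries. For example, with cache_nr = 10,001 and 4 dir threads, nr_each = DIV_ROUND_UP(10001, 4) = 2501, giving ranges [0, 2501), [2501, 5002), [5002, 7503), and [7503, 10001); the `if (k_start > istate->cache_nr)` clamp is what trims the final slice.
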
578 | | |
579 | | static void lazy_init_name_hash(struct index_state *istate) |
580 | 62.5k | { |
581 | |
582 | 62.5k | if (istate->name_hash_initialized) |
583 | 61.2k | return; |
584 | 1.28k | trace_performance_enter(); |
585 | 1.28k | trace2_region_enter("index", "name-hash-init", istate->repo); |
586 | 1.28k | hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr); |
587 | 1.28k | hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr); |
588 | | |
589 | 1.28k | if (lookup_lazy_params(istate)) { |
590 | | /* |
591 | | * Disable item counting and automatic rehashing because |
592 | | * we do per-chain (mod n) locking rather than whole hashmap |
593 | | * locking and we need to prevent the table-size from changing |
594 | | * and bucket items from being redistributed. |
595 | | */ |
596 | 0 | hashmap_disable_item_counting(&istate->dir_hash); |
597 | 0 | threaded_lazy_init_name_hash(istate); |
598 | 0 | hashmap_enable_item_counting(&istate->dir_hash); |
599 | 1.28k | } else { |
600 | 1.28k | int nr; |
601 | 1.28k | for (nr = 0; nr < istate->cache_nr; nr++) |
602 | 0 | hash_index_entry(istate, istate->cache[nr]); |
603 | 1.28k | } |
604 | | |
605 | 1.28k | istate->name_hash_initialized = 1; |
606 | 1.28k | trace2_region_leave("index", "name-hash-init", istate->repo); |
607 | 1.28k | trace_performance_leave("initialize name hash"); |
608 | 1.28k | } |
609 | | |
610 | | /* |
611 | | * A test routine for t/helper/ sources. |
612 | | * |
613 | | * Returns the number of threads used or 0 when |
614 | | * the non-threaded code path was used. |
615 | | * |
616 | | * Requesting threading WILL NOT override guards |
617 | | * in lookup_lazy_params(). |
618 | | */ |
619 | | int test_lazy_init_name_hash(struct index_state *istate, int try_threaded) |
620 | 0 | { |
621 | 0 | lazy_nr_dir_threads = 0; |
622 | 0 | lazy_try_threaded = try_threaded; |
623 | |
624 | 0 | lazy_init_name_hash(istate); |
625 | |
626 | 0 | return lazy_nr_dir_threads; |
627 | 0 | } |
628 | | |
629 | | void add_name_hash(struct index_state *istate, struct cache_entry *ce) |
630 | 11.3k | { |
631 | 11.3k | if (istate->name_hash_initialized) |
632 | 11.3k | hash_index_entry(istate, ce); |
633 | 11.3k | } |
634 | | |
635 | | void remove_name_hash(struct index_state *istate, struct cache_entry *ce) |
636 | 0 | { |
637 | 0 | if (!istate->name_hash_initialized || !(ce->ce_flags & CE_HASHED)) |
638 | 0 | return; |
639 | 0 | ce->ce_flags &= ~CE_HASHED; |
640 | 0 | hashmap_remove(&istate->name_hash, &ce->ent, ce); |
641 | |
642 | 0 | if (ignore_case) |
643 | 0 | remove_dir_entry(istate, ce); |
644 | 0 | } |
645 | | |
646 | | static int slow_same_name(const char *name1, int len1, const char *name2, int len2) |
647 | 0 | { |
648 | 0 | if (len1 != len2) |
649 | 0 | return 0; |
650 | | |
651 | 0 | while (len1) { |
652 | 0 | unsigned char c1 = *name1++; |
653 | 0 | unsigned char c2 = *name2++; |
654 | 0 | len1--; |
655 | 0 | if (c1 != c2) { |
656 | 0 | c1 = toupper(c1); |
657 | 0 | c2 = toupper(c2); |
658 | 0 | if (c1 != c2) |
659 | 0 | return 0; |
660 | 0 | } |
661 | 0 | } |
662 | 0 | return 1; |
663 | 0 | } |
664 | | |
665 | | static int same_name(const struct cache_entry *ce, const char *name, int namelen, int icase) |
666 | 31.1k | { |
667 | 31.1k | int len = ce_namelen(ce); |
668 | | |
669 | | /* |
670 | | * Always do exact compare, even if we want a case-ignoring comparison; |
671 | | * we do the quick exact one first, because it will be the common case. |
672 | | */ |
673 | 31.1k | if (len == namelen && !memcmp(name, ce->name, len)) |
674 | 31.1k | return 1; |
675 | | |
676 | 0 | if (!icase) |
677 | 0 | return 0; |
678 | | |
679 | 0 | return slow_same_name(name, namelen, ce->name, len); |
680 | 0 | } |
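
same_name() always tries the exact memcmp() first because even under ignore_case most probes hit the exact on-disk spelling. A standalone equivalent of the same_name()/slow_same_name() pair, compressed into one illustrative helper:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static int names_match(const char *a, size_t alen,
                           const char *b, size_t blen, int icase)
    {
            size_t i;

            if (alen != blen)
                    return 0;
            if (!memcmp(a, b, alen))        /* fast path: exact match */
                    return 1;
            if (!icase)
                    return 0;
            for (i = 0; i < alen; i++)      /* slow path: fold case */
                    if (toupper((unsigned char)a[i]) !=
                        toupper((unsigned char)b[i]))
                            return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", names_match("Makefile", 8, "makefile", 8, 1)); /* 1 */
            printf("%d\n", names_match("Makefile", 8, "makefile", 8, 0)); /* 0 */
            return 0;
    }
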
681 | | |
682 | | int index_dir_exists(struct index_state *istate, const char *name, int namelen) |
683 | 0 | { |
684 | 0 | struct dir_entry *dir; |
685 | |
686 | 0 | lazy_init_name_hash(istate); |
687 | 0 | expand_to_path(istate, name, namelen, 0); |
688 | 0 | dir = find_dir_entry(istate, name, namelen); |
689 | 0 | return dir && dir->nr; |
690 | 0 | } |
691 | | |
692 | | void adjust_dirname_case(struct index_state *istate, char *name) |
693 | 0 | { |
694 | 0 | const char *startPtr = name; |
695 | 0 | const char *ptr = startPtr; |
696 | |
697 | 0 | lazy_init_name_hash(istate); |
698 | 0 | expand_to_path(istate, name, strlen(name), 0); |
699 | 0 | while (*ptr) { |
700 | 0 | while (*ptr && *ptr != '/') |
701 | 0 | ptr++; |
702 | |
703 | 0 | if (*ptr == '/') { |
704 | 0 | struct dir_entry *dir; |
705 | |
706 | 0 | dir = find_dir_entry(istate, name, ptr - name); |
707 | 0 | if (dir) { |
708 | 0 | memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr); |
709 | 0 | startPtr = ptr + 1; |
710 | 0 | } |
711 | 0 | ptr++; |
712 | 0 | } |
713 | 0 | } |
714 | 0 | } |
715 | | |
716 | | struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int icase) |
717 | 62.5k | { |
718 | 62.5k | struct cache_entry *ce; |
719 | 62.5k | unsigned int hash = memihash(name, namelen); |
720 | | |
721 | 62.5k | lazy_init_name_hash(istate); |
722 | 62.5k | expand_to_path(istate, name, namelen, icase); |
723 | | |
724 | 62.5k | ce = hashmap_get_entry_from_hash(&istate->name_hash, hash, NULL, |
725 | 62.5k | struct cache_entry, ent); |
726 | 62.5k | hashmap_for_each_entry_from(&istate->name_hash, ce, ent) { |
727 | 31.1k | if (same_name(ce, name, namelen, icase)) |
728 | 31.1k | return ce; |
729 | 31.1k | } |
730 | 31.3k | return NULL; |
731 | 62.5k | } |
732 | | |
733 | | void free_name_hash(struct index_state *istate) |
734 | 23.3k | { |
735 | 23.3k | if (!istate->name_hash_initialized) |
736 | 22.0k | return; |
737 | 1.28k | istate->name_hash_initialized = 0; |
738 | | |
739 | 1.28k | hashmap_clear(&istate->name_hash); |
740 | 1.28k | hashmap_clear_and_free(&istate->dir_hash, struct dir_entry, ent); |
741 | 1.28k | } |