Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (C) 2008 Linus Torvalds |
3 | | */ |
4 | | #include "git-compat-util.h" |
5 | | #include "pathspec.h" |
6 | | #include "dir.h" |
7 | | #include "environment.h" |
8 | | #include "fsmonitor.h" |
9 | | #include "gettext.h" |
10 | | #include "parse.h" |
11 | | #include "preload-index.h" |
12 | | #include "progress.h" |
13 | | #include "read-cache.h" |
14 | | #include "thread-utils.h" |
15 | | #include "repository.h" |
16 | | #include "symlinks.h" |
17 | | #include "trace2.h" |
18 | | |
19 | | /* |
20 | | * Mostly randomly chosen maximum thread counts: we |
21 | | * cap the parallelism to 20 threads, and we want |
22 | | * to have at least 500 lstat's per thread for it to |
23 | | * be worth starting a thread. |
24 | | */ |
25 | 0 | #define MAX_PARALLEL (20) |
26 | 0 | #define THREAD_COST (500) |
27 | | |
28 | | struct progress_data { |
29 | | unsigned long n; |
30 | | struct progress *progress; |
31 | | pthread_mutex_t mutex; |
32 | | }; |
33 | | |
34 | | struct thread_data { |
35 | | pthread_t pthread; |
36 | | struct index_state *index; |
37 | | struct pathspec pathspec; |
38 | | struct progress_data *progress; |
39 | | int offset, nr; |
40 | | int t2_nr_lstat; |
41 | | }; |
42 | | |
/*
 * Thread worker: refresh the stat information for a contiguous slice
 * of the index (entries [p->offset, p->offset + p->nr)).  For each
 * entry that is not already known to be up to date, lstat() the path
 * and, if the on-disk state matches the cached stat data, mark the
 * entry uptodate (and fsmonitor-valid).  Returns NULL; results are
 * communicated by mutating the shared cache entries and by the
 * per-thread p->t2_nr_lstat counter read after pthread_join().
 */
static void *preload_thread(void *_data)
{
	int nr, last_nr;
	struct thread_data *p = _data;
	struct index_state *index = p->index;
	struct cache_entry **cep = index->cache + p->offset;
	struct cache_def cache = CACHE_DEF_INIT;

	/* Clamp the slice so the last thread does not run past the index. */
	nr = p->nr;
	if (nr + p->offset > index->cache_nr)
		nr = index->cache_nr - p->offset;
	/* last_nr tracks the counter value at the previous progress update. */
	last_nr = nr;

	do {
		struct cache_entry *ce = *cep++;
		struct stat st;

		/* Cheap skips first: unmerged, submodule, or already valid. */
		if (ce_stage(ce))
			continue;
		if (S_ISGITLINK(ce->ce_mode))
			continue;
		if (ce_uptodate(ce))
			continue;
		if (ce_skip_worktree(ce))
			continue;
		if (ce->ce_flags & CE_FSMONITOR_VALID)
			continue;
		/*
		 * Batch progress updates: take the shared mutex only once
		 * every 32 entries and report how many we processed since
		 * the last update (last_nr - nr).
		 */
		if (p->progress && !(nr & 31)) {
			struct progress_data *pd = p->progress;

			pthread_mutex_lock(&pd->mutex);
			pd->n += last_nr - nr;
			display_progress(pd->progress, pd->n);
			pthread_mutex_unlock(&pd->mutex);
			last_nr = nr;
		}
		if (!ce_path_match(index, ce, &p->pathspec, NULL))
			continue;
		/* Skip paths under a symlinked directory; the cached lstat
		 * data would not describe the path we think it does. */
		if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
			continue;
		p->t2_nr_lstat++;
		if (lstat(ce->name, &st))
			continue;
		/*
		 * Racy timestamps count as dirty here, and fsmonitor state
		 * is ignored since we checked CE_FSMONITOR_VALID above.
		 */
		if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY|CE_MATCH_IGNORE_FSMONITOR))
			continue;
		ce_mark_uptodate(ce);
		mark_fsmonitor_valid(index, ce);
	} while (--nr > 0);
	/* Flush the final partial batch of progress (last_nr entries). */
	if (p->progress) {
		struct progress_data *pd = p->progress;

		pthread_mutex_lock(&pd->mutex);
		display_progress(pd->progress, pd->n + last_nr);
		pthread_mutex_unlock(&pd->mutex);
	}
	cache_def_clear(&cache);
	return NULL;
}
101 | | |
/*
 * Warm the stat cache for the whole index by spreading the lstat()
 * calls over up to MAX_PARALLEL worker threads.  A no-op when the
 * platform lacks threads, core.preloadIndex is disabled, or the index
 * is too small (< 2 * THREAD_COST entries) to be worth the thread
 * start-up cost.  With REFRESH_PROGRESS and stderr on a tty, shows a
 * delayed "Refreshing index" progress meter shared across threads.
 */
void preload_index(struct index_state *index,
		   const struct pathspec *pathspec,
		   unsigned int refresh_flags)
{
	int threads, i, work, offset;
	struct thread_data data[MAX_PARALLEL];
	struct progress_data pd;
	int t2_sum_lstat = 0;

	if (!HAVE_THREADS || !core_preload_index)
		return;

	/* One thread per THREAD_COST entries; the test knob forces two
	 * threads so the threaded path gets exercised on tiny indexes. */
	threads = index->cache_nr / THREAD_COST;
	if ((index->cache_nr > 1) && (threads < 2) && git_env_bool("GIT_TEST_PRELOAD_INDEX", 0))
		threads = 2;
	if (threads < 2)
		return;

	trace2_region_enter("index", "preload", NULL);

	trace_performance_enter();
	if (threads > MAX_PARALLEL)
		threads = MAX_PARALLEL;
	offset = 0;
	/* Round up so threads * work covers every entry; preload_thread
	 * clamps the final slice to the actual index size. */
	work = DIV_ROUND_UP(index->cache_nr, threads);
	memset(&data, 0, sizeof(data));

	memset(&pd, 0, sizeof(pd));
	if (refresh_flags & REFRESH_PROGRESS && isatty(2)) {
		pd.progress = start_delayed_progress(_("Refreshing index"), index->cache_nr);
		pthread_mutex_init(&pd.mutex, NULL);
	}

	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		int err;

		p->index = index;
		/* Each thread gets a deep copy; pathspec matching caches
		 * state in the struct, so sharing one would race. */
		if (pathspec)
			copy_pathspec(&p->pathspec, pathspec);
		p->offset = offset;
		p->nr = work;
		if (pd.progress)
			p->progress = &pd;
		offset += work;
		err = pthread_create(&p->pthread, NULL, preload_thread, p);

		if (err)
			die(_("unable to create threaded lstat: %s"), strerror(err));
	}
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		if (pthread_join(p->pthread, NULL))
			die("unable to join threaded lstat");
		/* Aggregate per-thread lstat counts for trace2 reporting. */
		t2_sum_lstat += p->t2_nr_lstat;
	}
	stop_progress(&pd.progress);

	if (pathspec) {
		/* earlier we made deep copies for each thread to work with */
		for (i = 0; i < threads; i++)
			clear_pathspec(&data[i].pathspec);
	}

	trace_performance_leave("preload index");

	trace2_data_intmax("index", NULL, "preload/sum_lstat", t2_sum_lstat);
	trace2_region_leave("index", "preload", NULL);
}
171 | | |
172 | | int repo_read_index_preload(struct repository *repo, |
173 | | const struct pathspec *pathspec, |
174 | | unsigned int refresh_flags) |
175 | 0 | { |
176 | 0 | int retval = repo_read_index(repo); |
177 | |
|
178 | 0 | preload_index(repo->index, pathspec, refresh_flags); |
179 | 0 | return retval; |
180 | 0 | } |