Line | Count | Source |
1 | | /* |
2 | | * GIT - The information manager from hell |
3 | | * |
4 | | * Copyright (C) Linus Torvalds, 2005 |
5 | | */ |
6 | | #include "cache.h" |
7 | | #include "config.h" |
8 | | #include "diff.h" |
9 | | #include "diffcore.h" |
10 | | #include "tempfile.h" |
11 | | #include "lockfile.h" |
12 | | #include "cache-tree.h" |
13 | | #include "refs.h" |
14 | | #include "dir.h" |
15 | | #include "object-store.h" |
16 | | #include "tree.h" |
17 | | #include "commit.h" |
18 | | #include "blob.h" |
19 | | #include "resolve-undo.h" |
20 | | #include "run-command.h" |
21 | | #include "strbuf.h" |
22 | | #include "varint.h" |
23 | | #include "split-index.h" |
24 | | #include "utf8.h" |
25 | | #include "fsmonitor.h" |
26 | | #include "thread-utils.h" |
27 | | #include "progress.h" |
28 | | #include "sparse-index.h" |
29 | | #include "csum-file.h" |
30 | | #include "promisor-remote.h" |
31 | | #include "hook.h" |
32 | | |
33 | | /* Mask for the name length in ce_flags in the on-disk index */ |
34 | | |
35 | 0 | #define CE_NAMEMASK (0x0fff) |
36 | | |
37 | | /* Index extensions. |
38 | | * |
39 | | * The first letter should be 'A'..'Z' for extensions that are not |
40 | | * necessary for a correct operation (i.e. optimization data). |
41 | | * When new extensions are added that _need_ to be understood in |
42 | | * order to correctly interpret the index file, pick a character |
43 | | * that is outside the range, to cause the reader to abort. |
44 | | */ |
45 | | |
46 | 0 | #define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) ) |
47 | 0 | #define CACHE_EXT_TREE 0x54524545 /* "TREE" */ |
48 | 0 | #define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */ |
49 | 0 | #define CACHE_EXT_LINK 0x6c696e6b /* "link" */ |
50 | 0 | #define CACHE_EXT_UNTRACKED 0x554E5452 /* "UNTR" */ |
51 | 0 | #define CACHE_EXT_FSMONITOR 0x46534D4E /* "FSMN" */ |
52 | 0 | #define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945 /* "EOIE" */ |
53 | 0 | #define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */ |
54 | 0 | #define CACHE_EXT_SPARSE_DIRECTORIES 0x73646972 /* "sdir" */ |
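
An aside on the packing above: a minimal standalone check (hypothetical program, not part of this file) that CACHE_EXT folds a four-letter tag into the expected big-endian word. Per the comment, tags starting with 'A'..'Z' ("TREE", "REUC", ...) are optional, while tags outside that range ("link", "sdir") must be understood by the reader.

    #include <assert.h>

    #define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )

    int main(void)
    {
        assert(CACHE_EXT("TREE") == 0x54524545);  /* optional extension */
        assert(CACHE_EXT("link") == 0x6c696e6b);  /* mandatory extension */
        return 0;
    }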
55 | | |
56 | | /* changes that can be kept in $GIT_DIR/index (basically all extensions) */ |
57 | 0 | #define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \ |
58 | 0 | CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \ |
59 | 0 | SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED) |
60 | | |
61 | | |
62 | | /* |
63 | | * This is an estimate of the pathname length in the index. We use |
64 | | * this for V4 index files to guess the un-deltafied size of the index |
65 | | * in memory because of pathname deltafication. This is not required |
66 | | * for V2/V3 index formats because their pathnames are not compressed. |
67 | | * If the initial amount of memory set aside is not sufficient, the |
68 | | * mem pool will allocate extra memory. |
69 | | */ |
70 | 0 | #define CACHE_ENTRY_PATH_LENGTH 80 |
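
As a sketch of how this constant feeds a memory estimate (the helper name and the 96-byte header below are assumptions for illustration, not this file's API): a V4 reader sizing its mem pool up front might reserve a fixed per-entry header plus roughly 80 bytes of pathname per entry.

    #include <assert.h>
    #include <stddef.h>

    #define CACHE_ENTRY_PATH_LENGTH 80

    /* hypothetical: nr entries times (fixed header + estimated path) */
    static size_t estimate_v4_pool_bytes(size_t nr, size_t header_size)
    {
        return nr * (header_size + CACHE_ENTRY_PATH_LENGTH);
    }

    int main(void)
    {
        assert(estimate_v4_pool_bytes(1000, 96) == 176000);
        return 0;
    }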
71 | | |
72 | | enum index_search_mode { |
73 | | NO_EXPAND_SPARSE = 0, |
74 | | EXPAND_SPARSE = 1 |
75 | | }; |
76 | | |
77 | | static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len) |
78 | 0 | { |
79 | 0 | struct cache_entry *ce; |
80 | 0 | ce = mem_pool_alloc(mem_pool, cache_entry_size(len)); |
81 | 0 | ce->mem_pool_allocated = 1; |
82 | 0 | return ce; |
83 | 0 | } |
84 | | |
85 | | static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len) |
86 | 0 | { |
87 | 0 | struct cache_entry * ce; |
88 | 0 | ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len)); |
89 | 0 | ce->mem_pool_allocated = 1; |
90 | 0 | return ce; |
91 | 0 | } |
92 | | |
93 | | static struct mem_pool *find_mem_pool(struct index_state *istate) |
94 | 0 | { |
95 | 0 | struct mem_pool **pool_ptr; |
96 | |
97 | 0 | if (istate->split_index && istate->split_index->base) |
98 | 0 | pool_ptr = &istate->split_index->base->ce_mem_pool; |
99 | 0 | else |
100 | 0 | pool_ptr = &istate->ce_mem_pool; |
101 | |
102 | 0 | if (!*pool_ptr) { |
103 | 0 | *pool_ptr = xmalloc(sizeof(**pool_ptr)); |
104 | 0 | mem_pool_init(*pool_ptr, 0); |
105 | 0 | } |
106 | |
107 | 0 | return *pool_ptr; |
108 | 0 | } |
109 | | |
110 | | static const char *alternate_index_output; |
111 | | |
112 | | static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce) |
113 | 0 | { |
114 | 0 | if (S_ISSPARSEDIR(ce->ce_mode)) |
115 | 0 | istate->sparse_index = INDEX_COLLAPSED; |
116 | |
117 | 0 | istate->cache[nr] = ce; |
118 | 0 | add_name_hash(istate, ce); |
119 | 0 | } |
120 | | |
121 | | static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce) |
122 | 0 | { |
123 | 0 | struct cache_entry *old = istate->cache[nr]; |
124 | |
125 | 0 | replace_index_entry_in_base(istate, old, ce); |
126 | 0 | remove_name_hash(istate, old); |
127 | 0 | discard_cache_entry(old); |
128 | 0 | ce->ce_flags &= ~CE_HASHED; |
129 | 0 | set_index_entry(istate, nr, ce); |
130 | 0 | ce->ce_flags |= CE_UPDATE_IN_BASE; |
131 | 0 | mark_fsmonitor_invalid(istate, ce); |
132 | 0 | istate->cache_changed |= CE_ENTRY_CHANGED; |
133 | 0 | } |
134 | | |
135 | | void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name) |
136 | 0 | { |
137 | 0 | struct cache_entry *old_entry = istate->cache[nr], *new_entry, *refreshed; |
138 | 0 | int namelen = strlen(new_name); |
139 | |
140 | 0 | new_entry = make_empty_cache_entry(istate, namelen); |
141 | 0 | copy_cache_entry(new_entry, old_entry); |
142 | 0 | new_entry->ce_flags &= ~CE_HASHED; |
143 | 0 | new_entry->ce_namelen = namelen; |
144 | 0 | new_entry->index = 0; |
145 | 0 | memcpy(new_entry->name, new_name, namelen + 1); |
146 | |
147 | 0 | cache_tree_invalidate_path(istate, old_entry->name); |
148 | 0 | untracked_cache_remove_from_index(istate, old_entry->name); |
149 | 0 | remove_index_entry_at(istate, nr); |
150 | | |
151 | | /* |
152 | | * Refresh the new index entry. Using 'refresh_cache_entry' ensures |
153 | | * we only update stat info if the entry is otherwise up-to-date (i.e., |
154 | | * the contents/mode haven't changed). This ensures that we reflect the |
155 | | * 'ctime' of the rename in the index without (incorrectly) updating |
156 | | * the cached stat info to reflect unstaged changes on disk. |
157 | | */ |
158 | 0 | refreshed = refresh_cache_entry(istate, new_entry, CE_MATCH_REFRESH); |
159 | 0 | if (refreshed && refreshed != new_entry) { |
160 | 0 | add_index_entry(istate, refreshed, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE); |
161 | 0 | discard_cache_entry(new_entry); |
162 | 0 | } else |
163 | 0 | add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE); |
164 | 0 | } |
165 | | |
166 | | void fill_stat_data(struct stat_data *sd, struct stat *st) |
167 | 0 | { |
168 | 0 | sd->sd_ctime.sec = (unsigned int)st->st_ctime; |
169 | 0 | sd->sd_mtime.sec = (unsigned int)st->st_mtime; |
170 | 0 | sd->sd_ctime.nsec = ST_CTIME_NSEC(*st); |
171 | 0 | sd->sd_mtime.nsec = ST_MTIME_NSEC(*st); |
172 | 0 | sd->sd_dev = st->st_dev; |
173 | 0 | sd->sd_ino = st->st_ino; |
174 | 0 | sd->sd_uid = st->st_uid; |
175 | 0 | sd->sd_gid = st->st_gid; |
176 | 0 | sd->sd_size = st->st_size; |
177 | 0 | } |
178 | | |
179 | | int match_stat_data(const struct stat_data *sd, struct stat *st) |
180 | 0 | { |
181 | 0 | int changed = 0; |
182 | |
183 | 0 | if (sd->sd_mtime.sec != (unsigned int)st->st_mtime) |
184 | 0 | changed |= MTIME_CHANGED; |
185 | 0 | if (trust_ctime && check_stat && |
186 | 0 | sd->sd_ctime.sec != (unsigned int)st->st_ctime) |
187 | 0 | changed |= CTIME_CHANGED; |
188 | |
189 | | #ifdef USE_NSEC |
190 | | if (check_stat && sd->sd_mtime.nsec != ST_MTIME_NSEC(*st)) |
191 | | changed |= MTIME_CHANGED; |
192 | | if (trust_ctime && check_stat && |
193 | | sd->sd_ctime.nsec != ST_CTIME_NSEC(*st)) |
194 | | changed |= CTIME_CHANGED; |
195 | | #endif |
196 | |
197 | 0 | if (check_stat) { |
198 | 0 | if (sd->sd_uid != (unsigned int) st->st_uid || |
199 | 0 | sd->sd_gid != (unsigned int) st->st_gid) |
200 | 0 | changed |= OWNER_CHANGED; |
201 | 0 | if (sd->sd_ino != (unsigned int) st->st_ino) |
202 | 0 | changed |= INODE_CHANGED; |
203 | 0 | } |
204 | |
205 | | #ifdef USE_STDEV |
206 | | /* |
207 | | * st_dev breaks on network filesystems where different |
208 | | * clients will have different views of what "device" |
209 | | * the filesystem is on |
210 | | */ |
211 | | if (check_stat && sd->sd_dev != (unsigned int) st->st_dev) |
212 | | changed |= INODE_CHANGED; |
213 | | #endif |
214 | |
215 | 0 | if (sd->sd_size != (unsigned int) st->st_size) |
216 | 0 | changed |= DATA_CHANGED; |
217 | |
218 | 0 | return changed; |
219 | 0 | } |
220 | | |
221 | | /* |
222 | | * This only updates the "non-critical" parts of the directory |
223 | | * cache, i.e. the parts that aren't tracked by Git and are |
224 | | * only used to validate the cache. |
225 | | */ |
226 | | void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st) |
227 | 0 | { |
228 | 0 | fill_stat_data(&ce->ce_stat_data, st); |
229 | |
230 | 0 | if (assume_unchanged) |
231 | 0 | ce->ce_flags |= CE_VALID; |
232 | |
233 | 0 | if (S_ISREG(st->st_mode)) { |
234 | 0 | ce_mark_uptodate(ce); |
235 | 0 | mark_fsmonitor_valid(istate, ce); |
236 | 0 | } |
237 | 0 | } |
238 | | |
239 | | static int ce_compare_data(struct index_state *istate, |
240 | | const struct cache_entry *ce, |
241 | | struct stat *st) |
242 | 0 | { |
243 | 0 | int match = -1; |
244 | 0 | int fd = git_open_cloexec(ce->name, O_RDONLY); |
245 | |
246 | 0 | if (fd >= 0) { |
247 | 0 | struct object_id oid; |
248 | 0 | if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0)) |
249 | 0 | match = !oideq(&oid, &ce->oid); |
250 | | /* index_fd() closed the file descriptor already */ |
251 | 0 | } |
252 | 0 | return match; |
253 | 0 | } |
254 | | |
255 | | static int ce_compare_link(const struct cache_entry *ce, size_t expected_size) |
256 | 0 | { |
257 | 0 | int match = -1; |
258 | 0 | void *buffer; |
259 | 0 | unsigned long size; |
260 | 0 | enum object_type type; |
261 | 0 | struct strbuf sb = STRBUF_INIT; |
262 | |
263 | 0 | if (strbuf_readlink(&sb, ce->name, expected_size)) |
264 | 0 | return -1; |
265 | | |
266 | 0 | buffer = read_object_file(&ce->oid, &type, &size); |
267 | 0 | if (buffer) { |
268 | 0 | if (size == sb.len) |
269 | 0 | match = memcmp(buffer, sb.buf, size); |
270 | 0 | free(buffer); |
271 | 0 | } |
272 | 0 | strbuf_release(&sb); |
273 | 0 | return match; |
274 | 0 | } |
275 | | |
276 | | static int ce_compare_gitlink(const struct cache_entry *ce) |
277 | 0 | { |
278 | 0 | struct object_id oid; |
279 | | |
280 | | /* |
281 | | * We don't actually require that the .git directory |
282 | | * under a GITLINK directory be a valid git directory. It |
283 | | * might even be missing (in case nobody populated that |
284 | | * sub-project). |
285 | | * |
286 | | * If so, we consider it always to match. |
287 | | */ |
288 | 0 | if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0) |
289 | 0 | return 0; |
290 | 0 | return !oideq(&oid, &ce->oid); |
291 | 0 | } |
292 | | |
293 | | static int ce_modified_check_fs(struct index_state *istate, |
294 | | const struct cache_entry *ce, |
295 | | struct stat *st) |
296 | 0 | { |
297 | 0 | switch (st->st_mode & S_IFMT) { |
298 | 0 | case S_IFREG: |
299 | 0 | if (ce_compare_data(istate, ce, st)) |
300 | 0 | return DATA_CHANGED; |
301 | 0 | break; |
302 | 0 | case S_IFLNK: |
303 | 0 | if (ce_compare_link(ce, xsize_t(st->st_size))) |
304 | 0 | return DATA_CHANGED; |
305 | 0 | break; |
306 | 0 | case S_IFDIR: |
307 | 0 | if (S_ISGITLINK(ce->ce_mode)) |
308 | 0 | return ce_compare_gitlink(ce) ? DATA_CHANGED : 0; |
309 | | /* else fallthrough */ |
310 | 0 | default: |
311 | 0 | return TYPE_CHANGED; |
312 | 0 | } |
313 | 0 | return 0; |
314 | 0 | } |
315 | | |
316 | | static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st) |
317 | 0 | { |
318 | 0 | unsigned int changed = 0; |
319 | |
320 | 0 | if (ce->ce_flags & CE_REMOVE) |
321 | 0 | return MODE_CHANGED | DATA_CHANGED | TYPE_CHANGED; |
322 | | |
323 | 0 | switch (ce->ce_mode & S_IFMT) { |
324 | 0 | case S_IFREG: |
325 | 0 | changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0; |
326 | | /* We consider only the owner x bit to be relevant for |
327 | | * "mode changes" |
328 | | */ |
329 | 0 | if (trust_executable_bit && |
330 | 0 | (0100 & (ce->ce_mode ^ st->st_mode))) |
331 | 0 | changed |= MODE_CHANGED; |
332 | 0 | break; |
333 | 0 | case S_IFLNK: |
334 | 0 | if (!S_ISLNK(st->st_mode) && |
335 | 0 | (has_symlinks || !S_ISREG(st->st_mode))) |
336 | 0 | changed |= TYPE_CHANGED; |
337 | 0 | break; |
338 | 0 | case S_IFGITLINK: |
339 | | /* We ignore most of the st_xxx fields for gitlinks */ |
340 | 0 | if (!S_ISDIR(st->st_mode)) |
341 | 0 | changed |= TYPE_CHANGED; |
342 | 0 | else if (ce_compare_gitlink(ce)) |
343 | 0 | changed |= DATA_CHANGED; |
344 | 0 | return changed; |
345 | 0 | default: |
346 | 0 | BUG("unsupported ce_mode: %o", ce->ce_mode); |
347 | 0 | } |
348 | | |
349 | 0 | changed |= match_stat_data(&ce->ce_stat_data, st); |
350 | | |
351 | | /* Racily smudged entry? */ |
352 | 0 | if (!ce->ce_stat_data.sd_size) { |
353 | 0 | if (!is_empty_blob_sha1(ce->oid.hash)) |
354 | 0 | changed |= DATA_CHANGED; |
355 | 0 | } |
356 | |
357 | 0 | return changed; |
358 | 0 | } |
359 | | |
360 | | static int is_racy_stat(const struct index_state *istate, |
361 | | const struct stat_data *sd) |
362 | 0 | { |
363 | 0 | return (istate->timestamp.sec && |
364 | | #ifdef USE_NSEC |
365 | | /* nanosecond timestamped files can also be racy! */ |
366 | | (istate->timestamp.sec < sd->sd_mtime.sec || |
367 | | (istate->timestamp.sec == sd->sd_mtime.sec && |
368 | | istate->timestamp.nsec <= sd->sd_mtime.nsec)) |
369 | | #else |
370 | 0 | istate->timestamp.sec <= sd->sd_mtime.sec |
371 | 0 | #endif |
372 | 0 | ); |
373 | 0 | } |
374 | | |
375 | | int is_racy_timestamp(const struct index_state *istate, |
376 | | const struct cache_entry *ce) |
377 | 0 | { |
378 | 0 | return (!S_ISGITLINK(ce->ce_mode) && |
379 | 0 | is_racy_stat(istate, &ce->ce_stat_data)); |
380 | 0 | } |
381 | | |
382 | | int match_stat_data_racy(const struct index_state *istate, |
383 | | const struct stat_data *sd, struct stat *st) |
384 | 0 | { |
385 | 0 | if (is_racy_stat(istate, sd)) |
386 | 0 | return MTIME_CHANGED; |
387 | 0 | return match_stat_data(sd, st); |
388 | 0 | } |
389 | | |
390 | | int ie_match_stat(struct index_state *istate, |
391 | | const struct cache_entry *ce, struct stat *st, |
392 | | unsigned int options) |
393 | 0 | { |
394 | 0 | unsigned int changed; |
395 | 0 | int ignore_valid = options & CE_MATCH_IGNORE_VALID; |
396 | 0 | int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE; |
397 | 0 | int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY; |
398 | 0 | int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR; |
399 | |
400 | 0 | if (!ignore_fsmonitor) |
401 | 0 | refresh_fsmonitor(istate); |
402 | | /* |
403 | | * If it's marked as always valid in the index, it's |
404 | | * valid whatever the checked-out copy says. |
405 | | * |
406 | | * skip-worktree has the same effect with higher precedence |
407 | | */ |
408 | 0 | if (!ignore_skip_worktree && ce_skip_worktree(ce)) |
409 | 0 | return 0; |
410 | 0 | if (!ignore_valid && (ce->ce_flags & CE_VALID)) |
411 | 0 | return 0; |
412 | 0 | if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID)) |
413 | 0 | return 0; |
414 | | |
415 | | /* |
416 | | * Intent-to-add entries have not been added, so the index entry |
417 | | * by definition never matches what is in the work tree until it |
418 | | * actually gets added. |
419 | | */ |
420 | 0 | if (ce_intent_to_add(ce)) |
421 | 0 | return DATA_CHANGED | TYPE_CHANGED | MODE_CHANGED; |
422 | | |
423 | 0 | changed = ce_match_stat_basic(ce, st); |
424 | | |
425 | | /* |
426 | | * Within 1 second of this sequence: |
427 | | * echo xyzzy >file && git-update-index --add file |
428 | | * running this command: |
429 | | * echo frotz >file |
430 | | * would give a falsely clean cache entry. The mtime and |
431 | | * length match the cache, and other stat fields do not change. |
432 | | * |
433 | | * We could detect this at update-index time (the cache entry |
434 | | * being registered/updated records the same time as "now") |
435 | | * and delay the return from git-update-index, but that would |
436 | | * effectively mean we can make at most one commit per second, |
437 | | * which is not acceptable. Instead, we check cache entries |
438 | | * whose mtime are the same as the index file timestamp more |
439 | | * carefully than others. |
440 | | */ |
441 | 0 | if (!changed && is_racy_timestamp(istate, ce)) { |
442 | 0 | if (assume_racy_is_modified) |
443 | 0 | changed |= DATA_CHANGED; |
444 | 0 | else |
445 | 0 | changed |= ce_modified_check_fs(istate, ce, st); |
446 | 0 | } |
447 | |
448 | 0 | return changed; |
449 | 0 | } |
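
The race in the comment above is easy to reproduce with a self-contained sketch (plain POSIX C, not Git code): two writes of equal length landing in the same second leave stat data that compares identical, which is exactly the case the racy-timestamp re-check guards against. (If the writes happen to straddle a second boundary, the message is simply not printed.)

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct stat a, b;
        FILE *f;

        f = fopen("racy-demo", "w");  /* "xyzzy" and "frotz" have equal length */
        if (!f)
            return 1;
        fputs("xyzzy\n", f);
        fclose(f);
        stat("racy-demo", &a);

        f = fopen("racy-demo", "w");  /* rewrite within the same second */
        if (!f)
            return 1;
        fputs("frotz\n", f);
        fclose(f);
        stat("racy-demo", &b);

        if (a.st_mtime == b.st_mtime && a.st_size == b.st_size)
            puts("stat(2) alone cannot see the content change");
        return 0;
    }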
450 | | |
451 | | int ie_modified(struct index_state *istate, |
452 | | const struct cache_entry *ce, |
453 | | struct stat *st, unsigned int options) |
454 | 0 | { |
455 | 0 | int changed, changed_fs; |
456 | |
457 | 0 | changed = ie_match_stat(istate, ce, st, options); |
458 | 0 | if (!changed) |
459 | 0 | return 0; |
460 | | /* |
461 | | * If the mode or type has changed, there's no point in trying |
462 | | * to refresh the entry - it's not going to match |
463 | | */ |
464 | 0 | if (changed & (MODE_CHANGED | TYPE_CHANGED)) |
465 | 0 | return changed; |
466 | | |
467 | | /* |
468 | | * Immediately after read-tree or update-index --cacheinfo, |
469 | | * the length field is zero, as we have never even read the |
470 | | * lstat(2) information once, and we cannot trust DATA_CHANGED |
471 | | * returned by ie_match_stat() which in turn was returned by |
472 | | * ce_match_stat_basic() to signal that the filesize of the |
473 | | * blob changed. We have to actually go to the filesystem to |
474 | | * see if the contents match, and if so, should answer "unchanged". |
475 | | * |
476 | | * The logic does not apply to gitlinks, as ce_match_stat_basic() |
477 | | * already has checked the actual HEAD from the filesystem in the |
478 | | * subproject. If ie_match_stat() already said it is different, |
479 | | * then we know it is. |
480 | | */ |
481 | 0 | if ((changed & DATA_CHANGED) && |
482 | 0 | (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0)) |
483 | 0 | return changed; |
484 | | |
485 | 0 | changed_fs = ce_modified_check_fs(istate, ce, st); |
486 | 0 | if (changed_fs) |
487 | 0 | return changed | changed_fs; |
488 | 0 | return 0; |
489 | 0 | } |
490 | | |
491 | | int base_name_compare(const char *name1, size_t len1, int mode1, |
492 | | const char *name2, size_t len2, int mode2) |
493 | 0 | { |
494 | 0 | unsigned char c1, c2; |
495 | 0 | size_t len = len1 < len2 ? len1 : len2; |
496 | 0 | int cmp; |
497 | |
498 | 0 | cmp = memcmp(name1, name2, len); |
499 | 0 | if (cmp) |
500 | 0 | return cmp; |
501 | 0 | c1 = name1[len]; |
502 | 0 | c2 = name2[len]; |
503 | 0 | if (!c1 && S_ISDIR(mode1)) |
504 | 0 | c1 = '/'; |
505 | 0 | if (!c2 && S_ISDIR(mode2)) |
506 | 0 | c2 = '/'; |
507 | 0 | return (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0; |
508 | 0 | } |
509 | | |
510 | | /* |
511 | | * df_name_compare() is identical to base_name_compare(), except it |
512 | | * compares conflicting directory/file entries as equal. Note that |
513 | | * while a directory name compares as equal to a regular file, they |
514 | | * then individually compare _differently_ to a filename that has |
515 | | * a dot after the basename (because '\0' < '.' < '/'). |
516 | | * |
517 | | * This is used by routines that want to traverse the git namespace |
518 | | * but then handle conflicting entries together when possible. |
519 | | */ |
520 | | int df_name_compare(const char *name1, size_t len1, int mode1, |
521 | | const char *name2, size_t len2, int mode2) |
522 | 0 | { |
523 | 0 | unsigned char c1, c2; |
524 | 0 | size_t len = len1 < len2 ? len1 : len2; |
525 | 0 | int cmp; |
526 | |
527 | 0 | cmp = memcmp(name1, name2, len); |
528 | 0 | if (cmp) |
529 | 0 | return cmp; |
530 | | /* Directories and files compare equal (same length, same name) */ |
531 | 0 | if (len1 == len2) |
532 | 0 | return 0; |
533 | 0 | c1 = name1[len]; |
534 | 0 | if (!c1 && S_ISDIR(mode1)) |
535 | 0 | c1 = '/'; |
536 | 0 | c2 = name2[len]; |
537 | 0 | if (!c2 && S_ISDIR(mode2)) |
538 | 0 | c2 = '/'; |
539 | 0 | if (c1 == '/' && !c2) |
540 | 0 | return 0; |
541 | 0 | if (c2 == '/' && !c1) |
542 | 0 | return 0; |
543 | 0 | return c1 - c2; |
544 | 0 | } |
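
A worked example of the ordering the comparators above produce; this standalone program (not Git code) prints the character values behind the '\0' < '.' < '/' rule described in the comment.

    #include <stdio.h>

    int main(void)
    {
        printf("'\\0'=%d '.'=%d '/'=%d\n", '\0', '.', '/');  /* 0 46 47 */
        /*
         * Hence, per base_name_compare():
         *   "foo" (file) sorts before "foo.c"  ('\0' < '.')
         *   "foo" (dir)  sorts after  "foo.c"  ('/'  > '.')
         * while df_name_compare() calls "foo" (dir) and "foo" (file) equal.
         */
        return 0;
    }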
545 | | |
546 | | int name_compare(const char *name1, size_t len1, const char *name2, size_t len2) |
547 | 0 | { |
548 | 0 | size_t min_len = (len1 < len2) ? len1 : len2; |
549 | 0 | int cmp = memcmp(name1, name2, min_len); |
550 | 0 | if (cmp) |
551 | 0 | return cmp; |
552 | 0 | if (len1 < len2) |
553 | 0 | return -1; |
554 | 0 | if (len1 > len2) |
555 | 0 | return 1; |
556 | 0 | return 0; |
557 | 0 | } |
558 | | |
559 | | int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2) |
560 | 0 | { |
561 | 0 | int cmp; |
562 | |
563 | 0 | cmp = name_compare(name1, len1, name2, len2); |
564 | 0 | if (cmp) |
565 | 0 | return cmp; |
566 | | |
567 | 0 | if (stage1 < stage2) |
568 | 0 | return -1; |
569 | 0 | if (stage1 > stage2) |
570 | 0 | return 1; |
571 | 0 | return 0; |
572 | 0 | } |
573 | | |
574 | | static int index_name_stage_pos(struct index_state *istate, |
575 | | const char *name, int namelen, |
576 | | int stage, |
577 | | enum index_search_mode search_mode) |
578 | 0 | { |
579 | 0 | int first, last; |
580 | |
581 | 0 | first = 0; |
582 | 0 | last = istate->cache_nr; |
583 | 0 | while (last > first) { |
584 | 0 | int next = first + ((last - first) >> 1); |
585 | 0 | struct cache_entry *ce = istate->cache[next]; |
586 | 0 | int cmp = cache_name_stage_compare(name, namelen, stage, ce->name, ce_namelen(ce), ce_stage(ce)); |
587 | 0 | if (!cmp) |
588 | 0 | return next; |
589 | 0 | if (cmp < 0) { |
590 | 0 | last = next; |
591 | 0 | continue; |
592 | 0 | } |
593 | 0 | first = next+1; |
594 | 0 | } |
595 | | |
596 | 0 | if (search_mode == EXPAND_SPARSE && istate->sparse_index && |
597 | 0 | first > 0) { |
598 | | /* Note: first <= istate->cache_nr */ |
599 | 0 | struct cache_entry *ce = istate->cache[first - 1]; |
600 | | |
601 | | /* |
602 | | * If we are in a sparse-index _and_ the entry before the |
603 | | * insertion position is a sparse-directory entry that is |
604 | | * an ancestor of 'name', then we need to expand the index |
605 | | * and search again. This will only trigger once, because |
606 | | * thereafter the index is fully expanded. |
607 | | */ |
608 | 0 | if (S_ISSPARSEDIR(ce->ce_mode) && |
609 | 0 | ce_namelen(ce) < namelen && |
610 | 0 | !strncmp(name, ce->name, ce_namelen(ce))) { |
611 | 0 | ensure_full_index(istate); |
612 | 0 | return index_name_stage_pos(istate, name, namelen, stage, search_mode); |
613 | 0 | } |
614 | 0 | } |
615 | | |
616 | 0 | return -first-1; |
617 | 0 | } |
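
Note the return convention used above: a hit returns the entry's position, a miss returns -first-1, so a miss that would insert at position 0 is still distinguishable from a hit at 0. A tiny standalone check of that encoding (helper names are illustrative only):

    #include <assert.h>

    static int encode_miss(int insert_pos) { return -insert_pos - 1; }
    static int decode_miss(int ret)        { return -ret - 1; }

    int main(void)
    {
        assert(encode_miss(0) == -1);  /* "not found, insert at the front" */
        assert(decode_miss(encode_miss(42)) == 42);
        return 0;
    }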
618 | | |
619 | | int index_name_pos(struct index_state *istate, const char *name, int namelen) |
620 | 0 | { |
621 | 0 | return index_name_stage_pos(istate, name, namelen, 0, EXPAND_SPARSE); |
622 | 0 | } |
623 | | |
624 | | int index_name_pos_sparse(struct index_state *istate, const char *name, int namelen) |
625 | 0 | { |
626 | 0 | return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE); |
627 | 0 | } |
628 | | |
629 | | int index_entry_exists(struct index_state *istate, const char *name, int namelen) |
630 | 0 | { |
631 | 0 | return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE) >= 0; |
632 | 0 | } |
633 | | |
634 | | int remove_index_entry_at(struct index_state *istate, int pos) |
635 | 0 | { |
636 | 0 | struct cache_entry *ce = istate->cache[pos]; |
637 | |
638 | 0 | record_resolve_undo(istate, ce); |
639 | 0 | remove_name_hash(istate, ce); |
640 | 0 | save_or_free_index_entry(istate, ce); |
641 | 0 | istate->cache_changed |= CE_ENTRY_REMOVED; |
642 | 0 | istate->cache_nr--; |
643 | 0 | if (pos >= istate->cache_nr) |
644 | 0 | return 0; |
645 | 0 | MOVE_ARRAY(istate->cache + pos, istate->cache + pos + 1, |
646 | 0 | istate->cache_nr - pos); |
647 | 0 | return 1; |
648 | 0 | } |
649 | | |
650 | | /* |
651 | | * Remove all cache entries marked for removal, that is, where |
652 | | * CE_REMOVE is set in ce_flags. This is much more effective than |
653 | | * calling remove_index_entry_at() for each entry to be removed. |
654 | | */ |
655 | | void remove_marked_cache_entries(struct index_state *istate, int invalidate) |
656 | 0 | { |
657 | 0 | struct cache_entry **ce_array = istate->cache; |
658 | 0 | unsigned int i, j; |
659 | |
660 | 0 | for (i = j = 0; i < istate->cache_nr; i++) { |
661 | 0 | if (ce_array[i]->ce_flags & CE_REMOVE) { |
662 | 0 | if (invalidate) { |
663 | 0 | cache_tree_invalidate_path(istate, |
664 | 0 | ce_array[i]->name); |
665 | 0 | untracked_cache_remove_from_index(istate, |
666 | 0 | ce_array[i]->name); |
667 | 0 | } |
668 | 0 | remove_name_hash(istate, ce_array[i]); |
669 | 0 | save_or_free_index_entry(istate, ce_array[i]); |
670 | 0 | } |
671 | 0 | else |
672 | 0 | ce_array[j++] = ce_array[i]; |
673 | 0 | } |
674 | 0 | if (j == istate->cache_nr) |
675 | 0 | return; |
676 | 0 | istate->cache_changed |= CE_ENTRY_REMOVED; |
677 | 0 | istate->cache_nr = j; |
678 | 0 | } |
679 | | |
680 | | int remove_file_from_index(struct index_state *istate, const char *path) |
681 | 0 | { |
682 | 0 | int pos = index_name_pos(istate, path, strlen(path)); |
683 | 0 | if (pos < 0) |
684 | 0 | pos = -pos-1; |
685 | 0 | cache_tree_invalidate_path(istate, path); |
686 | 0 | untracked_cache_remove_from_index(istate, path); |
687 | 0 | while (pos < istate->cache_nr && !strcmp(istate->cache[pos]->name, path)) |
688 | 0 | remove_index_entry_at(istate, pos); |
689 | 0 | return 0; |
690 | 0 | } |
691 | | |
692 | | static int compare_name(struct cache_entry *ce, const char *path, int namelen) |
693 | 0 | { |
694 | 0 | return namelen != ce_namelen(ce) || memcmp(path, ce->name, namelen); |
695 | 0 | } |
696 | | |
697 | | static int index_name_pos_also_unmerged(struct index_state *istate, |
698 | | const char *path, int namelen) |
699 | 0 | { |
700 | 0 | int pos = index_name_pos(istate, path, namelen); |
701 | 0 | struct cache_entry *ce; |
702 | |
703 | 0 | if (pos >= 0) |
704 | 0 | return pos; |
705 | | |
706 | | /* maybe unmerged? */ |
707 | 0 | pos = -1 - pos; |
708 | 0 | if (pos >= istate->cache_nr || |
709 | 0 | compare_name((ce = istate->cache[pos]), path, namelen)) |
710 | 0 | return -1; |
711 | | |
712 | | /* order of preference: stage 2, 1, 3 */ |
713 | 0 | if (ce_stage(ce) == 1 && pos + 1 < istate->cache_nr && |
714 | 0 | ce_stage((ce = istate->cache[pos + 1])) == 2 && |
715 | 0 | !compare_name(ce, path, namelen)) |
716 | 0 | pos++; |
717 | 0 | return pos; |
718 | 0 | } |
719 | | |
720 | | static int different_name(struct cache_entry *ce, struct cache_entry *alias) |
721 | 0 | { |
722 | 0 | int len = ce_namelen(ce); |
723 | 0 | return ce_namelen(alias) != len || memcmp(ce->name, alias->name, len); |
724 | 0 | } |
725 | | |
726 | | /* |
727 | | * If we add a filename that aliases in the cache, we will use the |
728 | | * name that we already have - but we don't want to update the same |
729 | | * alias twice, because that implies that there were actually two |
730 | | * different files with aliasing names! |
731 | | * |
732 | | * So we use the CE_ADDED flag to verify that the alias was an old |
733 | | * one before we accept it as a replacement. |
734 | | */ |
735 | | static struct cache_entry *create_alias_ce(struct index_state *istate, |
736 | | struct cache_entry *ce, |
737 | | struct cache_entry *alias) |
738 | 0 | { |
739 | 0 | int len; |
740 | 0 | struct cache_entry *new_entry; |
741 | |
742 | 0 | if (alias->ce_flags & CE_ADDED) |
743 | 0 | die(_("will not add file alias '%s' ('%s' already exists in index)"), |
744 | 0 | ce->name, alias->name); |
745 | | |
746 | | /* Ok, create the new entry using the name of the existing alias */ |
747 | 0 | len = ce_namelen(alias); |
748 | 0 | new_entry = make_empty_cache_entry(istate, len); |
749 | 0 | memcpy(new_entry->name, alias->name, len); |
750 | 0 | copy_cache_entry(new_entry, ce); |
751 | 0 | save_or_free_index_entry(istate, ce); |
752 | 0 | return new_entry; |
753 | 0 | } |
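
"Aliasing" here means names that a case-insensitive filesystem treats as the same file; a short standalone illustration using POSIX strcasecmp() (not Git's own comparison helpers):

    #include <assert.h>
    #include <strings.h>

    int main(void)
    {
        /* on a case-insensitive filesystem these resolve to one file */
        assert(strcasecmp("README", "readme") == 0);
        return 0;
    }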
754 | | |
755 | | void set_object_name_for_intent_to_add_entry(struct cache_entry *ce) |
756 | 0 | { |
757 | 0 | struct object_id oid; |
758 | 0 | if (write_object_file("", 0, OBJ_BLOB, &oid)) |
759 | 0 | die(_("cannot create an empty blob in the object database")); |
760 | 0 | oidcpy(&ce->oid, &oid); |
761 | 0 | } |
762 | | |
763 | | int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags) |
764 | 0 | { |
765 | 0 | int namelen, was_same; |
766 | 0 | mode_t st_mode = st->st_mode; |
767 | 0 | struct cache_entry *ce, *alias = NULL; |
768 | 0 | unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY; |
769 | 0 | int verbose = flags & (ADD_CACHE_VERBOSE | ADD_CACHE_PRETEND); |
770 | 0 | int pretend = flags & ADD_CACHE_PRETEND; |
771 | 0 | int intent_only = flags & ADD_CACHE_INTENT; |
772 | 0 | int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE| |
773 | 0 | (intent_only ? ADD_CACHE_NEW_ONLY : 0)); |
774 | 0 | unsigned hash_flags = pretend ? 0 : HASH_WRITE_OBJECT; |
775 | 0 | struct object_id oid; |
776 | |
777 | 0 | if (flags & ADD_CACHE_RENORMALIZE) |
778 | 0 | hash_flags |= HASH_RENORMALIZE; |
779 | |
780 | 0 | if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode)) |
781 | 0 | return error(_("%s: can only add regular files, symbolic links or git-directories"), path); |
782 | | |
783 | 0 | namelen = strlen(path); |
784 | 0 | if (S_ISDIR(st_mode)) { |
785 | 0 | if (resolve_gitlink_ref(path, "HEAD", &oid) < 0) |
786 | 0 | return error(_("'%s' does not have a commit checked out"), path); |
787 | 0 | while (namelen && path[namelen-1] == '/') |
788 | 0 | namelen--; |
789 | 0 | } |
790 | 0 | ce = make_empty_cache_entry(istate, namelen); |
791 | 0 | memcpy(ce->name, path, namelen); |
792 | 0 | ce->ce_namelen = namelen; |
793 | 0 | if (!intent_only) |
794 | 0 | fill_stat_cache_info(istate, ce, st); |
795 | 0 | else |
796 | 0 | ce->ce_flags |= CE_INTENT_TO_ADD; |
797 | | |
798 | |
799 | 0 | if (trust_executable_bit && has_symlinks) { |
800 | 0 | ce->ce_mode = create_ce_mode(st_mode); |
801 | 0 | } else { |
802 | | /* If there is an existing entry, pick the mode bits and type |
803 | | * from it, otherwise assume unexecutable regular file. |
804 | | */ |
805 | 0 | struct cache_entry *ent; |
806 | 0 | int pos = index_name_pos_also_unmerged(istate, path, namelen); |
807 | |
808 | 0 | ent = (0 <= pos) ? istate->cache[pos] : NULL; |
809 | 0 | ce->ce_mode = ce_mode_from_stat(ent, st_mode); |
810 | 0 | } |
811 | | |
812 | | /* When core.ignorecase=true, determine if a directory of the same name but differing |
813 | | * case already exists within the Git repository. If it does, ensure the directory |
814 | | * case of the file being added to the repository matches (is folded into) the existing |
815 | | * entry's directory case. |
816 | | */ |
817 | 0 | if (ignore_case) { |
818 | 0 | adjust_dirname_case(istate, ce->name); |
819 | 0 | } |
820 | 0 | if (!(flags & ADD_CACHE_RENORMALIZE)) { |
821 | 0 | alias = index_file_exists(istate, ce->name, |
822 | 0 | ce_namelen(ce), ignore_case); |
823 | 0 | if (alias && |
824 | 0 | !ce_stage(alias) && |
825 | 0 | !ie_match_stat(istate, alias, st, ce_option)) { |
826 | | /* Nothing changed, really */ |
827 | 0 | if (!S_ISGITLINK(alias->ce_mode)) |
828 | 0 | ce_mark_uptodate(alias); |
829 | 0 | alias->ce_flags |= CE_ADDED; |
830 | |
831 | 0 | discard_cache_entry(ce); |
832 | 0 | return 0; |
833 | 0 | } |
834 | 0 | } |
835 | 0 | if (!intent_only) { |
836 | 0 | if (index_path(istate, &ce->oid, path, st, hash_flags)) { |
837 | 0 | discard_cache_entry(ce); |
838 | 0 | return error(_("unable to index file '%s'"), path); |
839 | 0 | } |
840 | 0 | } else |
841 | 0 | set_object_name_for_intent_to_add_entry(ce); |
842 | | |
843 | 0 | if (ignore_case && alias && different_name(ce, alias)) |
844 | 0 | ce = create_alias_ce(istate, ce, alias); |
845 | 0 | ce->ce_flags |= CE_ADDED; |
846 | | |
847 | | /* It was suspected to be racily clean, but it turns out to be Ok */ |
848 | 0 | was_same = (alias && |
849 | 0 | !ce_stage(alias) && |
850 | 0 | oideq(&alias->oid, &ce->oid) && |
851 | 0 | ce->ce_mode == alias->ce_mode); |
852 | |
853 | 0 | if (pretend) |
854 | 0 | discard_cache_entry(ce); |
855 | 0 | else if (add_index_entry(istate, ce, add_option)) { |
856 | 0 | discard_cache_entry(ce); |
857 | 0 | return error(_("unable to add '%s' to index"), path); |
858 | 0 | } |
859 | 0 | if (verbose && !was_same) |
860 | 0 | printf("add '%s'\n", path); |
861 | 0 | return 0; |
862 | 0 | } |
863 | | |
864 | | int add_file_to_index(struct index_state *istate, const char *path, int flags) |
865 | 0 | { |
866 | 0 | struct stat st; |
867 | 0 | if (lstat(path, &st)) |
868 | 0 | die_errno(_("unable to stat '%s'"), path); |
869 | 0 | return add_to_index(istate, path, &st, flags); |
870 | 0 | } |
871 | | |
872 | | struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len) |
873 | 0 | { |
874 | 0 | return mem_pool__ce_calloc(find_mem_pool(istate), len); |
875 | 0 | } |
876 | | |
877 | | struct cache_entry *make_empty_transient_cache_entry(size_t len, |
878 | | struct mem_pool *ce_mem_pool) |
879 | 0 | { |
880 | 0 | if (ce_mem_pool) |
881 | 0 | return mem_pool__ce_calloc(ce_mem_pool, len); |
882 | 0 | return xcalloc(1, cache_entry_size(len)); |
883 | 0 | } |
884 | | |
885 | | enum verify_path_result { |
886 | | PATH_OK, |
887 | | PATH_INVALID, |
888 | | PATH_DIR_WITH_SEP, |
889 | | }; |
890 | | |
891 | | static enum verify_path_result verify_path_internal(const char *, unsigned); |
892 | | |
893 | | int verify_path(const char *path, unsigned mode) |
894 | 0 | { |
895 | 0 | return verify_path_internal(path, mode) == PATH_OK; |
896 | 0 | } |
897 | | |
898 | | struct cache_entry *make_cache_entry(struct index_state *istate, |
899 | | unsigned int mode, |
900 | | const struct object_id *oid, |
901 | | const char *path, |
902 | | int stage, |
903 | | unsigned int refresh_options) |
904 | 0 | { |
905 | 0 | struct cache_entry *ce, *ret; |
906 | 0 | int len; |
907 | |
908 | 0 | if (verify_path_internal(path, mode) == PATH_INVALID) { |
909 | 0 | error(_("invalid path '%s'"), path); |
910 | 0 | return NULL; |
911 | 0 | } |
912 | | |
913 | 0 | len = strlen(path); |
914 | 0 | ce = make_empty_cache_entry(istate, len); |
915 | |
916 | 0 | oidcpy(&ce->oid, oid); |
917 | 0 | memcpy(ce->name, path, len); |
918 | 0 | ce->ce_flags = create_ce_flags(stage); |
919 | 0 | ce->ce_namelen = len; |
920 | 0 | ce->ce_mode = create_ce_mode(mode); |
921 | |
922 | 0 | ret = refresh_cache_entry(istate, ce, refresh_options); |
923 | 0 | if (ret != ce) |
924 | 0 | discard_cache_entry(ce); |
925 | 0 | return ret; |
926 | 0 | } |
927 | | |
928 | | struct cache_entry *make_transient_cache_entry(unsigned int mode, |
929 | | const struct object_id *oid, |
930 | | const char *path, |
931 | | int stage, |
932 | | struct mem_pool *ce_mem_pool) |
933 | 0 | { |
934 | 0 | struct cache_entry *ce; |
935 | 0 | int len; |
936 | |
937 | 0 | if (!verify_path(path, mode)) { |
938 | 0 | error(_("invalid path '%s'"), path); |
939 | 0 | return NULL; |
940 | 0 | } |
941 | | |
942 | 0 | len = strlen(path); |
943 | 0 | ce = make_empty_transient_cache_entry(len, ce_mem_pool); |
944 | |
945 | 0 | oidcpy(&ce->oid, oid); |
946 | 0 | memcpy(ce->name, path, len); |
947 | 0 | ce->ce_flags = create_ce_flags(stage); |
948 | 0 | ce->ce_namelen = len; |
949 | 0 | ce->ce_mode = create_ce_mode(mode); |
950 | |
951 | 0 | return ce; |
952 | 0 | } |
953 | | |
954 | | /* |
955 | | * Chmod an index entry with either +x or -x. |
956 | | * |
957 | | * Returns -1 if the chmod for the particular cache entry failed (if it's |
958 | | * not a regular file), -2 if an invalid flip argument is passed in, 0 |
959 | | * otherwise. |
960 | | */ |
961 | | int chmod_index_entry(struct index_state *istate, struct cache_entry *ce, |
962 | | char flip) |
963 | 0 | { |
964 | 0 | if (!S_ISREG(ce->ce_mode)) |
965 | 0 | return -1; |
966 | 0 | switch (flip) { |
967 | 0 | case '+': |
968 | 0 | ce->ce_mode |= 0111; |
969 | 0 | break; |
970 | 0 | case '-': |
971 | 0 | ce->ce_mode &= ~0111; |
972 | 0 | break; |
973 | 0 | default: |
974 | 0 | return -2; |
975 | 0 | } |
976 | 0 | cache_tree_invalidate_path(istate, ce->name); |
977 | 0 | ce->ce_flags |= CE_UPDATE_IN_BASE; |
978 | 0 | mark_fsmonitor_invalid(istate, ce); |
979 | 0 | istate->cache_changed |= CE_ENTRY_CHANGED; |
980 | |
981 | 0 | return 0; |
982 | 0 | } |
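
The flip itself is plain mode-bit arithmetic; a standalone check (not Git code) of the two branches above:

    #include <assert.h>

    int main(void)
    {
        unsigned mode = 0100644;  /* regular file, rw-r--r-- */
        mode |= 0111;             /* '+': rw-r--r-- -> rwxr-xr-x */
        assert(mode == 0100755);
        mode &= ~0111u;           /* '-': and back again */
        assert(mode == 0100644);
        return 0;
    }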
983 | | |
984 | | int ce_same_name(const struct cache_entry *a, const struct cache_entry *b) |
985 | 0 | { |
986 | 0 | int len = ce_namelen(a); |
987 | 0 | return ce_namelen(b) == len && !memcmp(a->name, b->name, len); |
988 | 0 | } |
989 | | |
990 | | /* |
991 | | * We fundamentally don't like some paths: we don't want |
992 | | * dot or dot-dot anywhere, and for obvious reasons don't |
993 | | * want to recurse into ".git" either. |
994 | | * |
995 | | * Also, we don't want double slashes or slashes at the |
996 | | * end that can make pathnames ambiguous. |
997 | | */ |
998 | | static int verify_dotfile(const char *rest, unsigned mode) |
999 | 0 | { |
1000 | | /* |
1001 | | * The first character was '.', but that |
1002 | | * has already been discarded; we now test |
1003 | | * the rest. |
1004 | | */ |
1005 | | |
1006 | | /* "." is not allowed */ |
1007 | 0 | if (*rest == '\0' || is_dir_sep(*rest)) |
1008 | 0 | return 0; |
1009 | | |
1010 | 0 | switch (*rest) { |
1011 | | /* |
1012 | | * ".git" followed by NUL or slash is bad. Note that we match |
1013 | | * case-insensitively here, even if ignore_case is not set. |
1014 | | * This outlaws ".GIT" everywhere out of an abundance of caution, |
1015 | | * since there's really no good reason to allow it. |
1016 | | * |
1017 | | * Once we've seen ".git", we can also find ".gitmodules", etc (also |
1018 | | * case-insensitively). |
1019 | | */ |
1020 | 0 | case 'g': |
1021 | 0 | case 'G': |
1022 | 0 | if (rest[1] != 'i' && rest[1] != 'I') |
1023 | 0 | break; |
1024 | 0 | if (rest[2] != 't' && rest[2] != 'T') |
1025 | 0 | break; |
1026 | 0 | if (rest[3] == '\0' || is_dir_sep(rest[3])) |
1027 | 0 | return 0; |
1028 | 0 | if (S_ISLNK(mode)) { |
1029 | 0 | rest += 3; |
1030 | 0 | if (skip_iprefix(rest, "modules", &rest) && |
1031 | 0 | (*rest == '\0' || is_dir_sep(*rest))) |
1032 | 0 | return 0; |
1033 | 0 | } |
1034 | 0 | break; |
1035 | 0 | case '.': |
1036 | 0 | if (rest[1] == '\0' || is_dir_sep(rest[1])) |
1037 | 0 | return 0; |
1038 | 0 | } |
1039 | 0 | return 1; |
1040 | 0 | } |
1041 | | |
1042 | | static enum verify_path_result verify_path_internal(const char *path, |
1043 | | unsigned mode) |
1044 | 0 | { |
1045 | 0 | char c = 0; |
1046 | |
1047 | 0 | if (has_dos_drive_prefix(path)) |
1048 | 0 | return PATH_INVALID; |
1049 | | |
1050 | 0 | if (!is_valid_path(path)) |
1051 | 0 | return PATH_INVALID; |
1052 | | |
1053 | 0 | goto inside; |
1054 | 0 | for (;;) { |
1055 | 0 | if (!c) |
1056 | 0 | return PATH_OK; |
1057 | 0 | if (is_dir_sep(c)) { |
1058 | 0 | inside: |
1059 | 0 | if (protect_hfs) { |
1060 | |
1061 | 0 | if (is_hfs_dotgit(path)) |
1062 | 0 | return PATH_INVALID; |
1063 | 0 | if (S_ISLNK(mode)) { |
1064 | 0 | if (is_hfs_dotgitmodules(path)) |
1065 | 0 | return PATH_INVALID; |
1066 | 0 | } |
1067 | 0 | } |
1068 | 0 | if (protect_ntfs) { |
1069 | | #if defined GIT_WINDOWS_NATIVE || defined __CYGWIN__ |
1070 | | if (c == '\\') |
1071 | | return PATH_INVALID; |
1072 | | #endif |
1073 | 0 | if (is_ntfs_dotgit(path)) |
1074 | 0 | return PATH_INVALID; |
1075 | 0 | if (S_ISLNK(mode)) { |
1076 | 0 | if (is_ntfs_dotgitmodules(path)) |
1077 | 0 | return PATH_INVALID; |
1078 | 0 | } |
1079 | 0 | } |
1080 | | |
1081 | 0 | c = *path++; |
1082 | 0 | if ((c == '.' && !verify_dotfile(path, mode)) || |
1083 | 0 | is_dir_sep(c)) |
1084 | 0 | return PATH_INVALID; |
1085 | | /* |
1086 | | * allow terminating directory separators for |
1087 | | * sparse directory entries. |
1088 | | */ |
1089 | 0 | if (c == '\0') |
1090 | 0 | return S_ISDIR(mode) ? PATH_DIR_WITH_SEP : |
1091 | 0 | PATH_INVALID; |
1092 | 0 | } else if (c == '\\' && protect_ntfs) { |
1093 | 0 | if (is_ntfs_dotgit(path)) |
1094 | 0 | return PATH_INVALID; |
1095 | 0 | if (S_ISLNK(mode)) { |
1096 | 0 | if (is_ntfs_dotgitmodules(path)) |
1097 | 0 | return PATH_INVALID; |
1098 | 0 | } |
1099 | 0 | } |
1100 | | |
1101 | 0 | c = *path++; |
1102 | 0 | } |
1103 | 0 | } |
1104 | | |
1105 | | /* |
1106 | | * Do we have another file whose leading path components are a |
1107 | | * proper superset of the name we're trying to add? |
1108 | | */ |
1109 | | static int has_file_name(struct index_state *istate, |
1110 | | const struct cache_entry *ce, int pos, int ok_to_replace) |
1111 | 0 | { |
1112 | 0 | int retval = 0; |
1113 | 0 | int len = ce_namelen(ce); |
1114 | 0 | int stage = ce_stage(ce); |
1115 | 0 | const char *name = ce->name; |
1116 | |
1117 | 0 | while (pos < istate->cache_nr) { |
1118 | 0 | struct cache_entry *p = istate->cache[pos++]; |
1119 | |
1120 | 0 | if (len >= ce_namelen(p)) |
1121 | 0 | break; |
1122 | 0 | if (memcmp(name, p->name, len)) |
1123 | 0 | break; |
1124 | 0 | if (ce_stage(p) != stage) |
1125 | 0 | continue; |
1126 | 0 | if (p->name[len] != '/') |
1127 | 0 | continue; |
1128 | 0 | if (p->ce_flags & CE_REMOVE) |
1129 | 0 | continue; |
1130 | 0 | retval = -1; |
1131 | 0 | if (!ok_to_replace) |
1132 | 0 | break; |
1133 | 0 | remove_index_entry_at(istate, --pos); |
1134 | 0 | } |
1135 | 0 | return retval; |
1136 | 0 | } |
1137 | | |
1138 | | |
1139 | | /* |
1140 | | * Like strcmp(), but also return the offset of the first change. |
1141 | | * If strings are equal, return the length. |
1142 | | */ |
1143 | | int strcmp_offset(const char *s1, const char *s2, size_t *first_change) |
1144 | 0 | { |
1145 | 0 | size_t k; |
1146 | |
|
1147 | 0 | if (!first_change) |
1148 | 0 | return strcmp(s1, s2); |
1149 | | |
1150 | 0 | for (k = 0; s1[k] == s2[k]; k++) |
1151 | 0 | if (s1[k] == '\0') |
1152 | 0 | break; |
1153 | |
1154 | 0 | *first_change = k; |
1155 | 0 | return (unsigned char)s1[k] - (unsigned char)s2[k]; |
1156 | 0 | } |
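
Usage sketch for strcmp_offset(), self-contained with a local copy of the helper above:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    /* local copy of strcmp_offset() above, for a standalone check */
    static int strcmp_offset(const char *s1, const char *s2, size_t *first_change)
    {
        size_t k;

        if (!first_change)
            return strcmp(s1, s2);
        for (k = 0; s1[k] == s2[k]; k++)
            if (s1[k] == '\0')
                break;
        *first_change = k;
        return (unsigned char)s1[k] - (unsigned char)s2[k];
    }

    int main(void)
    {
        size_t off;

        assert(strcmp_offset("dir/file_A", "dir/file_B", &off) < 0 && off == 9);
        assert(strcmp_offset("same", "same", &off) == 0 && off == 4);
        return 0;
    }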
1157 | | |
1158 | | /* |
1159 | | * Do we have another file with a pathname that is a proper |
1160 | | * subset of the name we're trying to add? |
1161 | | * |
1162 | | * That is, is there another file in the index with a path |
1163 | | * that matches a sub-directory in the given entry? |
1164 | | */ |
1165 | | static int has_dir_name(struct index_state *istate, |
1166 | | const struct cache_entry *ce, int pos, int ok_to_replace) |
1167 | 0 | { |
1168 | 0 | int retval = 0; |
1169 | 0 | int stage = ce_stage(ce); |
1170 | 0 | const char *name = ce->name; |
1171 | 0 | const char *slash = name + ce_namelen(ce); |
1172 | 0 | size_t len_eq_last; |
1173 | 0 | int cmp_last = 0; |
1174 | | |
1175 | | /* |
1176 | | * We are frequently called during an iteration on a sorted |
1177 | | * list of pathnames and while building a new index. Therefore, |
1178 | | * there is a high probability that this entry will eventually |
1179 | | * be appended to the index, rather than inserted in the middle. |
1180 | | * If we can confirm that, we can avoid binary searches on the |
1181 | | * components of the pathname. |
1182 | | * |
1183 | | * Compare the entry's full path with the last path in the index. |
1184 | | */ |
1185 | 0 | if (istate->cache_nr > 0) { |
1186 | 0 | cmp_last = strcmp_offset(name, |
1187 | 0 | istate->cache[istate->cache_nr - 1]->name, |
1188 | 0 | &len_eq_last); |
1189 | 0 | if (cmp_last > 0) { |
1190 | 0 | if (len_eq_last == 0) { |
1191 | | /* |
1192 | | * The entry sorts AFTER the last one in the |
1193 | | * index and their paths have no common prefix, |
1194 | | * so there cannot be a F/D conflict. |
1195 | | */ |
1196 | 0 | return retval; |
1197 | 0 | } else { |
1198 | | /* |
1199 | | * The entry sorts AFTER the last one in the |
1200 | | * index, but has a common prefix. Fall through |
1201 | | * to the loop below to dissect the entry's path |
1202 | | * and see where the difference is. |
1203 | | */ |
1204 | 0 | } |
1205 | 0 | } else if (cmp_last == 0) { |
1206 | | /* |
1207 | | * The entry exactly matches the last one in the |
1208 | | * index, but because of multiple stage and CE_REMOVE |
1209 | | * items, we fall through and let the regular search |
1210 | | * code handle it. |
1211 | | */ |
1212 | 0 | } |
1213 | 0 | } |
1214 | | |
1215 | 0 | for (;;) { |
1216 | 0 | size_t len; |
1217 | |
1218 | 0 | for (;;) { |
1219 | 0 | if (*--slash == '/') |
1220 | 0 | break; |
1221 | 0 | if (slash <= ce->name) |
1222 | 0 | return retval; |
1223 | 0 | } |
1224 | 0 | len = slash - name; |
1225 | |
1226 | 0 | if (cmp_last > 0) { |
1227 | | /* |
1228 | | * (len + 1) is a directory boundary (including |
1229 | | * the trailing slash). And since the loop is |
1230 | | * decrementing "slash", the first iteration is |
1231 | | * the longest directory prefix; subsequent |
1232 | | * iterations consider parent directories. |
1233 | | */ |
1234 | |
1235 | 0 | if (len + 1 <= len_eq_last) { |
1236 | | /* |
1237 | | * The directory prefix (including the trailing |
1238 | | * slash) also appears as a prefix in the last |
1239 | | * entry, so the remainder cannot collide (because |
1240 | | * strcmp said the whole path was greater). |
1241 | | * |
1242 | | * EQ: last: xxx/A |
1243 | | * this: xxx/B |
1244 | | * |
1245 | | * LT: last: xxx/file_A |
1246 | | * this: xxx/file_B |
1247 | | */ |
1248 | 0 | return retval; |
1249 | 0 | } |
1250 | | |
1251 | 0 | if (len > len_eq_last) { |
1252 | | /* |
1253 | | * This part of the directory prefix (excluding |
1254 | | * the trailing slash) is longer than the known |
1255 | | * equal portions, so this sub-directory cannot |
1256 | | * collide with a file. |
1257 | | * |
1258 | | * GT: last: xxxA |
1259 | | * this: xxxB/file |
1260 | | */ |
1261 | 0 | return retval; |
1262 | 0 | } |
1263 | | |
1264 | | /* |
1265 | | * This is a possible collision. Fall through and |
1266 | | * let the regular search code handle it. |
1267 | | * |
1268 | | * last: xxx |
1269 | | * this: xxx/file |
1270 | | */ |
1271 | 0 | } |
1272 | | |
1273 | 0 | pos = index_name_stage_pos(istate, name, len, stage, EXPAND_SPARSE); |
1274 | 0 | if (pos >= 0) { |
1275 | | /* |
1276 | | * Found one, but not so fast. This could |
1277 | | * be a marker that says "I was here, but |
1278 | | * I am being removed". Such an entry is |
1279 | | * not a part of the resulting tree, and |
1280 | | * it is Ok to have a directory at the same |
1281 | | * path. |
1282 | | */ |
1283 | 0 | if (!(istate->cache[pos]->ce_flags & CE_REMOVE)) { |
1284 | 0 | retval = -1; |
1285 | 0 | if (!ok_to_replace) |
1286 | 0 | break; |
1287 | 0 | remove_index_entry_at(istate, pos); |
1288 | 0 | continue; |
1289 | 0 | } |
1290 | 0 | } |
1291 | 0 | else |
1292 | 0 | pos = -pos-1; |
1293 | | |
1294 | | /* |
1295 | | * Trivial optimization: if we find an entry that |
1296 | | * already matches the sub-directory, then we know |
1297 | | * we're ok, and we can exit. |
1298 | | */ |
1299 | 0 | while (pos < istate->cache_nr) { |
1300 | 0 | struct cache_entry *p = istate->cache[pos]; |
1301 | 0 | if ((ce_namelen(p) <= len) || |
1302 | 0 | (p->name[len] != '/') || |
1303 | 0 | memcmp(p->name, name, len)) |
1304 | 0 | break; /* not our subdirectory */ |
1305 | 0 | if (ce_stage(p) == stage && !(p->ce_flags & CE_REMOVE)) |
1306 | | /* |
1307 | | * p is at the same stage as our entry, and |
1308 | | * is a subdirectory of what we are looking |
1309 | | * at, so we cannot have conflicts at our |
1310 | | * level or anything shorter. |
1311 | | */ |
1312 | 0 | return retval; |
1313 | 0 | pos++; |
1314 | 0 | } |
1315 | 0 | } |
1316 | 0 | return retval; |
1317 | 0 | } |
1318 | | |
1319 | | /* We may be in a situation where we already have path/file and path |
1320 | | * is being added, or we already have path and path/file is being |
1321 | | * added. Either one would result in a nonsense tree that has path |
1322 | | * twice when git-write-tree tries to write it out. Prevent it. |
1323 | | * |
1324 | | * If ok-to-replace is specified, we remove the conflicting entries |
1325 | | * from the cache so the caller should recompute the insert position. |
1326 | | * When this happens, we return non-zero. |
1327 | | */ |
1328 | | static int check_file_directory_conflict(struct index_state *istate, |
1329 | | const struct cache_entry *ce, |
1330 | | int pos, int ok_to_replace) |
1331 | 0 | { |
1332 | 0 | int retval; |
1333 | | |
1334 | | /* |
1335 | | * When ce is an "I am going away" entry, we allow it to be added |
1336 | | */ |
1337 | 0 | if (ce->ce_flags & CE_REMOVE) |
1338 | 0 | return 0; |
1339 | | |
1340 | | /* |
1341 | | * We check if the path is a sub-path of a subsequent pathname |
1342 | | * first, since removing those will not change the position |
1343 | | * in the array. |
1344 | | */ |
1345 | 0 | retval = has_file_name(istate, ce, pos, ok_to_replace); |
1346 | | |
1347 | | /* |
1348 | | * Then check if the path might have a clashing sub-directory |
1349 | | * before it. |
1350 | | */ |
1351 | 0 | return retval + has_dir_name(istate, ce, pos, ok_to_replace); |
1352 | 0 | } |
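
The file-over-directory half of this test boils down to "is one entry's full name a proper prefix of the other, followed by '/'"; a minimal standalone illustration (the helper name is an assumption, not this file's API):

    #include <assert.h>
    #include <string.h>

    static int is_df_conflict(const char *file, const char *nested)
    {
        size_t len = strlen(file);
        return strlen(nested) > len &&
               !memcmp(file, nested, len) && nested[len] == '/';
    }

    int main(void)
    {
        assert(is_df_conflict("path", "path/file"));
        assert(!is_df_conflict("path", "pathology/file"));
        return 0;
    }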
1353 | | |
1354 | | static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option) |
1355 | 0 | { |
1356 | 0 | int pos; |
1357 | 0 | int ok_to_add = option & ADD_CACHE_OK_TO_ADD; |
1358 | 0 | int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE; |
1359 | 0 | int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK; |
1360 | 0 | int new_only = option & ADD_CACHE_NEW_ONLY; |
1361 | | |
1362 | | /* |
1363 | | * If this entry's path sorts after the last entry in the index, |
1364 | | * we can avoid searching for it. |
1365 | | */ |
1366 | 0 | if (istate->cache_nr > 0 && |
1367 | 0 | strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0) |
1368 | 0 | pos = index_pos_to_insert_pos(istate->cache_nr); |
1369 | 0 | else |
1370 | 0 | pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE); |
1371 | | |
1372 | | /* |
1373 | | * Cache tree path should be invalidated only after index_name_stage_pos, |
1374 | | * in case it expands a sparse index. |
1375 | | */ |
1376 | 0 | if (!(option & ADD_CACHE_KEEP_CACHE_TREE)) |
1377 | 0 | cache_tree_invalidate_path(istate, ce->name); |
1378 | | |
1379 | | /* existing match? Just replace it. */ |
1380 | 0 | if (pos >= 0) { |
1381 | 0 | if (!new_only) |
1382 | 0 | replace_index_entry(istate, pos, ce); |
1383 | 0 | return 0; |
1384 | 0 | } |
1385 | 0 | pos = -pos-1; |
1386 | |
1387 | 0 | if (!(option & ADD_CACHE_KEEP_CACHE_TREE)) |
1388 | 0 | untracked_cache_add_to_index(istate, ce->name); |
1389 | | |
1390 | | /* |
1391 | | * Inserting a merged entry ("stage 0") into the index |
1392 | | * will always replace all non-merged entries. |
1393 | | */ |
1394 | 0 | if (pos < istate->cache_nr && ce_stage(ce) == 0) { |
1395 | 0 | while (ce_same_name(istate->cache[pos], ce)) { |
1396 | 0 | ok_to_add = 1; |
1397 | 0 | if (!remove_index_entry_at(istate, pos)) |
1398 | 0 | break; |
1399 | 0 | } |
1400 | 0 | } |
1401 | |
1402 | 0 | if (!ok_to_add) |
1403 | 0 | return -1; |
1404 | 0 | if (verify_path_internal(ce->name, ce->ce_mode) == PATH_INVALID) |
1405 | 0 | return error(_("invalid path '%s'"), ce->name); |
1406 | | |
1407 | 0 | if (!skip_df_check && |
1408 | 0 | check_file_directory_conflict(istate, ce, pos, ok_to_replace)) { |
1409 | 0 | if (!ok_to_replace) |
1410 | 0 | return error(_("'%s' appears as both a file and as a directory"), |
1411 | 0 | ce->name); |
1412 | 0 | pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE); |
1413 | 0 | pos = -pos-1; |
1414 | 0 | } |
1415 | 0 | return pos + 1; |
1416 | 0 | } |
1417 | | |
1418 | | int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option) |
1419 | 0 | { |
1420 | 0 | int pos; |
1421 | |
1422 | 0 | if (option & ADD_CACHE_JUST_APPEND) |
1423 | 0 | pos = istate->cache_nr; |
1424 | 0 | else { |
1425 | 0 | int ret; |
1426 | 0 | ret = add_index_entry_with_check(istate, ce, option); |
1427 | 0 | if (ret <= 0) |
1428 | 0 | return ret; |
1429 | 0 | pos = ret - 1; |
1430 | 0 | } |
1431 | | |
1432 | | /* Make sure the array is big enough .. */ |
1433 | 0 | ALLOC_GROW(istate->cache, istate->cache_nr + 1, istate->cache_alloc); |
1434 | | |
1435 | | /* Add it in.. */ |
1436 | 0 | istate->cache_nr++; |
1437 | 0 | if (istate->cache_nr > pos + 1) |
1438 | 0 | MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos, |
1439 | 0 | istate->cache_nr - pos - 1); |
1440 | 0 | set_index_entry(istate, pos, ce); |
1441 | 0 | istate->cache_changed |= CE_ENTRY_ADDED; |
1442 | 0 | return 0; |
1443 | 0 | } |
1444 | | |
1445 | | /* |
1446 | | * "refresh" does not calculate a new sha1 file or bring the |
1447 | | * cache up-to-date for mode/content changes. But what it |
1448 | | * _does_ do is to "re-match" the stat information of a file |
1449 | | * with the cache, so that you can refresh the cache for a |
1450 | | * file that hasn't been changed but where the stat entry is |
1451 | | * out of date. |
1452 | | * |
1453 | | * For example, you'd want to do this after doing a "git-read-tree", |
1454 | | * to link up the stat cache details with the proper files. |
1455 | | */ |
1456 | | static struct cache_entry *refresh_cache_ent(struct index_state *istate, |
1457 | | struct cache_entry *ce, |
1458 | | unsigned int options, int *err, |
1459 | | int *changed_ret, |
1460 | | int *t2_did_lstat, |
1461 | | int *t2_did_scan) |
1462 | 0 | { |
1463 | 0 | struct stat st; |
1464 | 0 | struct cache_entry *updated; |
1465 | 0 | int changed; |
1466 | 0 | int refresh = options & CE_MATCH_REFRESH; |
1467 | 0 | int ignore_valid = options & CE_MATCH_IGNORE_VALID; |
1468 | 0 | int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE; |
1469 | 0 | int ignore_missing = options & CE_MATCH_IGNORE_MISSING; |
1470 | 0 | int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR; |
1471 | |
1472 | 0 | if (!refresh || ce_uptodate(ce)) |
1473 | 0 | return ce; |
1474 | | |
1475 | 0 | if (!ignore_fsmonitor) |
1476 | 0 | refresh_fsmonitor(istate); |
1477 | | /* |
1478 | | * CE_VALID or CE_SKIP_WORKTREE means the user promised us |
1479 | | * that the change to the work tree does not matter and told |
1480 | | * us not to worry. |
1481 | | */ |
1482 | 0 | if (!ignore_skip_worktree && ce_skip_worktree(ce)) { |
1483 | 0 | ce_mark_uptodate(ce); |
1484 | 0 | return ce; |
1485 | 0 | } |
1486 | 0 | if (!ignore_valid && (ce->ce_flags & CE_VALID)) { |
1487 | 0 | ce_mark_uptodate(ce); |
1488 | 0 | return ce; |
1489 | 0 | } |
1490 | 0 | if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID)) { |
1491 | 0 | ce_mark_uptodate(ce); |
1492 | 0 | return ce; |
1493 | 0 | } |
1494 | | |
1495 | 0 | if (has_symlink_leading_path(ce->name, ce_namelen(ce))) { |
1496 | 0 | if (ignore_missing) |
1497 | 0 | return ce; |
1498 | 0 | if (err) |
1499 | 0 | *err = ENOENT; |
1500 | 0 | return NULL; |
1501 | 0 | } |
1502 | | |
1503 | 0 | if (t2_did_lstat) |
1504 | 0 | *t2_did_lstat = 1; |
1505 | 0 | if (lstat(ce->name, &st) < 0) { |
1506 | 0 | if (ignore_missing && errno == ENOENT) |
1507 | 0 | return ce; |
1508 | 0 | if (err) |
1509 | 0 | *err = errno; |
1510 | 0 | return NULL; |
1511 | 0 | } |
1512 | | |
1513 | 0 | changed = ie_match_stat(istate, ce, &st, options); |
1514 | 0 | if (changed_ret) |
1515 | 0 | *changed_ret = changed; |
1516 | 0 | if (!changed) { |
1517 | | /* |
1518 | | * The path is unchanged. If we were told to ignore |
1519 | | * valid bit, then we did the actual stat check and |
1520 | | * found that the entry is unmodified. If the entry |
1521 | | * is not marked VALID, this is the place to mark it |
1522 | | * valid again, under "assume unchanged" mode. |
1523 | | */ |
1524 | 0 | if (ignore_valid && assume_unchanged && |
1525 | 0 | !(ce->ce_flags & CE_VALID)) |
1526 | 0 | ; /* mark this one VALID again */ |
1527 | 0 | else { |
1528 | | /* |
1529 | | * We do not mark the index itself "modified" |
1530 | | * because the CE_UPTODATE flag is in-core only; |
1531 | | * we are not going to write this change out. |
1532 | | */ |
1533 | 0 | if (!S_ISGITLINK(ce->ce_mode)) { |
1534 | 0 | ce_mark_uptodate(ce); |
1535 | 0 | mark_fsmonitor_valid(istate, ce); |
1536 | 0 | } |
1537 | 0 | return ce; |
1538 | 0 | } |
1539 | 0 | } |
1540 | | |
1541 | 0 | if (t2_did_scan) |
1542 | 0 | *t2_did_scan = 1; |
1543 | 0 | if (ie_modified(istate, ce, &st, options)) { |
1544 | 0 | if (err) |
1545 | 0 | *err = EINVAL; |
1546 | 0 | return NULL; |
1547 | 0 | } |
1548 | | |
1549 | 0 | updated = make_empty_cache_entry(istate, ce_namelen(ce)); |
1550 | 0 | copy_cache_entry(updated, ce); |
1551 | 0 | memcpy(updated->name, ce->name, ce->ce_namelen + 1); |
1552 | 0 | fill_stat_cache_info(istate, updated, &st); |
1553 | | /* |
1554 | | * If ignore_valid is not set, we should leave CE_VALID bit |
1555 | | * alone. Otherwise, paths marked with --no-assume-unchanged |
1556 | | * (i.e. things to be edited) will reacquire CE_VALID bit |
1557 | | * automatically, which is not really what we want. |
1558 | | */ |
1559 | 0 | if (!ignore_valid && assume_unchanged && |
1560 | 0 | !(ce->ce_flags & CE_VALID)) |
1561 | 0 | updated->ce_flags &= ~CE_VALID; |
1562 | | |
1563 | | /* istate->cache_changed is updated in the caller */ |
1564 | 0 | return updated; |
1565 | 0 | } |
1566 | | |
1567 | | static void show_file(const char * fmt, const char * name, int in_porcelain, |
1568 | | int * first, const char *header_msg) |
1569 | 0 | { |
1570 | 0 | if (in_porcelain && *first && header_msg) { |
1571 | 0 | printf("%s\n", header_msg); |
1572 | 0 | *first = 0; |
1573 | 0 | } |
1574 | 0 | printf(fmt, name); |
1575 | 0 | } |
1576 | | |
1577 | | int repo_refresh_and_write_index(struct repository *repo, |
1578 | | unsigned int refresh_flags, |
1579 | | unsigned int write_flags, |
1580 | | int gentle, |
1581 | | const struct pathspec *pathspec, |
1582 | | char *seen, const char *header_msg) |
1583 | 0 | { |
1584 | 0 | struct lock_file lock_file = LOCK_INIT; |
1585 | 0 | int fd, ret = 0; |
1586 | |
1587 | 0 | fd = repo_hold_locked_index(repo, &lock_file, 0); |
1588 | 0 | if (!gentle && fd < 0) |
1589 | 0 | return -1; |
1590 | 0 | if (refresh_index(repo->index, refresh_flags, pathspec, seen, header_msg)) |
1591 | 0 | ret = 1; |
1592 | 0 | if (0 <= fd && write_locked_index(repo->index, &lock_file, COMMIT_LOCK | write_flags)) |
1593 | 0 | ret = -1; |
1594 | 0 | return ret; |
1595 | 0 | } |
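| |
| | /*
| | * A sketch of a hypothetical caller (flag choice assumed, not taken
| | * from this file): quietly refresh the index and persist it, being
| | * gentle about a busy lock:
| | *
| | *	if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET,
| | *					 0, 1, NULL, NULL, NULL) < 0)
| | *		die("failed to refresh and write index");
| | */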
1596 | | |
1597 | | |
1598 | | int refresh_index(struct index_state *istate, unsigned int flags, |
1599 | | const struct pathspec *pathspec, |
1600 | | char *seen, const char *header_msg) |
1601 | 0 | { |
1602 | 0 | int i; |
1603 | 0 | int has_errors = 0; |
1604 | 0 | int really = (flags & REFRESH_REALLY) != 0; |
1605 | 0 | int allow_unmerged = (flags & REFRESH_UNMERGED) != 0; |
1606 | 0 | int quiet = (flags & REFRESH_QUIET) != 0; |
1607 | 0 | int not_new = (flags & REFRESH_IGNORE_MISSING) != 0; |
1608 | 0 | int ignore_submodules = (flags & REFRESH_IGNORE_SUBMODULES) != 0; |
1609 | 0 | int ignore_skip_worktree = (flags & REFRESH_IGNORE_SKIP_WORKTREE) != 0; |
1610 | 0 | int first = 1; |
1611 | 0 | int in_porcelain = (flags & REFRESH_IN_PORCELAIN); |
1612 | 0 | unsigned int options = (CE_MATCH_REFRESH | |
1613 | 0 | (really ? CE_MATCH_IGNORE_VALID : 0) | |
1614 | 0 | (not_new ? CE_MATCH_IGNORE_MISSING : 0)); |
1615 | 0 | const char *modified_fmt; |
1616 | 0 | const char *deleted_fmt; |
1617 | 0 | const char *typechange_fmt; |
1618 | 0 | const char *added_fmt; |
1619 | 0 | const char *unmerged_fmt; |
1620 | 0 | struct progress *progress = NULL; |
1621 | 0 | int t2_sum_lstat = 0; |
1622 | 0 | int t2_sum_scan = 0; |
1623 | |
1624 | 0 | if (flags & REFRESH_PROGRESS && isatty(2)) |
1625 | 0 | progress = start_delayed_progress(_("Refresh index"), |
1626 | 0 | istate->cache_nr); |
1627 | |
1628 | 0 | trace_performance_enter(); |
1629 | 0 | modified_fmt = in_porcelain ? "M\t%s\n" : "%s: needs update\n"; |
1630 | 0 | deleted_fmt = in_porcelain ? "D\t%s\n" : "%s: needs update\n"; |
1631 | 0 | typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n"; |
1632 | 0 | added_fmt = in_porcelain ? "A\t%s\n" : "%s: needs update\n"; |
1633 | 0 | unmerged_fmt = in_porcelain ? "U\t%s\n" : "%s: needs merge\n"; |
1634 | | /* |
1635 | | * Use the multi-threaded preload_index() to refresh most of the |
1636 | | * cache entries quickly; then, in the single-threaded loop below,
1637 | | * we only have to handle the special cases that are left.
1638 | | */ |
1639 | 0 | preload_index(istate, pathspec, 0); |
1640 | 0 | trace2_region_enter("index", "refresh", NULL); |
1641 | |
1642 | 0 | for (i = 0; i < istate->cache_nr; i++) { |
1643 | 0 | struct cache_entry *ce, *new_entry; |
1644 | 0 | int cache_errno = 0; |
1645 | 0 | int changed = 0; |
1646 | 0 | int filtered = 0; |
1647 | 0 | int t2_did_lstat = 0; |
1648 | 0 | int t2_did_scan = 0; |
1649 | |
1650 | 0 | ce = istate->cache[i]; |
1651 | 0 | if (ignore_submodules && S_ISGITLINK(ce->ce_mode)) |
1652 | 0 | continue; |
1653 | 0 | if (ignore_skip_worktree && ce_skip_worktree(ce)) |
1654 | 0 | continue; |
1655 | | |
1656 | | /* |
1657 | | * If this entry is a sparse directory, then there isn't |
1658 | | * any stat() information to update. Ignore the entry. |
1659 | | */ |
1660 | 0 | if (S_ISSPARSEDIR(ce->ce_mode)) |
1661 | 0 | continue; |
1662 | | |
1663 | 0 | if (pathspec && !ce_path_match(istate, ce, pathspec, seen)) |
1664 | 0 | filtered = 1; |
1665 | |
1666 | 0 | if (ce_stage(ce)) { |
1667 | 0 | while ((i < istate->cache_nr) && |
1668 | 0 | !strcmp(istate->cache[i]->name, ce->name))
1669 | 0 | i++; |
1670 | 0 | i--; |
1671 | 0 | if (allow_unmerged) |
1672 | 0 | continue; |
1673 | 0 | if (!filtered) |
1674 | 0 | show_file(unmerged_fmt, ce->name, in_porcelain, |
1675 | 0 | &first, header_msg); |
1676 | 0 | has_errors = 1; |
1677 | 0 | continue; |
1678 | 0 | } |
1679 | | |
1680 | 0 | if (filtered) |
1681 | 0 | continue; |
1682 | | |
1683 | 0 | new_entry = refresh_cache_ent(istate, ce, options, |
1684 | 0 | &cache_errno, &changed, |
1685 | 0 | &t2_did_lstat, &t2_did_scan); |
1686 | 0 | t2_sum_lstat += t2_did_lstat; |
1687 | 0 | t2_sum_scan += t2_did_scan; |
1688 | 0 | if (new_entry == ce) |
1689 | 0 | continue; |
1690 | 0 | display_progress(progress, i); |
1691 | 0 | if (!new_entry) { |
1692 | 0 | const char *fmt; |
1693 | |
1694 | 0 | if (really && cache_errno == EINVAL) { |
1695 | | /* If we are doing --really-refresh that |
1696 | | * means the index is not valid anymore. |
1697 | | */ |
1698 | 0 | ce->ce_flags &= ~CE_VALID; |
1699 | 0 | ce->ce_flags |= CE_UPDATE_IN_BASE; |
1700 | 0 | mark_fsmonitor_invalid(istate, ce); |
1701 | 0 | istate->cache_changed |= CE_ENTRY_CHANGED; |
1702 | 0 | } |
1703 | 0 | if (quiet) |
1704 | 0 | continue; |
1705 | | |
1706 | 0 | if (cache_errno == ENOENT) |
1707 | 0 | fmt = deleted_fmt; |
1708 | 0 | else if (ce_intent_to_add(ce)) |
1709 | 0 | fmt = added_fmt; /* must be before other checks */ |
1710 | 0 | else if (changed & TYPE_CHANGED) |
1711 | 0 | fmt = typechange_fmt; |
1712 | 0 | else |
1713 | 0 | fmt = modified_fmt; |
1714 | 0 | show_file(fmt, |
1715 | 0 | ce->name, in_porcelain, &first, header_msg); |
1716 | 0 | has_errors = 1; |
1717 | 0 | continue; |
1718 | 0 | } |
1719 | | |
1720 | 0 | replace_index_entry(istate, i, new_entry); |
1721 | 0 | } |
1722 | 0 | trace2_data_intmax("index", NULL, "refresh/sum_lstat", t2_sum_lstat); |
1723 | 0 | trace2_data_intmax("index", NULL, "refresh/sum_scan", t2_sum_scan); |
1724 | 0 | trace2_region_leave("index", "refresh", NULL); |
1725 | 0 | display_progress(progress, istate->cache_nr); |
1726 | 0 | stop_progress(&progress); |
1727 | 0 | trace_performance_leave("refresh index"); |
1728 | 0 | return has_errors; |
1729 | 0 | } |
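| |
| | /*
| | * For example, with REFRESH_IN_PORCELAIN set a locally modified file
| | * is reported via modified_fmt as "M\tpath", while the default
| | * format prints "path: needs update".
| | */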
1730 | | |
1731 | | struct cache_entry *refresh_cache_entry(struct index_state *istate, |
1732 | | struct cache_entry *ce, |
1733 | | unsigned int options) |
1734 | 0 | { |
1735 | 0 | return refresh_cache_ent(istate, ce, options, NULL, NULL, NULL, NULL); |
1736 | 0 | } |
1737 | | |
1738 | | |
1739 | | /***************************************************************** |
1740 | | * Index File I/O |
1741 | | *****************************************************************/ |
1742 | | |
1743 | 0 | #define INDEX_FORMAT_DEFAULT 3 |
1744 | | |
1745 | | static unsigned int get_index_format_default(struct repository *r) |
1746 | 0 | { |
1747 | 0 | char *envversion = getenv("GIT_INDEX_VERSION"); |
1748 | 0 | char *endp; |
1749 | 0 | unsigned int version = INDEX_FORMAT_DEFAULT; |
1750 | |
1751 | 0 | if (!envversion) { |
1752 | 0 | prepare_repo_settings(r); |
1753 | |
1754 | 0 | if (r->settings.index_version >= 0) |
1755 | 0 | version = r->settings.index_version; |
1756 | 0 | if (version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) { |
1757 | 0 | warning(_("index.version set, but the value is invalid.\n" |
1758 | 0 | "Using version %i"), INDEX_FORMAT_DEFAULT); |
1759 | 0 | return INDEX_FORMAT_DEFAULT; |
1760 | 0 | } |
1761 | 0 | return version; |
1762 | 0 | } |
1763 | | |
1764 | 0 | version = strtoul(envversion, &endp, 10); |
1765 | 0 | if (*endp || |
1766 | 0 | version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) { |
1767 | 0 | warning(_("GIT_INDEX_VERSION set, but the value is invalid.\n" |
1768 | 0 | "Using version %i"), INDEX_FORMAT_DEFAULT); |
1769 | 0 | version = INDEX_FORMAT_DEFAULT; |
1770 | 0 | } |
1771 | 0 | return version; |
1772 | 0 | } |
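| |
| | /*
| | * For example, GIT_INDEX_VERSION=4 in the environment selects index
| | * v4 here, while an out-of-range value such as GIT_INDEX_VERSION=9
| | * triggers the warning and falls back to INDEX_FORMAT_DEFAULT.
| | */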
1773 | | |
1774 | | /* |
1775 | | * dev/ino/uid/gid/size are also just tracked to the low 32 bits |
1776 | | * Again - this is just a (very strong in practice) heuristic that |
1777 | | * the inode hasn't changed. |
1778 | | * |
1779 | | * We save the fields in big-endian order to allow using the |
1780 | | * index file over NFS transparently. |
1781 | | */ |
1782 | | struct ondisk_cache_entry { |
1783 | | struct cache_time ctime; |
1784 | | struct cache_time mtime; |
1785 | | uint32_t dev; |
1786 | | uint32_t ino; |
1787 | | uint32_t mode; |
1788 | | uint32_t uid; |
1789 | | uint32_t gid; |
1790 | | uint32_t size; |
1791 | | /* |
1792 | | * unsigned char hash[hashsz]; |
1793 | | * uint16_t flags; |
1794 | | * if (flags & CE_EXTENDED) |
1795 | | * uint16_t flags2; |
1796 | | */ |
1797 | | unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)]; |
1798 | | char name[FLEX_ARRAY]; |
1799 | | }; |
1800 | | |
1801 | | /* These are only used for v3 or lower */ |
1802 | 0 | #define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len) |
1803 | 0 | #define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7) |
1804 | 0 | #define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len) |
1805 | 0 | #define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \ |
1806 | 0 | ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len) |
1807 | | #define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len)) |
1808 | 0 | #define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce)))) |
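| |
| | /*
| | * Worked example, assuming SHA-1 (rawsz = 20): a non-extended v2/v3
| | * entry for the 8-byte name "Makefile" needs 40 bytes of stat data,
| | * 20 bytes of hash, 2 bytes of flags and 9 bytes of NUL-terminated
| | * name = 71 bytes, which ondisk_ce_size() rounds up to 72 so that
| | * the next entry starts on an 8-byte boundary.
| | */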
1809 | | |
1810 | | /* Allow fsck to force verification of the index checksum. */ |
1811 | | int verify_index_checksum; |
1812 | | |
1813 | | /* Allow fsck to force verification of the cache entry order. */ |
1814 | | int verify_ce_order; |
1815 | | |
1816 | | static int verify_hdr(const struct cache_header *hdr, unsigned long size) |
1817 | 0 | { |
1818 | 0 | git_hash_ctx c; |
1819 | 0 | unsigned char hash[GIT_MAX_RAWSZ]; |
1820 | 0 | int hdr_version; |
1821 | 0 | unsigned char *start, *end; |
1822 | 0 | struct object_id oid; |
1823 | |
1824 | 0 | if (hdr->hdr_signature != htonl(CACHE_SIGNATURE)) |
1825 | 0 | return error(_("bad signature 0x%08x"), hdr->hdr_signature); |
1826 | 0 | hdr_version = ntohl(hdr->hdr_version); |
1827 | 0 | if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version) |
1828 | 0 | return error(_("bad index version %d"), hdr_version); |
1829 | | |
1830 | 0 | if (!verify_index_checksum) |
1831 | 0 | return 0; |
1832 | | |
1833 | 0 | end = (unsigned char *)hdr + size; |
1834 | 0 | start = end - the_hash_algo->rawsz; |
1835 | 0 | oidread(&oid, start); |
1836 | 0 | if (oideq(&oid, null_oid())) |
1837 | 0 | return 0; |
1838 | | |
1839 | 0 | the_hash_algo->init_fn(&c); |
1840 | 0 | the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz); |
1841 | 0 | the_hash_algo->final_fn(hash, &c); |
1842 | 0 | if (!hasheq(hash, start)) |
1843 | 0 | return error(_("bad index file sha1 signature")); |
1844 | 0 | return 0; |
1845 | 0 | } |
1846 | | |
1847 | | static int read_index_extension(struct index_state *istate, |
1848 | | const char *ext, const char *data, unsigned long sz) |
1849 | 0 | { |
1850 | 0 | switch (CACHE_EXT(ext)) { |
1851 | 0 | case CACHE_EXT_TREE: |
1852 | 0 | istate->cache_tree = cache_tree_read(data, sz); |
1853 | 0 | break; |
1854 | 0 | case CACHE_EXT_RESOLVE_UNDO: |
1855 | 0 | istate->resolve_undo = resolve_undo_read(data, sz); |
1856 | 0 | break; |
1857 | 0 | case CACHE_EXT_LINK: |
1858 | 0 | if (read_link_extension(istate, data, sz)) |
1859 | 0 | return -1; |
1860 | 0 | break; |
1861 | 0 | case CACHE_EXT_UNTRACKED: |
1862 | 0 | istate->untracked = read_untracked_extension(data, sz); |
1863 | 0 | break; |
1864 | 0 | case CACHE_EXT_FSMONITOR: |
1865 | 0 | read_fsmonitor_extension(istate, data, sz); |
1866 | 0 | break; |
1867 | 0 | case CACHE_EXT_ENDOFINDEXENTRIES: |
1868 | 0 | case CACHE_EXT_INDEXENTRYOFFSETTABLE: |
1869 | | /* already handled in do_read_index() */ |
1870 | 0 | break; |
1871 | 0 | case CACHE_EXT_SPARSE_DIRECTORIES: |
1872 | | /* no content, only an indicator */ |
1873 | 0 | istate->sparse_index = INDEX_COLLAPSED; |
1874 | 0 | break; |
1875 | 0 | default: |
1876 | 0 | if (*ext < 'A' || 'Z' < *ext) |
1877 | 0 | return error(_("index uses %.4s extension, which we do not understand"), |
1878 | 0 | ext); |
1879 | 0 | fprintf_ln(stderr, _("ignoring %.4s extension"), ext); |
1880 | 0 | break; |
1881 | 0 | } |
1882 | 0 | return 0; |
1883 | 0 | } |
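| |
| | /*
| | * The default case above encodes the rule that optional extensions
| | * are named 'A'..'Z': an unrecognized "FOOB" extension would just be
| | * skipped with a message, while an unrecognized lowercase name (a
| | * hypothetical "xyzz") aborts the read, since it marks data we must
| | * understand to interpret the index correctly.
| | */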
1884 | | |
1885 | | /* |
1886 | | * Parses the contents of the cache entry contained within the 'ondisk' buffer |
1887 | | * into a new incore 'cache_entry'. |
1888 | | * |
1889 | | * Note that 'char *ondisk' may not be aligned to a 4-byte address interval in |
1890 | | * index v4, so we cannot cast it to 'struct ondisk_cache_entry *' and access |
1891 | | * its members. Instead, we use the byte offsets of members within the struct to |
1892 | | * identify where 'get_be16()', 'get_be32()', and 'oidread()' (which can all |
1893 | | * read from an unaligned memory buffer) should read from the 'ondisk' buffer |
1894 | | * into the corresponding incore 'cache_entry' members. |
1895 | | */ |
1896 | | static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool, |
1897 | | unsigned int version, |
1898 | | const char *ondisk, |
1899 | | unsigned long *ent_size, |
1900 | | const struct cache_entry *previous_ce) |
1901 | 0 | { |
1902 | 0 | struct cache_entry *ce; |
1903 | 0 | size_t len; |
1904 | 0 | const char *name; |
1905 | 0 | const unsigned hashsz = the_hash_algo->rawsz; |
1906 | 0 | const char *flagsp = ondisk + offsetof(struct ondisk_cache_entry, data) + hashsz; |
1907 | 0 | unsigned int flags; |
1908 | 0 | size_t copy_len = 0; |
1909 | | /* |
1910 | | * Adjacent cache entries tend to share the leading paths, so it makes |
1911 | | * sense to only store the differences in later entries. In the v4 |
1912 | | * on-disk format of the index, each on-disk cache entry stores the |
1913 | | * number of bytes to be stripped from the end of the previous name, |
1914 | | * and the bytes to append to the result, to come up with its name. |
1915 | | */ |
1916 | 0 | int expand_name_field = version == 4; |
1917 | | |
1918 | | /* On-disk flags are just 16 bits */ |
1919 | 0 | flags = get_be16(flagsp); |
1920 | 0 | len = flags & CE_NAMEMASK; |
1921 | |
1922 | 0 | if (flags & CE_EXTENDED) { |
1923 | 0 | int extended_flags; |
1924 | 0 | extended_flags = get_be16(flagsp + sizeof(uint16_t)) << 16; |
1925 | | /* We do not yet understand any bit out of CE_EXTENDED_FLAGS */ |
1926 | 0 | if (extended_flags & ~CE_EXTENDED_FLAGS) |
1927 | 0 | die(_("unknown index entry format 0x%08x"), extended_flags); |
1928 | 0 | flags |= extended_flags; |
1929 | 0 | name = (const char *)(flagsp + 2 * sizeof(uint16_t)); |
1930 | 0 | } |
1931 | 0 | else |
1932 | 0 | name = (const char *)(flagsp + sizeof(uint16_t)); |
1933 | | |
1934 | 0 | if (expand_name_field) { |
1935 | 0 | const unsigned char *cp = (const unsigned char *)name; |
1936 | 0 | size_t strip_len, previous_len; |
1937 | | |
1938 | | /* If we're at the beginning of a block, ignore the previous name */ |
1939 | 0 | strip_len = decode_varint(&cp); |
1940 | 0 | if (previous_ce) { |
1941 | 0 | previous_len = previous_ce->ce_namelen; |
1942 | 0 | if (previous_len < strip_len) |
1943 | 0 | die(_("malformed name field in the index, near path '%s'"), |
1944 | 0 | previous_ce->name); |
1945 | 0 | copy_len = previous_len - strip_len; |
1946 | 0 | } |
1947 | 0 | name = (const char *)cp; |
1948 | 0 | } |
1949 | | |
1950 | 0 | if (len == CE_NAMEMASK) { |
1951 | 0 | len = strlen(name); |
1952 | 0 | if (expand_name_field) |
1953 | 0 | len += copy_len; |
1954 | 0 | } |
1955 | |
1956 | 0 | ce = mem_pool__ce_alloc(ce_mem_pool, len); |
1957 | | |
1958 | | /* |
1959 | | * NEEDSWORK: using 'offsetof()' is cumbersome and should be replaced |
1960 | | * with something more akin to 'load_bitmap_entries_v1()'s use of |
1961 | | * 'read_be16'/'read_be32'. For consistency with the corresponding |
1962 | | * ondisk entry write function ('copy_cache_entry_to_ondisk()'), this |
1963 | | * should be done at the same time as removing references to |
1964 | | * 'ondisk_cache_entry' there. |
1965 | | */ |
1966 | 0 | ce->ce_stat_data.sd_ctime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime) |
1967 | 0 | + offsetof(struct cache_time, sec)); |
1968 | 0 | ce->ce_stat_data.sd_mtime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime) |
1969 | 0 | + offsetof(struct cache_time, sec)); |
1970 | 0 | ce->ce_stat_data.sd_ctime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime) |
1971 | 0 | + offsetof(struct cache_time, nsec)); |
1972 | 0 | ce->ce_stat_data.sd_mtime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime) |
1973 | 0 | + offsetof(struct cache_time, nsec)); |
1974 | 0 | ce->ce_stat_data.sd_dev = get_be32(ondisk + offsetof(struct ondisk_cache_entry, dev)); |
1975 | 0 | ce->ce_stat_data.sd_ino = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ino)); |
1976 | 0 | ce->ce_mode = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mode)); |
1977 | 0 | ce->ce_stat_data.sd_uid = get_be32(ondisk + offsetof(struct ondisk_cache_entry, uid)); |
1978 | 0 | ce->ce_stat_data.sd_gid = get_be32(ondisk + offsetof(struct ondisk_cache_entry, gid)); |
1979 | 0 | ce->ce_stat_data.sd_size = get_be32(ondisk + offsetof(struct ondisk_cache_entry, size)); |
1980 | 0 | ce->ce_flags = flags & ~CE_NAMEMASK; |
1981 | 0 | ce->ce_namelen = len; |
1982 | 0 | ce->index = 0; |
1983 | 0 | oidread(&ce->oid, (const unsigned char *)ondisk + offsetof(struct ondisk_cache_entry, data)); |
1984 | |
1985 | 0 | if (expand_name_field) { |
1986 | 0 | if (copy_len) |
1987 | 0 | memcpy(ce->name, previous_ce->name, copy_len); |
1988 | 0 | memcpy(ce->name + copy_len, name, len + 1 - copy_len); |
1989 | 0 | *ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len; |
1990 | 0 | } else { |
1991 | 0 | memcpy(ce->name, name, len + 1); |
1992 | 0 | *ent_size = ondisk_ce_size(ce); |
1993 | 0 | } |
1994 | 0 | return ce; |
1995 | 0 | } |
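| |
| | /*
| | * Illustration of the v4 name expansion above, with hypothetical
| | * paths: if previous_ce->name is "t/helper/one.c" (14 bytes) and the
| | * ondisk entry stores the varint 5 followed by "two.c", then
| | * strip_len = 5, copy_len = 14 - 5 = 9, and the shared prefix
| | * "t/helper/" is reused to reconstruct "t/helper/two.c".
| | */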
1996 | | |
1997 | | static void check_ce_order(struct index_state *istate) |
1998 | 0 | { |
1999 | 0 | unsigned int i; |
2000 | |
2001 | 0 | if (!verify_ce_order) |
2002 | 0 | return; |
2003 | | |
2004 | 0 | for (i = 1; i < istate->cache_nr; i++) { |
2005 | 0 | struct cache_entry *ce = istate->cache[i - 1]; |
2006 | 0 | struct cache_entry *next_ce = istate->cache[i]; |
2007 | 0 | int name_compare = strcmp(ce->name, next_ce->name); |
2008 | |
2009 | 0 | if (0 < name_compare) |
2010 | 0 | die(_("unordered stage entries in index")); |
2011 | 0 | if (!name_compare) { |
2012 | 0 | if (!ce_stage(ce)) |
2013 | 0 | die(_("multiple stage entries for merged file '%s'"), |
2014 | 0 | ce->name); |
2015 | 0 | if (ce_stage(ce) > ce_stage(next_ce)) |
2016 | 0 | die(_("unordered stage entries for '%s'"), |
2017 | 0 | ce->name); |
2018 | 0 | } |
2019 | 0 | } |
2020 | 0 | } |
2021 | | |
2022 | | static void tweak_untracked_cache(struct index_state *istate) |
2023 | 0 | { |
2024 | 0 | struct repository *r = the_repository; |
2025 | |
2026 | 0 | prepare_repo_settings(r); |
2027 | |
2028 | 0 | switch (r->settings.core_untracked_cache) { |
2029 | 0 | case UNTRACKED_CACHE_REMOVE: |
2030 | 0 | remove_untracked_cache(istate); |
2031 | 0 | break; |
2032 | 0 | case UNTRACKED_CACHE_WRITE: |
2033 | 0 | add_untracked_cache(istate); |
2034 | 0 | break; |
2035 | 0 | case UNTRACKED_CACHE_KEEP: |
2036 | | /* |
2037 | | * Either an explicit "core.untrackedCache=keep", the |
2038 | | * default if "core.untrackedCache" isn't configured, |
2039 | | * or a fallback on an unknown "core.untrackedCache" |
2040 | | * value. |
2041 | | */ |
2042 | 0 | break; |
2043 | 0 | } |
2044 | 0 | } |
2045 | | |
2046 | | static void tweak_split_index(struct index_state *istate) |
2047 | 0 | { |
2048 | 0 | switch (git_config_get_split_index()) { |
2049 | 0 | case -1: /* unset: do nothing */ |
2050 | 0 | break; |
2051 | 0 | case 0: /* false */ |
2052 | 0 | remove_split_index(istate); |
2053 | 0 | break; |
2054 | 0 | case 1: /* true */ |
2055 | 0 | add_split_index(istate); |
2056 | 0 | break; |
2057 | 0 | default: /* unknown value: do nothing */ |
2058 | 0 | break; |
2059 | 0 | } |
2060 | 0 | } |
2061 | | |
2062 | | static void post_read_index_from(struct index_state *istate) |
2063 | 0 | { |
2064 | 0 | check_ce_order(istate); |
2065 | 0 | tweak_untracked_cache(istate); |
2066 | 0 | tweak_split_index(istate); |
2067 | 0 | tweak_fsmonitor(istate); |
2068 | 0 | } |
2069 | | |
2070 | | static size_t estimate_cache_size_from_compressed(unsigned int entries) |
2071 | 0 | { |
2072 | 0 | return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH); |
2073 | 0 | } |
2074 | | |
2075 | | static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries) |
2076 | 0 | { |
2077 | 0 | long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry); |
2078 | | |
2079 | | /* |
2080 | | * Account for potential alignment differences. |
2081 | | */ |
2082 | 0 | per_entry += align_padding_size(per_entry, 0); |
2083 | 0 | return ondisk_size + entries * per_entry; |
2084 | 0 | } |
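| |
| | /*
| | * In other words: for v2/v3 the pool is sized as the mmap size plus
| | * a small fixed per-entry slack, while for v4 (compressed pathnames)
| | * estimate_cache_size_from_compressed() instead guesses
| | * CACHE_ENTRY_PATH_LENGTH bytes of pathname per entry.
| | */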
2085 | | |
2086 | | struct index_entry_offset |
2087 | | { |
2088 | | /* starting byte offset into index file, count of index entries in this block */ |
2089 | | int offset, nr; |
2090 | | }; |
2091 | | |
2092 | | struct index_entry_offset_table |
2093 | | { |
2094 | | int nr; |
2095 | | struct index_entry_offset entries[FLEX_ARRAY]; |
2096 | | }; |
2097 | | |
2098 | | static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset); |
2099 | | static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot); |
2100 | | |
2101 | | static size_t read_eoie_extension(const char *mmap, size_t mmap_size); |
2102 | | static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset); |
2103 | | |
2104 | | struct load_index_extensions |
2105 | | { |
2106 | | pthread_t pthread; |
2107 | | struct index_state *istate; |
2108 | | const char *mmap; |
2109 | | size_t mmap_size; |
2110 | | unsigned long src_offset; |
2111 | | }; |
2112 | | |
2113 | | static void *load_index_extensions(void *_data) |
2114 | 0 | { |
2115 | 0 | struct load_index_extensions *p = _data; |
2116 | 0 | unsigned long src_offset = p->src_offset; |
2117 | |
2118 | 0 | while (src_offset <= p->mmap_size - the_hash_algo->rawsz - 8) { |
2119 | | /* After an array of active_nr index entries, |
2120 | | * there can be an arbitrary number of extended
2121 | | * sections, each of which is prefixed with the
2122 | | * extension name (4 bytes) and the section length
2123 | | * in 4-byte network byte order. |
2124 | | */ |
2125 | 0 | uint32_t extsize = get_be32(p->mmap + src_offset + 4); |
2126 | 0 | if (read_index_extension(p->istate, |
2127 | 0 | p->mmap + src_offset, |
2128 | 0 | p->mmap + src_offset + 8, |
2129 | 0 | extsize) < 0) { |
2130 | 0 | munmap((void *)p->mmap, p->mmap_size); |
2131 | 0 | die(_("index file corrupt")); |
2132 | 0 | } |
2133 | 0 | src_offset += 8; |
2134 | 0 | src_offset += extsize; |
2135 | 0 | } |
2136 | | |
2137 | 0 | return NULL; |
2138 | 0 | } |
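| |
| | /*
| | * For instance, a cache-tree extension with a 123-byte payload is
| | * laid out as the 4 bytes "TREE", the big-endian length 123, and
| | * then the payload itself, which is why the loop above advances by
| | * 8 + extsize per extension.
| | */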
2139 | | |
2140 | | /* |
2141 | | * A helper function that will load the specified range of cache entries |
2142 | | * from the memory mapped file and add them to the given index. |
2143 | | */ |
2144 | | static unsigned long load_cache_entry_block(struct index_state *istate, |
2145 | | struct mem_pool *ce_mem_pool, int offset, int nr, const char *mmap, |
2146 | | unsigned long start_offset, const struct cache_entry *previous_ce) |
2147 | 0 | { |
2148 | 0 | int i; |
2149 | 0 | unsigned long src_offset = start_offset; |
2150 | |
2151 | 0 | for (i = offset; i < offset + nr; i++) { |
2152 | 0 | struct cache_entry *ce; |
2153 | 0 | unsigned long consumed; |
2154 | |
2155 | 0 | ce = create_from_disk(ce_mem_pool, istate->version, |
2156 | 0 | mmap + src_offset, |
2157 | 0 | &consumed, previous_ce); |
2158 | 0 | set_index_entry(istate, i, ce); |
2159 | |
2160 | 0 | src_offset += consumed; |
2161 | 0 | previous_ce = ce; |
2162 | 0 | } |
2163 | 0 | return src_offset - start_offset; |
2164 | 0 | } |
2165 | | |
2166 | | static unsigned long load_all_cache_entries(struct index_state *istate, |
2167 | | const char *mmap, size_t mmap_size, unsigned long src_offset) |
2168 | 0 | { |
2169 | 0 | unsigned long consumed; |
2170 | |
2171 | 0 | istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool)); |
2172 | 0 | if (istate->version == 4) { |
2173 | 0 | mem_pool_init(istate->ce_mem_pool, |
2174 | 0 | estimate_cache_size_from_compressed(istate->cache_nr)); |
2175 | 0 | } else { |
2176 | 0 | mem_pool_init(istate->ce_mem_pool, |
2177 | 0 | estimate_cache_size(mmap_size, istate->cache_nr)); |
2178 | 0 | } |
2179 | |
2180 | 0 | consumed = load_cache_entry_block(istate, istate->ce_mem_pool, |
2181 | 0 | 0, istate->cache_nr, mmap, src_offset, NULL); |
2182 | 0 | return consumed; |
2183 | 0 | } |
2184 | | |
2185 | | /* |
2186 | | * Mostly randomly chosen maximum thread counts: we |
2187 | | * cap the parallelism to online_cpus() threads, and we want |
2188 | | * to have at least 10000 cache entries per thread for it to |
2189 | | * be worth starting a thread. |
2190 | | */ |
2191 | | |
2192 | 0 | #define THREAD_COST (10000) |
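| |
| | /*
| | * E.g. an index of 25,000 entries on an 8-core machine would get
| | * min(25000 / THREAD_COST, online_cpus()) = 2 reader threads, while
| | * anything below 10,000 entries stays single threaded (see
| | * do_read_index() below).
| | */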
2193 | | |
2194 | | struct load_cache_entries_thread_data |
2195 | | { |
2196 | | pthread_t pthread; |
2197 | | struct index_state *istate; |
2198 | | struct mem_pool *ce_mem_pool; |
2199 | | int offset; |
2200 | | const char *mmap; |
2201 | | struct index_entry_offset_table *ieot; |
2202 | | int ieot_start; /* starting index into the ieot array */ |
2203 | | int ieot_blocks; /* count of ieot entries to process */ |
2204 | | unsigned long consumed; /* return # of bytes in index file processed */ |
2205 | | }; |
2206 | | |
2207 | | /* |
2208 | | * A thread proc to run the load_cache_entries() computation |
2209 | | * across multiple background threads. |
2210 | | */ |
2211 | | static void *load_cache_entries_thread(void *_data) |
2212 | 0 | { |
2213 | 0 | struct load_cache_entries_thread_data *p = _data; |
2214 | 0 | int i; |
2215 | | |
2216 | | /* iterate across all ieot blocks assigned to this thread */ |
2217 | 0 | for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) { |
2218 | 0 | p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool, |
2219 | 0 | p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL); |
2220 | 0 | p->offset += p->ieot->entries[i].nr; |
2221 | 0 | } |
2222 | 0 | return NULL; |
2223 | 0 | } |
2224 | | |
2225 | | static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size, |
2226 | | int nr_threads, struct index_entry_offset_table *ieot) |
2227 | 0 | { |
2228 | 0 | int i, offset, ieot_blocks, ieot_start, err; |
2229 | 0 | struct load_cache_entries_thread_data *data; |
2230 | 0 | unsigned long consumed = 0; |
2231 | | |
2232 | | /* a little sanity checking */ |
2233 | 0 | if (istate->name_hash_initialized) |
2234 | 0 | BUG("the name hash isn't thread safe"); |
2235 | | |
2236 | 0 | istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool)); |
2237 | 0 | mem_pool_init(istate->ce_mem_pool, 0); |
2238 | | |
2239 | | /* ensure we have no more threads than we have blocks to process */ |
2240 | 0 | if (nr_threads > ieot->nr) |
2241 | 0 | nr_threads = ieot->nr; |
2242 | 0 | CALLOC_ARRAY(data, nr_threads); |
2243 | |
2244 | 0 | offset = ieot_start = 0; |
2245 | 0 | ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads); |
2246 | 0 | for (i = 0; i < nr_threads; i++) { |
2247 | 0 | struct load_cache_entries_thread_data *p = &data[i]; |
2248 | 0 | int nr, j; |
2249 | |
2250 | 0 | if (ieot_start + ieot_blocks > ieot->nr) |
2251 | 0 | ieot_blocks = ieot->nr - ieot_start; |
2252 | |
2253 | 0 | p->istate = istate; |
2254 | 0 | p->offset = offset; |
2255 | 0 | p->mmap = mmap; |
2256 | 0 | p->ieot = ieot; |
2257 | 0 | p->ieot_start = ieot_start; |
2258 | 0 | p->ieot_blocks = ieot_blocks; |
2259 | | |
2260 | | /* create a mem_pool for each thread */ |
2261 | 0 | nr = 0; |
2262 | 0 | for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++) |
2263 | 0 | nr += p->ieot->entries[j].nr; |
2264 | 0 | p->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool)); |
2265 | 0 | if (istate->version == 4) { |
2266 | 0 | mem_pool_init(p->ce_mem_pool, |
2267 | 0 | estimate_cache_size_from_compressed(nr)); |
2268 | 0 | } else { |
2269 | 0 | mem_pool_init(p->ce_mem_pool, |
2270 | 0 | estimate_cache_size(mmap_size, nr)); |
2271 | 0 | } |
2272 | |
2273 | 0 | err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p); |
2274 | 0 | if (err) |
2275 | 0 | die(_("unable to create load_cache_entries thread: %s"), strerror(err)); |
2276 | | |
2277 | | /* increment by the number of cache entries in the ieot block being processed */ |
2278 | 0 | for (j = 0; j < ieot_blocks; j++) |
2279 | 0 | offset += ieot->entries[ieot_start + j].nr; |
2280 | 0 | ieot_start += ieot_blocks; |
2281 | 0 | } |
2282 | | |
2283 | 0 | for (i = 0; i < nr_threads; i++) { |
2284 | 0 | struct load_cache_entries_thread_data *p = &data[i]; |
2285 | |
2286 | 0 | err = pthread_join(p->pthread, NULL); |
2287 | 0 | if (err) |
2288 | 0 | die(_("unable to join load_cache_entries thread: %s"), strerror(err)); |
2289 | 0 | mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool); |
2290 | 0 | consumed += p->consumed; |
2291 | 0 | } |
2292 | | |
2293 | 0 | free(data); |
2294 | |
2295 | 0 | return consumed; |
2296 | 0 | } |
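| |
| | /*
| | * Partitioning example: with ieot->nr = 10 blocks and nr_threads = 4,
| | * DIV_ROUND_UP(10, 4) = 3 blocks per thread, so the first three
| | * threads take blocks 0-2, 3-5 and 6-8, and the clamp above leaves
| | * just block 9 for the last thread.
| | */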
2297 | | |
2298 | | static void set_new_index_sparsity(struct index_state *istate) |
2299 | 0 | { |
2300 | | /* |
2301 | | * If the index's repo exists, mark it sparse according to |
2302 | | * repo settings. |
2303 | | */ |
2304 | 0 | prepare_repo_settings(istate->repo); |
2305 | 0 | if (!istate->repo->settings.command_requires_full_index && |
2306 | 0 | is_sparse_index_allowed(istate, 0)) |
2307 | 0 | istate->sparse_index = 1; |
2308 | 0 | } |
2309 | | |
2310 | | /* remember to discard_cache() before reading a different cache! */ |
2311 | | int do_read_index(struct index_state *istate, const char *path, int must_exist) |
2312 | 0 | { |
2313 | 0 | int fd; |
2314 | 0 | struct stat st; |
2315 | 0 | unsigned long src_offset; |
2316 | 0 | const struct cache_header *hdr; |
2317 | 0 | const char *mmap; |
2318 | 0 | size_t mmap_size; |
2319 | 0 | struct load_index_extensions p; |
2320 | 0 | size_t extension_offset = 0; |
2321 | 0 | int nr_threads, cpus; |
2322 | 0 | struct index_entry_offset_table *ieot = NULL; |
2323 | |
2324 | 0 | if (istate->initialized) |
2325 | 0 | return istate->cache_nr; |
2326 | | |
2327 | 0 | istate->timestamp.sec = 0; |
2328 | 0 | istate->timestamp.nsec = 0; |
2329 | 0 | fd = open(path, O_RDONLY); |
2330 | 0 | if (fd < 0) { |
2331 | 0 | if (!must_exist && errno == ENOENT) { |
2332 | 0 | set_new_index_sparsity(istate); |
2333 | 0 | return 0; |
2334 | 0 | } |
2335 | 0 | die_errno(_("%s: index file open failed"), path); |
2336 | 0 | } |
2337 | | |
2338 | 0 | if (fstat(fd, &st)) |
2339 | 0 | die_errno(_("%s: cannot stat the open index"), path); |
2340 | | |
2341 | 0 | mmap_size = xsize_t(st.st_size); |
2342 | 0 | if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz) |
2343 | 0 | die(_("%s: index file smaller than expected"), path); |
2344 | | |
2345 | 0 | mmap = xmmap_gently(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0); |
2346 | 0 | if (mmap == MAP_FAILED) |
2347 | 0 | die_errno(_("%s: unable to map index file%s"), path, |
2348 | 0 | mmap_os_err()); |
2349 | 0 | close(fd); |
2350 | |
2351 | 0 | hdr = (const struct cache_header *)mmap; |
2352 | 0 | if (verify_hdr(hdr, mmap_size) < 0) |
2353 | 0 | goto unmap; |
2354 | | |
2355 | 0 | oidread(&istate->oid, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz); |
2356 | 0 | istate->version = ntohl(hdr->hdr_version); |
2357 | 0 | istate->cache_nr = ntohl(hdr->hdr_entries); |
2358 | 0 | istate->cache_alloc = alloc_nr(istate->cache_nr); |
2359 | 0 | CALLOC_ARRAY(istate->cache, istate->cache_alloc); |
2360 | 0 | istate->initialized = 1; |
2361 | |
2362 | 0 | p.istate = istate; |
2363 | 0 | p.mmap = mmap; |
2364 | 0 | p.mmap_size = mmap_size; |
2365 | |
2366 | 0 | src_offset = sizeof(*hdr); |
2367 | |
2368 | 0 | if (git_config_get_index_threads(&nr_threads)) |
2369 | 0 | nr_threads = 1; |
2370 | | |
2371 | | /* TODO: does creating more threads than cores help? */ |
2372 | 0 | if (!nr_threads) { |
2373 | 0 | nr_threads = istate->cache_nr / THREAD_COST; |
2374 | 0 | cpus = online_cpus(); |
2375 | 0 | if (nr_threads > cpus) |
2376 | 0 | nr_threads = cpus; |
2377 | 0 | } |
2378 | |
2379 | 0 | if (!HAVE_THREADS) |
2380 | 0 | nr_threads = 1; |
2381 | |
2382 | 0 | if (nr_threads > 1) { |
2383 | 0 | extension_offset = read_eoie_extension(mmap, mmap_size); |
2384 | 0 | if (extension_offset) { |
2385 | 0 | int err; |
2386 | |
2387 | 0 | p.src_offset = extension_offset; |
2388 | 0 | err = pthread_create(&p.pthread, NULL, load_index_extensions, &p); |
2389 | 0 | if (err) |
2390 | 0 | die(_("unable to create load_index_extensions thread: %s"), strerror(err)); |
2391 | | |
2392 | 0 | nr_threads--; |
2393 | 0 | } |
2394 | 0 | } |
2395 | | |
2396 | | /* |
2397 | | * Locate and read the index entry offset table so that we can use it |
2398 | | * to multi-thread the reading of the cache entries. |
2399 | | */ |
2400 | 0 | if (extension_offset && nr_threads > 1) |
2401 | 0 | ieot = read_ieot_extension(mmap, mmap_size, extension_offset); |
2402 | |
2403 | 0 | if (ieot) { |
2404 | 0 | src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, nr_threads, ieot); |
2405 | 0 | free(ieot); |
2406 | 0 | } else { |
2407 | 0 | src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset); |
2408 | 0 | } |
2409 | |
2410 | 0 | istate->timestamp.sec = st.st_mtime; |
2411 | 0 | istate->timestamp.nsec = ST_MTIME_NSEC(st); |
2412 | | |
2413 | | /* if we created a thread, join it; otherwise load the extensions on the primary thread */
2414 | 0 | if (extension_offset) { |
2415 | 0 | int ret = pthread_join(p.pthread, NULL); |
2416 | 0 | if (ret) |
2417 | 0 | die(_("unable to join load_index_extensions thread: %s"), strerror(ret)); |
2418 | 0 | } else { |
2419 | 0 | p.src_offset = src_offset; |
2420 | 0 | load_index_extensions(&p); |
2421 | 0 | } |
2422 | 0 | munmap((void *)mmap, mmap_size); |
2423 | | |
2424 | | /* |
2425 | | * TODO trace2: replace "the_repository" with the actual repo instance |
2426 | | * that is associated with the given "istate". |
2427 | | */ |
2428 | 0 | trace2_data_intmax("index", the_repository, "read/version", |
2429 | 0 | istate->version); |
2430 | 0 | trace2_data_intmax("index", the_repository, "read/cache_nr", |
2431 | 0 | istate->cache_nr); |
2432 | | |
2433 | | /* |
2434 | | * If the command explicitly requires a full index, force it |
2435 | | * to be full. Otherwise, correct the sparsity based on repository |
2436 | | * settings and other properties of the index (if necessary). |
2437 | | */ |
2438 | 0 | prepare_repo_settings(istate->repo); |
2439 | 0 | if (istate->repo->settings.command_requires_full_index) |
2440 | 0 | ensure_full_index(istate); |
2441 | 0 | else |
2442 | 0 | ensure_correct_sparsity(istate); |
2443 | |
2444 | 0 | return istate->cache_nr; |
2445 | | |
2446 | 0 | unmap: |
2447 | 0 | munmap((void *)mmap, mmap_size); |
2448 | 0 | die(_("index file corrupt")); |
2449 | 0 | } |
2450 | | |
2451 | | /* |
2452 | | * Signal that the shared index is used by updating its mtime. |
2453 | | * |
2454 | | * This way, shared index files can be removed if they have not
2455 | | * been used for some time.
2456 | | */ |
2457 | | static void freshen_shared_index(const char *shared_index, int warn) |
2458 | 0 | { |
2459 | 0 | if (!check_and_freshen_file(shared_index, 1) && warn) |
2460 | 0 | warning(_("could not freshen shared index '%s'"), shared_index); |
2461 | 0 | } |
2462 | | |
2463 | | int read_index_from(struct index_state *istate, const char *path, |
2464 | | const char *gitdir) |
2465 | 0 | { |
2466 | 0 | struct split_index *split_index; |
2467 | 0 | int ret; |
2468 | 0 | char *base_oid_hex; |
2469 | 0 | char *base_path; |
2470 | | |
2471 | | /* istate->initialized covers both .git/index and .git/sharedindex.xxx */ |
2472 | 0 | if (istate->initialized) |
2473 | 0 | return istate->cache_nr; |
2474 | | |
2475 | | /* |
2476 | | * TODO trace2: replace "the_repository" with the actual repo instance |
2477 | | * that is associated with the given "istate". |
2478 | | */ |
2479 | 0 | trace2_region_enter_printf("index", "do_read_index", the_repository, |
2480 | 0 | "%s", path); |
2481 | 0 | trace_performance_enter(); |
2482 | 0 | ret = do_read_index(istate, path, 0); |
2483 | 0 | trace_performance_leave("read cache %s", path); |
2484 | 0 | trace2_region_leave_printf("index", "do_read_index", the_repository, |
2485 | 0 | "%s", path); |
2486 | |
2487 | 0 | split_index = istate->split_index; |
2488 | 0 | if (!split_index || is_null_oid(&split_index->base_oid)) { |
2489 | 0 | post_read_index_from(istate); |
2490 | 0 | return ret; |
2491 | 0 | } |
2492 | | |
2493 | 0 | trace_performance_enter(); |
2494 | 0 | if (split_index->base) |
2495 | 0 | release_index(split_index->base); |
2496 | 0 | else |
2497 | 0 | ALLOC_ARRAY(split_index->base, 1); |
2498 | 0 | index_state_init(split_index->base, istate->repo); |
2499 | |
2500 | 0 | base_oid_hex = oid_to_hex(&split_index->base_oid); |
2501 | 0 | base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex); |
2502 | 0 | trace2_region_enter_printf("index", "shared/do_read_index", |
2503 | 0 | the_repository, "%s", base_path); |
2504 | 0 | ret = do_read_index(split_index->base, base_path, 0); |
2505 | 0 | trace2_region_leave_printf("index", "shared/do_read_index", |
2506 | 0 | the_repository, "%s", base_path); |
2507 | 0 | if (!ret) { |
2508 | 0 | char *path_copy = xstrdup(path); |
2509 | 0 | char *base_path2 = xstrfmt("%s/sharedindex.%s", |
2510 | 0 | dirname(path_copy), base_oid_hex); |
2511 | 0 | free(path_copy); |
2512 | 0 | trace2_region_enter_printf("index", "shared/do_read_index", |
2513 | 0 | the_repository, "%s", base_path2); |
2514 | 0 | ret = do_read_index(split_index->base, base_path2, 1); |
2515 | 0 | trace2_region_leave_printf("index", "shared/do_read_index", |
2516 | 0 | the_repository, "%s", base_path2); |
2517 | 0 | free(base_path2); |
2518 | 0 | } |
2519 | 0 | if (!oideq(&split_index->base_oid, &split_index->base->oid)) |
2520 | 0 | die(_("broken index, expect %s in %s, got %s"), |
2521 | 0 | base_oid_hex, base_path, |
2522 | 0 | oid_to_hex(&split_index->base->oid)); |
2523 | | |
2524 | 0 | freshen_shared_index(base_path, 0); |
2525 | 0 | merge_base_index(istate); |
2526 | 0 | post_read_index_from(istate); |
2527 | 0 | trace_performance_leave("read cache %s", base_path); |
2528 | 0 | free(base_path); |
2529 | 0 | return ret; |
2530 | 0 | } |
2531 | | |
2532 | | int is_index_unborn(struct index_state *istate) |
2533 | 0 | { |
2534 | 0 | return (!istate->cache_nr && !istate->timestamp.sec); |
2535 | 0 | } |
2536 | | |
2537 | | void index_state_init(struct index_state *istate, struct repository *r) |
2538 | 920 | { |
2539 | 920 | struct index_state blank = INDEX_STATE_INIT(r); |
2540 | 920 | memcpy(istate, &blank, sizeof(*istate)); |
2541 | 920 | } |
2542 | | |
2543 | | void release_index(struct index_state *istate) |
2544 | 460 | { |
2545 | | /* |
2546 | | * Cache entries in istate->cache[] should have been allocated |
2547 | | * from the memory pool associated with this index, or from an |
2548 | | * associated split_index. There is no need to free individual |
2549 | | * cache entries. validate_cache_entries can detect when this |
2550 | | * assertion does not hold. |
2551 | | */ |
2552 | 460 | validate_cache_entries(istate); |
2553 | | |
2554 | 460 | resolve_undo_clear_index(istate); |
2555 | 460 | free_name_hash(istate); |
2556 | 460 | cache_tree_free(&(istate->cache_tree)); |
2557 | 460 | free(istate->fsmonitor_last_update); |
2558 | 460 | free(istate->cache); |
2559 | 460 | discard_split_index(istate); |
2560 | 460 | free_untracked_cache(istate->untracked); |
2561 | | |
2562 | 460 | if (istate->sparse_checkout_patterns) { |
2563 | 0 | clear_pattern_list(istate->sparse_checkout_patterns); |
2564 | 0 | FREE_AND_NULL(istate->sparse_checkout_patterns); |
2565 | 0 | } |
2566 | | |
2567 | 460 | if (istate->ce_mem_pool) { |
2568 | 0 | mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries()); |
2569 | 0 | FREE_AND_NULL(istate->ce_mem_pool); |
2570 | 0 | } |
2571 | 460 | } |
2572 | | |
2573 | | void discard_index(struct index_state *istate) |
2574 | 460 | { |
2575 | 460 | release_index(istate); |
2576 | 460 | index_state_init(istate, istate->repo); |
2577 | 460 | } |
2578 | | |
2579 | | /* |
2580 | | * Validate the cache entries of this index. |
2581 | | * All cache entries associated with this index |
2582 | | * should have been allocated by the memory pool |
2583 | | * associated with this index, or by a referenced |
2584 | | * split index. |
2585 | | */ |
2586 | | void validate_cache_entries(const struct index_state *istate) |
2587 | 460 | { |
2588 | 460 | int i; |
2589 | | |
2590 | 460 | if (!should_validate_cache_entries() || !istate || !istate->initialized)
2591 | 460 | return; |
2592 | | |
2593 | 0 | for (i = 0; i < istate->cache_nr; i++) { |
2594 | 0 | if (!istate) { |
2595 | 0 | BUG("cache entry is not allocated from expected memory pool"); |
2596 | 0 | } else if (!istate->ce_mem_pool || |
2597 | 0 | !mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) { |
2598 | 0 | if (!istate->split_index || |
2599 | 0 | !istate->split_index->base || |
2600 | 0 | !istate->split_index->base->ce_mem_pool || |
2601 | 0 | !mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) { |
2602 | 0 | BUG("cache entry is not allocated from expected memory pool"); |
2603 | 0 | } |
2604 | 0 | } |
2605 | 0 | } |
2606 | | |
2607 | 0 | if (istate->split_index) |
2608 | 0 | validate_cache_entries(istate->split_index->base); |
2609 | 0 | } |
2610 | | |
2611 | | int unmerged_index(const struct index_state *istate) |
2612 | 0 | { |
2613 | 0 | int i; |
2614 | 0 | for (i = 0; i < istate->cache_nr; i++) { |
2615 | 0 | if (ce_stage(istate->cache[i])) |
2616 | 0 | return 1; |
2617 | 0 | } |
2618 | 0 | return 0; |
2619 | 0 | } |
2620 | | |
2621 | | int repo_index_has_changes(struct repository *repo, |
2622 | | struct tree *tree, |
2623 | | struct strbuf *sb) |
2624 | 0 | { |
2625 | 0 | struct index_state *istate = repo->index; |
2626 | 0 | struct object_id cmp; |
2627 | 0 | int i; |
2628 | |
2629 | 0 | if (tree) |
2630 | 0 | cmp = tree->object.oid; |
2631 | 0 | if (tree || !get_oid_tree("HEAD", &cmp)) { |
2632 | 0 | struct diff_options opt; |
2633 | |
2634 | 0 | repo_diff_setup(repo, &opt); |
2635 | 0 | opt.flags.exit_with_status = 1; |
2636 | 0 | if (!sb) |
2637 | 0 | opt.flags.quick = 1; |
2638 | 0 | diff_setup_done(&opt); |
2639 | 0 | do_diff_cache(&cmp, &opt); |
2640 | 0 | diffcore_std(&opt); |
2641 | 0 | for (i = 0; sb && i < diff_queued_diff.nr; i++) { |
2642 | 0 | if (i) |
2643 | 0 | strbuf_addch(sb, ' '); |
2644 | 0 | strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path); |
2645 | 0 | } |
2646 | 0 | diff_flush(&opt); |
2647 | 0 | return opt.flags.has_changes != 0; |
2648 | 0 | } else { |
2649 | | /* TODO: audit for interaction with sparse-index. */ |
2650 | 0 | ensure_full_index(istate); |
2651 | 0 | for (i = 0; sb && i < istate->cache_nr; i++) { |
2652 | 0 | if (i) |
2653 | 0 | strbuf_addch(sb, ' '); |
2654 | 0 | strbuf_addstr(sb, istate->cache[i]->name); |
2655 | 0 | } |
2656 | 0 | return !!istate->cache_nr; |
2657 | 0 | } |
2658 | 0 | } |
2659 | | |
2660 | | static int write_index_ext_header(struct hashfile *f, |
2661 | | git_hash_ctx *eoie_f, |
2662 | | unsigned int ext, |
2663 | | unsigned int sz) |
2664 | 0 | { |
2665 | 0 | hashwrite_be32(f, ext); |
2666 | 0 | hashwrite_be32(f, sz); |
2667 | |
2668 | 0 | if (eoie_f) { |
2669 | 0 | ext = htonl(ext); |
2670 | 0 | sz = htonl(sz); |
2671 | 0 | the_hash_algo->update_fn(eoie_f, &ext, sizeof(ext)); |
2672 | 0 | the_hash_algo->update_fn(eoie_f, &sz, sizeof(sz)); |
2673 | 0 | } |
2674 | 0 | return 0; |
2675 | 0 | } |
2676 | | |
2677 | | static void ce_smudge_racily_clean_entry(struct index_state *istate, |
2678 | | struct cache_entry *ce) |
2679 | 0 | { |
2680 | | /* |
2681 | | * The only thing we care about in this function is to smudge the |
2682 | | * falsely clean entry due to touch-update-touch race, so we leave |
2683 | | * everything else as they are. We are called for entries whose |
2684 | | * ce_stat_data.sd_mtime match the index file mtime. |
2685 | | * |
2686 | | * Note that this actually does not do much for gitlinks, for |
2687 | | * which ce_match_stat_basic() always goes to the actual |
2688 | | * contents. The caller checks with is_racy_timestamp() which |
2689 | | * always says "no" for gitlinks, so we are not called for them ;-) |
2690 | | */ |
2691 | 0 | struct stat st; |
2692 | |
2693 | 0 | if (lstat(ce->name, &st) < 0) |
2694 | 0 | return; |
2695 | 0 | if (ce_match_stat_basic(ce, &st)) |
2696 | 0 | return; |
2697 | 0 | if (ce_modified_check_fs(istate, ce, &st)) { |
2698 | | /* This is "racily clean"; smudge it. Note that this |
2699 | | * is a tricky code. At first glance, it may appear |
2700 | | * that it can break with this sequence: |
2701 | | * |
2702 | | * $ echo xyzzy >frotz |
2703 | | * $ git-update-index --add frotz |
2704 | | * $ : >frotz |
2705 | | * $ sleep 3 |
2706 | | * $ echo filfre >nitfol |
2707 | | * $ git-update-index --add nitfol |
2708 | | * |
2709 | | * but it does not. When the second update-index runs, |
2710 | | * it notices that the entry "frotz" has the same timestamp |
2711 | | * as index, and if we were to smudge it by resetting its |
2712 | | * size to zero here, then the object name recorded |
2713 | | * in index is the 6-byte file but the cached stat information |
2714 | | * becomes zero --- which would then match what we would |
2715 | | * obtain from the filesystem next time we stat("frotz"). |
2716 | | * |
2717 | | * However, the second update-index, before calling |
2718 | | * this function, notices that the cached size is 6 |
2719 | | * bytes and what is on the filesystem is an empty |
2720 | | * file, and never calls us, so the cached size information |
2721 | | * for "frotz" stays 6 which does not match the filesystem. |
2722 | | */ |
2723 | 0 | ce->ce_stat_data.sd_size = 0; |
2724 | 0 | } |
2725 | 0 | } |
2726 | | |
2727 | | /* Copy miscellaneous fields but not the name */ |
2728 | | static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk, |
2729 | | struct cache_entry *ce) |
2730 | 0 | { |
2731 | 0 | short flags; |
2732 | 0 | const unsigned hashsz = the_hash_algo->rawsz; |
2733 | 0 | uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz); |
2734 | |
2735 | 0 | ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec); |
2736 | 0 | ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec); |
2737 | 0 | ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec); |
2738 | 0 | ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec); |
2739 | 0 | ondisk->dev = htonl(ce->ce_stat_data.sd_dev); |
2740 | 0 | ondisk->ino = htonl(ce->ce_stat_data.sd_ino); |
2741 | 0 | ondisk->mode = htonl(ce->ce_mode); |
2742 | 0 | ondisk->uid = htonl(ce->ce_stat_data.sd_uid); |
2743 | 0 | ondisk->gid = htonl(ce->ce_stat_data.sd_gid); |
2744 | 0 | ondisk->size = htonl(ce->ce_stat_data.sd_size); |
2745 | 0 | hashcpy(ondisk->data, ce->oid.hash); |
2746 | |
2747 | 0 | flags = ce->ce_flags & ~CE_NAMEMASK; |
2748 | 0 | flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce)); |
2749 | 0 | flagsp[0] = htons(flags); |
2750 | 0 | if (ce->ce_flags & CE_EXTENDED) { |
2751 | 0 | flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16); |
2752 | 0 | } |
2753 | 0 | } |
2754 | | |
2755 | | static int ce_write_entry(struct hashfile *f, struct cache_entry *ce, |
2756 | | struct strbuf *previous_name, struct ondisk_cache_entry *ondisk) |
2757 | 0 | { |
2758 | 0 | int size; |
2759 | 0 | unsigned int saved_namelen; |
2760 | 0 | int stripped_name = 0; |
2761 | 0 | static unsigned char padding[8] = { 0x00 }; |
2762 | |
2763 | 0 | if (ce->ce_flags & CE_STRIP_NAME) { |
2764 | 0 | saved_namelen = ce_namelen(ce); |
2765 | 0 | ce->ce_namelen = 0; |
2766 | 0 | stripped_name = 1; |
2767 | 0 | } |
2768 | |
2769 | 0 | size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0); |
2770 | |
2771 | 0 | if (!previous_name) { |
2772 | 0 | int len = ce_namelen(ce); |
2773 | 0 | copy_cache_entry_to_ondisk(ondisk, ce); |
2774 | 0 | hashwrite(f, ondisk, size); |
2775 | 0 | hashwrite(f, ce->name, len); |
2776 | 0 | hashwrite(f, padding, align_padding_size(size, len)); |
2777 | 0 | } else { |
2778 | 0 | int common, to_remove, prefix_size; |
2779 | 0 | unsigned char to_remove_vi[16]; |
2780 | 0 | for (common = 0; |
2781 | 0 | (ce->name[common] && |
2782 | 0 | common < previous_name->len && |
2783 | 0 | ce->name[common] == previous_name->buf[common]); |
2784 | 0 | common++) |
2785 | 0 | ; /* still matching */ |
2786 | 0 | to_remove = previous_name->len - common; |
2787 | 0 | prefix_size = encode_varint(to_remove, to_remove_vi); |
2788 | |
2789 | 0 | copy_cache_entry_to_ondisk(ondisk, ce); |
2790 | 0 | hashwrite(f, ondisk, size); |
2791 | 0 | hashwrite(f, to_remove_vi, prefix_size); |
2792 | 0 | hashwrite(f, ce->name + common, ce_namelen(ce) - common); |
2793 | 0 | hashwrite(f, padding, 1); |
2794 | |
2795 | 0 | strbuf_splice(previous_name, common, to_remove, |
2796 | 0 | ce->name + common, ce_namelen(ce) - common); |
2797 | 0 | } |
2798 | 0 | if (stripped_name) { |
2799 | 0 | ce->ce_namelen = saved_namelen; |
2800 | 0 | ce->ce_flags &= ~CE_STRIP_NAME; |
2801 | 0 | } |
2802 | |
2803 | 0 | return 0; |
2804 | 0 | } |
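| |
| | /*
| | * This is the writer side of the v4 delta encoding expanded by
| | * create_from_disk(): continuing the hypothetical example there,
| | * writing "t/helper/two.c" after "t/helper/one.c" finds common = 9,
| | * emits the varint to_remove = 5 plus the suffix "two.c", and then a
| | * single NUL instead of the 8-byte alignment padding used by v2/v3.
| | */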
2805 | | |
2806 | | /* |
2807 | | * This function verifies that index_state matches the checksum at
2808 | | * the end of the index file. Don't die on any failure; just return 0.
2809 | | */ |
2810 | | static int verify_index_from(const struct index_state *istate, const char *path) |
2811 | 0 | { |
2812 | 0 | int fd; |
2813 | 0 | ssize_t n; |
2814 | 0 | struct stat st; |
2815 | 0 | unsigned char hash[GIT_MAX_RAWSZ]; |
2816 | |
2817 | 0 | if (!istate->initialized) |
2818 | 0 | return 0; |
2819 | | |
2820 | 0 | fd = open(path, O_RDONLY); |
2821 | 0 | if (fd < 0) |
2822 | 0 | return 0; |
2823 | | |
2824 | 0 | if (fstat(fd, &st)) |
2825 | 0 | goto out; |
2826 | | |
2827 | 0 | if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz) |
2828 | 0 | goto out; |
2829 | | |
2830 | 0 | n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz); |
2831 | 0 | if (n != the_hash_algo->rawsz) |
2832 | 0 | goto out; |
2833 | | |
2834 | 0 | if (!hasheq(istate->oid.hash, hash)) |
2835 | 0 | goto out; |
2836 | | |
2837 | 0 | close(fd); |
2838 | 0 | return 1; |
2839 | | |
2840 | 0 | out: |
2841 | 0 | close(fd); |
2842 | 0 | return 0; |
2843 | 0 | } |
2844 | | |
2845 | | static int repo_verify_index(struct repository *repo) |
2846 | 0 | { |
2847 | 0 | return verify_index_from(repo->index, repo->index_file); |
2848 | 0 | } |
2849 | | |
2850 | | int has_racy_timestamp(struct index_state *istate) |
2851 | 0 | { |
2852 | 0 | int entries = istate->cache_nr; |
2853 | 0 | int i; |
2854 | |
2855 | 0 | for (i = 0; i < entries; i++) { |
2856 | 0 | struct cache_entry *ce = istate->cache[i]; |
2857 | 0 | if (is_racy_timestamp(istate, ce)) |
2858 | 0 | return 1; |
2859 | 0 | } |
2860 | 0 | return 0; |
2861 | 0 | } |
2862 | | |
2863 | | void repo_update_index_if_able(struct repository *repo, |
2864 | | struct lock_file *lockfile) |
2865 | 0 | { |
2866 | 0 | if ((repo->index->cache_changed || |
2867 | 0 | has_racy_timestamp(repo->index)) && |
2868 | 0 | repo_verify_index(repo)) |
2869 | 0 | write_locked_index(repo->index, lockfile, COMMIT_LOCK); |
2870 | 0 | else |
2871 | 0 | rollback_lock_file(lockfile); |
2872 | 0 | } |
2873 | | |
2874 | | static int record_eoie(void) |
2875 | 0 | { |
2876 | 0 | int val; |
2877 | |
2878 | 0 | if (!git_config_get_bool("index.recordendofindexentries", &val)) |
2879 | 0 | return val; |
2880 | | |
2881 | | /* |
2882 | | * As a convenience, the end of index entries extension |
2883 | | * used for threading is written by default if the user |
2884 | | * explicitly requested threaded index reads. |
2885 | | */ |
2886 | 0 | return !git_config_get_index_threads(&val) && val != 1; |
2887 | 0 | } |
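| |
| | /*
| | * E.g. "git config index.threads 4" makes this return 1 even when
| | * index.recordendofindexentries is unset, while an explicit
| | * index.threads=1 keeps the EOIE extension from being written.
| | */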
2888 | | |
2889 | | static int record_ieot(void) |
2890 | 0 | { |
2891 | 0 | int val; |
2892 | |
2893 | 0 | if (!git_config_get_bool("index.recordoffsettable", &val)) |
2894 | 0 | return val; |
2895 | | |
2896 | | /* |
2897 | | * As a convenience, the offset table used for threading is |
2898 | | * written by default if the user explicitly requested |
2899 | | * threaded index reads. |
2900 | | */ |
2901 | 0 | return !git_config_get_index_threads(&val) && val != 1; |
2902 | 0 | } |
2903 | | |
2904 | | /* |
2905 | | * On success, `tempfile` is closed. If it is the temporary file |
2906 | | * of a `struct lock_file`, we will therefore effectively perform |
2907 | | * a 'close_lock_file_gently()`. Since that is an implementation |
2908 | | * detail of lockfiles, callers of `do_write_index()` should not |
2909 | | * rely on it. |
2910 | | */ |
2911 | | static int do_write_index(struct index_state *istate, struct tempfile *tempfile, |
2912 | | int strip_extensions, unsigned flags) |
2913 | 0 | { |
2914 | 0 | uint64_t start = getnanotime(); |
2915 | 0 | struct hashfile *f; |
2916 | 0 | git_hash_ctx *eoie_c = NULL; |
2917 | 0 | struct cache_header hdr; |
2918 | 0 | int i, err = 0, removed, extended, hdr_version; |
2919 | 0 | struct cache_entry **cache = istate->cache; |
2920 | 0 | int entries = istate->cache_nr; |
2921 | 0 | struct stat st; |
2922 | 0 | struct ondisk_cache_entry ondisk; |
2923 | 0 | struct strbuf previous_name_buf = STRBUF_INIT, *previous_name; |
2924 | 0 | int drop_cache_tree = istate->drop_cache_tree; |
2925 | 0 | off_t offset; |
2926 | 0 | int csum_fsync_flag; |
2927 | 0 | int ieot_entries = 1; |
2928 | 0 | struct index_entry_offset_table *ieot = NULL; |
2929 | 0 | int nr, nr_threads; |
2930 | 0 | struct repository *r = istate->repo; |
2931 | |
2932 | 0 | f = hashfd(tempfile->fd, tempfile->filename.buf); |
2933 | |
2934 | 0 | prepare_repo_settings(r); |
2935 | 0 | f->skip_hash = r->settings.index_skip_hash; |
2936 | |
2937 | 0 | for (i = removed = extended = 0; i < entries; i++) { |
2938 | 0 | if (cache[i]->ce_flags & CE_REMOVE) |
2939 | 0 | removed++; |
2940 | | |
2941 | | /* reduce extended entries if possible */ |
2942 | 0 | cache[i]->ce_flags &= ~CE_EXTENDED; |
2943 | 0 | if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) { |
2944 | 0 | extended++; |
2945 | 0 | cache[i]->ce_flags |= CE_EXTENDED; |
2946 | 0 | } |
2947 | 0 | } |
2948 | |
2949 | 0 | if (!istate->version) |
2950 | 0 | istate->version = get_index_format_default(the_repository); |
2951 | | |
2952 | | /* demote version 3 to version 2 when the latter suffices */ |
2953 | 0 | if (istate->version == 3 || istate->version == 2) |
2954 | 0 | istate->version = extended ? 3 : 2; |
2955 | |
2956 | 0 | hdr_version = istate->version; |
2957 | |
2958 | 0 | hdr.hdr_signature = htonl(CACHE_SIGNATURE); |
2959 | 0 | hdr.hdr_version = htonl(hdr_version); |
2960 | 0 | hdr.hdr_entries = htonl(entries - removed); |
2961 | |
2962 | 0 | hashwrite(f, &hdr, sizeof(hdr)); |
2963 | |
2964 | 0 | if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads)) |
2965 | 0 | nr_threads = 1; |
2966 | |
2967 | 0 | if (nr_threads != 1 && record_ieot()) { |
2968 | 0 | int ieot_blocks, cpus; |
2969 | | |
2970 | | /*
2971 | | * Ensure the default number of IEOT blocks maps evenly to the
2972 | | * default number of threads that will process them, leaving
2973 | | * room for the thread that loads the index extensions.
2974 | | */
2975 | 0 | if (!nr_threads) { |
2976 | 0 | ieot_blocks = istate->cache_nr / THREAD_COST; |
2977 | 0 | cpus = online_cpus(); |
2978 | 0 | if (ieot_blocks > cpus - 1) |
2979 | 0 | ieot_blocks = cpus - 1; |
2980 | 0 | } else { |
2981 | 0 | ieot_blocks = nr_threads; |
2982 | 0 | if (ieot_blocks > istate->cache_nr) |
2983 | 0 | ieot_blocks = istate->cache_nr; |
2984 | 0 | } |
2985 | | |
2986 | | /* |
2987 | | * no reason to write out the IEOT extension if we don't |
2988 | | * have enough blocks to utilize multi-threading |
2989 | | */ |
2990 | 0 | if (ieot_blocks > 1) { |
2991 | 0 | ieot = xcalloc(1, sizeof(struct index_entry_offset_table) |
2992 | 0 | + (ieot_blocks * sizeof(struct index_entry_offset))); |
2993 | 0 | ieot_entries = DIV_ROUND_UP(entries, ieot_blocks); |
2994 | 0 | } |
2995 | 0 | } |
2996 | |
2997 | 0 | offset = hashfile_total(f); |
2998 | |
2999 | 0 | nr = 0; |
3000 | 0 | previous_name = (hdr_version == 4) ? &previous_name_buf : NULL; |
3001 | |
3002 | 0 | for (i = 0; i < entries; i++) { |
3003 | 0 | struct cache_entry *ce = cache[i]; |
3004 | 0 | if (ce->ce_flags & CE_REMOVE) |
3005 | 0 | continue; |
3006 | 0 | if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce)) |
3007 | 0 | ce_smudge_racily_clean_entry(istate, ce); |
3008 | 0 | if (is_null_oid(&ce->oid)) { |
3009 | 0 | static const char msg[] = "cache entry has null sha1: %s"; |
3010 | 0 | static int allow = -1; |
3011 | |
3012 | 0 | if (allow < 0) |
3013 | 0 | allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0); |
3014 | 0 | if (allow) |
3015 | 0 | warning(msg, ce->name); |
3016 | 0 | else |
3017 | 0 | err = error(msg, ce->name); |
3018 | |
3019 | 0 | drop_cache_tree = 1; |
3020 | 0 | } |
3021 | 0 | if (ieot && i && (i % ieot_entries == 0)) { |
3022 | 0 | ieot->entries[ieot->nr].nr = nr; |
3023 | 0 | ieot->entries[ieot->nr].offset = offset; |
3024 | 0 | ieot->nr++; |
3025 | | /*
3026 | | * If we have a V4 index, set the first byte to an invalid
3027 | | * character to ensure the entry shares no prefix with the
3028 | | * previous one.
3029 | | */
3030 | 0 | if (previous_name) |
3031 | 0 | previous_name->buf[0] = 0; |
3032 | 0 | nr = 0; |
3033 | |
3034 | 0 | offset = hashfile_total(f); |
3035 | 0 | } |
3036 | 0 | if (ce_write_entry(f, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0) |
3037 | 0 | err = -1; |
3038 | |
3039 | 0 | if (err) |
3040 | 0 | break; |
3041 | 0 | nr++; |
3042 | 0 | } |
3043 | 0 | if (ieot && nr) { |
3044 | 0 | ieot->entries[ieot->nr].nr = nr; |
3045 | 0 | ieot->entries[ieot->nr].offset = offset; |
3046 | 0 | ieot->nr++; |
3047 | 0 | } |
3048 | 0 | strbuf_release(&previous_name_buf); |
3049 | |
3050 | 0 | if (err) { |
3051 | 0 | free(ieot); |
3052 | 0 | return err; |
3053 | 0 | } |
3054 | | |
3055 | 0 | offset = hashfile_total(f); |
3056 | | |
3057 | | /* |
3058 | | * The extension headers must be hashed on their own for the |
3059 | | * EOIE extension. Create a hashfile here to compute that hash. |
3060 | | */ |
3061 | 0 | if (offset && record_eoie()) { |
3062 | 0 | CALLOC_ARRAY(eoie_c, 1); |
3063 | 0 | the_hash_algo->init_fn(eoie_c); |
3064 | 0 | } |
3065 | | |
3066 | | /* |
3067 | | * Let's write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
3068 | | * can minimize the number of extensions we have to scan through to |
3069 | | * find it during load. Write it out regardless of the |
3070 | | * strip_extensions parameter as we need it when loading the shared |
3071 | | * index. |
3072 | | */ |
3073 | 0 | if (ieot) { |
3074 | 0 | struct strbuf sb = STRBUF_INIT; |
3075 | |
3076 | 0 | write_ieot_extension(&sb, ieot); |
3077 | 0 | err = write_index_ext_header(f, eoie_c, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0; |
3078 | 0 | hashwrite(f, sb.buf, sb.len); |
3079 | 0 | strbuf_release(&sb); |
3080 | 0 | free(ieot); |
3081 | 0 | if (err) |
3082 | 0 | return -1; |
3083 | 0 | } |
3084 | | |
3085 | 0 | if (!strip_extensions && istate->split_index && |
3086 | 0 | !is_null_oid(&istate->split_index->base_oid)) { |
3087 | 0 | struct strbuf sb = STRBUF_INIT; |
3088 | |
3089 | 0 | if (istate->sparse_index) |
3090 | 0 | die(_("cannot write split index for a sparse index")); |
3091 | | |
3092 | 0 | err = write_link_extension(&sb, istate) < 0 || |
3093 | 0 | write_index_ext_header(f, eoie_c, CACHE_EXT_LINK, |
3094 | 0 | sb.len) < 0; |
3095 | 0 | hashwrite(f, sb.buf, sb.len); |
3096 | 0 | strbuf_release(&sb); |
3097 | 0 | if (err) |
3098 | 0 | return -1; |
3099 | 0 | } |
3100 | 0 | if (!strip_extensions && !drop_cache_tree && istate->cache_tree) { |
3101 | 0 | struct strbuf sb = STRBUF_INIT; |
3102 | |
3103 | 0 | cache_tree_write(&sb, istate->cache_tree); |
3104 | 0 | err = write_index_ext_header(f, eoie_c, CACHE_EXT_TREE, sb.len) < 0; |
3105 | 0 | hashwrite(f, sb.buf, sb.len); |
3106 | 0 | strbuf_release(&sb); |
3107 | 0 | if (err) |
3108 | 0 | return -1; |
3109 | 0 | } |
3110 | 0 | if (!strip_extensions && istate->resolve_undo) { |
3111 | 0 | struct strbuf sb = STRBUF_INIT; |
3112 | |
3113 | 0 | resolve_undo_write(&sb, istate->resolve_undo); |
3114 | 0 | err = write_index_ext_header(f, eoie_c, CACHE_EXT_RESOLVE_UNDO, |
3115 | 0 | sb.len) < 0; |
3116 | 0 | hashwrite(f, sb.buf, sb.len); |
3117 | 0 | strbuf_release(&sb); |
3118 | 0 | if (err) |
3119 | 0 | return -1; |
3120 | 0 | } |
3121 | 0 | if (!strip_extensions && istate->untracked) { |
3122 | 0 | struct strbuf sb = STRBUF_INIT; |
3123 | |
3124 | 0 | write_untracked_extension(&sb, istate->untracked); |
3125 | 0 | err = write_index_ext_header(f, eoie_c, CACHE_EXT_UNTRACKED, |
3126 | 0 | sb.len) < 0; |
3127 | 0 | hashwrite(f, sb.buf, sb.len); |
3128 | 0 | strbuf_release(&sb); |
3129 | 0 | if (err) |
3130 | 0 | return -1; |
3131 | 0 | } |
3132 | 0 | if (!strip_extensions && istate->fsmonitor_last_update) { |
3133 | 0 | struct strbuf sb = STRBUF_INIT; |
3134 | |
3135 | 0 | write_fsmonitor_extension(&sb, istate); |
3136 | 0 | err = write_index_ext_header(f, eoie_c, CACHE_EXT_FSMONITOR, sb.len) < 0; |
3137 | 0 | hashwrite(f, sb.buf, sb.len); |
3138 | 0 | strbuf_release(&sb); |
3139 | 0 | if (err) |
3140 | 0 | return -1; |
3141 | 0 | } |
3142 | 0 | if (istate->sparse_index) { |
3143 | 0 | if (write_index_ext_header(f, eoie_c, CACHE_EXT_SPARSE_DIRECTORIES, 0) < 0) |
3144 | 0 | return -1; |
3145 | 0 | } |
3146 | | |
3147 | | /* |
3148 | | * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1 |
3149 | | * so that it can be found and processed before all the index entries are |
3150 | | * read. Write it out regardless of the strip_extensions parameter as we need it |
3151 | | * when loading the shared index. |
3152 | | */ |
3153 | 0 | if (eoie_c) { |
3154 | 0 | struct strbuf sb = STRBUF_INIT; |
3155 | |
3156 | 0 | write_eoie_extension(&sb, eoie_c, offset); |
3157 | 0 | err = write_index_ext_header(f, NULL, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0; |
3158 | 0 | hashwrite(f, sb.buf, sb.len); |
3159 | 0 | strbuf_release(&sb); |
3160 | 0 | if (err) |
3161 | 0 | return -1; |
3162 | 0 | } |
3163 | | |
3164 | 0 | csum_fsync_flag = 0; |
3165 | 0 | if (!alternate_index_output && (flags & COMMIT_LOCK)) |
3166 | 0 | csum_fsync_flag = CSUM_FSYNC; |
3167 | |
3168 | 0 | finalize_hashfile(f, istate->oid.hash, FSYNC_COMPONENT_INDEX, |
3169 | 0 | CSUM_HASH_IN_STREAM | csum_fsync_flag); |
3170 | |
3171 | 0 | if (close_tempfile_gently(tempfile)) { |
3172 | 0 | error(_("could not close '%s'"), get_tempfile_path(tempfile)); |
3173 | 0 | return -1; |
3174 | 0 | } |
3175 | 0 | if (stat(get_tempfile_path(tempfile), &st)) |
3176 | 0 | return -1; |
3177 | 0 | istate->timestamp.sec = (unsigned int)st.st_mtime; |
3178 | 0 | istate->timestamp.nsec = ST_MTIME_NSEC(st); |
3179 | 0 | trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed); |
3180 | | |
3181 | | /* |
3182 | | * TODO trace2: replace "the_repository" with the actual repo instance |
3183 | | * that is associated with the given "istate". |
3184 | | */ |
3185 | 0 | trace2_data_intmax("index", the_repository, "write/version", |
3186 | 0 | istate->version); |
3187 | 0 | trace2_data_intmax("index", the_repository, "write/cache_nr", |
3188 | 0 | istate->cache_nr); |
3189 | |
3190 | 0 | return 0; |
3191 | 0 | } |
3192 | | |
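 | | /*
 | | * Illustration (not part of read-cache.c): the writer above emits,
 | | * in order, the 12-byte header, the (possibly prefix-compressed)
 | | * cache entries, the extensions, and the trailing checksum, with
 | | * all header fields in network byte order. A minimal sketch of
 | | * just the header, using a standalone type instead of
 | | * struct cache_header:
 | | *
 | | *     #include <arpa/inet.h>
 | | *     #include <stdint.h>
 | | *
 | | *     struct hdr_sketch { uint32_t sig, version, entries; };
 | | *
 | | *     static void fill_hdr(struct hdr_sketch *h, uint32_t version,
 | | *                          uint32_t nr)
 | | *     {
 | | *             h->sig = htonl(0x44495243);   // "DIRC"
 | | *             h->version = htonl(version);  // 2, 3 or 4
 | | *             h->entries = htonl(nr);       // entries - removed
 | | *     }
 | | */
 | |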
3193 | | void set_alternate_index_output(const char *name) |
3194 | 0 | { |
3195 | 0 | alternate_index_output = name; |
3196 | 0 | } |
3197 | | |
3198 | | static int commit_locked_index(struct lock_file *lk) |
3199 | 0 | { |
3200 | 0 | if (alternate_index_output) |
3201 | 0 | return commit_lock_file_to(lk, alternate_index_output); |
3202 | 0 | else |
3203 | 0 | return commit_lock_file(lk); |
3204 | 0 | } |
3205 | | |
3206 | | static int do_write_locked_index(struct index_state *istate, struct lock_file *lock, |
3207 | | unsigned flags) |
3208 | 0 | { |
3209 | 0 | int ret; |
3210 | 0 | int was_full = istate->sparse_index == INDEX_EXPANDED; |
3211 | |
3212 | 0 | ret = convert_to_sparse(istate, 0); |
3213 | |
3214 | 0 | if (ret) { |
3215 | 0 | warning(_("failed to convert to a sparse-index")); |
3216 | 0 | return ret; |
3217 | 0 | } |
3218 | | |
3219 | | /* |
3220 | | * TODO trace2: replace "the_repository" with the actual repo instance |
3221 | | * that is associated with the given "istate". |
3222 | | */ |
3223 | 0 | trace2_region_enter_printf("index", "do_write_index", the_repository, |
3224 | 0 | "%s", get_lock_file_path(lock)); |
3225 | 0 | ret = do_write_index(istate, lock->tempfile, 0, flags); |
3226 | 0 | trace2_region_leave_printf("index", "do_write_index", the_repository, |
3227 | 0 | "%s", get_lock_file_path(lock)); |
3228 | |
3229 | 0 | if (was_full) |
3230 | 0 | ensure_full_index(istate); |
3231 | |
3232 | 0 | if (ret) |
3233 | 0 | return ret; |
3234 | 0 | if (flags & COMMIT_LOCK) |
3235 | 0 | ret = commit_locked_index(lock); |
3236 | 0 | else |
3237 | 0 | ret = close_lock_file_gently(lock); |
3238 | |
3239 | 0 | run_hooks_l("post-index-change", |
3240 | 0 | istate->updated_workdir ? "1" : "0", |
3241 | 0 | istate->updated_skipworktree ? "1" : "0", NULL); |
3242 | 0 | istate->updated_workdir = 0; |
3243 | 0 | istate->updated_skipworktree = 0; |
3244 | |
3245 | 0 | return ret; |
3246 | 0 | } |
3247 | | |
3248 | | static int write_split_index(struct index_state *istate, |
3249 | | struct lock_file *lock, |
3250 | | unsigned flags) |
3251 | 0 | { |
3252 | 0 | int ret; |
3253 | 0 | prepare_to_write_split_index(istate); |
3254 | 0 | ret = do_write_locked_index(istate, lock, flags); |
3255 | 0 | finish_writing_split_index(istate); |
3256 | 0 | return ret; |
3257 | 0 | } |
3258 | | |
3259 | | static const char *shared_index_expire = "2.weeks.ago"; |
3260 | | |
3261 | | static unsigned long get_shared_index_expire_date(void) |
3262 | 0 | { |
3263 | 0 | static unsigned long shared_index_expire_date; |
3264 | 0 | static int shared_index_expire_date_prepared; |
3265 | |
3266 | 0 | if (!shared_index_expire_date_prepared) { |
3267 | 0 | git_config_get_expiry("splitindex.sharedindexexpire", |
3268 | 0 | &shared_index_expire); |
3269 | 0 | shared_index_expire_date = approxidate(shared_index_expire); |
3270 | 0 | shared_index_expire_date_prepared = 1; |
3271 | 0 | } |
3272 | |
3273 | 0 | return shared_index_expire_date; |
3274 | 0 | } |
3275 | | |
3276 | | static int should_delete_shared_index(const char *shared_index_path) |
3277 | 0 | { |
3278 | 0 | struct stat st; |
3279 | 0 | unsigned long expiration; |
3280 | | |
3281 | | /* Check timestamp */ |
3282 | 0 | expiration = get_shared_index_expire_date(); |
3283 | 0 | if (!expiration) |
3284 | 0 | return 0; |
3285 | 0 | if (stat(shared_index_path, &st)) |
3286 | 0 | return error_errno(_("could not stat '%s'"), shared_index_path); |
3287 | 0 | if (st.st_mtime > expiration) |
3288 | 0 | return 0; |
3289 | | |
3290 | 0 | return 1; |
3291 | 0 | } |
3292 | | |
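 | | /*
 | | * Illustration (not part of read-cache.c): `expiration` is an
 | | * absolute cutoff computed by approxidate(), so with the default
 | | * "2.weeks.ago" the mtime test above amounts to roughly:
 | | *
 | | *     #include <time.h>
 | | *
 | | *     time_t cutoff = time(NULL) - 14 * 24 * 60 * 60;
 | | *     // keep if st.st_mtime > cutoff, delete it otherwise
 | | */
 | |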
3293 | | static int clean_shared_index_files(const char *current_hex) |
3294 | 0 | { |
3295 | 0 | struct dirent *de; |
3296 | 0 | DIR *dir = opendir(get_git_dir()); |
3297 | |
3298 | 0 | if (!dir) |
3299 | 0 | return error_errno(_("unable to open git dir: %s"), get_git_dir()); |
3300 | | |
3301 | 0 | while ((de = readdir(dir)) != NULL) { |
3302 | 0 | const char *sha1_hex; |
3303 | 0 | const char *shared_index_path; |
3304 | 0 | if (!skip_prefix(de->d_name, "sharedindex.", &sha1_hex)) |
3305 | 0 | continue; |
3306 | 0 | if (!strcmp(sha1_hex, current_hex)) |
3307 | 0 | continue; |
3308 | 0 | shared_index_path = git_path("%s", de->d_name); |
3309 | 0 | if (should_delete_shared_index(shared_index_path) > 0 && |
3310 | 0 | unlink(shared_index_path)) |
3311 | 0 | warning_errno(_("unable to unlink: %s"), shared_index_path); |
3312 | 0 | } |
3313 | 0 | closedir(dir); |
3314 | |
3315 | 0 | return 0; |
3316 | 0 | } |
3317 | | |
3318 | | static int write_shared_index(struct index_state *istate, |
3319 | | struct tempfile **temp, unsigned flags) |
3320 | 0 | { |
3321 | 0 | struct split_index *si = istate->split_index; |
3322 | 0 | int ret, was_full = !istate->sparse_index; |
3323 | |
3324 | 0 | move_cache_to_base_index(istate); |
3325 | 0 | convert_to_sparse(istate, 0); |
3326 | |
3327 | 0 | trace2_region_enter_printf("index", "shared/do_write_index", |
3328 | 0 | the_repository, "%s", get_tempfile_path(*temp)); |
3329 | 0 | ret = do_write_index(si->base, *temp, 1, flags); |
3330 | 0 | trace2_region_leave_printf("index", "shared/do_write_index", |
3331 | 0 | the_repository, "%s", get_tempfile_path(*temp)); |
3332 | |
3333 | 0 | if (was_full) |
3334 | 0 | ensure_full_index(istate); |
3335 | |
3336 | 0 | if (ret) |
3337 | 0 | return ret; |
3338 | 0 | ret = adjust_shared_perm(get_tempfile_path(*temp)); |
3339 | 0 | if (ret) { |
3340 | 0 | error(_("cannot fix permission bits on '%s'"), get_tempfile_path(*temp)); |
3341 | 0 | return ret; |
3342 | 0 | } |
3343 | 0 | ret = rename_tempfile(temp, |
3344 | 0 | git_path("sharedindex.%s", oid_to_hex(&si->base->oid))); |
3345 | 0 | if (!ret) { |
3346 | 0 | oidcpy(&si->base_oid, &si->base->oid); |
3347 | 0 | clean_shared_index_files(oid_to_hex(&si->base->oid)); |
3348 | 0 | } |
3349 | |
3350 | 0 | return ret; |
3351 | 0 | } |
3352 | | |
3353 | | static const int default_max_percent_split_change = 20; |
3354 | | |
3355 | | static int too_many_not_shared_entries(struct index_state *istate) |
3356 | 0 | { |
3357 | 0 | int i, not_shared = 0; |
3358 | 0 | int max_split = git_config_get_max_percent_split_change(); |
3359 | |
3360 | 0 | switch (max_split) { |
3361 | 0 | case -1: |
3362 | | /* not or badly configured: use the default value */ |
3363 | 0 | max_split = default_max_percent_split_change; |
3364 | 0 | break; |
3365 | 0 | case 0: |
3366 | 0 | return 1; /* 0% means always write a new shared index */ |
3367 | 0 | case 100: |
3368 | 0 | return 0; /* 100% means never write a new shared index */ |
3369 | 0 | default: |
3370 | 0 | break; /* just use the configured value */ |
3371 | 0 | } |
3372 | | |
3373 | | /* Count not shared entries */ |
3374 | 0 | for (i = 0; i < istate->cache_nr; i++) { |
3375 | 0 | struct cache_entry *ce = istate->cache[i]; |
3376 | 0 | if (!ce->index) |
3377 | 0 | not_shared++; |
3378 | 0 | } |
3379 | |
3380 | 0 | return (int64_t)istate->cache_nr * max_split < (int64_t)not_shared * 100; |
3381 | 0 | } |
3382 | | |
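 | | /*
 | | * Illustration (not part of read-cache.c): the final comparison is
 | | * "not_shared / cache_nr > max_split percent", cross-multiplied and
 | | * evaluated in 64 bits so that cache_nr * max_split cannot overflow
 | | * an int. With 1000 entries at the default 20%:
 | | *
 | | *     #include <stdint.h>
 | | *
 | | *     static int too_many(int cache_nr, int not_shared, int max_split)
 | | *     {
 | | *             return (int64_t)cache_nr * max_split <
 | | *                    (int64_t)not_shared * 100;
 | | *     }
 | | *
 | | *     // too_many(1000, 200, 20) == 0; too_many(1000, 201, 20) == 1
 | | */
 | |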
3383 | | int write_locked_index(struct index_state *istate, struct lock_file *lock, |
3384 | | unsigned flags) |
3385 | 0 | { |
3386 | 0 | int new_shared_index, ret, test_split_index_env; |
3387 | 0 | struct split_index *si = istate->split_index; |
3388 | |
3389 | 0 | if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0)) |
3390 | 0 | cache_tree_verify(the_repository, istate); |
3391 | |
3392 | 0 | if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) { |
3393 | 0 | if (flags & COMMIT_LOCK) |
3394 | 0 | rollback_lock_file(lock); |
3395 | 0 | return 0; |
3396 | 0 | } |
3397 | | |
3398 | 0 | if (istate->fsmonitor_last_update) |
3399 | 0 | fill_fsmonitor_bitmap(istate); |
3400 | |
3401 | 0 | test_split_index_env = git_env_bool("GIT_TEST_SPLIT_INDEX", 0); |
3402 | |
3403 | 0 | if ((!si && !test_split_index_env) || |
3404 | 0 | alternate_index_output || |
3405 | 0 | (istate->cache_changed & ~EXTMASK)) { |
3406 | 0 | if (si) |
3407 | 0 | oidclr(&si->base_oid); |
3408 | 0 | ret = do_write_locked_index(istate, lock, flags); |
3409 | 0 | goto out; |
3410 | 0 | } |
3411 | | |
3412 | 0 | if (test_split_index_env) { |
3413 | 0 | if (!si) { |
3414 | 0 | si = init_split_index(istate); |
3415 | 0 | istate->cache_changed |= SPLIT_INDEX_ORDERED; |
3416 | 0 | } else { |
3417 | 0 | int v = si->base_oid.hash[0]; |
3418 | 0 | if ((v & 15) < 6) |
3419 | 0 | istate->cache_changed |= SPLIT_INDEX_ORDERED; |
3420 | 0 | } |
3421 | 0 | } |
3422 | 0 | if (too_many_not_shared_entries(istate)) |
3423 | 0 | istate->cache_changed |= SPLIT_INDEX_ORDERED; |
3424 | |
3425 | 0 | new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED; |
3426 | |
3427 | 0 | if (new_shared_index) { |
3428 | 0 | struct tempfile *temp; |
3429 | 0 | int saved_errno; |
3430 | | |
3431 | | /* Same initial permissions as the main .git/index file */ |
3432 | 0 | temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666); |
3433 | 0 | if (!temp) { |
3434 | 0 | oidclr(&si->base_oid); |
3435 | 0 | ret = do_write_locked_index(istate, lock, flags); |
3436 | 0 | goto out; |
3437 | 0 | } |
3438 | 0 | ret = write_shared_index(istate, &temp, flags); |
3439 | |
3440 | 0 | saved_errno = errno; |
3441 | 0 | if (is_tempfile_active(temp)) |
3442 | 0 | delete_tempfile(&temp); |
3443 | 0 | errno = saved_errno; |
3444 | |
3445 | 0 | if (ret) |
3446 | 0 | goto out; |
3447 | 0 | } |
3448 | | |
3449 | 0 | ret = write_split_index(istate, lock, flags); |
3450 | | |
3451 | | /* Freshen the shared index only if the split-index was written */ |
3452 | 0 | if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) { |
3453 | 0 | const char *shared_index = git_path("sharedindex.%s", |
3454 | 0 | oid_to_hex(&si->base_oid)); |
3455 | 0 | freshen_shared_index(shared_index, 1); |
3456 | 0 | } |
3457 | |
3458 | 0 | out: |
3459 | 0 | if (flags & COMMIT_LOCK) |
3460 | 0 | rollback_lock_file(lock); |
3461 | 0 | return ret; |
3462 | 0 | } |
3463 | | |
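 | | /*
 | | * Illustration (not part of read-cache.c): the decision ladder in
 | | * write_locked_index() above, summarized --
 | | *
 | | *     no split index (and no test env), an alternate output path,
 | | *     or non-extension changes   ->  plain do_write_locked_index()
 | | *
 | | *     SPLIT_INDEX_ORDERED ends up set (too many unshared entries,
 | | *     test env, or explicit      ->  write a new sharedindex.<oid>,
 | | *     ordering)                      then the split .git/index
 | | *
 | | *     otherwise                  ->  write only the split .git/index
 | | *                                    and freshen the shared file
 | | */
 | |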
3464 | | /* |
3465 | | * Read the index file that is potentially unmerged into the given
3466 | | * index_state, dropping any unmerged entries to stage #0 (potentially
3467 | | * resulting in a path appearing as both a file and a directory in the
3468 | | * index; the caller is responsible for clearing out the extra entries
3469 | | * before writing the index to a tree). Returns true if the index is |
3470 | | * unmerged. Callers who want to refuse to work from an unmerged |
3471 | | * state can call this and check its return value, instead of calling |
3472 | | * read_cache(). |
3473 | | */ |
3474 | | int repo_read_index_unmerged(struct repository *repo) |
3475 | 0 | { |
3476 | 0 | struct index_state *istate; |
3477 | 0 | int i; |
3478 | 0 | int unmerged = 0; |
3479 | |
3480 | 0 | repo_read_index(repo); |
3481 | 0 | istate = repo->index; |
3482 | 0 | for (i = 0; i < istate->cache_nr; i++) { |
3483 | 0 | struct cache_entry *ce = istate->cache[i]; |
3484 | 0 | struct cache_entry *new_ce; |
3485 | 0 | int len; |
3486 | |
3487 | 0 | if (!ce_stage(ce)) |
3488 | 0 | continue; |
3489 | 0 | unmerged = 1; |
3490 | 0 | len = ce_namelen(ce); |
3491 | 0 | new_ce = make_empty_cache_entry(istate, len); |
3492 | 0 | memcpy(new_ce->name, ce->name, len); |
3493 | 0 | new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED; |
3494 | 0 | new_ce->ce_namelen = len; |
3495 | 0 | new_ce->ce_mode = ce->ce_mode; |
3496 | 0 | if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK)) |
3497 | 0 | return error(_("%s: cannot drop to stage #0"), |
3498 | 0 | new_ce->name); |
3499 | 0 | } |
3500 | 0 | return unmerged; |
3501 | 0 | } |
3502 | | |
3503 | | /* |
3504 | | * Returns 1 if the path is an "other" path with respect to |
3505 | | * the index; that is, the path is not mentioned in the index at all, |
3506 | | * either as a file, a directory with some files in the index, |
3507 | | * or as an unmerged entry. |
3508 | | * |
3509 | | * We helpfully remove a trailing "/" from directories so that |
3510 | | * the output of read_directory can be used as-is. |
3511 | | */ |
3512 | | int index_name_is_other(struct index_state *istate, const char *name, |
3513 | | int namelen) |
3514 | 0 | { |
3515 | 0 | int pos; |
3516 | 0 | if (namelen && name[namelen - 1] == '/') |
3517 | 0 | namelen--; |
3518 | 0 | pos = index_name_pos(istate, name, namelen); |
3519 | 0 | if (0 <= pos) |
3520 | 0 | return 0; /* exact match */ |
3521 | 0 | pos = -pos - 1; |
3522 | 0 | if (pos < istate->cache_nr) { |
3523 | 0 | struct cache_entry *ce = istate->cache[pos]; |
3524 | 0 | if (ce_namelen(ce) == namelen && |
3525 | 0 | !memcmp(ce->name, name, namelen)) |
3526 | 0 | return 0; /* Yup, this one exists unmerged */ |
3527 | 0 | } |
3528 | 0 | return 1; |
3529 | 0 | } |
3530 | | |
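 | | /*
 | | * Illustration (not part of read-cache.c): index_name_pos() follows
 | | * the "negated insertion point" convention relied on above -- a miss
 | | * returns -(slot where the name would sort) - 1, which the caller
 | | * undoes with -pos - 1. A self-contained stand-in over a sorted
 | | * string array:
 | | *
 | | *     #include <string.h>
 | | *
 | | *     static int name_pos(const char **names, int nr, const char *name)
 | | *     {
 | | *             int lo = 0, hi = nr;
 | | *
 | | *             while (lo < hi) {
 | | *                     int mid = lo + (hi - lo) / 2;
 | | *                     int cmp = strcmp(names[mid], name);
 | | *
 | | *                     if (!cmp)
 | | *                             return mid;    // exact match
 | | *                     if (cmp < 0)
 | | *                             lo = mid + 1;
 | | *                     else
 | | *                             hi = mid;
 | | *             }
 | | *             return -lo - 1;    // miss: negated insertion point
 | | *     }
 | | */
 | |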
3531 | | void *read_blob_data_from_index(struct index_state *istate, |
3532 | | const char *path, unsigned long *size) |
3533 | 0 | { |
3534 | 0 | int pos, len; |
3535 | 0 | unsigned long sz; |
3536 | 0 | enum object_type type; |
3537 | 0 | void *data; |
3538 | |
3539 | 0 | len = strlen(path); |
3540 | 0 | pos = index_name_pos(istate, path, len); |
3541 | 0 | if (pos < 0) { |
3542 | | /* |
3543 | | * We might be in the middle of a merge, in which |
3544 | | * case we would read stage #2 (ours). |
3545 | | */ |
3546 | 0 | int i; |
3547 | 0 | for (i = -pos - 1; |
3548 | 0 | (pos < 0 && i < istate->cache_nr && |
3549 | 0 | !strcmp(istate->cache[i]->name, path)); |
3550 | 0 | i++) |
3551 | 0 | if (ce_stage(istate->cache[i]) == 2) |
3552 | 0 | pos = i; |
3553 | 0 | } |
3554 | 0 | if (pos < 0) |
3555 | 0 | return NULL; |
3556 | 0 | data = read_object_file(&istate->cache[pos]->oid, &type, &sz); |
3557 | 0 | if (!data || type != OBJ_BLOB) { |
3558 | 0 | free(data); |
3559 | 0 | return NULL; |
3560 | 0 | } |
3561 | 0 | if (size) |
3562 | 0 | *size = sz; |
3563 | 0 | return data; |
3564 | 0 | } |
3565 | | |
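 | | /*
 | | * Illustration (not part of read-cache.c): a hypothetical caller of
 | | * the helper above; the returned buffer belongs to the caller and
 | | * must be freed:
 | | *
 | | *     unsigned long size;
 | | *     void *data = read_blob_data_from_index(istate, ".gitattributes",
 | | *                                            &size);
 | | *
 | | *     if (data) {
 | | *             // ... use `size` bytes at `data` ...
 | | *             free(data);
 | | *     }
 | | */
 | |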
3566 | | void stat_validity_clear(struct stat_validity *sv) |
3567 | 460 | { |
3568 | 460 | FREE_AND_NULL(sv->sd); |
3569 | 460 | } |
3570 | | |
3571 | | int stat_validity_check(struct stat_validity *sv, const char *path) |
3572 | 0 | { |
3573 | 0 | struct stat st; |
3574 | |
3575 | 0 | if (stat(path, &st) < 0) |
3576 | 0 | return sv->sd == NULL; |
3577 | 0 | if (!sv->sd) |
3578 | 0 | return 0; |
3579 | 0 | return S_ISREG(st.st_mode) && !match_stat_data(sv->sd, &st); |
3580 | 0 | } |
3581 | | |
3582 | | void stat_validity_update(struct stat_validity *sv, int fd) |
3583 | 0 | { |
3584 | 0 | struct stat st; |
3585 | |
3586 | 0 | if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode)) |
3587 | 0 | stat_validity_clear(sv); |
3588 | 0 | else { |
3589 | 0 | if (!sv->sd) |
3590 | 0 | CALLOC_ARRAY(sv->sd, 1); |
3591 | 0 | fill_stat_data(sv->sd, &st); |
3592 | 0 | } |
3593 | 0 | } |
3594 | | |
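 | | /*
 | | * Illustration (not part of read-cache.c): the three stat_validity
 | | * helpers are meant to be used together -- check whether a file is
 | | * still the one last read, and refresh the saved stat data after
 | | * re-reading it. A hypothetical caller, assuming the struct
 | | * stat_validity declaration from cache.h:
 | | *
 | | *     #include <fcntl.h>
 | | *     #include <unistd.h>
 | | *
 | | *     static struct stat_validity sv;
 | | *
 | | *     static void reload_if_changed(const char *path)
 | | *     {
 | | *             int fd;
 | | *
 | | *             if (stat_validity_check(&sv, path))
 | | *                     return;    // unchanged (or still absent)
 | | *             fd = open(path, O_RDONLY);
 | | *             if (fd < 0) {
 | | *                     stat_validity_clear(&sv);
 | | *                     return;
 | | *             }
 | | *             // ... re-read the file contents here ...
 | | *             stat_validity_update(&sv, fd);
 | | *             close(fd);
 | | *     }
 | | */
 | |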
3595 | | void move_index_extensions(struct index_state *dst, struct index_state *src) |
3596 | 0 | { |
3597 | 0 | dst->untracked = src->untracked; |
3598 | 0 | src->untracked = NULL; |
3599 | 0 | dst->cache_tree = src->cache_tree; |
3600 | 0 | src->cache_tree = NULL; |
3601 | 0 | } |
3602 | | |
3603 | | struct cache_entry *dup_cache_entry(const struct cache_entry *ce, |
3604 | | struct index_state *istate) |
3605 | 0 | { |
3606 | 0 | unsigned int size = ce_size(ce); |
3607 | 0 | int mem_pool_allocated; |
3608 | 0 | struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce)); |
3609 | 0 | mem_pool_allocated = new_entry->mem_pool_allocated; |
3610 | |
3611 | 0 | memcpy(new_entry, ce, size); |
3612 | 0 | new_entry->mem_pool_allocated = mem_pool_allocated; |
3613 | 0 | return new_entry; |
3614 | 0 | } |
3615 | | |
3616 | | void discard_cache_entry(struct cache_entry *ce) |
3617 | 0 | { |
3618 | 0 | if (ce && should_validate_cache_entries()) |
3619 | 0 | memset(ce, 0xCD, cache_entry_size(ce->ce_namelen)); |
3620 | |
3621 | 0 | if (ce && ce->mem_pool_allocated) |
3622 | 0 | return; |
3623 | | |
3624 | 0 | free(ce); |
3625 | 0 | } |
3626 | | |
3627 | | int should_validate_cache_entries(void) |
3628 | 460 | { |
3629 | 460 | static int validate_index_cache_entries = -1; |
3630 | | |
3631 | 460 | if (validate_index_cache_entries < 0) { |
3632 | 1 | if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES")) |
3633 | 0 | validate_index_cache_entries = 1; |
3634 | 1 | else |
3635 | 1 | validate_index_cache_entries = 0; |
3636 | 1 | } |
3637 | | |
3638 | 460 | return validate_index_cache_entries; |
3639 | 460 | } |
3640 | | |
3641 | 0 | #define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */ |
3642 | 0 | #define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */ |
3643 | | |
3644 | | static size_t read_eoie_extension(const char *mmap, size_t mmap_size) |
3645 | 0 | { |
3646 | | /* |
3647 | | * The end of index entries (EOIE) extension is guaranteed to be last |
3648 | | * so that it can be found by scanning backwards from the EOF. |
3649 | | * |
3650 | | * "EOIE" |
3651 | | * <4-byte length> |
3652 | | * <4-byte offset> |
3653 | | * <20-byte hash> |
3654 | | */ |
3655 | 0 | const char *index, *eoie; |
3656 | 0 | uint32_t extsize; |
3657 | 0 | size_t offset, src_offset; |
3658 | 0 | unsigned char hash[GIT_MAX_RAWSZ]; |
3659 | 0 | git_hash_ctx c; |
3660 | | |
3661 | | /* ensure we have an index big enough to contain an EOIE extension */ |
3662 | 0 | if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz) |
3663 | 0 | return 0; |
3664 | | |
3665 | | /* validate the extension signature */ |
3666 | 0 | index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz; |
3667 | 0 | if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES) |
3668 | 0 | return 0; |
3669 | 0 | index += sizeof(uint32_t); |
3670 | | |
3671 | | /* validate the extension size */ |
3672 | 0 | extsize = get_be32(index); |
3673 | 0 | if (extsize != EOIE_SIZE) |
3674 | 0 | return 0; |
3675 | 0 | index += sizeof(uint32_t); |
3676 | | |
3677 | | /* |
3678 | | * Validate that the first extension's offset lies after the index
3679 | | * header and before the EOIE extension itself.
3680 | | */ |
3681 | 0 | offset = get_be32(index); |
3682 | 0 | if (mmap + offset < mmap + sizeof(struct cache_header)) |
3683 | 0 | return 0; |
3684 | 0 | if (mmap + offset >= eoie) |
3685 | 0 | return 0; |
3686 | 0 | index += sizeof(uint32_t); |
3687 | | |
3688 | | /* |
3689 | | * The hash is computed over extension types and their sizes (but not |
3690 | | * their contents). E.g. if we have "TREE" extension that is N-bytes |
3691 | | * long, "REUC" extension that is M-bytes long, followed by "EOIE", |
3692 | | * then the hash would be: |
3693 | | * |
3694 | | * SHA-1("TREE" + <binary representation of N> + |
3695 | | * "REUC" + <binary representation of M>) |
3696 | | */ |
3697 | 0 | src_offset = offset; |
3698 | 0 | the_hash_algo->init_fn(&c); |
3699 | 0 | while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) { |
3700 | | /* After an array of active_nr index entries,
3701 | | * there can be an arbitrary number of extended
3702 | | * sections, each of which is prefixed with the
3703 | | * extension name (4 bytes) and the section length
3704 | | * in 4-byte network byte order.
3705 | | */
3706 | 0 | uint32_t extsize; |
3707 | 0 | memcpy(&extsize, mmap + src_offset + 4, 4); |
3708 | 0 | extsize = ntohl(extsize); |
3709 | | |
3710 | | /* verify the extension size isn't so large it will wrap around */ |
3711 | 0 | if (src_offset + 8 + extsize < src_offset) |
3712 | 0 | return 0; |
3713 | | |
3714 | 0 | the_hash_algo->update_fn(&c, mmap + src_offset, 8); |
3715 | |
3716 | 0 | src_offset += 8; |
3717 | 0 | src_offset += extsize; |
3718 | 0 | } |
3719 | 0 | the_hash_algo->final_fn(hash, &c); |
3720 | 0 | if (!hasheq(hash, (const unsigned char *)index)) |
3721 | 0 | return 0; |
3722 | | |
3723 | | /* Validate that walking the extension offsets brought us back to the EOIE extension. */
3724 | 0 | if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) |
3725 | 0 | return 0; |
3726 | | |
3727 | 0 | return offset; |
3728 | 0 | } |
3729 | | |
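 | | /*
 | | * Illustration (not part of read-cache.c): with SHA-1 (rawsz = 20)
 | | * the EOIE record always starts at a fixed distance from EOF:
 | | * mmap_size - 20 (trailing hash) - (4 + 4 + 4 + 20) = mmap_size - 52.
 | | * get_be32() above decodes the network-byte-order fields; a
 | | * standalone equivalent:
 | | *
 | | *     #include <stdint.h>
 | | *
 | | *     static uint32_t be32dec(const unsigned char *p)
 | | *     {
 | | *             return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
 | | *                    ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
 | | *     }
 | | */
 | |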
3730 | | static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset) |
3731 | 0 | { |
3732 | 0 | uint32_t buffer; |
3733 | 0 | unsigned char hash[GIT_MAX_RAWSZ]; |
3734 | | |
3735 | | /* offset */ |
3736 | 0 | put_be32(&buffer, offset); |
3737 | 0 | strbuf_add(sb, &buffer, sizeof(uint32_t)); |
3738 | | |
3739 | | /* hash */ |
3740 | 0 | the_hash_algo->final_fn(hash, eoie_context); |
3741 | 0 | strbuf_add(sb, hash, the_hash_algo->rawsz); |
3742 | 0 | } |
3743 | | |
3744 | 0 | #define IEOT_VERSION (1) |
3745 | | |
3746 | | static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset) |
3747 | 0 | { |
3748 | 0 | const char *index = NULL; |
3749 | 0 | uint32_t extsize, ext_version; |
3750 | 0 | struct index_entry_offset_table *ieot; |
3751 | 0 | int i, nr; |
3752 | | |
3753 | | /* find the IEOT extension */ |
3754 | 0 | if (!offset) |
3755 | 0 | return NULL; |
3756 | 0 | while (offset <= mmap_size - the_hash_algo->rawsz - 8) { |
3757 | 0 | extsize = get_be32(mmap + offset + 4); |
3758 | 0 | if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) { |
3759 | 0 | index = mmap + offset + 4 + 4; |
3760 | 0 | break; |
3761 | 0 | } |
3762 | 0 | offset += 8; |
3763 | 0 | offset += extsize; |
3764 | 0 | } |
3765 | 0 | if (!index) |
3766 | 0 | return NULL; |
3767 | | |
3768 | | /* validate the version is IEOT_VERSION */ |
3769 | 0 | ext_version = get_be32(index); |
3770 | 0 | if (ext_version != IEOT_VERSION) { |
3771 | 0 | error("invalid IEOT version %d", ext_version); |
3772 | 0 | return NULL; |
3773 | 0 | } |
3774 | 0 | index += sizeof(uint32_t); |
3775 | | |
3776 | | /* (extension size - version field) / (bytes per entry) */
3777 | 0 | nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t)); |
3778 | 0 | if (!nr) { |
3779 | 0 | error("invalid number of IEOT entries %d", nr); |
3780 | 0 | return NULL; |
3781 | 0 | } |
3782 | 0 | ieot = xmalloc(sizeof(struct index_entry_offset_table) |
3783 | 0 | + (nr * sizeof(struct index_entry_offset))); |
3784 | 0 | ieot->nr = nr; |
3785 | 0 | for (i = 0; i < nr; i++) { |
3786 | 0 | ieot->entries[i].offset = get_be32(index); |
3787 | 0 | index += sizeof(uint32_t); |
3788 | 0 | ieot->entries[i].nr = get_be32(index); |
3789 | 0 | index += sizeof(uint32_t); |
3790 | 0 | } |
3791 | |
3792 | 0 | return ieot; |
3793 | 0 | } |
3794 | | |
3795 | | static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot) |
3796 | 0 | { |
3797 | 0 | uint32_t buffer; |
3798 | 0 | int i; |
3799 | | |
3800 | | /* version */ |
3801 | 0 | put_be32(&buffer, IEOT_VERSION); |
3802 | 0 | strbuf_add(sb, &buffer, sizeof(uint32_t)); |
3803 | | |
3804 | | /* ieot */ |
3805 | 0 | for (i = 0; i < ieot->nr; i++) { |
3806 | | |
3807 | | /* offset */ |
3808 | 0 | put_be32(&buffer, ieot->entries[i].offset); |
3809 | 0 | strbuf_add(sb, &buffer, sizeof(uint32_t)); |
3810 | | |
3811 | | /* count */ |
3812 | 0 | put_be32(&buffer, ieot->entries[i].nr); |
3813 | 0 | strbuf_add(sb, &buffer, sizeof(uint32_t)); |
3814 | 0 | } |
3815 | 0 | } |
3816 | | |
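 | | /*
 | | * Illustration (not part of read-cache.c): the serialized IEOT for
 | | * two blocks is five 32-bit big-endian words -- version (= 1),
 | | * offset[0], nr[0], offset[1], nr[1] -- so its payload is
 | | * extsize = 4 + 2 * 8 = 20 bytes, and the reader above recovers the
 | | * entry count as nr = (20 - sizeof(uint32_t)) / 8 = 2.
 | | */
 | |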
3817 | | void prefetch_cache_entries(const struct index_state *istate, |
3818 | | must_prefetch_predicate must_prefetch) |
3819 | 0 | { |
3820 | 0 | int i; |
3821 | 0 | struct oid_array to_fetch = OID_ARRAY_INIT; |
3822 | |
3823 | 0 | for (i = 0; i < istate->cache_nr; i++) { |
3824 | 0 | struct cache_entry *ce = istate->cache[i]; |
3825 | |
3826 | 0 | if (S_ISGITLINK(ce->ce_mode) || !must_prefetch(ce)) |
3827 | 0 | continue; |
3828 | 0 | if (!oid_object_info_extended(the_repository, &ce->oid, |
3829 | 0 | NULL, |
3830 | 0 | OBJECT_INFO_FOR_PREFETCH)) |
3831 | 0 | continue; |
3832 | 0 | oid_array_append(&to_fetch, &ce->oid); |
3833 | 0 | } |
3834 | 0 | promisor_remote_get_direct(the_repository, |
3835 | 0 | to_fetch.oid, to_fetch.nr); |
3836 | 0 | oid_array_clear(&to_fetch); |
3837 | 0 | } |