Coverage Report

Created: 2023-11-19 07:08

/src/git/read-cache.c
Line
Count
Source
1
/*
2
 * GIT - The information manager from hell
3
 *
4
 * Copyright (C) Linus Torvalds, 2005
5
 */
6
#include "git-compat-util.h"
7
#include "bulk-checkin.h"
8
#include "config.h"
9
#include "date.h"
10
#include "diff.h"
11
#include "diffcore.h"
12
#include "hex.h"
13
#include "tempfile.h"
14
#include "lockfile.h"
15
#include "cache-tree.h"
16
#include "refs.h"
17
#include "dir.h"
18
#include "object-file.h"
19
#include "object-store-ll.h"
20
#include "oid-array.h"
21
#include "tree.h"
22
#include "commit.h"
23
#include "blob.h"
24
#include "environment.h"
25
#include "gettext.h"
26
#include "mem-pool.h"
27
#include "name-hash.h"
28
#include "object-name.h"
29
#include "path.h"
30
#include "preload-index.h"
31
#include "read-cache.h"
32
#include "resolve-undo.h"
33
#include "revision.h"
34
#include "run-command.h"
35
#include "strbuf.h"
36
#include "trace2.h"
37
#include "varint.h"
38
#include "split-index.h"
39
#include "symlinks.h"
40
#include "utf8.h"
41
#include "fsmonitor.h"
42
#include "thread-utils.h"
43
#include "progress.h"
44
#include "sparse-index.h"
45
#include "csum-file.h"
46
#include "promisor-remote.h"
47
#include "hook.h"
48
49
/* Mask for the name length in ce_flags in the on-disk index */
50
51
201k
#define CE_NAMEMASK  (0x0fff)
52
53
/* Index extensions.
54
 *
55
 * The first letter should be 'A'..'Z' for extensions that are not
56
 * necessary for a correct operation (i.e. optimization data).
57
 * When new extensions are added that _need_ to be understood in
58
 * order to correctly interpret the index file, pick a character that
59
 * is outside the range, to cause the reader to abort.
60
 */
61
62
0
#define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
63
13.4k
#define CACHE_EXT_TREE 0x54524545  /* "TREE" */
64
0
#define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
65
0
#define CACHE_EXT_LINK 0x6c696e6b    /* "link" */
66
0
#define CACHE_EXT_UNTRACKED 0x554E5452    /* "UNTR" */
67
0
#define CACHE_EXT_FSMONITOR 0x46534D4E    /* "FSMN" */
68
0
#define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945  /* "EOIE" */
69
0
#define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */
70
0
#define CACHE_EXT_SPARSE_DIRECTORIES 0x73646972 /* "sdir" */
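The extension signatures above are simply four ASCII bytes packed big-endian into one 32-bit word by CACHE_EXT(). A minimal standalone sketch of that packing (cache_ext() here is a hypothetical helper, not part of read-cache.c):

#include <assert.h>
#include <stdint.h>

/* Hypothetical standalone equivalent of the CACHE_EXT(s) macro above:
 * pack four ASCII characters into one big-endian 32-bit signature. */
static uint32_t cache_ext(const char s[4])
{
  return ((uint32_t)(unsigned char)s[0] << 24) |
         ((uint32_t)(unsigned char)s[1] << 16) |
         ((uint32_t)(unsigned char)s[2] << 8)  |
          (uint32_t)(unsigned char)s[3];
}

int main(void)
{
  assert(cache_ext("TREE") == 0x54524545); /* CACHE_EXT_TREE */
  assert(cache_ext("REUC") == 0x52455543); /* CACHE_EXT_RESOLVE_UNDO */
  return 0;
}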
71
72
/* changes that can be kept in $GIT_DIR/index (basically all extensions) */
73
0
#define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
74
0
     CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
75
0
     SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)
76
77
78
/*
79
 * This is an estimate of the pathname length in the index.  We use
80
 * this for V4 index files to guess the un-deltafied size of the index
81
 * in memory because of pathname deltafication.  This is not required
82
 * for V2/V3 index formats because their pathnames are not compressed.
83
 * If the initial amount of memory set aside is not sufficient, the
84
 * mem pool will allocate extra memory.
85
 */
86
0
#define CACHE_ENTRY_PATH_LENGTH 80
87
88
enum index_search_mode {
89
  NO_EXPAND_SPARSE = 0,
90
  EXPAND_SPARSE = 1
91
};
92
93
static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
94
0
{
95
0
  struct cache_entry *ce;
96
0
  ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
97
0
  ce->mem_pool_allocated = 1;
98
0
  return ce;
99
0
}
100
101
static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
102
8.44k
{
103
8.44k
  struct cache_entry * ce;
104
8.44k
  ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
105
8.44k
  ce->mem_pool_allocated = 1;
106
8.44k
  return ce;
107
8.44k
}
108
109
static struct mem_pool *find_mem_pool(struct index_state *istate)
110
8.44k
{
111
8.44k
  struct mem_pool **pool_ptr;
112
113
8.44k
  if (istate->split_index && istate->split_index->base)
114
0
    pool_ptr = &istate->split_index->base->ce_mem_pool;
115
8.44k
  else
116
8.44k
    pool_ptr = &istate->ce_mem_pool;
117
118
8.44k
  if (!*pool_ptr) {
119
1.22k
    *pool_ptr = xmalloc(sizeof(**pool_ptr));
120
1.22k
    mem_pool_init(*pool_ptr, 0);
121
1.22k
  }
122
123
8.44k
  return *pool_ptr;
124
8.44k
}
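The mem-pool allocators above size each entry with cache_entry_size(len), i.e. the fixed struct plus a flexible name array plus its NUL terminator. A small self-contained sketch of that flexible-array sizing (mini_entry and mini_alloc are illustrative stand-ins, not git types):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for a cache entry with a FLEX_ARRAY name. */
struct mini_entry {
  unsigned int namelen;
  char name[];
};

/* Roughly what cache_entry_size(len) accounts for: header + name + NUL. */
static struct mini_entry *mini_alloc(const char *name)
{
  size_t len = strlen(name);
  struct mini_entry *e = calloc(1, sizeof(*e) + len + 1);
  if (!e)
    return NULL;
  e->namelen = (unsigned int)len;
  memcpy(e->name, name, len + 1);
  return e;
}

int main(void)
{
  struct mini_entry *e = mini_alloc("src/main.c");
  if (e)
    printf("%u %s\n", e->namelen, e->name);
  free(e);
  return 0;
}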
125
126
static const char *alternate_index_output;
127
128
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
129
8.44k
{
130
8.44k
  if (S_ISSPARSEDIR(ce->ce_mode))
131
0
    istate->sparse_index = INDEX_COLLAPSED;
132
133
8.44k
  istate->cache[nr] = ce;
134
8.44k
  add_name_hash(istate, ce);
135
8.44k
}
136
137
static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
138
0
{
139
0
  struct cache_entry *old = istate->cache[nr];
140
141
0
  replace_index_entry_in_base(istate, old, ce);
142
0
  remove_name_hash(istate, old);
143
0
  discard_cache_entry(old);
144
0
  ce->ce_flags &= ~CE_HASHED;
145
0
  set_index_entry(istate, nr, ce);
146
0
  ce->ce_flags |= CE_UPDATE_IN_BASE;
147
0
  mark_fsmonitor_invalid(istate, ce);
148
0
  istate->cache_changed |= CE_ENTRY_CHANGED;
149
0
}
150
151
void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
152
0
{
153
0
  struct cache_entry *old_entry = istate->cache[nr], *new_entry, *refreshed;
154
0
  int namelen = strlen(new_name);
155
156
0
  new_entry = make_empty_cache_entry(istate, namelen);
157
0
  copy_cache_entry(new_entry, old_entry);
158
0
  new_entry->ce_flags &= ~CE_HASHED;
159
0
  new_entry->ce_namelen = namelen;
160
0
  new_entry->index = 0;
161
0
  memcpy(new_entry->name, new_name, namelen + 1);
162
163
0
  cache_tree_invalidate_path(istate, old_entry->name);
164
0
  untracked_cache_remove_from_index(istate, old_entry->name);
165
0
  remove_index_entry_at(istate, nr);
166
167
  /*
168
   * Refresh the new index entry. Using 'refresh_cache_entry' ensures
169
   * we only update stat info if the entry is otherwise up-to-date (i.e.,
170
   * the contents/mode haven't changed). This ensures that we reflect the
171
   * 'ctime' of the rename in the index without (incorrectly) updating
172
   * the cached stat info to reflect unstaged changes on disk.
173
   */
174
0
  refreshed = refresh_cache_entry(istate, new_entry, CE_MATCH_REFRESH);
175
0
  if (refreshed && refreshed != new_entry) {
176
0
    add_index_entry(istate, refreshed, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
177
0
    discard_cache_entry(new_entry);
178
0
  } else
179
0
    add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
180
0
}
181
182
/*
183
 * This only updates the "non-critical" parts of the directory
184
 * cache, i.e. the parts that aren't tracked by GIT, and are only used
185
 * to validate the cache.
186
 */
187
void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st)
188
8.44k
{
189
8.44k
  fill_stat_data(&ce->ce_stat_data, st);
190
191
8.44k
  if (assume_unchanged)
192
0
    ce->ce_flags |= CE_VALID;
193
194
8.44k
  if (S_ISREG(st->st_mode)) {
195
8.44k
    ce_mark_uptodate(ce);
196
8.44k
    mark_fsmonitor_valid(istate, ce);
197
8.44k
  }
198
8.44k
}
199
200
static int ce_compare_data(struct index_state *istate,
201
         const struct cache_entry *ce,
202
         struct stat *st)
203
0
{
204
0
  int match = -1;
205
0
  int fd = git_open_cloexec(ce->name, O_RDONLY);
206
207
0
  if (fd >= 0) {
208
0
    struct object_id oid;
209
0
    if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0))
210
0
      match = !oideq(&oid, &ce->oid);
211
    /* index_fd() closed the file descriptor already */
212
0
  }
213
0
  return match;
214
0
}
215
216
static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
217
0
{
218
0
  int match = -1;
219
0
  void *buffer;
220
0
  unsigned long size;
221
0
  enum object_type type;
222
0
  struct strbuf sb = STRBUF_INIT;
223
224
0
  if (strbuf_readlink(&sb, ce->name, expected_size))
225
0
    return -1;
226
227
0
  buffer = repo_read_object_file(the_repository, &ce->oid, &type, &size);
228
0
  if (buffer) {
229
0
    if (size == sb.len)
230
0
      match = memcmp(buffer, sb.buf, size);
231
0
    free(buffer);
232
0
  }
233
0
  strbuf_release(&sb);
234
0
  return match;
235
0
}
236
237
static int ce_compare_gitlink(const struct cache_entry *ce)
238
0
{
239
0
  struct object_id oid;
240
241
  /*
242
   * We don't actually require that the .git directory
243
   * under a GITLINK directory be a valid git directory. It
244
   * might even be missing (in case nobody populated that
245
   * sub-project).
246
   *
247
   * If so, we consider it always to match.
248
   */
249
0
  if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
250
0
    return 0;
251
0
  return !oideq(&oid, &ce->oid);
252
0
}
253
254
static int ce_modified_check_fs(struct index_state *istate,
255
        const struct cache_entry *ce,
256
        struct stat *st)
257
0
{
258
0
  switch (st->st_mode & S_IFMT) {
259
0
  case S_IFREG:
260
0
    if (ce_compare_data(istate, ce, st))
261
0
      return DATA_CHANGED;
262
0
    break;
263
0
  case S_IFLNK:
264
0
    if (ce_compare_link(ce, xsize_t(st->st_size)))
265
0
      return DATA_CHANGED;
266
0
    break;
267
0
  case S_IFDIR:
268
0
    if (S_ISGITLINK(ce->ce_mode))
269
0
      return ce_compare_gitlink(ce) ? DATA_CHANGED : 0;
270
    /* else fallthrough */
271
0
  default:
272
0
    return TYPE_CHANGED;
273
0
  }
274
0
  return 0;
275
0
}
276
277
static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st)
278
0
{
279
0
  unsigned int changed = 0;
280
281
0
  if (ce->ce_flags & CE_REMOVE)
282
0
    return MODE_CHANGED | DATA_CHANGED | TYPE_CHANGED;
283
284
0
  switch (ce->ce_mode & S_IFMT) {
285
0
  case S_IFREG:
286
0
    changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
287
    /* We consider only the owner x bit to be relevant for
288
     * "mode changes"
289
     */
290
0
    if (trust_executable_bit &&
291
0
        (0100 & (ce->ce_mode ^ st->st_mode)))
292
0
      changed |= MODE_CHANGED;
293
0
    break;
294
0
  case S_IFLNK:
295
0
    if (!S_ISLNK(st->st_mode) &&
296
0
        (has_symlinks || !S_ISREG(st->st_mode)))
297
0
      changed |= TYPE_CHANGED;
298
0
    break;
299
0
  case S_IFGITLINK:
300
    /* We ignore most of the st_xxx fields for gitlinks */
301
0
    if (!S_ISDIR(st->st_mode))
302
0
      changed |= TYPE_CHANGED;
303
0
    else if (ce_compare_gitlink(ce))
304
0
      changed |= DATA_CHANGED;
305
0
    return changed;
306
0
  default:
307
0
    BUG("unsupported ce_mode: %o", ce->ce_mode);
308
0
  }
309
310
0
  changed |= match_stat_data(&ce->ce_stat_data, st);
311
312
  /* Racily smudged entry? */
313
0
  if (!ce->ce_stat_data.sd_size) {
314
0
    if (!is_empty_blob_sha1(ce->oid.hash))
315
0
      changed |= DATA_CHANGED;
316
0
  }
317
318
0
  return changed;
319
0
}
320
321
static int is_racy_stat(const struct index_state *istate,
322
      const struct stat_data *sd)
323
0
{
324
0
  return (istate->timestamp.sec &&
325
#ifdef USE_NSEC
326
     /* nanosecond timestamped files can also be racy! */
327
    (istate->timestamp.sec < sd->sd_mtime.sec ||
328
     (istate->timestamp.sec == sd->sd_mtime.sec &&
329
      istate->timestamp.nsec <= sd->sd_mtime.nsec))
330
#else
331
0
    istate->timestamp.sec <= sd->sd_mtime.sec
332
0
#endif
333
0
    );
334
0
}
335
336
int is_racy_timestamp(const struct index_state *istate,
337
           const struct cache_entry *ce)
338
0
{
339
0
  return (!S_ISGITLINK(ce->ce_mode) &&
340
0
    is_racy_stat(istate, &ce->ce_stat_data));
341
0
}
342
343
int match_stat_data_racy(const struct index_state *istate,
344
       const struct stat_data *sd, struct stat *st)
345
0
{
346
0
  if (is_racy_stat(istate, sd))
347
0
    return MTIME_CHANGED;
348
0
  return match_stat_data(sd, st);
349
0
}
350
351
int ie_match_stat(struct index_state *istate,
352
      const struct cache_entry *ce, struct stat *st,
353
      unsigned int options)
354
0
{
355
0
  unsigned int changed;
356
0
  int ignore_valid = options & CE_MATCH_IGNORE_VALID;
357
0
  int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
358
0
  int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY;
359
0
  int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;
360
361
0
  if (!ignore_fsmonitor)
362
0
    refresh_fsmonitor(istate);
363
  /*
364
   * If it's marked as always valid in the index, it's
365
   * valid whatever the checked-out copy says.
366
   *
367
   * skip-worktree has the same effect with higher precedence
368
   */
369
0
  if (!ignore_skip_worktree && ce_skip_worktree(ce))
370
0
    return 0;
371
0
  if (!ignore_valid && (ce->ce_flags & CE_VALID))
372
0
    return 0;
373
0
  if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID))
374
0
    return 0;
375
376
  /*
377
   * Intent-to-add entries have not been added, so the index entry
378
   * by definition never matches what is in the work tree until it
379
   * actually gets added.
380
   */
381
0
  if (ce_intent_to_add(ce))
382
0
    return DATA_CHANGED | TYPE_CHANGED | MODE_CHANGED;
383
384
0
  changed = ce_match_stat_basic(ce, st);
385
386
  /*
387
   * Within 1 second of this sequence:
388
   *  echo xyzzy >file && git-update-index --add file
389
   * running this command:
390
   *  echo frotz >file
391
   * would give a falsely clean cache entry.  The mtime and
392
   * length match the cache, and other stat fields do not change.
393
   *
394
   * We could detect this at update-index time (the cache entry
395
   * being registered/updated records the same time as "now")
396
   * and delay the return from git-update-index, but that would
397
   * effectively mean we can make at most one commit per second,
398
   * which is not acceptable.  Instead, we check cache entries
399
   * whose mtime are the same as the index file timestamp more
400
   * carefully than others.
401
   */
402
0
  if (!changed && is_racy_timestamp(istate, ce)) {
403
0
    if (assume_racy_is_modified)
404
0
      changed |= DATA_CHANGED;
405
0
    else
406
0
      changed |= ce_modified_check_fs(istate, ce, st);
407
0
  }
408
409
0
  return changed;
410
0
}
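The long comment above describes the racy-clean case: a file modified within the same second as the index write can carry stat data identical to what the index recorded. A minimal sketch of the core comparison (looks_racy() is hypothetical; the real checks are is_racy_stat()/is_racy_timestamp() above):

#include <stdio.h>
#include <time.h>

/* Hypothetical reduction of is_racy_stat(): an entry whose mtime is not
 * older than the index's own timestamp cannot be trusted from stat data
 * alone and must be re-checked against the filesystem. */
static int looks_racy(time_t index_time, time_t entry_mtime)
{
  return index_time && index_time <= entry_mtime;
}

int main(void)
{
  printf("%d\n", looks_racy(1700000000, 1700000000)); /* 1: same second, racy */
  printf("%d\n", looks_racy(1700000005, 1700000000)); /* 0: index written later */
  return 0;
}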
411
412
int ie_modified(struct index_state *istate,
413
    const struct cache_entry *ce,
414
    struct stat *st, unsigned int options)
415
0
{
416
0
  int changed, changed_fs;
417
418
0
  changed = ie_match_stat(istate, ce, st, options);
419
0
  if (!changed)
420
0
    return 0;
421
  /*
422
   * If the mode or type has changed, there's no point in trying
423
   * to refresh the entry - it's not going to match
424
   */
425
0
  if (changed & (MODE_CHANGED | TYPE_CHANGED))
426
0
    return changed;
427
428
  /*
429
   * Immediately after read-tree or update-index --cacheinfo,
430
   * the length field is zero, as we have never even read the
431
   * lstat(2) information once, and we cannot trust DATA_CHANGED
432
   * returned by ie_match_stat() which in turn was returned by
433
   * ce_match_stat_basic() to signal that the filesize of the
434
   * blob changed.  We have to actually go to the filesystem to
435
   * see if the contents match, and if so, should answer "unchanged".
436
   *
437
   * The logic does not apply to gitlinks, as ce_match_stat_basic()
438
   * already has checked the actual HEAD from the filesystem in the
439
   * subproject.  If ie_match_stat() already said it is different,
440
   * then we know it is.
441
   */
442
0
  if ((changed & DATA_CHANGED) &&
443
0
      (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
444
0
    return changed;
445
446
0
  changed_fs = ce_modified_check_fs(istate, ce, st);
447
0
  if (changed_fs)
448
0
    return changed | changed_fs;
449
0
  return 0;
450
0
}
451
452
static int cache_name_stage_compare(const char *name1, int len1, int stage1,
453
            const char *name2, int len2, int stage2)
454
67.0k
{
455
67.0k
  int cmp;
456
457
67.0k
  cmp = name_compare(name1, len1, name2, len2);
458
67.0k
  if (cmp)
459
58.5k
    return cmp;
460
461
8.44k
  if (stage1 < stage2)
462
0
    return -1;
463
8.44k
  if (stage1 > stage2)
464
0
    return 1;
465
8.44k
  return 0;
466
8.44k
}
467
468
int cmp_cache_name_compare(const void *a_, const void *b_)
469
0
{
470
0
  const struct cache_entry *ce1, *ce2;
471
472
0
  ce1 = *((const struct cache_entry **)a_);
473
0
  ce2 = *((const struct cache_entry **)b_);
474
0
  return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
475
0
          ce2->name, ce2->ce_namelen, ce_stage(ce2));
476
0
}
477
478
static int index_name_stage_pos(struct index_state *istate,
479
        const char *name, int namelen,
480
        int stage,
481
        enum index_search_mode search_mode)
482
27.0k
{
483
27.0k
  int first, last;
484
485
27.0k
  first = 0;
486
27.0k
  last = istate->cache_nr;
487
85.6k
  while (last > first) {
488
67.0k
    int next = first + ((last - first) >> 1);
489
67.0k
    struct cache_entry *ce = istate->cache[next];
490
67.0k
    int cmp = cache_name_stage_compare(name, namelen, stage, ce->name, ce_namelen(ce), ce_stage(ce));
491
67.0k
    if (!cmp)
492
8.44k
      return next;
493
58.5k
    if (cmp < 0) {
494
37.8k
      last = next;
495
37.8k
      continue;
496
37.8k
    }
497
20.7k
    first = next+1;
498
20.7k
  }
499
500
18.6k
  if (search_mode == EXPAND_SPARSE && istate->sparse_index &&
501
18.6k
      first > 0) {
502
    /* Note: first <= istate->cache_nr */
503
0
    struct cache_entry *ce = istate->cache[first - 1];
504
505
    /*
506
     * If we are in a sparse-index _and_ the entry before the
507
     * insertion position is a sparse-directory entry that is
508
     * an ancestor of 'name', then we need to expand the index
509
     * and search again. This will only trigger once, because
510
     * thereafter the index is fully expanded.
511
     */
512
0
    if (S_ISSPARSEDIR(ce->ce_mode) &&
513
0
        ce_namelen(ce) < namelen &&
514
0
        !strncmp(name, ce->name, ce_namelen(ce))) {
515
0
      ensure_full_index(istate);
516
0
      return index_name_stage_pos(istate, name, namelen, stage, search_mode);
517
0
    }
518
0
  }
519
520
18.6k
  return -first-1;
521
18.6k
}
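index_name_stage_pos() follows the usual Git index convention: a hit returns the entry's position, a miss returns -(insertion point) - 1, which is always negative yet still encodes where the entry would go. A tiny sketch of how callers decode it (the numbers are made up for illustration):

#include <assert.h>

int main(void)
{
  int hit  = 5;        /* found at position 5 */
  int miss = -3 - 1;   /* not found; would be inserted at position 3 */

  assert(hit >= 0);
  assert(miss < 0);
  assert(-miss - 1 == 3);  /* the "pos = -pos-1" recovery used by callers */
  return 0;
}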
522
523
int index_name_pos(struct index_state *istate, const char *name, int namelen)
524
19.8k
{
525
19.8k
  return index_name_stage_pos(istate, name, namelen, 0, EXPAND_SPARSE);
526
19.8k
}
527
528
int index_name_pos_sparse(struct index_state *istate, const char *name, int namelen)
529
0
{
530
0
  return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE);
531
0
}
532
533
int index_entry_exists(struct index_state *istate, const char *name, int namelen)
534
0
{
535
0
  return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE) >= 0;
536
0
}
537
538
int remove_index_entry_at(struct index_state *istate, int pos)
539
0
{
540
0
  struct cache_entry *ce = istate->cache[pos];
541
542
0
  record_resolve_undo(istate, ce);
543
0
  remove_name_hash(istate, ce);
544
0
  save_or_free_index_entry(istate, ce);
545
0
  istate->cache_changed |= CE_ENTRY_REMOVED;
546
0
  istate->cache_nr--;
547
0
  if (pos >= istate->cache_nr)
548
0
    return 0;
549
0
  MOVE_ARRAY(istate->cache + pos, istate->cache + pos + 1,
550
0
       istate->cache_nr - pos);
551
0
  return 1;
552
0
}
553
554
/*
555
 * Remove all cache entries marked for removal, that is where
556
 * CE_REMOVE is set in ce_flags.  This is much more efficient than
557
 * calling remove_index_entry_at() for each entry to be removed.
558
 */
559
void remove_marked_cache_entries(struct index_state *istate, int invalidate)
560
9.19k
{
561
9.19k
  struct cache_entry **ce_array = istate->cache;
562
9.19k
  unsigned int i, j;
563
564
9.19k
  for (i = j = 0; i < istate->cache_nr; i++) {
565
0
    if (ce_array[i]->ce_flags & CE_REMOVE) {
566
0
      if (invalidate) {
567
0
        cache_tree_invalidate_path(istate,
568
0
                 ce_array[i]->name);
569
0
        untracked_cache_remove_from_index(istate,
570
0
                  ce_array[i]->name);
571
0
      }
572
0
      remove_name_hash(istate, ce_array[i]);
573
0
      save_or_free_index_entry(istate, ce_array[i]);
574
0
    }
575
0
    else
576
0
      ce_array[j++] = ce_array[i];
577
0
  }
578
9.19k
  if (j == istate->cache_nr)
579
9.19k
    return;
580
0
  istate->cache_changed |= CE_ENTRY_REMOVED;
581
0
  istate->cache_nr = j;
582
0
}
583
584
int remove_file_from_index(struct index_state *istate, const char *path)
585
0
{
586
0
  int pos = index_name_pos(istate, path, strlen(path));
587
0
  if (pos < 0)
588
0
    pos = -pos-1;
589
0
  cache_tree_invalidate_path(istate, path);
590
0
  untracked_cache_remove_from_index(istate, path);
591
0
  while (pos < istate->cache_nr && !strcmp(istate->cache[pos]->name, path))
592
0
    remove_index_entry_at(istate, pos);
593
0
  return 0;
594
0
}
595
596
static int compare_name(struct cache_entry *ce, const char *path, int namelen)
597
0
{
598
0
  return namelen != ce_namelen(ce) || memcmp(path, ce->name, namelen);
599
0
}
600
601
static int index_name_pos_also_unmerged(struct index_state *istate,
602
  const char *path, int namelen)
603
0
{
604
0
  int pos = index_name_pos(istate, path, namelen);
605
0
  struct cache_entry *ce;
606
607
0
  if (pos >= 0)
608
0
    return pos;
609
610
  /* maybe unmerged? */
611
0
  pos = -1 - pos;
612
0
  if (pos >= istate->cache_nr ||
613
0
      compare_name((ce = istate->cache[pos]), path, namelen))
614
0
    return -1;
615
616
  /* order of preference: stage 2, 1, 3 */
617
0
  if (ce_stage(ce) == 1 && pos + 1 < istate->cache_nr &&
618
0
      ce_stage((ce = istate->cache[pos + 1])) == 2 &&
619
0
      !compare_name(ce, path, namelen))
620
0
    pos++;
621
0
  return pos;
622
0
}
623
624
static int different_name(struct cache_entry *ce, struct cache_entry *alias)
625
0
{
626
0
  int len = ce_namelen(ce);
627
0
  return ce_namelen(alias) != len || memcmp(ce->name, alias->name, len);
628
0
}
629
630
/*
631
 * If we add a filename that aliases in the cache, we will use the
632
 * name that we already have - but we don't want to update the same
633
 * alias twice, because that implies that there were actually two
634
 * different files with aliasing names!
635
 *
636
 * So we use the CE_ADDED flag to verify that the alias was an old
637
 * one before we accept it as a replacement.
638
 */
639
static struct cache_entry *create_alias_ce(struct index_state *istate,
640
             struct cache_entry *ce,
641
             struct cache_entry *alias)
642
0
{
643
0
  int len;
644
0
  struct cache_entry *new_entry;
645
646
0
  if (alias->ce_flags & CE_ADDED)
647
0
    die(_("will not add file alias '%s' ('%s' already exists in index)"),
648
0
        ce->name, alias->name);
649
650
  /* Ok, create the new entry using the name of the existing alias */
651
0
  len = ce_namelen(alias);
652
0
  new_entry = make_empty_cache_entry(istate, len);
653
0
  memcpy(new_entry->name, alias->name, len);
654
0
  copy_cache_entry(new_entry, ce);
655
0
  save_or_free_index_entry(istate, ce);
656
0
  return new_entry;
657
0
}
658
659
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
660
0
{
661
0
  struct object_id oid;
662
0
  if (write_object_file("", 0, OBJ_BLOB, &oid))
663
0
    die(_("cannot create an empty blob in the object database"));
664
0
  oidcpy(&ce->oid, &oid);
665
0
}
666
667
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
668
8.44k
{
669
8.44k
  int namelen, was_same;
670
8.44k
  mode_t st_mode = st->st_mode;
671
8.44k
  struct cache_entry *ce, *alias = NULL;
672
8.44k
  unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
673
8.44k
  int verbose = flags & (ADD_CACHE_VERBOSE | ADD_CACHE_PRETEND);
674
8.44k
  int pretend = flags & ADD_CACHE_PRETEND;
675
8.44k
  int intent_only = flags & ADD_CACHE_INTENT;
676
8.44k
  int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
677
8.44k
        (intent_only ? ADD_CACHE_NEW_ONLY : 0));
678
8.44k
  unsigned hash_flags = pretend ? 0 : HASH_WRITE_OBJECT;
679
8.44k
  struct object_id oid;
680
681
8.44k
  if (flags & ADD_CACHE_RENORMALIZE)
682
0
    hash_flags |= HASH_RENORMALIZE;
683
684
8.44k
  if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
685
0
    return error(_("%s: can only add regular files, symbolic links or git-directories"), path);
686
687
8.44k
  namelen = strlen(path);
688
8.44k
  if (S_ISDIR(st_mode)) {
689
0
    if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
690
0
      return error(_("'%s' does not have a commit checked out"), path);
691
0
    while (namelen && path[namelen-1] == '/')
692
0
      namelen--;
693
0
  }
694
8.44k
  ce = make_empty_cache_entry(istate, namelen);
695
8.44k
  memcpy(ce->name, path, namelen);
696
8.44k
  ce->ce_namelen = namelen;
697
8.44k
  if (!intent_only)
698
8.44k
    fill_stat_cache_info(istate, ce, st);
699
0
  else
700
0
    ce->ce_flags |= CE_INTENT_TO_ADD;
701
702
703
8.44k
  if (trust_executable_bit && has_symlinks) {
704
8.44k
    ce->ce_mode = create_ce_mode(st_mode);
705
8.44k
  } else {
706
    /* If there is an existing entry, pick the mode bits and type
707
     * from it, otherwise assume unexecutable regular file.
708
     */
709
0
    struct cache_entry *ent;
710
0
    int pos = index_name_pos_also_unmerged(istate, path, namelen);
711
712
0
    ent = (0 <= pos) ? istate->cache[pos] : NULL;
713
0
    ce->ce_mode = ce_mode_from_stat(ent, st_mode);
714
0
  }
715
716
  /* When core.ignorecase=true, determine if a directory of the same name but differing
717
   * case already exists within the Git repository.  If it does, ensure the directory
718
   * case of the file being added to the repository matches (is folded into) the existing
719
   * entry's directory case.
720
   */
721
8.44k
  if (ignore_case) {
722
0
    adjust_dirname_case(istate, ce->name);
723
0
  }
724
8.44k
  if (!(flags & ADD_CACHE_RENORMALIZE)) {
725
8.44k
    alias = index_file_exists(istate, ce->name,
726
8.44k
            ce_namelen(ce), ignore_case);
727
8.44k
    if (alias &&
728
8.44k
        !ce_stage(alias) &&
729
8.44k
        !ie_match_stat(istate, alias, st, ce_option)) {
730
      /* Nothing changed, really */
731
0
      if (!S_ISGITLINK(alias->ce_mode))
732
0
        ce_mark_uptodate(alias);
733
0
      alias->ce_flags |= CE_ADDED;
734
735
0
      discard_cache_entry(ce);
736
0
      return 0;
737
0
    }
738
8.44k
  }
739
8.44k
  if (!intent_only) {
740
8.44k
    if (index_path(istate, &ce->oid, path, st, hash_flags)) {
741
0
      discard_cache_entry(ce);
742
0
      return error(_("unable to index file '%s'"), path);
743
0
    }
744
8.44k
  } else
745
0
    set_object_name_for_intent_to_add_entry(ce);
746
747
8.44k
  if (ignore_case && alias && different_name(ce, alias))
748
0
    ce = create_alias_ce(istate, ce, alias);
749
8.44k
  ce->ce_flags |= CE_ADDED;
750
751
  /* It was suspected to be racily clean, but it turns out to be Ok */
752
8.44k
  was_same = (alias &&
753
8.44k
        !ce_stage(alias) &&
754
8.44k
        oideq(&alias->oid, &ce->oid) &&
755
8.44k
        ce->ce_mode == alias->ce_mode);
756
757
8.44k
  if (pretend)
758
0
    discard_cache_entry(ce);
759
8.44k
  else if (add_index_entry(istate, ce, add_option)) {
760
0
    discard_cache_entry(ce);
761
0
    return error(_("unable to add '%s' to index"), path);
762
0
  }
763
8.44k
  if (verbose && !was_same)
764
0
    printf("add '%s'\n", path);
765
8.44k
  return 0;
766
8.44k
}
767
768
int add_file_to_index(struct index_state *istate, const char *path, int flags)
769
8.44k
{
770
8.44k
  struct stat st;
771
8.44k
  if (lstat(path, &st))
772
0
    die_errno(_("unable to stat '%s'"), path);
773
8.44k
  return add_to_index(istate, path, &st, flags);
774
8.44k
}
775
776
struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
777
8.44k
{
778
8.44k
  return mem_pool__ce_calloc(find_mem_pool(istate), len);
779
8.44k
}
780
781
struct cache_entry *make_empty_transient_cache_entry(size_t len,
782
                 struct mem_pool *ce_mem_pool)
783
67.3k
{
784
67.3k
  if (ce_mem_pool)
785
0
    return mem_pool__ce_calloc(ce_mem_pool, len);
786
67.3k
  return xcalloc(1, cache_entry_size(len));
787
67.3k
}
788
789
enum verify_path_result {
790
  PATH_OK,
791
  PATH_INVALID,
792
  PATH_DIR_WITH_SEP,
793
};
794
795
static enum verify_path_result verify_path_internal(const char *, unsigned);
796
797
int verify_path(const char *path, unsigned mode)
798
0
{
799
0
  return verify_path_internal(path, mode) == PATH_OK;
800
0
}
801
802
struct cache_entry *make_cache_entry(struct index_state *istate,
803
             unsigned int mode,
804
             const struct object_id *oid,
805
             const char *path,
806
             int stage,
807
             unsigned int refresh_options)
808
0
{
809
0
  struct cache_entry *ce, *ret;
810
0
  int len;
811
812
0
  if (verify_path_internal(path, mode) == PATH_INVALID) {
813
0
    error(_("invalid path '%s'"), path);
814
0
    return NULL;
815
0
  }
816
817
0
  len = strlen(path);
818
0
  ce = make_empty_cache_entry(istate, len);
819
820
0
  oidcpy(&ce->oid, oid);
821
0
  memcpy(ce->name, path, len);
822
0
  ce->ce_flags = create_ce_flags(stage);
823
0
  ce->ce_namelen = len;
824
0
  ce->ce_mode = create_ce_mode(mode);
825
826
0
  ret = refresh_cache_entry(istate, ce, refresh_options);
827
0
  if (ret != ce)
828
0
    discard_cache_entry(ce);
829
0
  return ret;
830
0
}
831
832
struct cache_entry *make_transient_cache_entry(unsigned int mode,
833
                 const struct object_id *oid,
834
                 const char *path,
835
                 int stage,
836
                 struct mem_pool *ce_mem_pool)
837
0
{
838
0
  struct cache_entry *ce;
839
0
  int len;
840
841
0
  if (!verify_path(path, mode)) {
842
0
    error(_("invalid path '%s'"), path);
843
0
    return NULL;
844
0
  }
845
846
0
  len = strlen(path);
847
0
  ce = make_empty_transient_cache_entry(len, ce_mem_pool);
848
849
0
  oidcpy(&ce->oid, oid);
850
0
  memcpy(ce->name, path, len);
851
0
  ce->ce_flags = create_ce_flags(stage);
852
0
  ce->ce_namelen = len;
853
0
  ce->ce_mode = create_ce_mode(mode);
854
855
0
  return ce;
856
0
}
857
858
/*
859
 * Chmod an index entry with either +x or -x.
860
 *
861
 * Returns -1 if the chmod for the particular cache entry failed (if it's
862
 * not a regular file), -2 if an invalid flip argument is passed in, 0
863
 * otherwise.
864
 */
865
int chmod_index_entry(struct index_state *istate, struct cache_entry *ce,
866
          char flip)
867
0
{
868
0
  if (!S_ISREG(ce->ce_mode))
869
0
    return -1;
870
0
  switch (flip) {
871
0
  case '+':
872
0
    ce->ce_mode |= 0111;
873
0
    break;
874
0
  case '-':
875
0
    ce->ce_mode &= ~0111;
876
0
    break;
877
0
  default:
878
0
    return -2;
879
0
  }
880
0
  cache_tree_invalidate_path(istate, ce->name);
881
0
  ce->ce_flags |= CE_UPDATE_IN_BASE;
882
0
  mark_fsmonitor_invalid(istate, ce);
883
0
  istate->cache_changed |= CE_ENTRY_CHANGED;
884
885
0
  return 0;
886
0
}
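The documented return values of chmod_index_entry() correspond to a simple flip of the 0111 execute bits. A standalone sketch of just that mode arithmetic (flip_mode() is illustrative and does not touch a real cache_entry):

#include <assert.h>
#include <sys/stat.h>

/* Illustrative stand-in for the '+'/'-' handling above, operating on a
 * bare mode word instead of a cache_entry. */
static int flip_mode(unsigned int *mode, char flip)
{
  if (!S_ISREG(*mode))
    return -1;
  switch (flip) {
  case '+': *mode |= 0111;  return 0;
  case '-': *mode &= ~0111; return 0;
  default:  return -2;
  }
}

int main(void)
{
  unsigned int mode = 0100644;
  assert(flip_mode(&mode, '+') == 0 && mode == 0100755);
  assert(flip_mode(&mode, '-') == 0 && mode == 0100644);
  assert(flip_mode(&mode, 'x') == -2);
  return 0;
}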
887
888
int ce_same_name(const struct cache_entry *a, const struct cache_entry *b)
889
6.00k
{
890
6.00k
  int len = ce_namelen(a);
891
6.00k
  return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
892
6.00k
}
893
894
/*
895
 * We fundamentally don't like some paths: we don't want
896
 * dot or dot-dot anywhere, and for obvious reasons don't
897
 * want to recurse into ".git" either.
898
 *
899
 * Also, we don't want double slashes or slashes at the
900
 * end that can make pathnames ambiguous.
901
 */
902
static int verify_dotfile(const char *rest, unsigned mode)
903
0
{
904
  /*
905
   * The first character was '.', but that
906
   * has already been discarded, we now test
907
   * the rest.
908
   */
909
910
  /* "." is not allowed */
911
0
  if (*rest == '\0' || is_dir_sep(*rest))
912
0
    return 0;
913
914
0
  switch (*rest) {
915
  /*
916
   * ".git" followed by NUL or slash is bad. Note that we match
917
   * case-insensitively here, even if ignore_case is not set.
918
   * This outlaws ".GIT" everywhere out of an abundance of caution,
919
   * since there's really no good reason to allow it.
920
   *
921
   * Once we've seen ".git", we can also find ".gitmodules", etc (also
922
   * case-insensitively).
923
   */
924
0
  case 'g':
925
0
  case 'G':
926
0
    if (rest[1] != 'i' && rest[1] != 'I')
927
0
      break;
928
0
    if (rest[2] != 't' && rest[2] != 'T')
929
0
      break;
930
0
    if (rest[3] == '\0' || is_dir_sep(rest[3]))
931
0
      return 0;
932
0
    if (S_ISLNK(mode)) {
933
0
      rest += 3;
934
0
      if (skip_iprefix(rest, "modules", &rest) &&
935
0
          (*rest == '\0' || is_dir_sep(*rest)))
936
0
        return 0;
937
0
    }
938
0
    break;
939
0
  case '.':
940
0
    if (rest[1] == '\0' || is_dir_sep(rest[1]))
941
0
      return 0;
942
0
  }
943
0
  return 1;
944
0
}
945
946
static enum verify_path_result verify_path_internal(const char *path,
947
                unsigned mode)
948
8.44k
{
949
8.44k
  char c = 0;
950
951
8.44k
  if (has_dos_drive_prefix(path))
952
0
    return PATH_INVALID;
953
954
8.44k
  if (!is_valid_path(path))
955
0
    return PATH_INVALID;
956
957
8.44k
  goto inside;
958
314k
  for (;;) {
959
314k
    if (!c)
960
8.44k
      return PATH_OK;
961
306k
    if (is_dir_sep(c)) {
962
8.44k
inside:
963
8.44k
      if (protect_hfs) {
964
965
0
        if (is_hfs_dotgit(path))
966
0
          return PATH_INVALID;
967
0
        if (S_ISLNK(mode)) {
968
0
          if (is_hfs_dotgitmodules(path))
969
0
            return PATH_INVALID;
970
0
        }
971
0
      }
972
8.44k
      if (protect_ntfs) {
973
#if defined GIT_WINDOWS_NATIVE || defined __CYGWIN__
974
        if (c == '\\')
975
          return PATH_INVALID;
976
#endif
977
8.44k
        if (is_ntfs_dotgit(path))
978
0
          return PATH_INVALID;
979
8.44k
        if (S_ISLNK(mode)) {
980
0
          if (is_ntfs_dotgitmodules(path))
981
0
            return PATH_INVALID;
982
0
        }
983
8.44k
      }
984
985
8.44k
      c = *path++;
986
8.44k
      if ((c == '.' && !verify_dotfile(path, mode)) ||
987
8.44k
          is_dir_sep(c))
988
0
        return PATH_INVALID;
989
      /*
990
       * allow terminating directory separators for
991
       * sparse directory entries.
992
       */
993
8.44k
      if (c == '\0')
994
0
        return S_ISDIR(mode) ? PATH_DIR_WITH_SEP :
995
0
                   PATH_INVALID;
996
306k
    } else if (c == '\\' && protect_ntfs) {
997
0
      if (is_ntfs_dotgit(path))
998
0
        return PATH_INVALID;
999
0
      if (S_ISLNK(mode)) {
1000
0
        if (is_ntfs_dotgitmodules(path))
1001
0
          return PATH_INVALID;
1002
0
      }
1003
0
    }
1004
1005
314k
    c = *path++;
1006
314k
  }
1007
0
}
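verify_path_internal() enforces the rules from the comment before verify_dotfile(): no '.' or '..' components, no '.git' in any letter case, and no empty components from doubled or trailing slashes. A much-simplified, hypothetical per-component checker that captures only those rules (the real function above also handles NTFS/HFS quirks, drive prefixes, and sparse-directory trailing slashes):

#include <assert.h>
#include <string.h>
#include <strings.h>

/* Hypothetical, simplified component check; not a substitute for
 * verify_path_internal() above. */
static int component_ok(const char *comp, size_t len)
{
  if (!len)
    return 0;                                  /* "a//b" or trailing '/' */
  if (comp[0] == '.') {
    if (len == 1 || (len == 2 && comp[1] == '.'))
      return 0;                                /* "." or ".." */
    if (len == 4 && !strncasecmp(comp, ".git", 4))
      return 0;                                /* ".git", ".GIT", ... */
  }
  return 1;
}

int main(void)
{
  assert(component_ok("src", 3));
  assert(!component_ok(".git", 4));
  assert(!component_ok("..", 2));
  assert(!component_ok("", 0));
  return 0;
}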
1008
1009
/*
1010
 * Do we have another file that has the beginning components being a
1011
 * proper superset of the name we're trying to add?
1012
 */
1013
static int has_file_name(struct index_state *istate,
1014
       const struct cache_entry *ce, int pos, int ok_to_replace)
1015
8.44k
{
1016
8.44k
  int retval = 0;
1017
8.44k
  int len = ce_namelen(ce);
1018
8.44k
  int stage = ce_stage(ce);
1019
8.44k
  const char *name = ce->name;
1020
1021
8.44k
  while (pos < istate->cache_nr) {
1022
6.00k
    struct cache_entry *p = istate->cache[pos++];
1023
1024
6.00k
    if (len >= ce_namelen(p))
1025
6.00k
      break;
1026
0
    if (memcmp(name, p->name, len))
1027
0
      break;
1028
0
    if (ce_stage(p) != stage)
1029
0
      continue;
1030
0
    if (p->name[len] != '/')
1031
0
      continue;
1032
0
    if (p->ce_flags & CE_REMOVE)
1033
0
      continue;
1034
0
    retval = -1;
1035
0
    if (!ok_to_replace)
1036
0
      break;
1037
0
    remove_index_entry_at(istate, --pos);
1038
0
  }
1039
8.44k
  return retval;
1040
8.44k
}
1041
1042
1043
/*
1044
 * Like strcmp(), but also return the offset of the first change.
1045
 * If strings are equal, return the length.
1046
 */
1047
int strcmp_offset(const char *s1, const char *s2, size_t *first_change)
1048
7.22k
{
1049
7.22k
  size_t k;
1050
1051
7.22k
  if (!first_change)
1052
0
    return strcmp(s1, s2);
1053
1054
37.3k
  for (k = 0; s1[k] == s2[k]; k++)
1055
30.1k
    if (s1[k] == '\0')
1056
0
      break;
1057
1058
7.22k
  *first_change = k;
1059
7.22k
  return (unsigned char)s1[k] - (unsigned char)s2[k];
1060
7.22k
}
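A short usage sketch for strcmp_offset(): besides the sign of the comparison, callers such as has_dir_name() get back how many leading bytes the two paths share. The copy below is a standalone restatement for illustration only:

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Standalone restatement of strcmp_offset() for the example below. */
static int strcmp_offset_demo(const char *s1, const char *s2, size_t *first_change)
{
  size_t k;

  if (!first_change)
    return strcmp(s1, s2);
  for (k = 0; s1[k] == s2[k]; k++)
    if (s1[k] == '\0')
      break;
  *first_change = k;
  return (unsigned char)s1[k] - (unsigned char)s2[k];
}

int main(void)
{
  size_t off;
  assert(strcmp_offset_demo("dir/file_a", "dir/file_b", &off) < 0 && off == 9);
  assert(strcmp_offset_demo("same", "same", &off) == 0 && off == 4);
  return 0;
}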
1061
1062
/*
1063
 * Do we have another file with a pathname that is a proper
1064
 * subset of the name we're trying to add?
1065
 *
1066
 * That is, is there another file in the index with a path
1067
 * that matches a sub-directory in the given entry?
1068
 */
1069
static int has_dir_name(struct index_state *istate,
1070
      const struct cache_entry *ce, int pos, int ok_to_replace)
1071
8.44k
{
1072
8.44k
  int retval = 0;
1073
8.44k
  int stage = ce_stage(ce);
1074
8.44k
  const char *name = ce->name;
1075
8.44k
  const char *slash = name + ce_namelen(ce);
1076
8.44k
  size_t len_eq_last;
1077
8.44k
  int cmp_last = 0;
1078
1079
  /*
1080
   * We are frequently called during an iteration on a sorted
1081
   * list of pathnames and while building a new index.  Therefore,
1082
   * there is a high probability that this entry will eventually
1083
   * be appended to the index, rather than inserted in the middle.
1084
   * If we can confirm that, we can avoid binary searches on the
1085
   * components of the pathname.
1086
   *
1087
   * Compare the entry's full path with the last path in the index.
1088
   */
1089
8.44k
  if (istate->cache_nr > 0) {
1090
7.22k
    cmp_last = strcmp_offset(name,
1091
7.22k
      istate->cache[istate->cache_nr - 1]->name,
1092
7.22k
      &len_eq_last);
1093
7.22k
    if (cmp_last > 0) {
1094
1.22k
      if (len_eq_last == 0) {
1095
        /*
1096
         * The entry sorts AFTER the last one in the
1097
         * index and their paths have no common prefix,
1098
         * so there cannot be a F/D conflict.
1099
         */
1100
0
        return retval;
1101
1.22k
      } else {
1102
        /*
1103
         * The entry sorts AFTER the last one in the
1104
         * index, but has a common prefix.  Fall through
1105
         * to the loop below to dissect the entry's path
1106
         * and see where the difference is.
1107
         */
1108
1.22k
      }
1109
6.00k
    } else if (cmp_last == 0) {
1110
      /*
1111
       * The entry exactly matches the last one in the
1112
       * index, but because of multiple stage and CE_REMOVE
1113
       * items, we fall through and let the regular search
1114
       * code handle it.
1115
       */
1116
0
    }
1117
7.22k
  }
1118
1119
8.44k
  for (;;) {
1120
8.44k
    size_t len;
1121
1122
314k
    for (;;) {
1123
314k
      if (*--slash == '/')
1124
0
        break;
1125
314k
      if (slash <= ce->name)
1126
8.44k
        return retval;
1127
314k
    }
1128
0
    len = slash - name;
1129
1130
0
    if (cmp_last > 0) {
1131
      /*
1132
       * (len + 1) is a directory boundary (including
1133
       * the trailing slash).  And since the loop is
1134
       * decrementing "slash", the first iteration is
1135
       * the longest directory prefix; subsequent
1136
       * iterations consider parent directories.
1137
       */
1138
1139
0
      if (len + 1 <= len_eq_last) {
1140
        /*
1141
         * The directory prefix (including the trailing
1142
         * slash) also appears as a prefix in the last
1143
         * entry, so the remainder cannot collide (because
1144
         * strcmp said the whole path was greater).
1145
         *
1146
         * EQ: last: xxx/A
1147
         *     this: xxx/B
1148
         *
1149
         * LT: last: xxx/file_A
1150
         *     this: xxx/file_B
1151
         */
1152
0
        return retval;
1153
0
      }
1154
1155
0
      if (len > len_eq_last) {
1156
        /*
1157
         * This part of the directory prefix (excluding
1158
         * the trailing slash) is longer than the known
1159
         * equal portions, so this sub-directory cannot
1160
         * collide with a file.
1161
         *
1162
         * GT: last: xxxA
1163
         *     this: xxxB/file
1164
         */
1165
0
        return retval;
1166
0
      }
1167
1168
      /*
1169
       * This is a possible collision. Fall through and
1170
       * let the regular search code handle it.
1171
       *
1172
       * last: xxx
1173
       * this: xxx/file
1174
       */
1175
0
    }
1176
1177
0
    pos = index_name_stage_pos(istate, name, len, stage, EXPAND_SPARSE);
1178
0
    if (pos >= 0) {
1179
      /*
1180
       * Found one, but not so fast.  This could
1181
       * be a marker that says "I was here, but
1182
       * I am being removed".  Such an entry is
1183
       * not a part of the resulting tree, and
1184
       * it is Ok to have a directory at the same
1185
       * path.
1186
       */
1187
0
      if (!(istate->cache[pos]->ce_flags & CE_REMOVE)) {
1188
0
        retval = -1;
1189
0
        if (!ok_to_replace)
1190
0
          break;
1191
0
        remove_index_entry_at(istate, pos);
1192
0
        continue;
1193
0
      }
1194
0
    }
1195
0
    else
1196
0
      pos = -pos-1;
1197
1198
    /*
1199
     * Trivial optimization: if we find an entry that
1200
     * already matches the sub-directory, then we know
1201
     * we're ok, and we can exit.
1202
     */
1203
0
    while (pos < istate->cache_nr) {
1204
0
      struct cache_entry *p = istate->cache[pos];
1205
0
      if ((ce_namelen(p) <= len) ||
1206
0
          (p->name[len] != '/') ||
1207
0
          memcmp(p->name, name, len))
1208
0
        break; /* not our subdirectory */
1209
0
      if (ce_stage(p) == stage && !(p->ce_flags & CE_REMOVE))
1210
        /*
1211
         * p is at the same stage as our entry, and
1212
         * is a subdirectory of what we are looking
1213
         * at, so we cannot have conflicts at our
1214
         * level or anything shorter.
1215
         */
1216
0
        return retval;
1217
0
      pos++;
1218
0
    }
1219
0
  }
1220
0
  return retval;
1221
8.44k
}
1222
1223
/* We may be in a situation where we already have path/file and path
1224
 * is being added, or we already have path and path/file is being
1225
 * added.  Either one would result in a nonsense tree that has path
1226
 * twice when git-write-tree tries to write it out.  Prevent it.
1227
 *
1228
 * If ok-to-replace is specified, we remove the conflicting entries
1229
 * from the cache so the caller should recompute the insert position.
1230
 * When this happens, we return non-zero.
1231
 */
1232
static int check_file_directory_conflict(struct index_state *istate,
1233
           const struct cache_entry *ce,
1234
           int pos, int ok_to_replace)
1235
8.44k
{
1236
8.44k
  int retval;
1237
1238
  /*
1239
   * When ce is an "I am going away" entry, we allow it to be added
1240
   */
1241
8.44k
  if (ce->ce_flags & CE_REMOVE)
1242
0
    return 0;
1243
1244
  /*
1245
   * We check if the path is a sub-path of a subsequent pathname
1246
   * first, since removing those will not change the position
1247
   * in the array.
1248
   */
1249
8.44k
  retval = has_file_name(istate, ce, pos, ok_to_replace);
1250
1251
  /*
1252
   * Then check if the path might have a clashing sub-directory
1253
   * before it.
1254
   */
1255
8.44k
  return retval + has_dir_name(istate, ce, pos, ok_to_replace);
1256
8.44k
}
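The file/directory conflict that check_file_directory_conflict() guards against is one path being a directory prefix of another, e.g. an index holding both "docs" and "docs/readme". A hypothetical helper showing the prefix test at the heart of it (df_conflict() is illustrative only):

#include <assert.h>
#include <string.h>

/* Hypothetical: does adding plain file "dir" collide with an existing
 * entry "file" living inside that directory (or vice versa)? */
static int df_conflict(const char *dir, const char *file)
{
  size_t len = strlen(dir);
  return !strncmp(dir, file, len) && file[len] == '/';
}

int main(void)
{
  assert(df_conflict("docs", "docs/readme"));    /* "docs" vs "docs/readme" */
  assert(!df_conflict("docs", "docs2/readme"));  /* shared prefix, no conflict */
  return 0;
}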
1257
1258
static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option)
1259
8.44k
{
1260
8.44k
  int pos;
1261
8.44k
  int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
1262
8.44k
  int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE;
1263
8.44k
  int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;
1264
8.44k
  int new_only = option & ADD_CACHE_NEW_ONLY;
1265
1266
  /*
1267
   * If this entry's path sorts after the last entry in the index,
1268
   * we can avoid searching for it.
1269
   */
1270
8.44k
  if (istate->cache_nr > 0 &&
1271
8.44k
    strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0)
1272
1.22k
    pos = index_pos_to_insert_pos(istate->cache_nr);
1273
7.22k
  else
1274
7.22k
    pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);
1275
1276
  /*
1277
   * Cache tree path should be invalidated only after index_name_stage_pos,
1278
   * in case it expands a sparse index.
1279
   */
1280
8.44k
  if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
1281
8.44k
    cache_tree_invalidate_path(istate, ce->name);
1282
1283
  /* existing match? Just replace it. */
1284
8.44k
  if (pos >= 0) {
1285
0
    if (!new_only)
1286
0
      replace_index_entry(istate, pos, ce);
1287
0
    return 0;
1288
0
  }
1289
8.44k
  pos = -pos-1;
1290
1291
8.44k
  if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
1292
8.44k
    untracked_cache_add_to_index(istate, ce->name);
1293
1294
  /*
1295
   * Inserting a merged entry ("stage 0") into the index
1296
 * will always replace all non-merged entries.
1297
   */
1298
8.44k
  if (pos < istate->cache_nr && ce_stage(ce) == 0) {
1299
6.00k
    while (ce_same_name(istate->cache[pos], ce)) {
1300
0
      ok_to_add = 1;
1301
0
      if (!remove_index_entry_at(istate, pos))
1302
0
        break;
1303
0
    }
1304
6.00k
  }
1305
1306
8.44k
  if (!ok_to_add)
1307
0
    return -1;
1308
8.44k
  if (verify_path_internal(ce->name, ce->ce_mode) == PATH_INVALID)
1309
0
    return error(_("invalid path '%s'"), ce->name);
1310
1311
8.44k
  if (!skip_df_check &&
1312
8.44k
      check_file_directory_conflict(istate, ce, pos, ok_to_replace)) {
1313
0
    if (!ok_to_replace)
1314
0
      return error(_("'%s' appears as both a file and as a directory"),
1315
0
             ce->name);
1316
0
    pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);
1317
0
    pos = -pos-1;
1318
0
  }
1319
8.44k
  return pos + 1;
1320
8.44k
}
1321
1322
int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
1323
8.44k
{
1324
8.44k
  int pos;
1325
1326
8.44k
  if (option & ADD_CACHE_JUST_APPEND)
1327
0
    pos = istate->cache_nr;
1328
8.44k
  else {
1329
8.44k
    int ret;
1330
8.44k
    ret = add_index_entry_with_check(istate, ce, option);
1331
8.44k
    if (ret <= 0)
1332
0
      return ret;
1333
8.44k
    pos = ret - 1;
1334
8.44k
  }
1335
1336
  /* Make sure the array is big enough .. */
1337
8.44k
  ALLOC_GROW(istate->cache, istate->cache_nr + 1, istate->cache_alloc);
1338
1339
  /* Add it in.. */
1340
8.44k
  istate->cache_nr++;
1341
8.44k
  if (istate->cache_nr > pos + 1)
1342
6.00k
    MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
1343
8.44k
         istate->cache_nr - pos - 1);
1344
8.44k
  set_index_entry(istate, pos, ce);
1345
8.44k
  istate->cache_changed |= CE_ENTRY_ADDED;
1346
8.44k
  return 0;
1347
8.44k
}
1348
1349
/*
1350
 * "refresh" does not calculate a new sha1 file or bring the
1351
 * cache up-to-date for mode/content changes. But what it
1352
 * _does_ do is to "re-match" the stat information of a file
1353
 * with the cache, so that you can refresh the cache for a
1354
 * file that hasn't been changed but where the stat entry is
1355
 * out of date.
1356
 *
1357
 * For example, you'd want to do this after doing a "git-read-tree",
1358
 * to link up the stat cache details with the proper files.
1359
 */
1360
static struct cache_entry *refresh_cache_ent(struct index_state *istate,
1361
               struct cache_entry *ce,
1362
               unsigned int options, int *err,
1363
               int *changed_ret,
1364
               int *t2_did_lstat,
1365
               int *t2_did_scan)
1366
50.3k
{
1367
50.3k
  struct stat st;
1368
50.3k
  struct cache_entry *updated;
1369
50.3k
  int changed;
1370
50.3k
  int refresh = options & CE_MATCH_REFRESH;
1371
50.3k
  int ignore_valid = options & CE_MATCH_IGNORE_VALID;
1372
50.3k
  int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
1373
50.3k
  int ignore_missing = options & CE_MATCH_IGNORE_MISSING;
1374
50.3k
  int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;
1375
1376
50.3k
  if (!refresh || ce_uptodate(ce))
1377
50.3k
    return ce;
1378
1379
0
  if (!ignore_fsmonitor)
1380
0
    refresh_fsmonitor(istate);
1381
  /*
1382
   * CE_VALID or CE_SKIP_WORKTREE means the user promised us
1383
   * that the change to the work tree does not matter and told
1384
   * us not to worry.
1385
   */
1386
0
  if (!ignore_skip_worktree && ce_skip_worktree(ce)) {
1387
0
    ce_mark_uptodate(ce);
1388
0
    return ce;
1389
0
  }
1390
0
  if (!ignore_valid && (ce->ce_flags & CE_VALID)) {
1391
0
    ce_mark_uptodate(ce);
1392
0
    return ce;
1393
0
  }
1394
0
  if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID)) {
1395
0
    ce_mark_uptodate(ce);
1396
0
    return ce;
1397
0
  }
1398
1399
0
  if (has_symlink_leading_path(ce->name, ce_namelen(ce))) {
1400
0
    if (ignore_missing)
1401
0
      return ce;
1402
0
    if (err)
1403
0
      *err = ENOENT;
1404
0
    return NULL;
1405
0
  }
1406
1407
0
  if (t2_did_lstat)
1408
0
    *t2_did_lstat = 1;
1409
0
  if (lstat(ce->name, &st) < 0) {
1410
0
    if (ignore_missing && errno == ENOENT)
1411
0
      return ce;
1412
0
    if (err)
1413
0
      *err = errno;
1414
0
    return NULL;
1415
0
  }
1416
1417
0
  changed = ie_match_stat(istate, ce, &st, options);
1418
0
  if (changed_ret)
1419
0
    *changed_ret = changed;
1420
0
  if (!changed) {
1421
    /*
1422
     * The path is unchanged.  If we were told to ignore
1423
     * valid bit, then we did the actual stat check and
1424
     * found that the entry is unmodified.  If the entry
1425
     * is not marked VALID, this is the place to mark it
1426
     * valid again, under "assume unchanged" mode.
1427
     */
1428
0
    if (ignore_valid && assume_unchanged &&
1429
0
        !(ce->ce_flags & CE_VALID))
1430
0
      ; /* mark this one VALID again */
1431
0
    else {
1432
      /*
1433
       * We do not mark the index itself "modified"
1434
       * because CE_UPTODATE flag is in-core only;
1435
       * we are not going to write this change out.
1436
       */
1437
0
      if (!S_ISGITLINK(ce->ce_mode)) {
1438
0
        ce_mark_uptodate(ce);
1439
0
        mark_fsmonitor_valid(istate, ce);
1440
0
      }
1441
0
      return ce;
1442
0
    }
1443
0
  }
1444
1445
0
  if (t2_did_scan)
1446
0
    *t2_did_scan = 1;
1447
0
  if (ie_modified(istate, ce, &st, options)) {
1448
0
    if (err)
1449
0
      *err = EINVAL;
1450
0
    return NULL;
1451
0
  }
1452
1453
0
  updated = make_empty_cache_entry(istate, ce_namelen(ce));
1454
0
  copy_cache_entry(updated, ce);
1455
0
  memcpy(updated->name, ce->name, ce->ce_namelen + 1);
1456
0
  fill_stat_cache_info(istate, updated, &st);
1457
  /*
1458
   * If ignore_valid is not set, we should leave CE_VALID bit
1459
   * alone.  Otherwise, paths marked with --no-assume-unchanged
1460
   * (i.e. things to be edited) will reacquire CE_VALID bit
1461
   * automatically, which is not really what we want.
1462
   */
1463
0
  if (!ignore_valid && assume_unchanged &&
1464
0
      !(ce->ce_flags & CE_VALID))
1465
0
    updated->ce_flags &= ~CE_VALID;
1466
1467
  /* istate->cache_changed is updated in the caller */
1468
0
  return updated;
1469
0
}
1470
1471
static void show_file(const char * fmt, const char * name, int in_porcelain,
1472
          int * first, const char *header_msg)
1473
0
{
1474
0
  if (in_porcelain && *first && header_msg) {
1475
0
    printf("%s\n", header_msg);
1476
0
    *first = 0;
1477
0
  }
1478
0
  printf(fmt, name);
1479
0
}
1480
1481
int repo_refresh_and_write_index(struct repository *repo,
1482
         unsigned int refresh_flags,
1483
         unsigned int write_flags,
1484
         int gentle,
1485
         const struct pathspec *pathspec,
1486
         char *seen, const char *header_msg)
1487
0
{
1488
0
  struct lock_file lock_file = LOCK_INIT;
1489
0
  int fd, ret = 0;
1490
1491
0
  fd = repo_hold_locked_index(repo, &lock_file, 0);
1492
0
  if (!gentle && fd < 0)
1493
0
    return -1;
1494
0
  if (refresh_index(repo->index, refresh_flags, pathspec, seen, header_msg))
1495
0
    ret = 1;
1496
0
  if (0 <= fd && write_locked_index(repo->index, &lock_file, COMMIT_LOCK | write_flags))
1497
0
    ret = -1;
1498
0
  return ret;
1499
0
}
1500
1501
1502
int refresh_index(struct index_state *istate, unsigned int flags,
1503
      const struct pathspec *pathspec,
1504
      char *seen, const char *header_msg)
1505
7.32k
{
1506
7.32k
  int i;
1507
7.32k
  int has_errors = 0;
1508
7.32k
  int really = (flags & REFRESH_REALLY) != 0;
1509
7.32k
  int allow_unmerged = (flags & REFRESH_UNMERGED) != 0;
1510
7.32k
  int quiet = (flags & REFRESH_QUIET) != 0;
1511
7.32k
  int not_new = (flags & REFRESH_IGNORE_MISSING) != 0;
1512
7.32k
  int ignore_submodules = (flags & REFRESH_IGNORE_SUBMODULES) != 0;
1513
7.32k
  int ignore_skip_worktree = (flags & REFRESH_IGNORE_SKIP_WORKTREE) != 0;
1514
7.32k
  int first = 1;
1515
7.32k
  int in_porcelain = (flags & REFRESH_IN_PORCELAIN);
1516
7.32k
  unsigned int options = (CE_MATCH_REFRESH |
1517
7.32k
        (really ? CE_MATCH_IGNORE_VALID : 0) |
1518
7.32k
        (not_new ? CE_MATCH_IGNORE_MISSING : 0));
1519
7.32k
  const char *modified_fmt;
1520
7.32k
  const char *deleted_fmt;
1521
7.32k
  const char *typechange_fmt;
1522
7.32k
  const char *added_fmt;
1523
7.32k
  const char *unmerged_fmt;
1524
7.32k
  struct progress *progress = NULL;
1525
7.32k
  int t2_sum_lstat = 0;
1526
7.32k
  int t2_sum_scan = 0;
1527
1528
7.32k
  if (flags & REFRESH_PROGRESS && isatty(2))
1529
0
    progress = start_delayed_progress(_("Refresh index"),
1530
0
              istate->cache_nr);
1531
1532
7.32k
  trace_performance_enter();
1533
7.32k
  modified_fmt   = in_porcelain ? "M\t%s\n" : "%s: needs update\n";
1534
7.32k
  deleted_fmt    = in_porcelain ? "D\t%s\n" : "%s: needs update\n";
1535
7.32k
  typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
1536
7.32k
  added_fmt      = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
1537
7.32k
  unmerged_fmt   = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
1538
  /*
1539
   * Use the multi-threaded preload_index() to refresh most of the
1540
 * cache entries quickly; then, in the single-threaded loop below,
1541
   * we only have to do the special cases that are left.
1542
   */
1543
7.32k
  preload_index(istate, pathspec, 0);
1544
7.32k
  trace2_region_enter("index", "refresh", NULL);
1545
1546
57.6k
  for (i = 0; i < istate->cache_nr; i++) {
1547
50.3k
    struct cache_entry *ce, *new_entry;
1548
50.3k
    int cache_errno = 0;
1549
50.3k
    int changed = 0;
1550
50.3k
    int filtered = 0;
1551
50.3k
    int t2_did_lstat = 0;
1552
50.3k
    int t2_did_scan = 0;
1553
1554
50.3k
    ce = istate->cache[i];
1555
50.3k
    if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
1556
0
      continue;
1557
50.3k
    if (ignore_skip_worktree && ce_skip_worktree(ce))
1558
0
      continue;
1559
1560
    /*
1561
     * If this entry is a sparse directory, then there isn't
1562
     * any stat() information to update. Ignore the entry.
1563
     */
1564
50.3k
    if (S_ISSPARSEDIR(ce->ce_mode))
1565
0
      continue;
1566
1567
50.3k
    if (pathspec && !ce_path_match(istate, ce, pathspec, seen))
1568
0
      filtered = 1;
1569
1570
50.3k
    if (ce_stage(ce)) {
1571
0
      while ((i < istate->cache_nr) &&
1572
0
             ! strcmp(istate->cache[i]->name, ce->name))
1573
0
        i++;
1574
0
      i--;
1575
0
      if (allow_unmerged)
1576
0
        continue;
1577
0
      if (!filtered)
1578
0
        show_file(unmerged_fmt, ce->name, in_porcelain,
1579
0
            &first, header_msg);
1580
0
      has_errors = 1;
1581
0
      continue;
1582
0
    }
1583
1584
50.3k
    if (filtered)
1585
0
      continue;
1586
1587
50.3k
    new_entry = refresh_cache_ent(istate, ce, options,
1588
50.3k
                &cache_errno, &changed,
1589
50.3k
                &t2_did_lstat, &t2_did_scan);
1590
50.3k
    t2_sum_lstat += t2_did_lstat;
1591
50.3k
    t2_sum_scan += t2_did_scan;
1592
50.3k
    if (new_entry == ce)
1593
50.3k
      continue;
1594
0
    display_progress(progress, i);
1595
0
    if (!new_entry) {
1596
0
      const char *fmt;
1597
1598
0
      if (really && cache_errno == EINVAL) {
1599
        /* If we are doing --really-refresh that
1600
         * means the index is not valid anymore.
1601
         */
1602
0
        ce->ce_flags &= ~CE_VALID;
1603
0
        ce->ce_flags |= CE_UPDATE_IN_BASE;
1604
0
        mark_fsmonitor_invalid(istate, ce);
1605
0
        istate->cache_changed |= CE_ENTRY_CHANGED;
1606
0
      }
1607
0
      if (quiet)
1608
0
        continue;
1609
1610
0
      if (cache_errno == ENOENT)
1611
0
        fmt = deleted_fmt;
1612
0
      else if (ce_intent_to_add(ce))
1613
0
        fmt = added_fmt; /* must be before other checks */
1614
0
      else if (changed & TYPE_CHANGED)
1615
0
        fmt = typechange_fmt;
1616
0
      else
1617
0
        fmt = modified_fmt;
1618
0
      show_file(fmt,
1619
0
          ce->name, in_porcelain, &first, header_msg);
1620
0
      has_errors = 1;
1621
0
      continue;
1622
0
    }
1623
1624
0
    replace_index_entry(istate, i, new_entry);
1625
0
  }
1626
7.32k
  trace2_data_intmax("index", NULL, "refresh/sum_lstat", t2_sum_lstat);
1627
7.32k
  trace2_data_intmax("index", NULL, "refresh/sum_scan", t2_sum_scan);
1628
7.32k
  trace2_region_leave("index", "refresh", NULL);
1629
7.32k
  display_progress(progress, istate->cache_nr);
1630
7.32k
  stop_progress(&progress);
1631
7.32k
  trace_performance_leave("refresh index");
1632
7.32k
  return has_errors;
1633
7.32k
}
1634
1635
struct cache_entry *refresh_cache_entry(struct index_state *istate,
1636
          struct cache_entry *ce,
1637
          unsigned int options)
1638
0
{
1639
0
  return refresh_cache_ent(istate, ce, options, NULL, NULL, NULL, NULL);
1640
0
}
1641
1642
1643
/*****************************************************************
1644
 * Index File I/O
1645
 *****************************************************************/
1646
1647
1.22k
#define INDEX_FORMAT_DEFAULT 3
1648
1649
static unsigned int get_index_format_default(struct repository *r)
1650
1.22k
{
1651
1.22k
  char *envversion = getenv("GIT_INDEX_VERSION");
1652
1.22k
  char *endp;
1653
1.22k
  unsigned int version = INDEX_FORMAT_DEFAULT;
1654
1655
1.22k
  if (!envversion) {
1656
1.22k
    prepare_repo_settings(r);
1657
1658
1.22k
    if (r->settings.index_version >= 0)
1659
0
      version = r->settings.index_version;
1660
1.22k
    if (version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
1661
0
      warning(_("index.version set, but the value is invalid.\n"
1662
0
          "Using version %i"), INDEX_FORMAT_DEFAULT);
1663
0
      return INDEX_FORMAT_DEFAULT;
1664
0
    }
1665
1.22k
    return version;
1666
1.22k
  }
1667
1668
0
  version = strtoul(envversion, &endp, 10);
1669
0
  if (*endp ||
1670
0
      version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
1671
0
    warning(_("GIT_INDEX_VERSION set, but the value is invalid.\n"
1672
0
        "Using version %i"), INDEX_FORMAT_DEFAULT);
1673
0
    version = INDEX_FORMAT_DEFAULT;
1674
0
  }
1675
0
  return version;
1676
1.22k
}
1677
1678
/*
1679
 * dev/ino/uid/gid/size are also just tracked to the low 32 bits
1680
 * Again - this is just a (very strong in practice) heuristic that
1681
 * the inode hasn't changed.
1682
 *
1683
 * We save the fields in big-endian order to allow using the
1684
 * index file over NFS transparently.
1685
 */
1686
struct ondisk_cache_entry {
1687
  struct cache_time ctime;
1688
  struct cache_time mtime;
1689
  uint32_t dev;
1690
  uint32_t ino;
1691
  uint32_t mode;
1692
  uint32_t uid;
1693
  uint32_t gid;
1694
  uint32_t size;
1695
  /*
1696
   * unsigned char hash[hashsz];
1697
   * uint16_t flags;
1698
   * if (flags & CE_EXTENDED)
1699
   *  uint16_t flags2;
1700
   */
1701
  unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
1702
  char name[FLEX_ARRAY];
1703
};
1704
1705
/* These are only used for v3 or lower */
1706
100k
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
1707
0
#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
1708
0
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
1709
100k
#define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
1710
100k
             ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
1711
#define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
1712
0
#define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))
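A hedged worked example of these size macros, assuming SHA-1 hashes (rawsz == 20), a non-extended entry and the usual packing of the struct above (the 'data' member at offset 40): the path "Makefile" (8 bytes) gives ondisk_data_size() == 20 + 2 + 8 == 30, so the unpadded entry is 40 + 30 == 70 bytes, and align_padding_size() adds 2 NUL bytes to bring ondisk_ce_size() to 72, the next multiple of 8.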
1713
1714
/* Allow fsck to force verification of the index checksum. */
1715
int verify_index_checksum;
1716
1717
/* Allow fsck to force verification of the cache entry order. */
1718
int verify_ce_order;
1719
1720
static int verify_hdr(const struct cache_header *hdr, unsigned long size)
1721
0
{
1722
0
  git_hash_ctx c;
1723
0
  unsigned char hash[GIT_MAX_RAWSZ];
1724
0
  int hdr_version;
1725
0
  unsigned char *start, *end;
1726
0
  struct object_id oid;
1727
1728
0
  if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
1729
0
    return error(_("bad signature 0x%08x"), hdr->hdr_signature);
1730
0
  hdr_version = ntohl(hdr->hdr_version);
1731
0
  if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version)
1732
0
    return error(_("bad index version %d"), hdr_version);
1733
1734
0
  if (!verify_index_checksum)
1735
0
    return 0;
1736
1737
0
  end = (unsigned char *)hdr + size;
1738
0
  start = end - the_hash_algo->rawsz;
1739
0
  oidread(&oid, start);
1740
0
  if (oideq(&oid, null_oid()))
1741
0
    return 0;
1742
1743
0
  the_hash_algo->init_fn(&c);
1744
0
  the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
1745
0
  the_hash_algo->final_fn(hash, &c);
1746
0
  if (!hasheq(hash, start))
1747
0
    return error(_("bad index file sha1 signature"));
1748
0
  return 0;
1749
0
}
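For reference, a hedged sketch of the 12-byte header being verified; the struct below is illustrative only, and CACHE_SIGNATURE / INDEX_FORMAT_LB / INDEX_FORMAT_UB are defined elsewhere in the tree.

struct index_header_sketch {
        uint32_t signature; /* CACHE_SIGNATURE, the ASCII bytes "DIRC", big-endian */
        uint32_t version;   /* INDEX_FORMAT_LB..INDEX_FORMAT_UB, i.e. 2..4 */
        uint32_t entries;   /* number of cache entries that follow */
};

The checksum at the end of the file covers the header, the entries and the extensions, which is why verify_hdr() hashes size - rawsz bytes starting at the header before comparing against the trailing bytes.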
1750
1751
static int read_index_extension(struct index_state *istate,
1752
        const char *ext, const char *data, unsigned long sz)
1753
0
{
1754
0
  switch (CACHE_EXT(ext)) {
1755
0
  case CACHE_EXT_TREE:
1756
0
    istate->cache_tree = cache_tree_read(data, sz);
1757
0
    break;
1758
0
  case CACHE_EXT_RESOLVE_UNDO:
1759
0
    istate->resolve_undo = resolve_undo_read(data, sz);
1760
0
    break;
1761
0
  case CACHE_EXT_LINK:
1762
0
    if (read_link_extension(istate, data, sz))
1763
0
      return -1;
1764
0
    break;
1765
0
  case CACHE_EXT_UNTRACKED:
1766
0
    istate->untracked = read_untracked_extension(data, sz);
1767
0
    break;
1768
0
  case CACHE_EXT_FSMONITOR:
1769
0
    read_fsmonitor_extension(istate, data, sz);
1770
0
    break;
1771
0
  case CACHE_EXT_ENDOFINDEXENTRIES:
1772
0
  case CACHE_EXT_INDEXENTRYOFFSETTABLE:
1773
    /* already handled in do_read_index() */
1774
0
    break;
1775
0
  case CACHE_EXT_SPARSE_DIRECTORIES:
1776
    /* no content, only an indicator */
1777
0
    istate->sparse_index = INDEX_COLLAPSED;
1778
0
    break;
1779
0
  default:
1780
0
    if (*ext < 'A' || 'Z' < *ext)
1781
0
      return error(_("index uses %.4s extension, which we do not understand"),
1782
0
             ext);
1783
0
    fprintf_ln(stderr, _("ignoring %.4s extension"), ext);
1784
0
    break;
1785
0
  }
1786
0
  return 0;
1787
0
}
1788
1789
/*
1790
 * Parses the contents of the cache entry contained within the 'ondisk' buffer
1791
 * into a new incore 'cache_entry'.
1792
 *
1793
 * Note that 'char *ondisk' may not be aligned to a 4-byte address interval in
1794
 * index v4, so we cannot cast it to 'struct ondisk_cache_entry *' and access
1795
 * its members. Instead, we use the byte offsets of members within the struct to
1796
 * identify where 'get_be16()', 'get_be32()', and 'oidread()' (which can all
1797
 * read from an unaligned memory buffer) should read from the 'ondisk' buffer
1798
 * into the corresponding incore 'cache_entry' members.
1799
 */
1800
static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
1801
              unsigned int version,
1802
              const char *ondisk,
1803
              unsigned long *ent_size,
1804
              const struct cache_entry *previous_ce)
1805
0
{
1806
0
  struct cache_entry *ce;
1807
0
  size_t len;
1808
0
  const char *name;
1809
0
  const unsigned hashsz = the_hash_algo->rawsz;
1810
0
  const char *flagsp = ondisk + offsetof(struct ondisk_cache_entry, data) + hashsz;
1811
0
  unsigned int flags;
1812
0
  size_t copy_len = 0;
1813
  /*
1814
   * Adjacent cache entries tend to share the leading paths, so it makes
1815
   * sense to only store the differences in later entries.  In the v4
1816
   * on-disk format of the index, each on-disk cache entry stores the
1817
   * number of bytes to be stripped from the end of the previous name,
1818
   * and the bytes to append to the result, to come up with its name.
1819
   */
1820
0
  int expand_name_field = version == 4;
1821
1822
  /* On-disk flags are just 16 bits */
1823
0
  flags = get_be16(flagsp);
1824
0
  len = flags & CE_NAMEMASK;
1825
1826
0
  if (flags & CE_EXTENDED) {
1827
0
    int extended_flags;
1828
0
    extended_flags = get_be16(flagsp + sizeof(uint16_t)) << 16;
1829
    /* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
1830
0
    if (extended_flags & ~CE_EXTENDED_FLAGS)
1831
0
      die(_("unknown index entry format 0x%08x"), extended_flags);
1832
0
    flags |= extended_flags;
1833
0
    name = (const char *)(flagsp + 2 * sizeof(uint16_t));
1834
0
  }
1835
0
  else
1836
0
    name = (const char *)(flagsp + sizeof(uint16_t));
1837
1838
0
  if (expand_name_field) {
1839
0
    const unsigned char *cp = (const unsigned char *)name;
1840
0
    size_t strip_len, previous_len;
1841
1842
    /* If we're at the beginning of a block, ignore the previous name */
1843
0
    strip_len = decode_varint(&cp);
1844
0
    if (previous_ce) {
1845
0
      previous_len = previous_ce->ce_namelen;
1846
0
      if (previous_len < strip_len)
1847
0
        die(_("malformed name field in the index, near path '%s'"),
1848
0
          previous_ce->name);
1849
0
      copy_len = previous_len - strip_len;
1850
0
    }
1851
0
    name = (const char *)cp;
1852
0
  }
1853
1854
0
  if (len == CE_NAMEMASK) {
1855
0
    len = strlen(name);
1856
0
    if (expand_name_field)
1857
0
      len += copy_len;
1858
0
  }
1859
1860
0
  ce = mem_pool__ce_alloc(ce_mem_pool, len);
1861
1862
  /*
1863
   * NEEDSWORK: using 'offsetof()' is cumbersome and should be replaced
1864
   * with something more akin to 'load_bitmap_entries_v1()'s use of
1865
   * 'read_be16'/'read_be32'. For consistency with the corresponding
1866
   * ondisk entry write function ('copy_cache_entry_to_ondisk()'), this
1867
   * should be done at the same time as removing references to
1868
   * 'ondisk_cache_entry' there.
1869
   */
1870
0
  ce->ce_stat_data.sd_ctime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
1871
0
              + offsetof(struct cache_time, sec));
1872
0
  ce->ce_stat_data.sd_mtime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
1873
0
              + offsetof(struct cache_time, sec));
1874
0
  ce->ce_stat_data.sd_ctime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
1875
0
               + offsetof(struct cache_time, nsec));
1876
0
  ce->ce_stat_data.sd_mtime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
1877
0
               + offsetof(struct cache_time, nsec));
1878
0
  ce->ce_stat_data.sd_dev   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, dev));
1879
0
  ce->ce_stat_data.sd_ino   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ino));
1880
0
  ce->ce_mode  = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mode));
1881
0
  ce->ce_stat_data.sd_uid   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, uid));
1882
0
  ce->ce_stat_data.sd_gid   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, gid));
1883
0
  ce->ce_stat_data.sd_size  = get_be32(ondisk + offsetof(struct ondisk_cache_entry, size));
1884
0
  ce->ce_flags = flags & ~CE_NAMEMASK;
1885
0
  ce->ce_namelen = len;
1886
0
  ce->index = 0;
1887
0
  oidread(&ce->oid, (const unsigned char *)ondisk + offsetof(struct ondisk_cache_entry, data));
1888
1889
0
  if (expand_name_field) {
1890
0
    if (copy_len)
1891
0
      memcpy(ce->name, previous_ce->name, copy_len);
1892
0
    memcpy(ce->name + copy_len, name, len + 1 - copy_len);
1893
0
    *ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len;
1894
0
  } else {
1895
0
    memcpy(ce->name, name, len + 1);
1896
0
    *ent_size = ondisk_ce_size(ce);
1897
0
  }
1898
0
  return ce;
1899
0
}
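The v4 name compression that create_from_disk() undoes can be illustrated with a small hedged sketch; reconstruct_v4_name() below is hypothetical and exists only for this example, while decode_varint() and the strbuf helpers are the APIs the real code relies on.

static void reconstruct_v4_name(struct strbuf *out,
                                const char *previous, size_t previous_len,
                                const unsigned char *cp)
{
        /* number of bytes to drop from the end of the previous path */
        size_t strip_len = decode_varint(&cp);

        if (strip_len > previous_len)
                die("malformed v4 name"); /* mirrors the check in create_from_disk() */
        strbuf_reset(out);
        strbuf_add(out, previous, previous_len - strip_len);
        /* the NUL-terminated suffix follows the varint on disk */
        strbuf_addstr(out, (const char *)cp);
}

For example, with the previous path "src/foo.c", a stored strip count of 5 and the suffix "bar.h", the reconstructed name is "src/bar.h".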
1900
1901
static void check_ce_order(struct index_state *istate)
1902
1.22k
{
1903
1.22k
  unsigned int i;
1904
1905
1.22k
  if (!verify_ce_order)
1906
1.22k
    return;
1907
1908
0
  for (i = 1; i < istate->cache_nr; i++) {
1909
0
    struct cache_entry *ce = istate->cache[i - 1];
1910
0
    struct cache_entry *next_ce = istate->cache[i];
1911
0
    int name_compare = strcmp(ce->name, next_ce->name);
1912
1913
0
    if (0 < name_compare)
1914
0
      die(_("unordered stage entries in index"));
1915
0
    if (!name_compare) {
1916
0
      if (!ce_stage(ce))
1917
0
        die(_("multiple stage entries for merged file '%s'"),
1918
0
            ce->name);
1919
0
      if (ce_stage(ce) > ce_stage(next_ce))
1920
0
        die(_("unordered stage entries for '%s'"),
1921
0
            ce->name);
1922
0
    }
1923
0
  }
1924
0
}
1925
1926
static void tweak_untracked_cache(struct index_state *istate)
1927
1.22k
{
1928
1.22k
  struct repository *r = the_repository;
1929
1930
1.22k
  prepare_repo_settings(r);
1931
1932
1.22k
  switch (r->settings.core_untracked_cache) {
1933
0
  case UNTRACKED_CACHE_REMOVE:
1934
0
    remove_untracked_cache(istate);
1935
0
    break;
1936
0
  case UNTRACKED_CACHE_WRITE:
1937
0
    add_untracked_cache(istate);
1938
0
    break;
1939
1.22k
  case UNTRACKED_CACHE_KEEP:
1940
    /*
1941
     * Either an explicit "core.untrackedCache=keep", the
1942
     * default if "core.untrackedCache" isn't configured,
1943
     * or a fallback on an unknown "core.untrackedCache"
1944
     * value.
1945
     */
1946
1.22k
    break;
1947
1.22k
  }
1948
1.22k
}
1949
1950
static void tweak_split_index(struct index_state *istate)
1951
1.22k
{
1952
1.22k
  switch (git_config_get_split_index()) {
1953
1.22k
  case -1: /* unset: do nothing */
1954
1.22k
    break;
1955
0
  case 0: /* false */
1956
0
    remove_split_index(istate);
1957
0
    break;
1958
0
  case 1: /* true */
1959
0
    add_split_index(istate);
1960
0
    break;
1961
0
  default: /* unknown value: do nothing */
1962
0
    break;
1963
1.22k
  }
1964
1.22k
}
1965
1966
static void post_read_index_from(struct index_state *istate)
1967
1.22k
{
1968
1.22k
  check_ce_order(istate);
1969
1.22k
  tweak_untracked_cache(istate);
1970
1.22k
  tweak_split_index(istate);
1971
1.22k
  tweak_fsmonitor(istate);
1972
1.22k
}
1973
1974
static size_t estimate_cache_size_from_compressed(unsigned int entries)
1975
0
{
1976
0
  return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH);
1977
0
}
1978
1979
static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
1980
0
{
1981
0
  long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);
1982
1983
  /*
1984
   * Account for potential alignment differences.
1985
   */
1986
0
  per_entry += align_padding_size(per_entry, 0);
1987
0
  return ondisk_size + entries * per_entry;
1988
0
}
1989
1990
struct index_entry_offset
1991
{
1992
  /* starting byte offset into index file, count of index entries in this block */
1993
  int offset, nr;
1994
};
1995
1996
struct index_entry_offset_table
1997
{
1998
  int nr;
1999
  struct index_entry_offset entries[FLEX_ARRAY];
2000
};
2001
2002
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset);
2003
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot);
2004
2005
static size_t read_eoie_extension(const char *mmap, size_t mmap_size);
2006
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset);
2007
2008
struct load_index_extensions
2009
{
2010
  pthread_t pthread;
2011
  struct index_state *istate;
2012
  const char *mmap;
2013
  size_t mmap_size;
2014
  unsigned long src_offset;
2015
};
2016
2017
static void *load_index_extensions(void *_data)
2018
0
{
2019
0
  struct load_index_extensions *p = _data;
2020
0
  unsigned long src_offset = p->src_offset;
2021
2022
0
  while (src_offset <= p->mmap_size - the_hash_algo->rawsz - 8) {
2023
    /* After an array of active_nr index entries,
2024
     * there can be arbitrary number of extended
2025
     * sections, each of which is prefixed with
2026
     * extension name (4-byte) and section length
2027
     * in 4-byte network byte order.
2028
     */
2029
0
    uint32_t extsize = get_be32(p->mmap + src_offset + 4);
2030
0
    if (read_index_extension(p->istate,
2031
0
           p->mmap + src_offset,
2032
0
           p->mmap + src_offset + 8,
2033
0
           extsize) < 0) {
2034
0
      munmap((void *)p->mmap, p->mmap_size);
2035
0
      die(_("index file corrupt"));
2036
0
    }
2037
0
    src_offset += 8;
2038
0
    src_offset += extsize;
2039
0
  }
2040
2041
0
  return NULL;
2042
0
}
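The framing the loop above walks can be pictured with an illustrative record layout; the struct below is a sketch only, since the real code reads the length with get_be32() because records are not guaranteed to be aligned.

struct index_extension_record_sketch {
        char signature[4];     /* e.g. "TREE", "REUC", "UNTR", "EOIE" */
        uint32_t payload_len;  /* big-endian on disk */
        /* payload_len bytes of payload follow, then the next record */
};

The loop stops once there is no longer room for another 8-byte record header ahead of the trailing rawsz-byte index checksum.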
2043
2044
/*
2045
 * A helper function that will load the specified range of cache entries
2046
 * from the memory mapped file and add them to the given index.
2047
 */
2048
static unsigned long load_cache_entry_block(struct index_state *istate,
2049
      struct mem_pool *ce_mem_pool, int offset, int nr, const char *mmap,
2050
      unsigned long start_offset, const struct cache_entry *previous_ce)
2051
0
{
2052
0
  int i;
2053
0
  unsigned long src_offset = start_offset;
2054
2055
0
  for (i = offset; i < offset + nr; i++) {
2056
0
    struct cache_entry *ce;
2057
0
    unsigned long consumed;
2058
2059
0
    ce = create_from_disk(ce_mem_pool, istate->version,
2060
0
              mmap + src_offset,
2061
0
              &consumed, previous_ce);
2062
0
    set_index_entry(istate, i, ce);
2063
2064
0
    src_offset += consumed;
2065
0
    previous_ce = ce;
2066
0
  }
2067
0
  return src_offset - start_offset;
2068
0
}
2069
2070
static unsigned long load_all_cache_entries(struct index_state *istate,
2071
      const char *mmap, size_t mmap_size, unsigned long src_offset)
2072
0
{
2073
0
  unsigned long consumed;
2074
2075
0
  istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
2076
0
  if (istate->version == 4) {
2077
0
    mem_pool_init(istate->ce_mem_pool,
2078
0
        estimate_cache_size_from_compressed(istate->cache_nr));
2079
0
  } else {
2080
0
    mem_pool_init(istate->ce_mem_pool,
2081
0
        estimate_cache_size(mmap_size, istate->cache_nr));
2082
0
  }
2083
2084
0
  consumed = load_cache_entry_block(istate, istate->ce_mem_pool,
2085
0
          0, istate->cache_nr, mmap, src_offset, NULL);
2086
0
  return consumed;
2087
0
}
2088
2089
/*
2090
 * Mostly randomly chosen maximum thread counts: we
2091
 * cap the parallelism to online_cpus() threads, and we want
2092
 * to have at least 10000 cache entries per thread for it to
2093
 * be worth starting a thread.
2094
 */
2095
2096
0
#define THREAD_COST   (10000)
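A hedged worked example of this heuristic as used in do_read_index() below: an index with 45,000 entries on an 8-core machine gets 45000 / THREAD_COST == 4 reader threads (capped at online_cpus()), while a 5,000-entry index computes 0 and the read stays single-threaded.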
2097
2098
struct load_cache_entries_thread_data
2099
{
2100
  pthread_t pthread;
2101
  struct index_state *istate;
2102
  struct mem_pool *ce_mem_pool;
2103
  int offset;
2104
  const char *mmap;
2105
  struct index_entry_offset_table *ieot;
2106
  int ieot_start;   /* starting index into the ieot array */
2107
  int ieot_blocks;  /* count of ieot entries to process */
2108
  unsigned long consumed; /* return # of bytes in index file processed */
2109
};
2110
2111
/*
2112
 * A thread proc to run the load_cache_entries() computation
2113
 * across multiple background threads.
2114
 */
2115
static void *load_cache_entries_thread(void *_data)
2116
0
{
2117
0
  struct load_cache_entries_thread_data *p = _data;
2118
0
  int i;
2119
2120
  /* iterate across all ieot blocks assigned to this thread */
2121
0
  for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) {
2122
0
    p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,
2123
0
      p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL);
2124
0
    p->offset += p->ieot->entries[i].nr;
2125
0
  }
2126
0
  return NULL;
2127
0
}
2128
2129
static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,
2130
             int nr_threads, struct index_entry_offset_table *ieot)
2131
0
{
2132
0
  int i, offset, ieot_blocks, ieot_start, err;
2133
0
  struct load_cache_entries_thread_data *data;
2134
0
  unsigned long consumed = 0;
2135
2136
  /* a little sanity checking */
2137
0
  if (istate->name_hash_initialized)
2138
0
    BUG("the name hash isn't thread safe");
2139
2140
0
  istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
2141
0
  mem_pool_init(istate->ce_mem_pool, 0);
2142
2143
  /* ensure we have no more threads than we have blocks to process */
2144
0
  if (nr_threads > ieot->nr)
2145
0
    nr_threads = ieot->nr;
2146
0
  CALLOC_ARRAY(data, nr_threads);
2147
2148
0
  offset = ieot_start = 0;
2149
0
  ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads);
2150
0
  for (i = 0; i < nr_threads; i++) {
2151
0
    struct load_cache_entries_thread_data *p = &data[i];
2152
0
    int nr, j;
2153
2154
0
    if (ieot_start + ieot_blocks > ieot->nr)
2155
0
      ieot_blocks = ieot->nr - ieot_start;
2156
2157
0
    p->istate = istate;
2158
0
    p->offset = offset;
2159
0
    p->mmap = mmap;
2160
0
    p->ieot = ieot;
2161
0
    p->ieot_start = ieot_start;
2162
0
    p->ieot_blocks = ieot_blocks;
2163
2164
    /* create a mem_pool for each thread */
2165
0
    nr = 0;
2166
0
    for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
2167
0
      nr += p->ieot->entries[j].nr;
2168
0
    p->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
2169
0
    if (istate->version == 4) {
2170
0
      mem_pool_init(p->ce_mem_pool,
2171
0
        estimate_cache_size_from_compressed(nr));
2172
0
    } else {
2173
0
      mem_pool_init(p->ce_mem_pool,
2174
0
        estimate_cache_size(mmap_size, nr));
2175
0
    }
2176
2177
0
    err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p);
2178
0
    if (err)
2179
0
      die(_("unable to create load_cache_entries thread: %s"), strerror(err));
2180
2181
    /* increment by the number of cache entries in the ieot block being processed */
2182
0
    for (j = 0; j < ieot_blocks; j++)
2183
0
      offset += ieot->entries[ieot_start + j].nr;
2184
0
    ieot_start += ieot_blocks;
2185
0
  }
2186
2187
0
  for (i = 0; i < nr_threads; i++) {
2188
0
    struct load_cache_entries_thread_data *p = &data[i];
2189
2190
0
    err = pthread_join(p->pthread, NULL);
2191
0
    if (err)
2192
0
      die(_("unable to join load_cache_entries thread: %s"), strerror(err));
2193
0
    mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);
2194
0
    consumed += p->consumed;
2195
0
  }
2196
2197
0
  free(data);
2198
2199
0
  return consumed;
2200
0
}
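A hedged worked example of the block distribution above: with ieot->nr == 10 and nr_threads == 4, DIV_ROUND_UP(10, 4) hands 3 blocks to each of the first three threads, and the "ieot_start + ieot_blocks > ieot->nr" clamp leaves the single remaining block to the last thread.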
2201
2202
static void set_new_index_sparsity(struct index_state *istate)
2203
1.22k
{
2204
  /*
2205
   * If the index's repo exists, mark it sparse according to
2206
   * repo settings.
2207
   */
2208
1.22k
  prepare_repo_settings(istate->repo);
2209
1.22k
  if (!istate->repo->settings.command_requires_full_index &&
2210
1.22k
      is_sparse_index_allowed(istate, 0))
2211
0
    istate->sparse_index = 1;
2212
1.22k
}
2213
2214
/* remember to discard_cache() before reading a different cache! */
2215
int do_read_index(struct index_state *istate, const char *path, int must_exist)
2216
1.22k
{
2217
1.22k
  int fd;
2218
1.22k
  struct stat st;
2219
1.22k
  unsigned long src_offset;
2220
1.22k
  const struct cache_header *hdr;
2221
1.22k
  const char *mmap;
2222
1.22k
  size_t mmap_size;
2223
1.22k
  struct load_index_extensions p;
2224
1.22k
  size_t extension_offset = 0;
2225
1.22k
  int nr_threads, cpus;
2226
1.22k
  struct index_entry_offset_table *ieot = NULL;
2227
2228
1.22k
  if (istate->initialized)
2229
0
    return istate->cache_nr;
2230
2231
1.22k
  istate->timestamp.sec = 0;
2232
1.22k
  istate->timestamp.nsec = 0;
2233
1.22k
  fd = open(path, O_RDONLY);
2234
1.22k
  if (fd < 0) {
2235
1.22k
    if (!must_exist && errno == ENOENT) {
2236
1.22k
      set_new_index_sparsity(istate);
2237
1.22k
      istate->initialized = 1;
2238
1.22k
      return 0;
2239
1.22k
    }
2240
0
    die_errno(_("%s: index file open failed"), path);
2241
1.22k
  }
2242
2243
0
  if (fstat(fd, &st))
2244
0
    die_errno(_("%s: cannot stat the open index"), path);
2245
2246
0
  mmap_size = xsize_t(st.st_size);
2247
0
  if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
2248
0
    die(_("%s: index file smaller than expected"), path);
2249
2250
0
  mmap = xmmap_gently(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
2251
0
  if (mmap == MAP_FAILED)
2252
0
    die_errno(_("%s: unable to map index file%s"), path,
2253
0
      mmap_os_err());
2254
0
  close(fd);
2255
2256
0
  hdr = (const struct cache_header *)mmap;
2257
0
  if (verify_hdr(hdr, mmap_size) < 0)
2258
0
    goto unmap;
2259
2260
0
  oidread(&istate->oid, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
2261
0
  istate->version = ntohl(hdr->hdr_version);
2262
0
  istate->cache_nr = ntohl(hdr->hdr_entries);
2263
0
  istate->cache_alloc = alloc_nr(istate->cache_nr);
2264
0
  CALLOC_ARRAY(istate->cache, istate->cache_alloc);
2265
0
  istate->initialized = 1;
2266
2267
0
  p.istate = istate;
2268
0
  p.mmap = mmap;
2269
0
  p.mmap_size = mmap_size;
2270
2271
0
  src_offset = sizeof(*hdr);
2272
2273
0
  if (git_config_get_index_threads(&nr_threads))
2274
0
    nr_threads = 1;
2275
2276
  /* TODO: does creating more threads than cores help? */
2277
0
  if (!nr_threads) {
2278
0
    nr_threads = istate->cache_nr / THREAD_COST;
2279
0
    cpus = online_cpus();
2280
0
    if (nr_threads > cpus)
2281
0
      nr_threads = cpus;
2282
0
  }
2283
2284
0
  if (!HAVE_THREADS)
2285
0
    nr_threads = 1;
2286
2287
0
  if (nr_threads > 1) {
2288
0
    extension_offset = read_eoie_extension(mmap, mmap_size);
2289
0
    if (extension_offset) {
2290
0
      int err;
2291
2292
0
      p.src_offset = extension_offset;
2293
0
      err = pthread_create(&p.pthread, NULL, load_index_extensions, &p);
2294
0
      if (err)
2295
0
        die(_("unable to create load_index_extensions thread: %s"), strerror(err));
2296
2297
0
      nr_threads--;
2298
0
    }
2299
0
  }
2300
2301
  /*
2302
   * Locate and read the index entry offset table so that we can use it
2303
   * to multi-thread the reading of the cache entries.
2304
   */
2305
0
  if (extension_offset && nr_threads > 1)
2306
0
    ieot = read_ieot_extension(mmap, mmap_size, extension_offset);
2307
2308
0
  if (ieot) {
2309
0
    src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, nr_threads, ieot);
2310
0
    free(ieot);
2311
0
  } else {
2312
0
    src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
2313
0
  }
2314
2315
0
  istate->timestamp.sec = st.st_mtime;
2316
0
  istate->timestamp.nsec = ST_MTIME_NSEC(st);
2317
2318
  /* if we created a thread, join it otherwise load the extensions on the primary thread */
2319
0
  if (extension_offset) {
2320
0
    int ret = pthread_join(p.pthread, NULL);
2321
0
    if (ret)
2322
0
      die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
2323
0
  } else {
2324
0
    p.src_offset = src_offset;
2325
0
    load_index_extensions(&p);
2326
0
  }
2327
0
  munmap((void *)mmap, mmap_size);
2328
2329
  /*
2330
   * TODO trace2: replace "the_repository" with the actual repo instance
2331
   * that is associated with the given "istate".
2332
   */
2333
0
  trace2_data_intmax("index", the_repository, "read/version",
2334
0
         istate->version);
2335
0
  trace2_data_intmax("index", the_repository, "read/cache_nr",
2336
0
         istate->cache_nr);
2337
2338
  /*
2339
   * If the command explicitly requires a full index, force it
2340
   * to be full. Otherwise, correct the sparsity based on repository
2341
   * settings and other properties of the index (if necessary).
2342
   */
2343
0
  prepare_repo_settings(istate->repo);
2344
0
  if (istate->repo->settings.command_requires_full_index)
2345
0
    ensure_full_index(istate);
2346
0
  else
2347
0
    ensure_correct_sparsity(istate);
2348
2349
0
  return istate->cache_nr;
2350
2351
0
unmap:
2352
0
  munmap((void *)mmap, mmap_size);
2353
0
  die(_("index file corrupt"));
2354
0
}
2355
2356
/*
2357
 * Signal that the shared index is used by updating its mtime.
2358
 *
2359
 * This way, shared index files can be removed if they have not been used
2360
 * for some time.
2361
 */
2362
static void freshen_shared_index(const char *shared_index, int warn)
2363
0
{
2364
0
  if (!check_and_freshen_file(shared_index, 1) && warn)
2365
0
    warning(_("could not freshen shared index '%s'"), shared_index);
2366
0
}
2367
2368
int read_index_from(struct index_state *istate, const char *path,
2369
        const char *gitdir)
2370
39.8k
{
2371
39.8k
  struct split_index *split_index;
2372
39.8k
  int ret;
2373
39.8k
  char *base_oid_hex;
2374
39.8k
  char *base_path;
2375
2376
  /* istate->initialized covers both .git/index and .git/sharedindex.xxx */
2377
39.8k
  if (istate->initialized)
2378
38.6k
    return istate->cache_nr;
2379
2380
  /*
2381
   * TODO trace2: replace "the_repository" with the actual repo instance
2382
   * that is associated with the given "istate".
2383
   */
2384
1.22k
  trace2_region_enter_printf("index", "do_read_index", the_repository,
2385
1.22k
           "%s", path);
2386
1.22k
  trace_performance_enter();
2387
1.22k
  ret = do_read_index(istate, path, 0);
2388
1.22k
  trace_performance_leave("read cache %s", path);
2389
1.22k
  trace2_region_leave_printf("index", "do_read_index", the_repository,
2390
1.22k
           "%s", path);
2391
2392
1.22k
  split_index = istate->split_index;
2393
1.22k
  if (!split_index || is_null_oid(&split_index->base_oid)) {
2394
1.22k
    post_read_index_from(istate);
2395
1.22k
    return ret;
2396
1.22k
  }
2397
2398
0
  trace_performance_enter();
2399
0
  if (split_index->base)
2400
0
    release_index(split_index->base);
2401
0
  else
2402
0
    ALLOC_ARRAY(split_index->base, 1);
2403
0
  index_state_init(split_index->base, istate->repo);
2404
2405
0
  base_oid_hex = oid_to_hex(&split_index->base_oid);
2406
0
  base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
2407
0
  if (file_exists(base_path)) {
2408
0
    trace2_region_enter_printf("index", "shared/do_read_index",
2409
0
          the_repository, "%s", base_path);
2410
2411
0
    ret = do_read_index(split_index->base, base_path, 0);
2412
0
    trace2_region_leave_printf("index", "shared/do_read_index",
2413
0
          the_repository, "%s", base_path);
2414
0
  } else {
2415
0
    char *path_copy = xstrdup(path);
2416
0
    char *base_path2 = xstrfmt("%s/sharedindex.%s",
2417
0
             dirname(path_copy), base_oid_hex);
2418
0
    free(path_copy);
2419
0
    trace2_region_enter_printf("index", "shared/do_read_index",
2420
0
             the_repository, "%s", base_path2);
2421
0
    ret = do_read_index(split_index->base, base_path2, 1);
2422
0
    trace2_region_leave_printf("index", "shared/do_read_index",
2423
0
             the_repository, "%s", base_path2);
2424
0
    free(base_path2);
2425
0
  }
2426
0
  if (!oideq(&split_index->base_oid, &split_index->base->oid))
2427
0
    die(_("broken index, expect %s in %s, got %s"),
2428
0
        base_oid_hex, base_path,
2429
0
        oid_to_hex(&split_index->base->oid));
2430
2431
0
  freshen_shared_index(base_path, 0);
2432
0
  merge_base_index(istate);
2433
0
  post_read_index_from(istate);
2434
0
  trace_performance_leave("read cache %s", base_path);
2435
0
  free(base_path);
2436
0
  return ret;
2437
0
}
2438
2439
int is_index_unborn(struct index_state *istate)
2440
0
{
2441
0
  return (!istate->cache_nr && !istate->timestamp.sec);
2442
0
}
2443
2444
void index_state_init(struct index_state *istate, struct repository *r)
2445
39.8k
{
2446
39.8k
  struct index_state blank = INDEX_STATE_INIT(r);
2447
39.8k
  memcpy(istate, &blank, sizeof(*istate));
2448
39.8k
}
2449
2450
void release_index(struct index_state *istate)
2451
22.9k
{
2452
  /*
2453
   * Cache entries in istate->cache[] should have been allocated
2454
   * from the memory pool associated with this index, or from an
2455
   * associated split_index. There is no need to free individual
2456
   * cache entries. validate_cache_entries can detect when this
2457
   * assertion does not hold.
2458
   */
2459
22.9k
  validate_cache_entries(istate);
2460
2461
22.9k
  resolve_undo_clear_index(istate);
2462
22.9k
  free_name_hash(istate);
2463
22.9k
  cache_tree_free(&(istate->cache_tree));
2464
22.9k
  free(istate->fsmonitor_last_update);
2465
22.9k
  free(istate->cache);
2466
22.9k
  discard_split_index(istate);
2467
22.9k
  free_untracked_cache(istate->untracked);
2468
2469
22.9k
  if (istate->sparse_checkout_patterns) {
2470
0
    clear_pattern_list(istate->sparse_checkout_patterns);
2471
0
    FREE_AND_NULL(istate->sparse_checkout_patterns);
2472
0
  }
2473
2474
22.9k
  if (istate->ce_mem_pool) {
2475
1.22k
    mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
2476
1.22k
    FREE_AND_NULL(istate->ce_mem_pool);
2477
1.22k
  }
2478
22.9k
}
2479
2480
void discard_index(struct index_state *istate)
2481
22.9k
{
2482
22.9k
  release_index(istate);
2483
22.9k
  index_state_init(istate, istate->repo);
2484
22.9k
}
2485
2486
/*
2487
 * Validate the cache entries of this index.
2488
 * All cache entries associated with this index
2489
 * should have been allocated by the memory pool
2490
 * associated with this index, or by a referenced
2491
 * split index.
2492
 */
2493
void validate_cache_entries(const struct index_state *istate)
2494
22.9k
{
2495
22.9k
  int i;
2496
2497
22.9k
  if (!should_validate_cache_entries() || !istate || !istate->initialized)
2498
22.9k
    return;
2499
2500
0
  for (i = 0; i < istate->cache_nr; i++) {
2501
0
    if (!istate) {
2502
0
      BUG("cache entry is not allocated from expected memory pool");
2503
0
    } else if (!istate->ce_mem_pool ||
2504
0
      !mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
2505
0
      if (!istate->split_index ||
2506
0
        !istate->split_index->base ||
2507
0
        !istate->split_index->base->ce_mem_pool ||
2508
0
        !mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
2509
0
        BUG("cache entry is not allocated from expected memory pool");
2510
0
      }
2511
0
    }
2512
0
  }
2513
2514
0
  if (istate->split_index)
2515
0
    validate_cache_entries(istate->split_index->base);
2516
0
}
2517
2518
int unmerged_index(const struct index_state *istate)
2519
0
{
2520
0
  int i;
2521
0
  for (i = 0; i < istate->cache_nr; i++) {
2522
0
    if (ce_stage(istate->cache[i]))
2523
0
      return 1;
2524
0
  }
2525
0
  return 0;
2526
0
}
2527
2528
int repo_index_has_changes(struct repository *repo,
2529
         struct tree *tree,
2530
         struct strbuf *sb)
2531
0
{
2532
0
  struct index_state *istate = repo->index;
2533
0
  struct object_id cmp;
2534
0
  int i;
2535
2536
0
  if (tree)
2537
0
    cmp = tree->object.oid;
2538
0
  if (tree || !repo_get_oid_tree(repo, "HEAD", &cmp)) {
2539
0
    struct diff_options opt;
2540
2541
0
    repo_diff_setup(repo, &opt);
2542
0
    opt.flags.exit_with_status = 1;
2543
0
    if (!sb)
2544
0
      opt.flags.quick = 1;
2545
0
    diff_setup_done(&opt);
2546
0
    do_diff_cache(&cmp, &opt);
2547
0
    diffcore_std(&opt);
2548
0
    for (i = 0; sb && i < diff_queued_diff.nr; i++) {
2549
0
      if (i)
2550
0
        strbuf_addch(sb, ' ');
2551
0
      strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
2552
0
    }
2553
0
    diff_flush(&opt);
2554
0
    return opt.flags.has_changes != 0;
2555
0
  } else {
2556
    /* TODO: audit for interaction with sparse-index. */
2557
0
    ensure_full_index(istate);
2558
0
    for (i = 0; sb && i < istate->cache_nr; i++) {
2559
0
      if (i)
2560
0
        strbuf_addch(sb, ' ');
2561
0
      strbuf_addstr(sb, istate->cache[i]->name);
2562
0
    }
2563
0
    return !!istate->cache_nr;
2564
0
  }
2565
0
}
2566
2567
static int write_index_ext_header(struct hashfile *f,
2568
          git_hash_ctx *eoie_f,
2569
          unsigned int ext,
2570
          unsigned int sz)
2571
13.4k
{
2572
13.4k
  hashwrite_be32(f, ext);
2573
13.4k
  hashwrite_be32(f, sz);
2574
2575
13.4k
  if (eoie_f) {
2576
0
    ext = htonl(ext);
2577
0
    sz = htonl(sz);
2578
0
    the_hash_algo->update_fn(eoie_f, &ext, sizeof(ext));
2579
0
    the_hash_algo->update_fn(eoie_f, &sz, sizeof(sz));
2580
0
  }
2581
13.4k
  return 0;
2582
13.4k
}
2583
2584
static void ce_smudge_racily_clean_entry(struct index_state *istate,
2585
           struct cache_entry *ce)
2586
0
{
2587
  /*
2588
   * The only thing we care about in this function is to smudge the
2589
   * falsely clean entry due to touch-update-touch race, so we leave
2590
   * everything else as they are.  We are called for entries whose
2591
   * ce_stat_data.sd_mtime match the index file mtime.
2592
   *
2593
   * Note that this actually does not do much for gitlinks, for
2594
   * which ce_match_stat_basic() always goes to the actual
2595
   * contents.  The caller checks with is_racy_timestamp() which
2596
   * always says "no" for gitlinks, so we are not called for them ;-)
2597
   */
2598
0
  struct stat st;
2599
2600
0
  if (lstat(ce->name, &st) < 0)
2601
0
    return;
2602
0
  if (ce_match_stat_basic(ce, &st))
2603
0
    return;
2604
0
  if (ce_modified_check_fs(istate, ce, &st)) {
2605
    /* This is "racily clean"; smudge it.  Note that this
2606
     * is tricky code.  At first glance, it may appear
2607
     * that it can break with this sequence:
2608
     *
2609
     * $ echo xyzzy >frotz
2610
     * $ git-update-index --add frotz
2611
     * $ : >frotz
2612
     * $ sleep 3
2613
     * $ echo filfre >nitfol
2614
     * $ git-update-index --add nitfol
2615
     *
2616
     * but it does not.  When the second update-index runs,
2617
     * it notices that the entry "frotz" has the same timestamp
2618
     * as index, and if we were to smudge it by resetting its
2619
     * size to zero here, then the object name recorded
2620
     * in index is the 6-byte file but the cached stat information
2621
     * becomes zero --- which would then match what we would
2622
     * obtain from the filesystem next time we stat("frotz").
2623
     *
2624
     * However, the second update-index, before calling
2625
     * this function, notices that the cached size is 6
2626
     * bytes and what is on the filesystem is an empty
2627
     * file, and never calls us, so the cached size information
2628
     * for "frotz" stays 6 which does not match the filesystem.
2629
     */
2630
0
    ce->ce_stat_data.sd_size = 0;
2631
0
  }
2632
0
}
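A simplified, hedged sketch of the "racy" test the caller performs before reaching this function; the real is_racy_timestamp() additionally skips gitlinks and CE_VALID entries and compares nanoseconds when USE_NSEC is in effect.

static int mtime_is_racy_sketch(const struct index_state *istate,
                                const struct cache_entry *ce)
{
        /*
         * If the cached file mtime is not strictly older than the index
         * file's own timestamp, the file may have been modified again
         * within the same timestamp granularity, so its stat data cannot
         * be trusted and the entry gets its cached size smudged to 0.
         */
        return istate->timestamp.sec &&
               istate->timestamp.sec <= ce->ce_stat_data.sd_mtime.sec;
}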
2633
2634
/* Copy miscellaneous fields but not the name */
2635
static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
2636
               struct cache_entry *ce)
2637
100k
{
2638
100k
  short flags;
2639
100k
  const unsigned hashsz = the_hash_algo->rawsz;
2640
100k
  uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);
2641
2642
100k
  ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
2643
100k
  ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
2644
100k
  ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);
2645
100k
  ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);
2646
100k
  ondisk->dev  = htonl(ce->ce_stat_data.sd_dev);
2647
100k
  ondisk->ino  = htonl(ce->ce_stat_data.sd_ino);
2648
100k
  ondisk->mode = htonl(ce->ce_mode);
2649
100k
  ondisk->uid  = htonl(ce->ce_stat_data.sd_uid);
2650
100k
  ondisk->gid  = htonl(ce->ce_stat_data.sd_gid);
2651
100k
  ondisk->size = htonl(ce->ce_stat_data.sd_size);
2652
100k
  hashcpy(ondisk->data, ce->oid.hash);
2653
2654
100k
  flags = ce->ce_flags & ~CE_NAMEMASK;
2655
100k
  flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
2656
100k
  flagsp[0] = htons(flags);
2657
100k
  if (ce->ce_flags & CE_EXTENDED) {
2658
0
    flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
2659
0
  }
2660
100k
}
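A hedged worked example of the flags packing above: a 20-character path stores 20 in the low 12 bits of the flags word, while a 5,000-character path stores the CE_NAMEMASK sentinel (0xfff) and the reader (create_from_disk() above) falls back to strlen() on the stored name.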
2661
2662
static int ce_write_entry(struct hashfile *f, struct cache_entry *ce,
2663
        struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
2664
100k
{
2665
100k
  int size;
2666
100k
  unsigned int saved_namelen;
2667
100k
  int stripped_name = 0;
2668
100k
  static unsigned char padding[8] = { 0x00 };
2669
2670
100k
  if (ce->ce_flags & CE_STRIP_NAME) {
2671
0
    saved_namelen = ce_namelen(ce);
2672
0
    ce->ce_namelen = 0;
2673
0
    stripped_name = 1;
2674
0
  }
2675
2676
100k
  size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);
2677
2678
100k
  if (!previous_name) {
2679
100k
    int len = ce_namelen(ce);
2680
100k
    copy_cache_entry_to_ondisk(ondisk, ce);
2681
100k
    hashwrite(f, ondisk, size);
2682
100k
    hashwrite(f, ce->name, len);
2683
100k
    hashwrite(f, padding, align_padding_size(size, len));
2684
100k
  } else {
2685
0
    int common, to_remove, prefix_size;
2686
0
    unsigned char to_remove_vi[16];
2687
0
    for (common = 0;
2688
0
         (ce->name[common] &&
2689
0
          common < previous_name->len &&
2690
0
          ce->name[common] == previous_name->buf[common]);
2691
0
         common++)
2692
0
      ; /* still matching */
2693
0
    to_remove = previous_name->len - common;
2694
0
    prefix_size = encode_varint(to_remove, to_remove_vi);
2695
2696
0
    copy_cache_entry_to_ondisk(ondisk, ce);
2697
0
    hashwrite(f, ondisk, size);
2698
0
    hashwrite(f, to_remove_vi, prefix_size);
2699
0
    hashwrite(f, ce->name + common, ce_namelen(ce) - common);
2700
0
    hashwrite(f, padding, 1);
2701
2702
0
    strbuf_splice(previous_name, common, to_remove,
2703
0
            ce->name + common, ce_namelen(ce) - common);
2704
0
  }
2705
100k
  if (stripped_name) {
2706
0
    ce->ce_namelen = saved_namelen;
2707
0
    ce->ce_flags &= ~CE_STRIP_NAME;
2708
0
  }
2709
2710
100k
  return 0;
2711
100k
}
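A hedged worked example of the compressed (previous_name) branch above: with previous_name == "src/foo.c" and ce->name == "src/frotz.c", the common prefix is 5 bytes ("src/f"), so to_remove == 9 - 5 == 4 is written as a varint, followed by the 6-byte suffix "rotz.c" and a single NUL pad byte; create_from_disk() reverses this on read.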
2712
2713
/*
2714
 * This function verifies if index_state has the correct sha1 of the
2715
 * index file.  Don't die if we have any other failure, just return 0.
2716
 */
2717
static int verify_index_from(const struct index_state *istate, const char *path)
2718
0
{
2719
0
  int fd;
2720
0
  ssize_t n;
2721
0
  struct stat st;
2722
0
  unsigned char hash[GIT_MAX_RAWSZ];
2723
2724
0
  if (!istate->initialized)
2725
0
    return 0;
2726
2727
0
  fd = open(path, O_RDONLY);
2728
0
  if (fd < 0)
2729
0
    return 0;
2730
2731
0
  if (fstat(fd, &st))
2732
0
    goto out;
2733
2734
0
  if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
2735
0
    goto out;
2736
2737
0
  n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
2738
0
  if (n != the_hash_algo->rawsz)
2739
0
    goto out;
2740
2741
0
  if (!hasheq(istate->oid.hash, hash))
2742
0
    goto out;
2743
2744
0
  close(fd);
2745
0
  return 1;
2746
2747
0
out:
2748
0
  close(fd);
2749
0
  return 0;
2750
0
}
2751
2752
static int repo_verify_index(struct repository *repo)
2753
0
{
2754
0
  return verify_index_from(repo->index, repo->index_file);
2755
0
}
2756
2757
int has_racy_timestamp(struct index_state *istate)
2758
0
{
2759
0
  int entries = istate->cache_nr;
2760
0
  int i;
2761
2762
0
  for (i = 0; i < entries; i++) {
2763
0
    struct cache_entry *ce = istate->cache[i];
2764
0
    if (is_racy_timestamp(istate, ce))
2765
0
      return 1;
2766
0
  }
2767
0
  return 0;
2768
0
}
2769
2770
void repo_update_index_if_able(struct repository *repo,
2771
             struct lock_file *lockfile)
2772
0
{
2773
0
  if ((repo->index->cache_changed ||
2774
0
       has_racy_timestamp(repo->index)) &&
2775
0
      repo_verify_index(repo))
2776
0
    write_locked_index(repo->index, lockfile, COMMIT_LOCK);
2777
0
  else
2778
0
    rollback_lock_file(lockfile);
2779
0
}
2780
2781
static int record_eoie(void)
2782
14.6k
{
2783
14.6k
  int val;
2784
2785
14.6k
  if (!git_config_get_bool("index.recordendofindexentries", &val))
2786
0
    return val;
2787
2788
  /*
2789
   * As a convenience, the end of index entries extension
2790
   * used for threading is written by default if the user
2791
   * explicitly requested threaded index reads.
2792
   */
2793
14.6k
  return !git_config_get_index_threads(&val) && val != 1;
2794
14.6k
}
2795
2796
static int record_ieot(void)
2797
0
{
2798
0
  int val;
2799
2800
0
  if (!git_config_get_bool("index.recordoffsettable", &val))
2801
0
    return val;
2802
2803
  /*
2804
   * As a convenience, the offset table used for threading is
2805
   * written by default if the user explicitly requested
2806
   * threaded index reads.
2807
   */
2808
0
  return !git_config_get_index_threads(&val) && val != 1;
2809
0
}
2810
2811
enum write_extensions {
2812
  WRITE_NO_EXTENSION =              0,
2813
  WRITE_SPLIT_INDEX_EXTENSION =     1<<0,
2814
  WRITE_CACHE_TREE_EXTENSION =      1<<1,
2815
  WRITE_RESOLVE_UNDO_EXTENSION =    1<<2,
2816
  WRITE_UNTRACKED_CACHE_EXTENSION = 1<<3,
2817
  WRITE_FSMONITOR_EXTENSION =       1<<4,
2818
};
2819
0
#define WRITE_ALL_EXTENSIONS ((enum write_extensions)-1)
2820
2821
/*
2822
 * On success, `tempfile` is closed. If it is the temporary file
2823
 * of a `struct lock_file`, we will therefore effectively perform
2824
 * a 'close_lock_file_gently()`. Since that is an implementation
2825
 * detail of lockfiles, callers of `do_write_index()` should not
2826
 * rely on it.
2827
 */
2828
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
2829
        enum write_extensions write_extensions, unsigned flags)
2830
14.6k
{
2831
14.6k
  uint64_t start = getnanotime();
2832
14.6k
  struct hashfile *f;
2833
14.6k
  git_hash_ctx *eoie_c = NULL;
2834
14.6k
  struct cache_header hdr;
2835
14.6k
  int i, err = 0, removed, extended, hdr_version;
2836
14.6k
  struct cache_entry **cache = istate->cache;
2837
14.6k
  int entries = istate->cache_nr;
2838
14.6k
  struct stat st;
2839
14.6k
  struct ondisk_cache_entry ondisk;
2840
14.6k
  struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
2841
14.6k
  int drop_cache_tree = istate->drop_cache_tree;
2842
14.6k
  off_t offset;
2843
14.6k
  int csum_fsync_flag;
2844
14.6k
  int ieot_entries = 1;
2845
14.6k
  struct index_entry_offset_table *ieot = NULL;
2846
14.6k
  int nr, nr_threads;
2847
14.6k
  struct repository *r = istate->repo;
2848
2849
14.6k
  f = hashfd(tempfile->fd, tempfile->filename.buf);
2850
2851
14.6k
  prepare_repo_settings(r);
2852
14.6k
  f->skip_hash = r->settings.index_skip_hash;
2853
2854
115k
  for (i = removed = extended = 0; i < entries; i++) {
2855
100k
    if (cache[i]->ce_flags & CE_REMOVE)
2856
0
      removed++;
2857
2858
    /* reduce extended entries if possible */
2859
100k
    cache[i]->ce_flags &= ~CE_EXTENDED;
2860
100k
    if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) {
2861
0
      extended++;
2862
0
      cache[i]->ce_flags |= CE_EXTENDED;
2863
0
    }
2864
100k
  }
2865
2866
14.6k
  if (!istate->version)
2867
1.22k
    istate->version = get_index_format_default(r);
2868
2869
  /* demote version 3 to version 2 when the latter suffices */
2870
14.6k
  if (istate->version == 3 || istate->version == 2)
2871
14.6k
    istate->version = extended ? 3 : 2;
2872
2873
14.6k
  hdr_version = istate->version;
2874
2875
14.6k
  hdr.hdr_signature = htonl(CACHE_SIGNATURE);
2876
14.6k
  hdr.hdr_version = htonl(hdr_version);
2877
14.6k
  hdr.hdr_entries = htonl(entries - removed);
2878
2879
14.6k
  hashwrite(f, &hdr, sizeof(hdr));
2880
2881
14.6k
  if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads))
2882
14.6k
    nr_threads = 1;
2883
2884
14.6k
  if (nr_threads != 1 && record_ieot()) {
2885
0
    int ieot_blocks, cpus;
2886
2887
    /*
2888
     * ensure default number of ieot blocks maps evenly to the
2889
     * default number of threads that will process them, leaving
2890
     * room for the thread to load the index extensions.
2891
     */
2892
0
    if (!nr_threads) {
2893
0
      ieot_blocks = istate->cache_nr / THREAD_COST;
2894
0
      cpus = online_cpus();
2895
0
      if (ieot_blocks > cpus - 1)
2896
0
        ieot_blocks = cpus - 1;
2897
0
    } else {
2898
0
      ieot_blocks = nr_threads;
2899
0
      if (ieot_blocks > istate->cache_nr)
2900
0
        ieot_blocks = istate->cache_nr;
2901
0
    }
2902
2903
    /*
2904
     * no reason to write out the IEOT extension if we don't
2905
     * have enough blocks to utilize multi-threading
2906
     */
2907
0
    if (ieot_blocks > 1) {
2908
0
      ieot = xcalloc(1, sizeof(struct index_entry_offset_table)
2909
0
        + (ieot_blocks * sizeof(struct index_entry_offset)));
2910
0
      ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
2911
0
    }
2912
0
  }
2913
2914
14.6k
  offset = hashfile_total(f);
2915
2916
14.6k
  nr = 0;
2917
14.6k
  previous_name = (hdr_version == 4) ? &previous_name_buf : NULL;
2918
2919
115k
  for (i = 0; i < entries; i++) {
2920
100k
    struct cache_entry *ce = cache[i];
2921
100k
    if (ce->ce_flags & CE_REMOVE)
2922
0
      continue;
2923
100k
    if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))
2924
0
      ce_smudge_racily_clean_entry(istate, ce);
2925
100k
    if (is_null_oid(&ce->oid)) {
2926
0
      static const char msg[] = "cache entry has null sha1: %s";
2927
0
      static int allow = -1;
2928
2929
0
      if (allow < 0)
2930
0
        allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0);
2931
0
      if (allow)
2932
0
        warning(msg, ce->name);
2933
0
      else
2934
0
        err = error(msg, ce->name);
2935
2936
0
      drop_cache_tree = 1;
2937
0
    }
2938
100k
    if (ieot && i && (i % ieot_entries == 0)) {
2939
0
      ieot->entries[ieot->nr].nr = nr;
2940
0
      ieot->entries[ieot->nr].offset = offset;
2941
0
      ieot->nr++;
2942
      /*
2943
       * If we have a V4 index, set the first byte to an invalid
2944
       * character to ensure there is nothing common with the previous
2945
       * entry
2946
       */
2947
0
      if (previous_name)
2948
0
        previous_name->buf[0] = 0;
2949
0
      nr = 0;
2950
2951
0
      offset = hashfile_total(f);
2952
0
    }
2953
100k
    if (ce_write_entry(f, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0)
2954
0
      err = -1;
2955
2956
100k
    if (err)
2957
0
      break;
2958
100k
    nr++;
2959
100k
  }
2960
14.6k
  if (ieot && nr) {
2961
0
    ieot->entries[ieot->nr].nr = nr;
2962
0
    ieot->entries[ieot->nr].offset = offset;
2963
0
    ieot->nr++;
2964
0
  }
2965
14.6k
  strbuf_release(&previous_name_buf);
2966
2967
14.6k
  if (err) {
2968
0
    free(ieot);
2969
0
    return err;
2970
0
  }
2971
2972
14.6k
  offset = hashfile_total(f);
2973
2974
  /*
2975
   * The extension headers must be hashed on their own for the
2976
   * EOIE extension. Create a hashfile here to compute that hash.
2977
   */
2978
14.6k
  if (offset && record_eoie()) {
2979
0
    CALLOC_ARRAY(eoie_c, 1);
2980
0
    the_hash_algo->init_fn(eoie_c);
2981
0
  }
2982
2983
  /*
2984
   * Let's write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
2985
   * can minimize the number of extensions we have to scan through to
2986
   * find it during load.  Write it out regardless of the
2987
   * strip_extensions parameter as we need it when loading the shared
2988
   * index.
2989
   */
2990
14.6k
  if (ieot) {
2991
0
    struct strbuf sb = STRBUF_INIT;
2992
2993
0
    write_ieot_extension(&sb, ieot);
2994
0
    err = write_index_ext_header(f, eoie_c, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0;
2995
0
    hashwrite(f, sb.buf, sb.len);
2996
0
    strbuf_release(&sb);
2997
0
    free(ieot);
2998
0
    if (err)
2999
0
      return -1;
3000
0
  }
3001
3002
14.6k
  if (write_extensions & WRITE_SPLIT_INDEX_EXTENSION &&
3003
14.6k
      istate->split_index) {
3004
0
    struct strbuf sb = STRBUF_INIT;
3005
3006
0
    if (istate->sparse_index)
3007
0
      die(_("cannot write split index for a sparse index"));
3008
3009
0
    err = write_link_extension(&sb, istate) < 0 ||
3010
0
      write_index_ext_header(f, eoie_c, CACHE_EXT_LINK,
3011
0
                 sb.len) < 0;
3012
0
    hashwrite(f, sb.buf, sb.len);
3013
0
    strbuf_release(&sb);
3014
0
    if (err)
3015
0
      return -1;
3016
0
  }
3017
14.6k
  if (write_extensions & WRITE_CACHE_TREE_EXTENSION &&
3018
14.6k
      !drop_cache_tree && istate->cache_tree) {
3019
13.4k
    struct strbuf sb = STRBUF_INIT;
3020
3021
13.4k
    cache_tree_write(&sb, istate->cache_tree);
3022
13.4k
    err = write_index_ext_header(f, eoie_c, CACHE_EXT_TREE, sb.len) < 0;
3023
13.4k
    hashwrite(f, sb.buf, sb.len);
3024
13.4k
    strbuf_release(&sb);
3025
13.4k
    if (err)
3026
0
      return -1;
3027
13.4k
  }
3028
14.6k
  if (write_extensions & WRITE_RESOLVE_UNDO_EXTENSION &&
3029
14.6k
      istate->resolve_undo) {
3030
0
    struct strbuf sb = STRBUF_INIT;
3031
3032
0
    resolve_undo_write(&sb, istate->resolve_undo);
3033
0
    err = write_index_ext_header(f, eoie_c, CACHE_EXT_RESOLVE_UNDO,
3034
0
               sb.len) < 0;
3035
0
    hashwrite(f, sb.buf, sb.len);
3036
0
    strbuf_release(&sb);
3037
0
    if (err)
3038
0
      return -1;
3039
0
  }
3040
14.6k
  if (write_extensions & WRITE_UNTRACKED_CACHE_EXTENSION &&
3041
14.6k
      istate->untracked) {
3042
0
    struct strbuf sb = STRBUF_INIT;
3043
3044
0
    write_untracked_extension(&sb, istate->untracked);
3045
0
    err = write_index_ext_header(f, eoie_c, CACHE_EXT_UNTRACKED,
3046
0
               sb.len) < 0;
3047
0
    hashwrite(f, sb.buf, sb.len);
3048
0
    strbuf_release(&sb);
3049
0
    if (err)
3050
0
      return -1;
3051
0
  }
3052
14.6k
  if (write_extensions & WRITE_FSMONITOR_EXTENSION &&
3053
14.6k
      istate->fsmonitor_last_update) {
3054
0
    struct strbuf sb = STRBUF_INIT;
3055
3056
0
    write_fsmonitor_extension(&sb, istate);
3057
0
    err = write_index_ext_header(f, eoie_c, CACHE_EXT_FSMONITOR, sb.len) < 0;
3058
0
    hashwrite(f, sb.buf, sb.len);
3059
0
    strbuf_release(&sb);
3060
0
    if (err)
3061
0
      return -1;
3062
0
  }
3063
14.6k
  if (istate->sparse_index) {
3064
0
    if (write_index_ext_header(f, eoie_c, CACHE_EXT_SPARSE_DIRECTORIES, 0) < 0)
3065
0
      return -1;
3066
0
  }
3067
3068
  /*
3069
   * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1
3070
   * so that it can be found and processed before all the index entries are
3071
   * read.  Write it out regardless of the strip_extensions parameter as we need it
3072
   * when loading the shared index.
3073
   */
3074
14.6k
  if (eoie_c) {
3075
0
    struct strbuf sb = STRBUF_INIT;
3076
3077
0
    write_eoie_extension(&sb, eoie_c, offset);
3078
0
    err = write_index_ext_header(f, NULL, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0;
3079
0
    hashwrite(f, sb.buf, sb.len);
3080
0
    strbuf_release(&sb);
3081
0
    if (err)
3082
0
      return -1;
3083
0
  }
3084
3085
14.6k
  csum_fsync_flag = 0;
3086
14.6k
  if (!alternate_index_output && (flags & COMMIT_LOCK))
3087
14.6k
    csum_fsync_flag = CSUM_FSYNC;
3088
3089
14.6k
  finalize_hashfile(f, istate->oid.hash, FSYNC_COMPONENT_INDEX,
3090
14.6k
        CSUM_HASH_IN_STREAM | csum_fsync_flag);
3091
3092
14.6k
  if (close_tempfile_gently(tempfile)) {
3093
0
    error(_("could not close '%s'"), get_tempfile_path(tempfile));
3094
0
    return -1;
3095
0
  }
3096
14.6k
  if (stat(get_tempfile_path(tempfile), &st))
3097
0
    return -1;
3098
14.6k
  istate->timestamp.sec = (unsigned int)st.st_mtime;
3099
14.6k
  istate->timestamp.nsec = ST_MTIME_NSEC(st);
3100
14.6k
  trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
3101
3102
  /*
3103
   * TODO trace2: replace "the_repository" with the actual repo instance
3104
   * that is associated with the given "istate".
3105
   */
3106
14.6k
  trace2_data_intmax("index", the_repository, "write/version",
3107
14.6k
         istate->version);
3108
14.6k
  trace2_data_intmax("index", the_repository, "write/cache_nr",
3109
14.6k
         istate->cache_nr);
3110
3111
14.6k
  return 0;
3112
14.6k
}
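Summing up, the file do_write_index() produces is: the 12-byte header, the cache entries (padded for v2/v3, prefix-compressed for v4), the extensions in the order written above (IEOT first, then link, TREE, REUC, UNTR, FSMN and sdir, with EOIE last so a reader can jump straight to the extensions), and finally the trailing hash emitted by finalize_hashfile().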
3113
3114
void set_alternate_index_output(const char *name)
3115
0
{
3116
0
  alternate_index_output = name;
3117
0
}
3118
3119
static int commit_locked_index(struct lock_file *lk)
3120
14.6k
{
3121
14.6k
  if (alternate_index_output)
3122
0
    return commit_lock_file_to(lk, alternate_index_output);
3123
14.6k
  else
3124
14.6k
    return commit_lock_file(lk);
3125
14.6k
}
3126
3127
static int do_write_locked_index(struct index_state *istate,
3128
         struct lock_file *lock,
3129
         unsigned flags,
3130
         enum write_extensions write_extensions)
3131
14.6k
{
3132
14.6k
  int ret;
3133
14.6k
  int was_full = istate->sparse_index == INDEX_EXPANDED;
3134
3135
14.6k
  ret = convert_to_sparse(istate, 0);
3136
3137
14.6k
  if (ret) {
3138
0
    warning(_("failed to convert to a sparse-index"));
3139
0
    return ret;
3140
0
  }
3141
3142
  /*
3143
   * TODO trace2: replace "the_repository" with the actual repo instance
3144
   * that is associated with the given "istate".
3145
   */
3146
14.6k
  trace2_region_enter_printf("index", "do_write_index", the_repository,
3147
14.6k
           "%s", get_lock_file_path(lock));
3148
14.6k
  ret = do_write_index(istate, lock->tempfile, write_extensions, flags);
3149
14.6k
  trace2_region_leave_printf("index", "do_write_index", the_repository,
3150
14.6k
           "%s", get_lock_file_path(lock));
3151
3152
14.6k
  if (was_full)
3153
14.6k
    ensure_full_index(istate);
3154
3155
14.6k
  if (ret)
3156
0
    return ret;
3157
14.6k
  if (flags & COMMIT_LOCK)
3158
14.6k
    ret = commit_locked_index(lock);
3159
0
  else
3160
0
    ret = close_lock_file_gently(lock);
3161
3162
14.6k
  run_hooks_l("post-index-change",
3163
14.6k
      istate->updated_workdir ? "1" : "0",
3164
14.6k
      istate->updated_skipworktree ? "1" : "0", NULL);
3165
14.6k
  istate->updated_workdir = 0;
3166
14.6k
  istate->updated_skipworktree = 0;
3167
3168
14.6k
  return ret;
3169
14.6k
}
3170
3171
static int write_split_index(struct index_state *istate,
3172
           struct lock_file *lock,
3173
           unsigned flags)
3174
0
{
3175
0
  int ret;
3176
0
  prepare_to_write_split_index(istate);
3177
0
  ret = do_write_locked_index(istate, lock, flags, WRITE_ALL_EXTENSIONS);
3178
0
  finish_writing_split_index(istate);
3179
0
  return ret;
3180
0
}
3181
3182
static const char *shared_index_expire = "2.weeks.ago";
3183
3184
static unsigned long get_shared_index_expire_date(void)
3185
0
{
3186
0
  static unsigned long shared_index_expire_date;
3187
0
  static int shared_index_expire_date_prepared;
3188
3189
0
  if (!shared_index_expire_date_prepared) {
3190
0
    git_config_get_expiry("splitindex.sharedindexexpire",
3191
0
              &shared_index_expire);
3192
0
    shared_index_expire_date = approxidate(shared_index_expire);
3193
0
    shared_index_expire_date_prepared = 1;
3194
0
  }
3195
3196
0
  return shared_index_expire_date;
3197
0
}
3198
3199
static int should_delete_shared_index(const char *shared_index_path)
3200
0
{
3201
0
  struct stat st;
3202
0
  unsigned long expiration;
3203
3204
  /* Check timestamp */
3205
0
  expiration = get_shared_index_expire_date();
3206
0
  if (!expiration)
3207
0
    return 0;
3208
0
  if (stat(shared_index_path, &st))
3209
0
    return error_errno(_("could not stat '%s'"), shared_index_path);
3210
0
  if (st.st_mtime > expiration)
3211
0
    return 0;
3212
3213
0
  return 1;
3214
0
}
3215
3216
static int clean_shared_index_files(const char *current_hex)
3217
0
{
3218
0
  struct dirent *de;
3219
0
  DIR *dir = opendir(get_git_dir());
3220
3221
0
  if (!dir)
3222
0
    return error_errno(_("unable to open git dir: %s"), get_git_dir());
3223
3224
0
  while ((de = readdir(dir)) != NULL) {
3225
0
    const char *sha1_hex;
3226
0
    const char *shared_index_path;
3227
0
    if (!skip_prefix(de->d_name, "sharedindex.", &sha1_hex))
3228
0
      continue;
3229
0
    if (!strcmp(sha1_hex, current_hex))
3230
0
      continue;
3231
0
    shared_index_path = git_path("%s", de->d_name);
3232
0
    if (should_delete_shared_index(shared_index_path) > 0 &&
3233
0
        unlink(shared_index_path))
3234
0
      warning_errno(_("unable to unlink: %s"), shared_index_path);
3235
0
  }
3236
0
  closedir(dir);
3237
3238
0
  return 0;
3239
0
}
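
should_delete_shared_index() boils the retention policy down to one mtime comparison: a sharedindex.* file becomes deletable once its mtime falls at or before the date that splitindex.sharedindexexpire (default "2.weeks.ago") resolves to. A rough standalone equivalent, using a plain maximum age in seconds instead of Git's date parsing; the function name is illustrative:

#include <sys/stat.h>
#include <time.h>

/*
 * Return 1 if 'path' has not been modified within the last
 * 'max_age_seconds', 0 if it is still fresh, and -1 on stat() failure.
 */
static int shared_index_is_expired(const char *path, time_t max_age_seconds)
{
	struct stat st;
	time_t cutoff = time(NULL) - max_age_seconds;

	if (stat(path, &st))
		return -1;
	return st.st_mtime <= cutoff;
}

With the default policy this would be called with 14 * 24 * 60 * 60 seconds. Note that freshen_shared_index(), used later in write_locked_index(), exists precisely so that a shared index still referenced by a split index keeps a recent mtime and never trips this check.
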
3240
3241
static int write_shared_index(struct index_state *istate,
3242
            struct tempfile **temp, unsigned flags)
3243
0
{
3244
0
  struct split_index *si = istate->split_index;
3245
0
  int ret, was_full = !istate->sparse_index;
3246
3247
0
  move_cache_to_base_index(istate);
3248
0
  convert_to_sparse(istate, 0);
3249
3250
0
  trace2_region_enter_printf("index", "shared/do_write_index",
3251
0
           the_repository, "%s", get_tempfile_path(*temp));
3252
0
  ret = do_write_index(si->base, *temp, WRITE_NO_EXTENSION, flags);
3253
0
  trace2_region_leave_printf("index", "shared/do_write_index",
3254
0
           the_repository, "%s", get_tempfile_path(*temp));
3255
3256
0
  if (was_full)
3257
0
    ensure_full_index(istate);
3258
3259
0
  if (ret)
3260
0
    return ret;
3261
0
  ret = adjust_shared_perm(get_tempfile_path(*temp));
3262
0
  if (ret) {
3263
0
    error(_("cannot fix permission bits on '%s'"), get_tempfile_path(*temp));
3264
0
    return ret;
3265
0
  }
3266
0
  ret = rename_tempfile(temp,
3267
0
            git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));
3268
0
  if (!ret) {
3269
0
    oidcpy(&si->base_oid, &si->base->oid);
3270
0
    clean_shared_index_files(oid_to_hex(&si->base->oid));
3271
0
  }
3272
3273
0
  return ret;
3274
0
}
3275
3276
static const int default_max_percent_split_change = 20;
3277
3278
static int too_many_not_shared_entries(struct index_state *istate)
3279
0
{
3280
0
  int i, not_shared = 0;
3281
0
  int max_split = git_config_get_max_percent_split_change();
3282
3283
0
  switch (max_split) {
3284
0
  case -1:
3285
    /* not or badly configured: use the default value */
3286
0
    max_split = default_max_percent_split_change;
3287
0
    break;
3288
0
  case 0:
3289
0
    return 1; /* 0% means always write a new shared index */
3290
0
  case 100:
3291
0
    return 0; /* 100% means never write a new shared index */
3292
0
  default:
3293
0
    break; /* just use the configured value */
3294
0
  }
3295
3296
  /* Count not shared entries */
3297
0
  for (i = 0; i < istate->cache_nr; i++) {
3298
0
    struct cache_entry *ce = istate->cache[i];
3299
0
    if (!ce->index)
3300
0
      not_shared++;
3301
0
  }
3302
3303
0
  return (int64_t)istate->cache_nr * max_split < (int64_t)not_shared * 100;
3304
0
}
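
The return statement is an overflow-safe way of asking whether strictly more than max_split percent of the entries live outside the shared index; with the default of 20 and 10,000 cache entries, for example, a rewrite of the shared index is triggered only once more than 2,000 entries are unshared. A tiny standalone restatement (names are illustrative):

#include <stdint.h>

/*
 * Returns 1 when more than 'max_split' percent of 'cache_nr' entries are
 * not in the shared index, using 64-bit arithmetic so the multiplication
 * cannot overflow, exactly like the comparison above.
 */
static int exceeds_split_threshold(unsigned int cache_nr,
				   unsigned int not_shared, int max_split)
{
	return (int64_t)cache_nr * max_split < (int64_t)not_shared * 100;
}

exceeds_split_threshold(10000, 2000, 20) is 0 (exactly 20%, keep appending to the split index), while exceeds_split_threshold(10000, 2001, 20) is 1 (time to rewrite the shared index).
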
3305
3306
int write_locked_index(struct index_state *istate, struct lock_file *lock,
3307
           unsigned flags)
3308
14.6k
{
3309
14.6k
  int new_shared_index, ret, test_split_index_env;
3310
14.6k
  struct split_index *si = istate->split_index;
3311
3312
14.6k
  if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
3313
0
    cache_tree_verify(the_repository, istate);
3314
3315
14.6k
  if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
3316
0
    if (flags & COMMIT_LOCK)
3317
0
      rollback_lock_file(lock);
3318
0
    return 0;
3319
0
  }
3320
3321
14.6k
  if (istate->fsmonitor_last_update)
3322
0
    fill_fsmonitor_bitmap(istate);
3323
3324
14.6k
  test_split_index_env = git_env_bool("GIT_TEST_SPLIT_INDEX", 0);
3325
3326
14.6k
  if ((!si && !test_split_index_env) ||
3327
14.6k
      alternate_index_output ||
3328
14.6k
      (istate->cache_changed & ~EXTMASK)) {
3329
14.6k
    ret = do_write_locked_index(istate, lock, flags,
3330
14.6k
              ~WRITE_SPLIT_INDEX_EXTENSION);
3331
14.6k
    goto out;
3332
14.6k
  }
3333
3334
0
  if (test_split_index_env) {
3335
0
    if (!si) {
3336
0
      si = init_split_index(istate);
3337
0
      istate->cache_changed |= SPLIT_INDEX_ORDERED;
3338
0
    } else {
3339
0
      int v = si->base_oid.hash[0];
3340
0
      if ((v & 15) < 6)
3341
0
        istate->cache_changed |= SPLIT_INDEX_ORDERED;
3342
0
    }
3343
0
  }
3344
0
  if (too_many_not_shared_entries(istate))
3345
0
    istate->cache_changed |= SPLIT_INDEX_ORDERED;
3346
3347
0
  new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;
3348
3349
0
  if (new_shared_index) {
3350
0
    struct tempfile *temp;
3351
0
    int saved_errno;
3352
3353
    /* Same initial permissions as the main .git/index file */
3354
0
    temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
3355
0
    if (!temp) {
3356
0
      ret = do_write_locked_index(istate, lock, flags,
3357
0
                ~WRITE_SPLIT_INDEX_EXTENSION);
3358
0
      goto out;
3359
0
    }
3360
0
    ret = write_shared_index(istate, &temp, flags);
3361
3362
0
    saved_errno = errno;
3363
0
    if (is_tempfile_active(temp))
3364
0
      delete_tempfile(&temp);
3365
0
    errno = saved_errno;
3366
3367
0
    if (ret)
3368
0
      goto out;
3369
0
  }
3370
3371
0
  ret = write_split_index(istate, lock, flags);
3372
3373
  /* Freshen the shared index only if the split-index was written */
3374
0
  if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {
3375
0
    const char *shared_index = git_path("sharedindex.%s",
3376
0
                oid_to_hex(&si->base_oid));
3377
0
    freshen_shared_index(shared_index, 1);
3378
0
  }
3379
3380
14.6k
out:
3381
14.6k
  if (flags & COMMIT_LOCK)
3382
14.6k
    rollback_lock_file(lock);
3383
14.6k
  return ret;
3384
0
}
3385
3386
/*
3387
 * Read the index file that is potentially unmerged into given
3388
 * index_state, dropping any unmerged entries to stage #0 (potentially
3389
 * resulting in a path appearing as both a file and a directory in the
3390
 * index; the caller is responsible for clearing out the extra entries
3391
 * before writing the index to a tree).  Returns true if the index is
3392
 * unmerged.  Callers who want to refuse to work from an unmerged
3393
 * state can call this and check its return value, instead of calling
3394
 * read_cache().
3395
 */
3396
int repo_read_index_unmerged(struct repository *repo)
3397
0
{
3398
0
  struct index_state *istate;
3399
0
  int i;
3400
0
  int unmerged = 0;
3401
3402
0
  repo_read_index(repo);
3403
0
  istate = repo->index;
3404
0
  for (i = 0; i < istate->cache_nr; i++) {
3405
0
    struct cache_entry *ce = istate->cache[i];
3406
0
    struct cache_entry *new_ce;
3407
0
    int len;
3408
3409
0
    if (!ce_stage(ce))
3410
0
      continue;
3411
0
    unmerged = 1;
3412
0
    len = ce_namelen(ce);
3413
0
    new_ce = make_empty_cache_entry(istate, len);
3414
0
    memcpy(new_ce->name, ce->name, len);
3415
0
    new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
3416
0
    new_ce->ce_namelen = len;
3417
0
    new_ce->ce_mode = ce->ce_mode;
3418
0
    if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
3419
0
      return error(_("%s: cannot drop to stage #0"),
3420
0
             new_ce->name);
3421
0
  }
3422
0
  return unmerged;
3423
0
}
3424
3425
/*
3426
 * Returns 1 if the path is an "other" path with respect to
3427
 * the index; that is, the path is not mentioned in the index at all,
3428
 * either as a file, a directory with some files in the index,
3429
 * or as an unmerged entry.
3430
 *
3431
 * We helpfully remove a trailing "/" from directories so that
3432
 * the output of read_directory can be used as-is.
3433
 */
3434
int index_name_is_other(struct index_state *istate, const char *name,
3435
      int namelen)
3436
1.78k
{
3437
1.78k
  int pos;
3438
1.78k
  if (namelen && name[namelen - 1] == '/')
3439
594
    namelen--;
3440
1.78k
  pos = index_name_pos(istate, name, namelen);
3441
1.78k
  if (0 <= pos)
3442
0
    return 0; /* exact match */
3443
1.78k
  pos = -pos - 1;
3444
1.78k
  if (pos < istate->cache_nr) {
3445
0
    struct cache_entry *ce = istate->cache[pos];
3446
0
    if (ce_namelen(ce) == namelen &&
3447
0
        !memcmp(ce->name, name, namelen))
3448
0
      return 0; /* Yup, this one exists unmerged */
3449
0
  }
3450
1.78k
  return 1;
3451
1.78k
}
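
index_name_pos() follows the sorted-lookup convention visible here: a non-negative result is the position of an exact match, and a negative result encodes the would-be insertion point as -pos - 1, which is why index_name_is_other() undoes it with pos = -pos - 1 and then inspects the entry that sorts immediately after the name. A self-contained sketch of the same convention over a plain sorted string array; the helper name is illustrative:

#include <string.h>

/*
 * Binary search over a sorted array of strings.  Returns the index of an
 * exact match, or -(insertion point) - 1 when the key is absent, matching
 * the encoding index_name_pos() uses.
 */
static int sorted_lookup(const char **names, int nr, const char *key)
{
	int lo = 0, hi = nr;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;
		int cmp = strcmp(names[mid], key);

		if (!cmp)
			return mid;
		if (cmp < 0)
			lo = mid + 1;
		else
			hi = mid;
	}
	return -lo - 1;
}

A miss is decoded the same way as above: pos = sorted_lookup(names, nr, key); if (pos < 0) pos = -pos - 1; leaves pos pointing at the first entry that sorts after the key.
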
3452
3453
void *read_blob_data_from_index(struct index_state *istate,
3454
        const char *path, unsigned long *size)
3455
2
{
3456
2
  int pos, len;
3457
2
  unsigned long sz;
3458
2
  enum object_type type;
3459
2
  void *data;
3460
3461
2
  len = strlen(path);
3462
2
  pos = index_name_pos(istate, path, len);
3463
2
  if (pos < 0) {
3464
    /*
3465
     * We might be in the middle of a merge, in which
3466
     * case we would read stage #2 (ours).
3467
     */
3468
2
    int i;
3469
2
    for (i = -pos - 1;
3470
2
         (pos < 0 && i < istate->cache_nr &&
3471
2
          !strcmp(istate->cache[i]->name, path));
3472
2
         i++)
3473
0
      if (ce_stage(istate->cache[i]) == 2)
3474
0
        pos = i;
3475
2
  }
3476
2
  if (pos < 0)
3477
2
    return NULL;
3478
0
  data = repo_read_object_file(the_repository, &istate->cache[pos]->oid,
3479
0
             &type, &sz);
3480
0
  if (!data || type != OBJ_BLOB) {
3481
0
    free(data);
3482
0
    return NULL;
3483
0
  }
3484
0
  if (size)
3485
0
    *size = sz;
3486
0
  return data;
3487
0
}
3488
3489
void move_index_extensions(struct index_state *dst, struct index_state *src)
3490
0
{
3491
0
  dst->untracked = src->untracked;
3492
0
  src->untracked = NULL;
3493
0
  dst->cache_tree = src->cache_tree;
3494
0
  src->cache_tree = NULL;
3495
0
}
3496
3497
struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
3498
            struct index_state *istate)
3499
0
{
3500
0
  unsigned int size = ce_size(ce);
3501
0
  int mem_pool_allocated;
3502
0
  struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
3503
0
  mem_pool_allocated = new_entry->mem_pool_allocated;
3504
3505
0
  memcpy(new_entry, ce, size);
3506
0
  new_entry->mem_pool_allocated = mem_pool_allocated;
3507
0
  return new_entry;
3508
0
}
3509
3510
void discard_cache_entry(struct cache_entry *ce)
3511
67.3k
{
3512
67.3k
  if (ce && should_validate_cache_entries())
3513
0
    memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));
3514
3515
67.3k
  if (ce && ce->mem_pool_allocated)
3516
0
    return;
3517
3518
67.3k
  free(ce);
3519
67.3k
}
3520
3521
int should_validate_cache_entries(void)
3522
91.5k
{
3523
91.5k
  static int validate_index_cache_entries = -1;
3524
3525
91.5k
  if (validate_index_cache_entries < 0) {
3526
2
    if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
3527
0
      validate_index_cache_entries = 1;
3528
2
    else
3529
2
      validate_index_cache_entries = 0;
3530
2
  }
3531
3532
91.5k
  return validate_index_cache_entries;
3533
91.5k
}
3534
3535
0
#define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */
3536
0
#define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */
3537
3538
static size_t read_eoie_extension(const char *mmap, size_t mmap_size)
3539
0
{
3540
  /*
3541
   * The end of index entries (EOIE) extension is guaranteed to be last
3542
   * so that it can be found by scanning backwards from the EOF.
3543
   *
3544
   * "EOIE"
3545
   * <4-byte length>
3546
   * <4-byte offset>
3547
   * <20-byte hash>
3548
   */
3549
0
  const char *index, *eoie;
3550
0
  uint32_t extsize;
3551
0
  size_t offset, src_offset;
3552
0
  unsigned char hash[GIT_MAX_RAWSZ];
3553
0
  git_hash_ctx c;
3554
3555
  /* ensure we have an index big enough to contain an EOIE extension */
3556
0
  if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz)
3557
0
    return 0;
3558
3559
  /* validate the extension signature */
3560
0
  index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz;
3561
0
  if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES)
3562
0
    return 0;
3563
0
  index += sizeof(uint32_t);
3564
3565
  /* validate the extension size */
3566
0
  extsize = get_be32(index);
3567
0
  if (extsize != EOIE_SIZE)
3568
0
    return 0;
3569
0
  index += sizeof(uint32_t);
3570
3571
  /*
3572
   * Validate that the offset where we will look for the first extension
3573
   * signature is after the index header and before the eoie extension.
3574
   */
3575
0
  offset = get_be32(index);
3576
0
  if (mmap + offset < mmap + sizeof(struct cache_header))
3577
0
    return 0;
3578
0
  if (mmap + offset >= eoie)
3579
0
    return 0;
3580
0
  index += sizeof(uint32_t);
3581
3582
  /*
3583
   * The hash is computed over extension types and their sizes (but not
3584
   * their contents).  E.g. if we have a "TREE" extension that is N bytes
3585
   * long, "REUC" extension that is M-bytes long, followed by "EOIE",
3586
   * then the hash would be:
3587
   *
3588
   * SHA-1("TREE" + <binary representation of N> +
3589
   *   "REUC" + <binary representation of M>)
3590
   */
3591
0
  src_offset = offset;
3592
0
  the_hash_algo->init_fn(&c);
3593
0
  while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) {
3594
    /* After an array of active_nr index entries,
3595
     * there can be arbitrary number of extended
3596
     * sections, each of which is prefixed with
3597
     * extension name (4-byte) and section length
3598
     * in 4-byte network byte order.
3599
     */
3600
0
    uint32_t extsize;
3601
0
    memcpy(&extsize, mmap + src_offset + 4, 4);
3602
0
    extsize = ntohl(extsize);
3603
3604
    /* verify the extension size isn't so large it will wrap around */
3605
0
    if (src_offset + 8 + extsize < src_offset)
3606
0
      return 0;
3607
3608
0
    the_hash_algo->update_fn(&c, mmap + src_offset, 8);
3609
3610
0
    src_offset += 8;
3611
0
    src_offset += extsize;
3612
0
  }
3613
0
  the_hash_algo->final_fn(hash, &c);
3614
0
  if (!hasheq(hash, (const unsigned char *)index))
3615
0
    return 0;
3616
3617
  /* Validate that the extension offsets returned us back to the eoie extension. */
3618
0
  if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER)
3619
0
    return 0;
3620
3621
0
  return offset;
3622
0
}
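
Because the EOIE record sits at a fixed distance from the end of the file (the trailing checksum, preceded by a 4-byte signature, a 4-byte length, a 4-byte offset and a hash over the extension headers), it can be located without parsing anything before it. A stripped-down standalone reader for the SHA-1 layout that only recovers the offset of the first extension and skips the header-hash and bounds validation done above; all names are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define RAWSZ 20                            /* SHA-1 trailer, as above       */
#define EOIE_PAYLOAD (4 + RAWSZ)            /* 4-byte offset + header hash   */
#define EOIE_RECORD (4 + 4 + EOIE_PAYLOAD)  /* signature + length + payload  */

static uint32_t be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/*
 * Given the whole index file in memory, return the offset of the first
 * extension, or 0 when no valid EOIE record is present.  The 12 accounts
 * for the fixed-size index header.
 */
static size_t find_first_extension(const unsigned char *buf, size_t len)
{
	const unsigned char *eoie;

	if (len < 12 + EOIE_RECORD + RAWSZ)
		return 0;
	eoie = buf + len - RAWSZ - EOIE_RECORD;
	if (memcmp(eoie, "EOIE", 4))
		return 0;
	if (be32(eoie + 4) != EOIE_PAYLOAD)
		return 0;
	return be32(eoie + 8);
}
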
3623
3624
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset)
3625
0
{
3626
0
  uint32_t buffer;
3627
0
  unsigned char hash[GIT_MAX_RAWSZ];
3628
3629
  /* offset */
3630
0
  put_be32(&buffer, offset);
3631
0
  strbuf_add(sb, &buffer, sizeof(uint32_t));
3632
3633
  /* hash */
3634
0
  the_hash_algo->final_fn(hash, eoie_context);
3635
0
  strbuf_add(sb, hash, the_hash_algo->rawsz);
3636
0
}
3637
3638
0
#define IEOT_VERSION  (1)
3639
3640
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
3641
0
{
3642
0
  const char *index = NULL;
3643
0
  uint32_t extsize, ext_version;
3644
0
  struct index_entry_offset_table *ieot;
3645
0
  int i, nr;
3646
3647
  /* find the IEOT extension */
3648
0
  if (!offset)
3649
0
    return NULL;
3650
0
  while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
3651
0
    extsize = get_be32(mmap + offset + 4);
3652
0
    if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
3653
0
      index = mmap + offset + 4 + 4;
3654
0
      break;
3655
0
    }
3656
0
    offset += 8;
3657
0
    offset += extsize;
3658
0
  }
3659
0
  if (!index)
3660
0
    return NULL;
3661
3662
  /* validate the version is IEOT_VERSION */
3663
0
  ext_version = get_be32(index);
3664
0
  if (ext_version != IEOT_VERSION) {
3665
0
    error("invalid IEOT version %d", ext_version);
3666
0
    return NULL;
3667
0
  }
3668
0
  index += sizeof(uint32_t);
3669
3670
  /* extension size - version bytes / bytes per entry */
3671
0
  nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
3672
0
  if (!nr) {
3673
0
    error("invalid number of IEOT entries %d", nr);
3674
0
    return NULL;
3675
0
  }
3676
0
  ieot = xmalloc(sizeof(struct index_entry_offset_table)
3677
0
           + (nr * sizeof(struct index_entry_offset)));
3678
0
  ieot->nr = nr;
3679
0
  for (i = 0; i < nr; i++) {
3680
0
    ieot->entries[i].offset = get_be32(index);
3681
0
    index += sizeof(uint32_t);
3682
0
    ieot->entries[i].nr = get_be32(index);
3683
0
    index += sizeof(uint32_t);
3684
0
  }
3685
3686
0
  return ieot;
3687
0
}
3688
3689
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
3690
0
{
3691
0
  uint32_t buffer;
3692
0
  int i;
3693
3694
  /* version */
3695
0
  put_be32(&buffer, IEOT_VERSION);
3696
0
  strbuf_add(sb, &buffer, sizeof(uint32_t));
3697
3698
  /* ieot */
3699
0
  for (i = 0; i < ieot->nr; i++) {
3700
3701
    /* offset */
3702
0
    put_be32(&buffer, ieot->entries[i].offset);
3703
0
    strbuf_add(sb, &buffer, sizeof(uint32_t));
3704
3705
    /* count */
3706
0
    put_be32(&buffer, ieot->entries[i].nr);
3707
0
    strbuf_add(sb, &buffer, sizeof(uint32_t));
3708
0
  }
3709
0
}
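
The IEOT payload written above is simply a 4-byte version (currently 1) followed by one <offset, entry count> pair per block of index entries, all in network byte order; readers use it to hand disjoint ranges of entries to worker threads. A standalone sketch that parses such a payload back into a flat array; the struct and function names are illustrative:

#include <stdint.h>
#include <stdlib.h>

struct ieot_pair {
	uint32_t offset;   /* file offset of the first entry in the block */
	uint32_t nr;       /* number of index entries in the block        */
};

static uint32_t be32_at(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/*
 * Parse an IEOT payload (the bytes following the "IEOT" signature and its
 * length field).  Returns a malloc'ed array of (extsize - 4) / 8 pairs,
 * or NULL on a version mismatch or an empty table.
 */
static struct ieot_pair *parse_ieot(const unsigned char *payload,
				    uint32_t extsize, uint32_t *nr_out)
{
	uint32_t i, nr;
	struct ieot_pair *pairs;

	if (extsize < 4 || be32_at(payload) != 1)   /* version must be 1 */
		return NULL;
	nr = (extsize - 4) / 8;
	if (!nr)
		return NULL;
	pairs = malloc(nr * sizeof(*pairs));
	if (!pairs)
		return NULL;
	for (i = 0; i < nr; i++) {
		pairs[i].offset = be32_at(payload + 4 + 8 * i);
		pairs[i].nr = be32_at(payload + 4 + 8 * i + 4);
	}
	*nr_out = nr;
	return pairs;
}
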
3710
3711
void prefetch_cache_entries(const struct index_state *istate,
3712
          must_prefetch_predicate must_prefetch)
3713
0
{
3714
0
  int i;
3715
0
  struct oid_array to_fetch = OID_ARRAY_INIT;
3716
3717
0
  for (i = 0; i < istate->cache_nr; i++) {
3718
0
    struct cache_entry *ce = istate->cache[i];
3719
3720
0
    if (S_ISGITLINK(ce->ce_mode) || !must_prefetch(ce))
3721
0
      continue;
3722
0
    if (!oid_object_info_extended(the_repository, &ce->oid,
3723
0
                NULL,
3724
0
                OBJECT_INFO_FOR_PREFETCH))
3725
0
      continue;
3726
0
    oid_array_append(&to_fetch, &ce->oid);
3727
0
  }
3728
0
  promisor_remote_get_direct(the_repository,
3729
0
           to_fetch.oid, to_fetch.nr);
3730
0
  oid_array_clear(&to_fetch);
3731
0
}
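
prefetch_cache_entries() is an instance of a common promisor-remote pattern: scan once, collect every object that is missing locally, then issue a single batched fetch rather than faulting objects in one at a time. A generic standalone sketch of that shape; the struct and both callbacks are illustrative stand-ins, not Git APIs:

#include <stdlib.h>

struct obj_id {
	unsigned char bytes[32];
};

/*
 * Collect every id that 'is_available' reports as missing, then hand the
 * whole batch to 'fetch_batch' in a single call, mirroring the oid_array
 * plus promisor_remote_get_direct() pairing above.
 */
static void prefetch_missing(const struct obj_id *ids, size_t nr,
			     int (*is_available)(const struct obj_id *),
			     void (*fetch_batch)(const struct obj_id *, size_t))
{
	struct obj_id *batch;
	size_t i, missing = 0;

	if (!nr)
		return;
	batch = malloc(nr * sizeof(*batch));
	if (!batch)
		return;
	for (i = 0; i < nr; i++)
		if (!is_available(&ids[i]))
			batch[missing++] = ids[i];
	if (missing)
		fetch_batch(batch, missing);
	free(batch);
}
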
3732
3733
static int read_one_entry_opt(struct index_state *istate,
3734
            const struct object_id *oid,
3735
            struct strbuf *base,
3736
            const char *pathname,
3737
            unsigned mode, int opt)
3738
0
{
3739
0
  int len;
3740
0
  struct cache_entry *ce;
3741
3742
0
  if (S_ISDIR(mode))
3743
0
    return READ_TREE_RECURSIVE;
3744
3745
0
  len = strlen(pathname);
3746
0
  ce = make_empty_cache_entry(istate, base->len + len);
3747
3748
0
  ce->ce_mode = create_ce_mode(mode);
3749
0
  ce->ce_flags = create_ce_flags(1);
3750
0
  ce->ce_namelen = base->len + len;
3751
0
  memcpy(ce->name, base->buf, base->len);
3752
0
  memcpy(ce->name + base->len, pathname, len+1);
3753
0
  oidcpy(&ce->oid, oid);
3754
0
  return add_index_entry(istate, ce, opt);
3755
0
}
3756
3757
static int read_one_entry(const struct object_id *oid, struct strbuf *base,
3758
        const char *pathname, unsigned mode,
3759
        void *context)
3760
0
{
3761
0
  struct index_state *istate = context;
3762
0
  return read_one_entry_opt(istate, oid, base, pathname,
3763
0
          mode,
3764
0
          ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
3765
0
}
3766
3767
/*
3768
 * This is used when the caller knows there is no existing entries at
3769
 * the stage that will conflict with the entry being added.
3770
 */
3771
static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
3772
        const char *pathname, unsigned mode,
3773
        void *context)
3774
0
{
3775
0
  struct index_state *istate = context;
3776
0
  return read_one_entry_opt(istate, oid, base, pathname,
3777
0
          mode, ADD_CACHE_JUST_APPEND);
3778
0
}
3779
3780
/*
3781
 * Read the tree specified with the --with-tree option
3782
 * (typically, HEAD) into stage #1 and then
3783
 * squash them down to stage #0.  This is used for
3784
 * --error-unmatch to list and check the path patterns
3785
 * that were given from the command line.  We are not
3786
 * going to write this index out.
3787
 */
3788
void overlay_tree_on_index(struct index_state *istate,
3789
         const char *tree_name, const char *prefix)
3790
0
{
3791
0
  struct tree *tree;
3792
0
  struct object_id oid;
3793
0
  struct pathspec pathspec;
3794
0
  struct cache_entry *last_stage0 = NULL;
3795
0
  int i;
3796
0
  read_tree_fn_t fn = NULL;
3797
0
  int err;
3798
3799
0
  if (repo_get_oid(the_repository, tree_name, &oid))
3800
0
    die("tree-ish %s not found.", tree_name);
3801
0
  tree = parse_tree_indirect(&oid);
3802
0
  if (!tree)
3803
0
    die("bad tree-ish %s", tree_name);
3804
3805
  /* Hoist the unmerged entries up to stage #3 to make room */
3806
  /* TODO: audit for interaction with sparse-index. */
3807
0
  ensure_full_index(istate);
3808
0
  for (i = 0; i < istate->cache_nr; i++) {
3809
0
    struct cache_entry *ce = istate->cache[i];
3810
0
    if (!ce_stage(ce))
3811
0
      continue;
3812
0
    ce->ce_flags |= CE_STAGEMASK;
3813
0
  }
3814
3815
0
  if (prefix) {
3816
0
    static const char *(matchbuf[1]);
3817
0
    matchbuf[0] = NULL;
3818
0
    parse_pathspec(&pathspec, PATHSPEC_ALL_MAGIC,
3819
0
             PATHSPEC_PREFER_CWD, prefix, matchbuf);
3820
0
  } else
3821
0
    memset(&pathspec, 0, sizeof(pathspec));
3822
3823
  /*
3824
   * See if we have cache entry at the stage.  If so,
3825
   * do it the original slow way, otherwise, append and then
3826
   * sort at the end.
3827
   */
3828
0
  for (i = 0; !fn && i < istate->cache_nr; i++) {
3829
0
    const struct cache_entry *ce = istate->cache[i];
3830
0
    if (ce_stage(ce) == 1)
3831
0
      fn = read_one_entry;
3832
0
  }
3833
3834
0
  if (!fn)
3835
0
    fn = read_one_entry_quick;
3836
0
  err = read_tree(the_repository, tree, &pathspec, fn, istate);
3837
0
  clear_pathspec(&pathspec);
3838
0
  if (err)
3839
0
    die("unable to read tree entries %s", tree_name);
3840
3841
  /*
3842
   * Sort the cache entries -- we need to nuke the cache tree, though.
3843
   */
3844
0
  if (fn == read_one_entry_quick) {
3845
0
    cache_tree_free(&istate->cache_tree);
3846
0
    QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
3847
0
  }
3848
3849
0
  for (i = 0; i < istate->cache_nr; i++) {
3850
0
    struct cache_entry *ce = istate->cache[i];
3851
0
    switch (ce_stage(ce)) {
3852
0
    case 0:
3853
0
      last_stage0 = ce;
3854
      /* fallthru */
3855
0
    default:
3856
0
      continue;
3857
0
    case 1:
3858
      /*
3859
       * If there is a stage #0 entry for this, we do not
3860
       * need to show it.  We use the CE_UPDATE bit to mark
3861
       * such an entry.
3862
       */
3863
0
      if (last_stage0 &&
3864
0
          !strcmp(last_stage0->name, ce->name))
3865
0
        ce->ce_flags |= CE_UPDATE;
3866
0
    }
3867
0
  }
3868
0
}
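
The stage manipulation in overlay_tree_on_index() (hoisting conflicted entries to stage #3 with CE_STAGEMASK, reading the tree in at stage #1, then marking shadowed entries) works because an entry's stage is a 2-bit field packed into its flags word just above the 12-bit name length. A tiny runnable illustration of how ce_stage()/create_ce_flags()-style helpers pack and unpack it; the constants mirror Git's values but the helper names are illustrative:

#include <assert.h>
#include <stdint.h>

#define STAGESHIFT 12
#define STAGEMASK  0x3000   /* the two stage bits above the name length */

static uint16_t flags_for_stage(unsigned int stage)
{
	return (uint16_t)((stage & 0x3) << STAGESHIFT);
}

static unsigned int stage_of(uint16_t flags)
{
	return (flags & STAGEMASK) >> STAGESHIFT;
}

int main(void)
{
	uint16_t flags = flags_for_stage(1);   /* like create_ce_flags(1) */

	assert(stage_of(flags) == 1);
	flags |= STAGEMASK;                    /* hoist to stage #3 */
	assert(stage_of(flags) == 3);
	return 0;
}
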
3869
3870
struct update_callback_data {
3871
  struct index_state *index;
3872
  int include_sparse;
3873
  int flags;
3874
  int add_errors;
3875
};
3876
3877
static int fix_unmerged_status(struct diff_filepair *p,
3878
             struct update_callback_data *data)
3879
0
{
3880
0
  if (p->status != DIFF_STATUS_UNMERGED)
3881
0
    return p->status;
3882
0
  if (!(data->flags & ADD_CACHE_IGNORE_REMOVAL) && !p->two->mode)
3883
    /*
3884
     * This is not an explicit add request, and the
3885
     * path is missing from the working tree (deleted)
3886
     */
3887
0
    return DIFF_STATUS_DELETED;
3888
0
  else
3889
    /*
3890
     * Either an explicit add request, or path exists
3891
     * in the working tree.  An attempt to explicitly
3892
     * add a path that does not exist in the working tree
3893
     * will be caught as an error by the caller immediately.
3894
     */
3895
0
    return DIFF_STATUS_MODIFIED;
3896
0
}
3897
3898
static void update_callback(struct diff_queue_struct *q,
3899
          struct diff_options *opt UNUSED, void *cbdata)
3900
0
{
3901
0
  int i;
3902
0
  struct update_callback_data *data = cbdata;
3903
3904
0
  for (i = 0; i < q->nr; i++) {
3905
0
    struct diff_filepair *p = q->queue[i];
3906
0
    const char *path = p->one->path;
3907
3908
0
    if (!data->include_sparse &&
3909
0
        !path_in_sparse_checkout(path, data->index))
3910
0
      continue;
3911
3912
0
    switch (fix_unmerged_status(p, data)) {
3913
0
    default:
3914
0
      die(_("unexpected diff status %c"), p->status);
3915
0
    case DIFF_STATUS_MODIFIED:
3916
0
    case DIFF_STATUS_TYPE_CHANGED:
3917
0
      if (add_file_to_index(data->index, path, data->flags)) {
3918
0
        if (!(data->flags & ADD_CACHE_IGNORE_ERRORS))
3919
0
          die(_("updating files failed"));
3920
0
        data->add_errors++;
3921
0
      }
3922
0
      break;
3923
0
    case DIFF_STATUS_DELETED:
3924
0
      if (data->flags & ADD_CACHE_IGNORE_REMOVAL)
3925
0
        break;
3926
0
      if (!(data->flags & ADD_CACHE_PRETEND))
3927
0
        remove_file_from_index(data->index, path);
3928
0
      if (data->flags & (ADD_CACHE_PRETEND|ADD_CACHE_VERBOSE))
3929
0
        printf(_("remove '%s'\n"), path);
3930
0
      break;
3931
0
    }
3932
0
  }
3933
0
}
3934
3935
int add_files_to_cache(struct repository *repo, const char *prefix,
3936
           const struct pathspec *pathspec, int include_sparse,
3937
           int flags)
3938
7.32k
{
3939
7.32k
  struct update_callback_data data;
3940
7.32k
  struct rev_info rev;
3941
3942
7.32k
  memset(&data, 0, sizeof(data));
3943
7.32k
  data.index = repo->index;
3944
7.32k
  data.include_sparse = include_sparse;
3945
7.32k
  data.flags = flags;
3946
3947
7.32k
  repo_init_revisions(repo, &rev, prefix);
3948
7.32k
  setup_revisions(0, NULL, &rev, NULL);
3949
7.32k
  if (pathspec)
3950
7.32k
    copy_pathspec(&rev.prune_data, pathspec);
3951
7.32k
  rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
3952
7.32k
  rev.diffopt.format_callback = update_callback;
3953
7.32k
  rev.diffopt.format_callback_data = &data;
3954
7.32k
  rev.diffopt.flags.override_submodule_config = 1;
3955
7.32k
  rev.max_count = 0; /* do not compare unmerged paths with stage #2 */
3956
3957
  /*
3958
   * Use an ODB transaction to optimize adding multiple objects.
3959
   * This function is invoked from commands other than 'add', which
3960
   * may not have their own transaction active.
3961
   */
3962
7.32k
  begin_odb_transaction();
3963
7.32k
  run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
3964
7.32k
  end_odb_transaction();
3965
3966
7.32k
  release_revisions(&rev);
3967
7.32k
  return !!data.add_errors;
3968
7.32k
}