Coverage Report

Created: 2025-12-14 06:31

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/git/object-file.c
Line
Count
Source
1
/*
2
 * GIT - The information manager from hell
3
 *
4
 * Copyright (C) Linus Torvalds, 2005
5
 *
6
 * This handles basic git object files - packing, unpacking,
7
 * creation etc.
8
 */
9
10
#define USE_THE_REPOSITORY_VARIABLE
11
12
#include "git-compat-util.h"
13
#include "convert.h"
14
#include "dir.h"
15
#include "environment.h"
16
#include "fsck.h"
17
#include "gettext.h"
18
#include "hex.h"
19
#include "loose.h"
20
#include "object-file-convert.h"
21
#include "object-file.h"
22
#include "odb.h"
23
#include "oidtree.h"
24
#include "pack.h"
25
#include "packfile.h"
26
#include "path.h"
27
#include "read-cache-ll.h"
28
#include "setup.h"
29
#include "streaming.h"
30
#include "tempfile.h"
31
#include "tmp-objdir.h"
32
33
/* The maximum size for an object header. */
34
#define MAX_HEADER_LEN 32
35
36
static int get_conv_flags(unsigned flags)
37
0
{
38
0
  if (flags & INDEX_RENORMALIZE)
39
0
    return CONV_EOL_RENORMALIZE;
40
0
  else if (flags & INDEX_WRITE_OBJECT)
41
0
    return global_conv_flags_eol | CONV_WRITE_OBJECT;
42
0
  else
43
0
    return 0;
44
0
}
45
46
static void fill_loose_path(struct strbuf *buf,
47
          const struct object_id *oid,
48
          const struct git_hash_algo *algop)
49
0
{
50
0
  for (size_t i = 0; i < algop->rawsz; i++) {
51
0
    static char hex[] = "0123456789abcdef";
52
0
    unsigned int val = oid->hash[i];
53
0
    strbuf_addch(buf, hex[val >> 4]);
54
0
    strbuf_addch(buf, hex[val & 0xf]);
55
0
    if (!i)
56
0
      strbuf_addch(buf, '/');
57
0
  }
58
0
}
59
60
const char *odb_loose_path(struct odb_source *source,
61
         struct strbuf *buf,
62
         const struct object_id *oid)
63
0
{
64
0
  strbuf_reset(buf);
65
0
  strbuf_addstr(buf, source->path);
66
0
  strbuf_addch(buf, '/');
67
0
  fill_loose_path(buf, oid, source->odb->repo->hash_algo);
68
0
  return buf->buf;
69
0
}
70
71
/* Returns 1 if we have successfully freshened the file, 0 otherwise. */
72
static int freshen_file(const char *fn)
73
0
{
74
0
  return !utime(fn, NULL);
75
0
}
76
77
/*
78
 * All of the check_and_freshen functions return 1 if the file exists and was
79
 * freshened (if freshening was requested), 0 otherwise. If they return
80
 * 0, you should not assume that it is safe to skip a write of the object (it
81
 * either does not exist on disk, or has a stale mtime and may be subject to
82
 * pruning).
83
 */
84
int check_and_freshen_file(const char *fn, int freshen)
85
0
{
86
0
  if (access(fn, F_OK))
87
0
    return 0;
88
0
  if (freshen && !freshen_file(fn))
89
0
    return 0;
90
0
  return 1;
91
0
}
92
93
static int check_and_freshen_source(struct odb_source *source,
94
            const struct object_id *oid,
95
            int freshen)
96
0
{
97
0
  static struct strbuf path = STRBUF_INIT;
98
0
  odb_loose_path(source, &path, oid);
99
0
  return check_and_freshen_file(path.buf, freshen);
100
0
}
101
102
int odb_source_loose_has_object(struct odb_source *source,
103
        const struct object_id *oid)
104
0
{
105
0
  return check_and_freshen_source(source, oid, 0);
106
0
}
107
108
int format_object_header(char *str, size_t size, enum object_type type,
109
       size_t objsize)
110
0
{
111
0
  const char *name = type_name(type);
112
113
0
  if (!name)
114
0
    BUG("could not get a type name for 'enum object_type' value %d", type);
115
116
0
  return xsnprintf(str, size, "%s %"PRIuMAX, name, (uintmax_t)objsize) + 1;
117
0
}
118
119
int check_object_signature(struct repository *r, const struct object_id *oid,
120
         void *buf, unsigned long size,
121
         enum object_type type)
122
0
{
123
0
  const struct git_hash_algo *algo =
124
0
    oid->algo ? &hash_algos[oid->algo] : r->hash_algo;
125
0
  struct object_id real_oid;
126
127
0
  hash_object_file(algo, buf, size, type, &real_oid);
128
129
0
  return !oideq(oid, &real_oid) ? -1 : 0;
130
0
}
131
132
int stream_object_signature(struct repository *r, const struct object_id *oid)
133
0
{
134
0
  struct object_id real_oid;
135
0
  unsigned long size;
136
0
  enum object_type obj_type;
137
0
  struct git_istream *st;
138
0
  struct git_hash_ctx c;
139
0
  char hdr[MAX_HEADER_LEN];
140
0
  int hdrlen;
141
142
0
  st = open_istream(r, oid, &obj_type, &size, NULL);
143
0
  if (!st)
144
0
    return -1;
145
146
  /* Generate the header */
147
0
  hdrlen = format_object_header(hdr, sizeof(hdr), obj_type, size);
148
149
  /* Sha1.. */
150
0
  r->hash_algo->init_fn(&c);
151
0
  git_hash_update(&c, hdr, hdrlen);
152
0
  for (;;) {
153
0
    char buf[1024 * 16];
154
0
    ssize_t readlen = read_istream(st, buf, sizeof(buf));
155
156
0
    if (readlen < 0) {
157
0
      close_istream(st);
158
0
      return -1;
159
0
    }
160
0
    if (!readlen)
161
0
      break;
162
0
    git_hash_update(&c, buf, readlen);
163
0
  }
164
0
  git_hash_final_oid(&real_oid, &c);
165
0
  close_istream(st);
166
0
  return !oideq(oid, &real_oid) ? -1 : 0;
167
0
}
168
169
/*
170
 * Find "oid" as a loose object in given source.
171
 * Returns 0 on success, negative on failure.
172
 *
173
 * The "path" out-parameter will give the path of the object we found (if any).
174
 * Note that it may point to static storage and is only valid until another
175
 * call to stat_loose_object().
176
 */
177
static int stat_loose_object(struct odb_source_loose *loose,
178
           const struct object_id *oid,
179
           struct stat *st, const char **path)
180
0
{
181
0
  static struct strbuf buf = STRBUF_INIT;
182
183
0
  *path = odb_loose_path(loose->source, &buf, oid);
184
0
  if (!lstat(*path, st))
185
0
    return 0;
186
187
0
  return -1;
188
0
}
189
190
/*
191
 * Like stat_loose_object(), but actually open the object and return the
192
 * descriptor. See the caveats on the "path" parameter above.
193
 */
194
static int open_loose_object(struct odb_source_loose *loose,
195
           const struct object_id *oid, const char **path)
196
0
{
197
0
  static struct strbuf buf = STRBUF_INIT;
198
0
  int fd;
199
200
0
  *path = odb_loose_path(loose->source, &buf, oid);
201
0
  fd = git_open(*path);
202
0
  if (fd >= 0)
203
0
    return fd;
204
205
0
  return -1;
206
0
}
207
208
static int quick_has_loose(struct odb_source_loose *loose,
209
         const struct object_id *oid)
210
0
{
211
0
  return !!oidtree_contains(odb_source_loose_cache(loose->source, oid), oid);
212
0
}
213
214
/*
215
 * Map and close the given loose object fd. The path argument is used for
216
 * error reporting.
217
 */
218
static void *map_fd(int fd, const char *path, unsigned long *size)
219
0
{
220
0
  void *map = NULL;
221
0
  struct stat st;
222
223
0
  if (!fstat(fd, &st)) {
224
0
    *size = xsize_t(st.st_size);
225
0
    if (!*size) {
226
      /* mmap() is forbidden on empty files */
227
0
      error(_("object file %s is empty"), path);
228
0
      close(fd);
229
0
      return NULL;
230
0
    }
231
0
    map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
232
0
  }
233
0
  close(fd);
234
0
  return map;
235
0
}
236
237
void *odb_source_loose_map_object(struct odb_source *source,
238
          const struct object_id *oid,
239
          unsigned long *size)
240
0
{
241
0
  const char *p;
242
0
  int fd = open_loose_object(source->loose, oid, &p);
243
244
0
  if (fd < 0)
245
0
    return NULL;
246
0
  return map_fd(fd, p, size);
247
0
}
248
249
enum unpack_loose_header_result unpack_loose_header(git_zstream *stream,
250
                unsigned char *map,
251
                unsigned long mapsize,
252
                void *buffer,
253
                unsigned long bufsiz)
254
0
{
255
0
  int status;
256
257
  /* Get the data stream */
258
0
  memset(stream, 0, sizeof(*stream));
259
0
  stream->next_in = map;
260
0
  stream->avail_in = mapsize;
261
0
  stream->next_out = buffer;
262
0
  stream->avail_out = bufsiz;
263
264
0
  git_inflate_init(stream);
265
0
  obj_read_unlock();
266
0
  status = git_inflate(stream, 0);
267
0
  obj_read_lock();
268
0
  if (status != Z_OK && status != Z_STREAM_END)
269
0
    return ULHR_BAD;
270
271
  /*
272
   * Check if entire header is unpacked in the first iteration.
273
   */
274
0
  if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
275
0
    return ULHR_OK;
276
277
  /*
278
   * We have a header longer than MAX_HEADER_LEN.
279
   */
280
0
  return ULHR_TOO_LONG;
281
0
}
282
283
static void *unpack_loose_rest(git_zstream *stream,
284
             void *buffer, unsigned long size,
285
             const struct object_id *oid)
286
0
{
287
0
  size_t bytes = strlen(buffer) + 1, n;
288
0
  unsigned char *buf = xmallocz(size);
289
0
  int status = Z_OK;
290
291
0
  n = stream->total_out - bytes;
292
0
  if (n > size)
293
0
    n = size;
294
0
  memcpy(buf, (char *) buffer + bytes, n);
295
0
  bytes = n;
296
0
  if (bytes <= size) {
297
    /*
298
     * The above condition must be (bytes <= size), not
299
     * (bytes < size).  In other words, even though we
300
     * expect no more output and set avail_out to zero,
301
     * the input zlib stream may have bytes that express
302
     * "this concludes the stream", and we *do* want to
303
     * eat that input.
304
     *
305
     * Otherwise we would not be able to test that we
306
     * consumed all the input to reach the expected size;
307
     * we also want to check that zlib tells us that all
308
     * went well with status == Z_STREAM_END at the end.
309
     */
310
0
    stream->next_out = buf + bytes;
311
0
    stream->avail_out = size - bytes;
312
0
    while (status == Z_OK) {
313
0
      obj_read_unlock();
314
0
      status = git_inflate(stream, Z_FINISH);
315
0
      obj_read_lock();
316
0
    }
317
0
  }
318
319
0
  if (status != Z_STREAM_END) {
320
0
    error(_("corrupt loose object '%s'"), oid_to_hex(oid));
321
0
    FREE_AND_NULL(buf);
322
0
  } else if (stream->avail_in) {
323
0
    error(_("garbage at end of loose object '%s'"),
324
0
          oid_to_hex(oid));
325
0
    FREE_AND_NULL(buf);
326
0
  }
327
328
0
  return buf;
329
0
}
330
331
/*
332
 * We used to just use "sscanf()", but that's actually way
333
 * too permissive for what we want to check. So do an anal
334
 * object header parse by hand.
335
 */
336
int parse_loose_header(const char *hdr, struct object_info *oi)
337
0
{
338
0
  const char *type_buf = hdr;
339
0
  size_t size;
340
0
  int type, type_len = 0;
341
342
  /*
343
   * The type can be of any size but is followed by
344
   * a space.
345
   */
346
0
  for (;;) {
347
0
    char c = *hdr++;
348
0
    if (!c)
349
0
      return -1;
350
0
    if (c == ' ')
351
0
      break;
352
0
    type_len++;
353
0
  }
354
355
0
  type = type_from_string_gently(type_buf, type_len, 1);
356
0
  if (oi->typep)
357
0
    *oi->typep = type;
358
359
  /*
360
   * The length must follow immediately, and be in canonical
361
   * decimal format (ie "010" is not valid).
362
   */
363
0
  size = *hdr++ - '0';
364
0
  if (size > 9)
365
0
    return -1;
366
0
  if (size) {
367
0
    for (;;) {
368
0
      unsigned long c = *hdr - '0';
369
0
      if (c > 9)
370
0
        break;
371
0
      hdr++;
372
0
      size = st_add(st_mult(size, 10), c);
373
0
    }
374
0
  }
375
376
0
  if (oi->sizep)
377
0
    *oi->sizep = cast_size_t_to_ulong(size);
378
379
  /*
380
   * The length must be followed by a zero byte
381
   */
382
0
  if (*hdr)
383
0
    return -1;
384
385
  /*
386
   * The format is valid, but the type may still be bogus. The
387
   * Caller needs to check its oi->typep.
388
   */
389
0
  return 0;
390
0
}
391
392
int odb_source_loose_read_object_info(struct odb_source *source,
393
              const struct object_id *oid,
394
              struct object_info *oi, int flags)
395
0
{
396
0
  int status = 0;
397
0
  int fd;
398
0
  unsigned long mapsize;
399
0
  const char *path;
400
0
  void *map;
401
0
  git_zstream stream;
402
0
  char hdr[MAX_HEADER_LEN];
403
0
  unsigned long size_scratch;
404
0
  enum object_type type_scratch;
405
406
0
  if (oi->delta_base_oid)
407
0
    oidclr(oi->delta_base_oid, source->odb->repo->hash_algo);
408
409
  /*
410
   * If we don't care about type or size, then we don't
411
   * need to look inside the object at all. Note that we
412
   * do not optimize out the stat call, even if the
413
   * caller doesn't care about the disk-size, since our
414
   * return value implicitly indicates whether the
415
   * object even exists.
416
   */
417
0
  if (!oi->typep && !oi->sizep && !oi->contentp) {
418
0
    struct stat st;
419
0
    if (!oi->disk_sizep && (flags & OBJECT_INFO_QUICK))
420
0
      return quick_has_loose(source->loose, oid) ? 0 : -1;
421
0
    if (stat_loose_object(source->loose, oid, &st, &path) < 0)
422
0
      return -1;
423
0
    if (oi->disk_sizep)
424
0
      *oi->disk_sizep = st.st_size;
425
0
    return 0;
426
0
  }
427
428
0
  fd = open_loose_object(source->loose, oid, &path);
429
0
  if (fd < 0) {
430
0
    if (errno != ENOENT)
431
0
      error_errno(_("unable to open loose object %s"), oid_to_hex(oid));
432
0
    return -1;
433
0
  }
434
0
  map = map_fd(fd, path, &mapsize);
435
0
  if (!map)
436
0
    return -1;
437
438
0
  if (!oi->sizep)
439
0
    oi->sizep = &size_scratch;
440
0
  if (!oi->typep)
441
0
    oi->typep = &type_scratch;
442
443
0
  if (oi->disk_sizep)
444
0
    *oi->disk_sizep = mapsize;
445
446
0
  switch (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr))) {
447
0
  case ULHR_OK:
448
0
    if (parse_loose_header(hdr, oi) < 0)
449
0
      status = error(_("unable to parse %s header"), oid_to_hex(oid));
450
0
    else if (*oi->typep < 0)
451
0
      die(_("invalid object type"));
452
453
0
    if (!oi->contentp)
454
0
      break;
455
0
    *oi->contentp = unpack_loose_rest(&stream, hdr, *oi->sizep, oid);
456
0
    if (*oi->contentp)
457
0
      goto cleanup;
458
459
0
    status = -1;
460
0
    break;
461
0
  case ULHR_BAD:
462
0
    status = error(_("unable to unpack %s header"),
463
0
             oid_to_hex(oid));
464
0
    break;
465
0
  case ULHR_TOO_LONG:
466
0
    status = error(_("header for %s too long, exceeds %d bytes"),
467
0
             oid_to_hex(oid), MAX_HEADER_LEN);
468
0
    break;
469
0
  }
470
471
0
  if (status && (flags & OBJECT_INFO_DIE_IF_CORRUPT))
472
0
    die(_("loose object %s (stored in %s) is corrupt"),
473
0
        oid_to_hex(oid), path);
474
475
0
cleanup:
476
0
  git_inflate_end(&stream);
477
0
  munmap(map, mapsize);
478
0
  if (oi->sizep == &size_scratch)
479
0
    oi->sizep = NULL;
480
0
  if (oi->typep == &type_scratch)
481
0
    oi->typep = NULL;
482
0
  oi->whence = OI_LOOSE;
483
0
  return status;
484
0
}
485
486
static void hash_object_body(const struct git_hash_algo *algo, struct git_hash_ctx *c,
487
           const void *buf, unsigned long len,
488
           struct object_id *oid,
489
           char *hdr, int *hdrlen)
490
0
{
491
0
  algo->init_fn(c);
492
0
  git_hash_update(c, hdr, *hdrlen);
493
0
  git_hash_update(c, buf, len);
494
0
  git_hash_final_oid(oid, c);
495
0
}
496
497
static void write_object_file_prepare(const struct git_hash_algo *algo,
498
              const void *buf, unsigned long len,
499
              enum object_type type, struct object_id *oid,
500
              char *hdr, int *hdrlen)
501
0
{
502
0
  struct git_hash_ctx c;
503
504
  /* Generate the header */
505
0
  *hdrlen = format_object_header(hdr, *hdrlen, type, len);
506
507
  /* Sha1.. */
508
0
  hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen);
509
0
}
510
511
0
#define CHECK_COLLISION_DEST_VANISHED -2
512
513
static int check_collision(const char *source, const char *dest)
514
0
{
515
0
  char buf_source[4096], buf_dest[4096];
516
0
  int fd_source = -1, fd_dest = -1;
517
0
  int ret = 0;
518
519
0
  fd_source = open(source, O_RDONLY);
520
0
  if (fd_source < 0) {
521
0
    ret = error_errno(_("unable to open %s"), source);
522
0
    goto out;
523
0
  }
524
525
0
  fd_dest = open(dest, O_RDONLY);
526
0
  if (fd_dest < 0) {
527
0
    if (errno != ENOENT)
528
0
      ret = error_errno(_("unable to open %s"), dest);
529
0
    else
530
0
      ret = CHECK_COLLISION_DEST_VANISHED;
531
0
    goto out;
532
0
  }
533
534
0
  while (1) {
535
0
    ssize_t sz_a, sz_b;
536
537
0
    sz_a = read_in_full(fd_source, buf_source, sizeof(buf_source));
538
0
    if (sz_a < 0) {
539
0
      ret = error_errno(_("unable to read %s"), source);
540
0
      goto out;
541
0
    }
542
543
0
    sz_b = read_in_full(fd_dest, buf_dest, sizeof(buf_dest));
544
0
    if (sz_b < 0) {
545
0
      ret = error_errno(_("unable to read %s"), dest);
546
0
      goto out;
547
0
    }
548
549
0
    if (sz_a != sz_b || memcmp(buf_source, buf_dest, sz_a)) {
550
0
      ret = error(_("files '%s' and '%s' differ in contents"),
551
0
            source, dest);
552
0
      goto out;
553
0
    }
554
555
0
    if ((size_t) sz_a < sizeof(buf_source))
556
0
      break;
557
0
  }
558
559
0
out:
560
0
  if (fd_source > -1)
561
0
    close(fd_source);
562
0
  if (fd_dest > -1)
563
0
    close(fd_dest);
564
0
  return ret;
565
0
}
566
567
/*
568
 * Move the just written object into its final resting place.
569
 */
570
int finalize_object_file(struct repository *repo,
571
       const char *tmpfile, const char *filename)
572
0
{
573
0
  return finalize_object_file_flags(repo, tmpfile, filename, 0);
574
0
}
575
576
int finalize_object_file_flags(struct repository *repo,
577
             const char *tmpfile, const char *filename,
578
             enum finalize_object_file_flags flags)
579
0
{
580
0
  unsigned retries = 0;
581
0
  int ret;
582
583
0
retry:
584
0
  ret = 0;
585
586
0
  if (object_creation_mode == OBJECT_CREATION_USES_RENAMES)
587
0
    goto try_rename;
588
0
  else if (link(tmpfile, filename))
589
0
    ret = errno;
590
0
  else
591
0
    unlink_or_warn(tmpfile);
592
593
  /*
594
   * Coda hack - coda doesn't like cross-directory links,
595
   * so we fall back to a rename, which will mean that it
596
   * won't be able to check collisions, but that's not a
597
   * big deal.
598
   *
599
   * The same holds for FAT formatted media.
600
   *
601
   * When this succeeds, we just return.  We have nothing
602
   * left to unlink.
603
   */
604
0
  if (ret && ret != EEXIST) {
605
0
    struct stat st;
606
607
0
  try_rename:
608
0
    if (!stat(filename, &st))
609
0
      ret = EEXIST;
610
0
    else if (!rename(tmpfile, filename))
611
0
      goto out;
612
0
    else
613
0
      ret = errno;
614
0
  }
615
0
  if (ret) {
616
0
    if (ret != EEXIST) {
617
0
      int saved_errno = errno;
618
0
      unlink_or_warn(tmpfile);
619
0
      errno = saved_errno;
620
0
      return error_errno(_("unable to write file %s"), filename);
621
0
    }
622
0
    if (!(flags & FOF_SKIP_COLLISION_CHECK)) {
623
0
      ret = check_collision(tmpfile, filename);
624
0
      if (ret == CHECK_COLLISION_DEST_VANISHED) {
625
0
        if (retries++ > 5)
626
0
          return error(_("unable to write repeatedly vanishing file %s"),
627
0
                 filename);
628
0
        goto retry;
629
0
      }
630
0
      else if (ret)
631
0
        return -1;
632
0
    }
633
0
    unlink_or_warn(tmpfile);
634
0
  }
635
636
0
out:
637
0
  if (adjust_shared_perm(repo, filename))
638
0
    return error(_("unable to set permission to '%s'"), filename);
639
0
  return 0;
640
0
}
641
642
void hash_object_file(const struct git_hash_algo *algo, const void *buf,
643
          unsigned long len, enum object_type type,
644
          struct object_id *oid)
645
0
{
646
0
  char hdr[MAX_HEADER_LEN];
647
0
  int hdrlen = sizeof(hdr);
648
649
0
  write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
650
0
}
651
652
struct transaction_packfile {
653
  char *pack_tmp_name;
654
  struct hashfile *f;
655
  off_t offset;
656
  struct pack_idx_option pack_idx_opts;
657
658
  struct pack_idx_entry **written;
659
  uint32_t alloc_written;
660
  uint32_t nr_written;
661
};
662
663
struct odb_transaction {
664
  struct object_database *odb;
665
666
  struct tmp_objdir *objdir;
667
  struct transaction_packfile packfile;
668
};
669
670
static void prepare_loose_object_transaction(struct odb_transaction *transaction)
671
0
{
672
  /*
673
   * We lazily create the temporary object directory
674
   * the first time an object might be added, since
675
   * callers may not know whether any objects will be
676
   * added at the time they call object_file_transaction_begin.
677
   */
678
0
  if (!transaction || transaction->objdir)
679
0
    return;
680
681
0
  transaction->objdir = tmp_objdir_create(transaction->odb->repo, "bulk-fsync");
682
0
  if (transaction->objdir)
683
0
    tmp_objdir_replace_primary_odb(transaction->objdir, 0);
684
0
}
685
686
static void fsync_loose_object_transaction(struct odb_transaction *transaction,
687
             int fd, const char *filename)
688
0
{
689
  /*
690
   * If we have an active ODB transaction, we issue a call that
691
   * cleans the filesystem page cache but avoids a hardware flush
692
   * command. Later on we will issue a single hardware flush
693
   * before renaming the objects to their final names as part of
694
   * flush_batch_fsync.
695
   */
696
0
  if (!transaction || !transaction->objdir ||
697
0
      git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
698
0
    if (errno == ENOSYS)
699
0
      warning(_("core.fsyncMethod = batch is unsupported on this platform"));
700
0
    fsync_or_die(fd, filename);
701
0
  }
702
0
}
703
704
/*
705
 * Cleanup after batch-mode fsync_object_files.
706
 */
707
static void flush_loose_object_transaction(struct odb_transaction *transaction)
708
0
{
709
0
  struct strbuf temp_path = STRBUF_INIT;
710
0
  struct tempfile *temp;
711
712
0
  if (!transaction->objdir)
713
0
    return;
714
715
  /*
716
   * Issue a full hardware flush against a temporary file to ensure
717
   * that all objects are durable before any renames occur. The code in
718
   * fsync_loose_object_transaction has already issued a writeout
719
   * request, but it has not flushed any writeback cache in the storage
720
   * hardware or any filesystem logs. This fsync call acts as a barrier
721
   * to ensure that the data in each new object file is durable before
722
   * the final name is visible.
723
   */
724
0
  strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX",
725
0
        repo_get_object_directory(transaction->odb->repo));
726
0
  temp = xmks_tempfile(temp_path.buf);
727
0
  fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
728
0
  delete_tempfile(&temp);
729
0
  strbuf_release(&temp_path);
730
731
  /*
732
   * Make the object files visible in the primary ODB after their data is
733
   * fully durable.
734
   */
735
0
  tmp_objdir_migrate(transaction->objdir);
736
0
  transaction->objdir = NULL;
737
0
}
738
739
/* Finalize a file on disk, and close it. */
740
static void close_loose_object(struct odb_source *source,
741
             int fd, const char *filename)
742
0
{
743
0
  if (source->will_destroy)
744
0
    goto out;
745
746
0
  if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
747
0
    fsync_loose_object_transaction(source->odb->transaction, fd, filename);
748
0
  else if (fsync_object_files > 0)
749
0
    fsync_or_die(fd, filename);
750
0
  else
751
0
    fsync_component_or_die(FSYNC_COMPONENT_LOOSE_OBJECT, fd,
752
0
               filename);
753
754
0
out:
755
0
  if (close(fd) != 0)
756
0
    die_errno(_("error when closing loose object file"));
757
0
}
758
759
/* Size of directory component, including the ending '/' */
760
static inline int directory_size(const char *filename)
761
0
{
762
0
  const char *s = strrchr(filename, '/');
763
0
  if (!s)
764
0
    return 0;
765
0
  return s - filename + 1;
766
0
}
767
768
/*
769
 * This creates a temporary file in the same directory as the final
770
 * 'filename'
771
 *
772
 * We want to avoid cross-directory filename renames, because those
773
 * can have problems on various filesystems (FAT, NFS, Coda).
774
 */
775
static int create_tmpfile(struct repository *repo,
776
        struct strbuf *tmp, const char *filename)
777
0
{
778
0
  int fd, dirlen = directory_size(filename);
779
780
0
  strbuf_reset(tmp);
781
0
  strbuf_add(tmp, filename, dirlen);
782
0
  strbuf_addstr(tmp, "tmp_obj_XXXXXX");
783
0
  fd = git_mkstemp_mode(tmp->buf, 0444);
784
0
  if (fd < 0 && dirlen && errno == ENOENT) {
785
    /*
786
     * Make sure the directory exists; note that the contents
787
     * of the buffer are undefined after mkstemp returns an
788
     * error, so we have to rewrite the whole buffer from
789
     * scratch.
790
     */
791
0
    strbuf_reset(tmp);
792
0
    strbuf_add(tmp, filename, dirlen - 1);
793
0
    if (mkdir(tmp->buf, 0777) && errno != EEXIST)
794
0
      return -1;
795
0
    if (adjust_shared_perm(repo, tmp->buf))
796
0
      return -1;
797
798
    /* Try again */
799
0
    strbuf_addstr(tmp, "/tmp_obj_XXXXXX");
800
0
    fd = git_mkstemp_mode(tmp->buf, 0444);
801
0
  }
802
0
  return fd;
803
0
}
804
805
/**
806
 * Common steps for loose object writers to start writing loose
807
 * objects:
808
 *
809
 * - Create tmpfile for the loose object.
810
 * - Setup zlib stream for compression.
811
 * - Start to feed header to zlib stream.
812
 *
813
 * Returns a "fd", which should later be provided to
814
 * end_loose_object_common().
815
 */
816
static int start_loose_object_common(struct odb_source *source,
817
             struct strbuf *tmp_file,
818
             const char *filename, unsigned flags,
819
             git_zstream *stream,
820
             unsigned char *buf, size_t buflen,
821
             struct git_hash_ctx *c, struct git_hash_ctx *compat_c,
822
             char *hdr, int hdrlen)
823
0
{
824
0
  const struct git_hash_algo *algo = source->odb->repo->hash_algo;
825
0
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
826
0
  int fd;
827
828
0
  fd = create_tmpfile(source->odb->repo, tmp_file, filename);
829
0
  if (fd < 0) {
830
0
    if (flags & WRITE_OBJECT_SILENT)
831
0
      return -1;
832
0
    else if (errno == EACCES)
833
0
      return error(_("insufficient permission for adding "
834
0
               "an object to repository database %s"),
835
0
             source->path);
836
0
    else
837
0
      return error_errno(
838
0
        _("unable to create temporary file"));
839
0
  }
840
841
  /*  Setup zlib stream for compression */
842
0
  git_deflate_init(stream, zlib_compression_level);
843
0
  stream->next_out = buf;
844
0
  stream->avail_out = buflen;
845
0
  algo->init_fn(c);
846
0
  if (compat && compat_c)
847
0
    compat->init_fn(compat_c);
848
849
  /*  Start to feed header to zlib stream */
850
0
  stream->next_in = (unsigned char *)hdr;
851
0
  stream->avail_in = hdrlen;
852
0
  while (git_deflate(stream, 0) == Z_OK)
853
0
    ; /* nothing */
854
0
  git_hash_update(c, hdr, hdrlen);
855
0
  if (compat && compat_c)
856
0
    git_hash_update(compat_c, hdr, hdrlen);
857
858
0
  return fd;
859
0
}
860
861
/**
862
 * Common steps for the inner git_deflate() loop for writing loose
863
 * objects. Returns what git_deflate() returns.
864
 */
865
static int write_loose_object_common(struct odb_source *source,
866
             struct git_hash_ctx *c, struct git_hash_ctx *compat_c,
867
             git_zstream *stream, const int flush,
868
             unsigned char *in0, const int fd,
869
             unsigned char *compressed,
870
             const size_t compressed_len)
871
0
{
872
0
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
873
0
  int ret;
874
875
0
  ret = git_deflate(stream, flush ? Z_FINISH : 0);
876
0
  git_hash_update(c, in0, stream->next_in - in0);
877
0
  if (compat && compat_c)
878
0
    git_hash_update(compat_c, in0, stream->next_in - in0);
879
0
  if (write_in_full(fd, compressed, stream->next_out - compressed) < 0)
880
0
    die_errno(_("unable to write loose object file"));
881
0
  stream->next_out = compressed;
882
0
  stream->avail_out = compressed_len;
883
884
0
  return ret;
885
0
}
886
887
/**
888
 * Common steps for loose object writers to end writing loose objects:
889
 *
890
 * - End the compression of zlib stream.
891
 * - Get the calculated oid to "oid".
892
 */
893
static int end_loose_object_common(struct odb_source *source,
894
           struct git_hash_ctx *c, struct git_hash_ctx *compat_c,
895
           git_zstream *stream, struct object_id *oid,
896
           struct object_id *compat_oid)
897
0
{
898
0
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
899
0
  int ret;
900
901
0
  ret = git_deflate_end_gently(stream);
902
0
  if (ret != Z_OK)
903
0
    return ret;
904
0
  git_hash_final_oid(oid, c);
905
0
  if (compat && compat_c)
906
0
    git_hash_final_oid(compat_oid, compat_c);
907
908
0
  return Z_OK;
909
0
}
910
911
static int write_loose_object(struct odb_source *source,
912
            const struct object_id *oid, char *hdr,
913
            int hdrlen, const void *buf, unsigned long len,
914
            time_t mtime, unsigned flags)
915
0
{
916
0
  int fd, ret;
917
0
  unsigned char compressed[4096];
918
0
  git_zstream stream;
919
0
  struct git_hash_ctx c;
920
0
  struct object_id parano_oid;
921
0
  static struct strbuf tmp_file = STRBUF_INIT;
922
0
  static struct strbuf filename = STRBUF_INIT;
923
924
0
  if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
925
0
    prepare_loose_object_transaction(source->odb->transaction);
926
927
0
  odb_loose_path(source, &filename, oid);
928
929
0
  fd = start_loose_object_common(source, &tmp_file, filename.buf, flags,
930
0
               &stream, compressed, sizeof(compressed),
931
0
               &c, NULL, hdr, hdrlen);
932
0
  if (fd < 0)
933
0
    return -1;
934
935
  /* Then the data itself.. */
936
0
  stream.next_in = (void *)buf;
937
0
  stream.avail_in = len;
938
0
  do {
939
0
    unsigned char *in0 = stream.next_in;
940
941
0
    ret = write_loose_object_common(source, &c, NULL, &stream, 1, in0, fd,
942
0
            compressed, sizeof(compressed));
943
0
  } while (ret == Z_OK);
944
945
0
  if (ret != Z_STREAM_END)
946
0
    die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid),
947
0
        ret);
948
0
  ret = end_loose_object_common(source, &c, NULL, &stream, &parano_oid, NULL);
949
0
  if (ret != Z_OK)
950
0
    die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
951
0
        ret);
952
0
  if (!oideq(oid, &parano_oid))
953
0
    die(_("confused by unstable object source data for %s"),
954
0
        oid_to_hex(oid));
955
956
0
  close_loose_object(source, fd, tmp_file.buf);
957
958
0
  if (mtime) {
959
0
    struct utimbuf utb;
960
0
    utb.actime = mtime;
961
0
    utb.modtime = mtime;
962
0
    if (utime(tmp_file.buf, &utb) < 0 &&
963
0
        !(flags & WRITE_OBJECT_SILENT))
964
0
      warning_errno(_("failed utime() on %s"), tmp_file.buf);
965
0
  }
966
967
0
  return finalize_object_file_flags(source->odb->repo, tmp_file.buf, filename.buf,
968
0
            FOF_SKIP_COLLISION_CHECK);
969
0
}
970
971
int odb_source_loose_freshen_object(struct odb_source *source,
972
            const struct object_id *oid)
973
0
{
974
0
  return !!check_and_freshen_source(source, oid, 1);
975
0
}
976
977
/*
 * Stream a blob of known size "len" from "in_stream" into a loose
 * object in "source", computing its name into "oid" as we go. When a
 * compatibility hash algorithm is configured, its name is computed in
 * parallel and recorded in the loose object map. Returns 0 on success,
 * -1 if the temporary file could not be created; dies on deflate or
 * size-mismatch errors.
 *
 * Fix: stream.total_in is zlib's uLong (unsigned long); printing it
 * with the signed "%ld" specifier is a format-specifier mismatch
 * (undefined behavior per the printf specification). Print both sides
 * of the comparison as uintmax_t with PRIuMAX.
 */
int odb_source_loose_write_stream(struct odb_source *source,
          struct odb_write_stream *in_stream, size_t len,
          struct object_id *oid)
{
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
  struct object_id compat_oid;
  int fd, ret, err = 0, flush = 0;
  unsigned char compressed[4096];
  git_zstream stream;
  struct git_hash_ctx c, compat_c;
  struct strbuf tmp_file = STRBUF_INIT;
  struct strbuf filename = STRBUF_INIT;
  int dirlen;
  char hdr[MAX_HEADER_LEN];
  int hdrlen;

  if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
    prepare_loose_object_transaction(source->odb->transaction);

  /* Since oid is not determined, save tmp file to odb path. */
  strbuf_addf(&filename, "%s/", source->path);
  hdrlen = format_object_header(hdr, sizeof(hdr), OBJ_BLOB, len);

  /*
   * Common steps for write_loose_object and stream_loose_object to
   * start writing loose objects:
   *
   *  - Create tmpfile for the loose object.
   *  - Setup zlib stream for compression.
   *  - Start to feed header to zlib stream.
   */
  fd = start_loose_object_common(source, &tmp_file, filename.buf, 0,
               &stream, compressed, sizeof(compressed),
               &c, &compat_c, hdr, hdrlen);
  if (fd < 0) {
    err = -1;
    goto cleanup;
  }

  /* Then the data itself.. */
  do {
    unsigned char *in0 = stream.next_in;

    if (!stream.avail_in && !in_stream->is_finished) {
      const void *in = in_stream->read(in_stream, &stream.avail_in);
      stream.next_in = (void *)in;
      in0 = (unsigned char *)in;
      /* All data has been read. */
      if (in_stream->is_finished)
        flush = 1;
    }
    ret = write_loose_object_common(source, &c, &compat_c, &stream, flush, in0, fd,
            compressed, sizeof(compressed));
    /*
     * Unlike write_loose_object(), we do not have the entire
     * buffer. If we get Z_BUF_ERROR due to too few input bytes,
     * then we'll replenish them in the next input_stream->read()
     * call when we loop.
     */
  } while (ret == Z_OK || ret == Z_BUF_ERROR);

  /* The deflated byte count must match header plus payload exactly. */
  if (stream.total_in != len + hdrlen)
    die(_("write stream object %"PRIuMAX" != %"PRIuMAX),
        (uintmax_t)stream.total_in, (uintmax_t)len + hdrlen);

  /*
   * Common steps for write_loose_object and stream_loose_object to
   * end writing loose object:
   *
   *  - End the compression of zlib stream.
   *  - Get the calculated oid.
   */
  if (ret != Z_STREAM_END)
    die(_("unable to stream deflate new object (%d)"), ret);
  ret = end_loose_object_common(source, &c, &compat_c, &stream, oid, &compat_oid);
  if (ret != Z_OK)
    die(_("deflateEnd on stream object failed (%d)"), ret);
  close_loose_object(source, fd, tmp_file.buf);

  /* If the object already exists, freshen it and drop our tmpfile. */
  if (odb_freshen_object(source->odb, oid)) {
    unlink_or_warn(tmp_file.buf);
    goto cleanup;
  }
  odb_loose_path(source, &filename, oid);

  /* We finally know the object path, and create the missing dir. */
  dirlen = directory_size(filename.buf);
  if (dirlen) {
    struct strbuf dir = STRBUF_INIT;
    strbuf_add(&dir, filename.buf, dirlen);

    if (safe_create_dir_in_gitdir(source->odb->repo, dir.buf) &&
        errno != EEXIST) {
      err = error_errno(_("unable to create directory %s"), dir.buf);
      strbuf_release(&dir);
      goto cleanup;
    }
    strbuf_release(&dir);
  }

  err = finalize_object_file_flags(source->odb->repo, tmp_file.buf, filename.buf,
           FOF_SKIP_COLLISION_CHECK);
  if (!err && compat)
    err = repo_add_loose_object_map(source, oid, &compat_oid);
cleanup:
  strbuf_release(&tmp_file);
  strbuf_release(&filename);
  return err;
}
1086
1087
/*
 * Write an in-memory buffer (buf/len) of the given type as a loose
 * object into "source", storing its computed name in "oid". When a
 * compatibility hash algorithm is configured, its name is taken from
 * "compat_oid_in" if provided, otherwise computed here, and recorded
 * in the loose object map. Returns 0 on success (including when the
 * object already existed and was only freshened), -1 on write failure.
 */
int odb_source_loose_write_object(struct odb_source *source,
          const void *buf, unsigned long len,
          enum object_type type, struct object_id *oid,
          struct object_id *compat_oid_in, unsigned flags)
{
  const struct git_hash_algo *algo = source->odb->repo->hash_algo;
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
  struct object_id compat_oid;
  char hdr[MAX_HEADER_LEN];
  int hdrlen = sizeof(hdr);

  /* Generate compat_oid */
  if (compat) {
    if (compat_oid_in)
      oidcpy(&compat_oid, compat_oid_in);
    else if (type == OBJ_BLOB)
      /* Blobs hash identically under both algorithms' content. */
      hash_object_file(compat, buf, len, type, &compat_oid);
    else {
      /*
       * Non-blob objects embed oids, so convert the body to
       * the compat algorithm before hashing it.
       */
      struct strbuf converted = STRBUF_INIT;
      convert_object_file(source->odb->repo, &converted, algo, compat,
              buf, len, type, 0);
      hash_object_file(compat, converted.buf, converted.len,
           type, &compat_oid);
      strbuf_release(&converted);
    }
  }

  /* Normally if we have it in the pack then we do not bother writing
   * it out into .git/objects/??/?{38} file.
   */
  write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
  if (odb_freshen_object(source->odb, oid))
    return 0;
  if (write_loose_object(source, oid, hdr, hdrlen, buf, len, 0, flags))
    return -1;
  if (compat)
    return repo_add_loose_object_map(source, oid, &compat_oid);
  return 0;
}
1126
1127
/*
 * Ensure the object exists in loose form, re-creating it from
 * whatever representation the database has (e.g. a pack) if no
 * source already holds it loose. "mtime" is applied to the new loose
 * file. Returns 0 on success or if a loose copy already exists,
 * negative on error.
 *
 * Fix: the buffer returned via oi.contentp leaked when the compat
 * oid mapping failed; free it on that error path too.
 */
int force_object_loose(struct odb_source *source,
           const struct object_id *oid, time_t mtime)
{
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
  void *buf;
  unsigned long len;
  struct object_info oi = OBJECT_INFO_INIT;
  struct object_id compat_oid;
  enum object_type type;
  char hdr[MAX_HEADER_LEN];
  int hdrlen;
  int ret;

  /* Nothing to do if any source already has it loose. */
  for (struct odb_source *s = source->odb->sources; s; s = s->next)
    if (odb_source_loose_has_object(s, oid))
      return 0;

  oi.typep = &type;
  oi.sizep = &len;
  oi.contentp = &buf;
  if (odb_read_object_info_extended(source->odb, oid, &oi, 0))
    return error(_("cannot read object for %s"), oid_to_hex(oid));
  if (compat) {
    if (repo_oid_to_algop(source->odb->repo, oid, compat, &compat_oid)) {
      free(buf); /* do not leak the object contents on error */
      return error(_("cannot map object %s to %s"),
             oid_to_hex(oid), compat->name);
    }
  }
  hdrlen = format_object_header(hdr, sizeof(hdr), type, len);
  ret = write_loose_object(source, oid, hdr, hdrlen, buf, len, mtime, 0);
  if (!ret && compat)
    ret = repo_add_loose_object_map(source, oid, &compat_oid);
  free(buf);

  return ret;
}
1162
1163
/*
1164
 * We can't use the normal fsck_error_function() for index_mem(),
1165
 * because we don't yet have a valid oid for it to report. Instead,
1166
 * report the minimal fsck error here, and rely on the caller to
1167
 * give more context.
1168
 */
1169
static int hash_format_check_report(struct fsck_options *opts UNUSED,
            void *fsck_report UNUSED,
            enum fsck_msg_type msg_type UNUSED,
            enum fsck_msg_id msg_id UNUSED,
            const char *message)
{
  /* Print only the bare message; non-zero signals a failure to fsck. */
  error(_("object fails fsck: %s"), message);
  return 1;
}
1178
1179
/*
 * Hash (and, with INDEX_WRITE_OBJECT, write) an in-memory buffer as an
 * object of the given type, applying blob content conversion when a
 * path is known and an optional fsck format check. Stores the name in
 * "oid". Returns 0 on success (always 0 when only hashing), or the
 * odb_write_object() result when writing; dies on a failed format check.
 */
static int index_mem(struct index_state *istate,
         struct object_id *oid,
         const void *buf, size_t size,
         enum object_type type,
         const char *path, unsigned flags)
{
  struct strbuf nbuf = STRBUF_INIT;
  int ret = 0;
  int write_object = flags & INDEX_WRITE_OBJECT;

  /* Default to a blob when the caller did not say. */
  if (!type)
    type = OBJ_BLOB;

  /*
   * Convert blobs to git internal format
   */
  if ((type == OBJ_BLOB) && path) {
    if (convert_to_git(istate, path, buf, size, &nbuf,
           get_conv_flags(flags))) {
      /* Conversion happened; hash the converted bytes instead. */
      buf = nbuf.buf;
      size = nbuf.len;
    }
  }
  if (flags & INDEX_FORMAT_CHECK) {
    struct fsck_options opts = FSCK_OPTIONS_DEFAULT;

    /* No oid known yet, so route messages to the minimal reporter. */
    opts.strict = 1;
    opts.error_func = hash_format_check_report;
    if (fsck_buffer(null_oid(istate->repo->hash_algo), type, buf, size, &opts))
      die(_("refusing to create malformed object"));
    fsck_finish(&opts);
  }

  if (write_object)
    ret = odb_write_object(istate->repo->objects, buf, size, type, oid);
  else
    hash_object_file(istate->repo->hash_algo, buf, size, type, oid);

  strbuf_release(&nbuf);
  return ret;
}
1220
1221
/*
 * Index a blob whose content must pass through a conversion filter
 * that reads from a file descriptor. The filtered result is collected
 * into memory, then hashed (and written when INDEX_WRITE_OBJECT is
 * set). The caller must have checked would_convert_to_git_filter_fd().
 */
static int index_stream_convert_blob(struct index_state *istate,
             struct object_id *oid,
             int fd,
             const char *path,
             unsigned flags)
{
  int ret = 0;
  const int write_object = flags & INDEX_WRITE_OBJECT;
  struct strbuf sbuf = STRBUF_INIT;

  assert(path);
  ASSERT(would_convert_to_git_filter_fd(istate, path));

  /* Run the fd-based filter; output lands in sbuf. */
  convert_to_git_filter_fd(istate, path, fd, &sbuf,
         get_conv_flags(flags));

  if (write_object)
    ret = odb_write_object(istate->repo->objects, sbuf.buf, sbuf.len, OBJ_BLOB,
               oid);
  else
    hash_object_file(istate->repo->hash_algo, sbuf.buf, sbuf.len, OBJ_BLOB,
         oid);
  strbuf_release(&sbuf);
  return ret;
}
1246
1247
static int index_pipe(struct index_state *istate, struct object_id *oid,
1248
          int fd, enum object_type type,
1249
          const char *path, unsigned flags)
1250
0
{
1251
0
  struct strbuf sbuf = STRBUF_INIT;
1252
0
  int ret;
1253
1254
0
  if (strbuf_read(&sbuf, fd, 4096) >= 0)
1255
0
    ret = index_mem(istate, oid, sbuf.buf, sbuf.len, type, path, flags);
1256
0
  else
1257
0
    ret = -1;
1258
0
  strbuf_release(&sbuf);
1259
0
  return ret;
1260
0
}
1261
1262
0
/* Files up to this size are read into memory whole instead of mmapped. */
#define SMALL_FILE_SIZE (32*1024)
1263
1264
/*
 * Index "size" bytes read from fd: empty files go straight to
 * index_mem(), small files are read into a heap buffer, and larger
 * files are mmapped to avoid the copy. Returns the index_mem() result
 * or a negative error on short/failed reads.
 */
static int index_core(struct index_state *istate,
          struct object_id *oid, int fd, size_t size,
          enum object_type type, const char *path,
          unsigned flags)
{
  int ret;

  if (!size) {
    /* Empty content: no read needed at all. */
    ret = index_mem(istate, oid, "", size, type, path, flags);
  } else if (size <= SMALL_FILE_SIZE) {
    char *buf = xmalloc(size);
    ssize_t read_result = read_in_full(fd, buf, size);
    if (read_result < 0)
      ret = error_errno(_("read error while indexing %s"),
            path ? path : "<unknown>");
    else if ((size_t) read_result != size)
      ret = error(_("short read while indexing %s"),
            path ? path : "<unknown>");
    else
      ret = index_mem(istate, oid, buf, size, type, path, flags);
    free(buf);
  } else {
    /* Large file: map it read-only rather than copying it in. */
    void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
    ret = index_mem(istate, oid, buf, size, type, path, flags);
    munmap(buf, size);
  }
  return ret;
}
1292
1293
static int already_written(struct odb_transaction *transaction,
1294
         struct object_id *oid)
1295
0
{
1296
  /* The object may already exist in the repository */
1297
0
  if (odb_has_object(transaction->odb, oid,
1298
0
         HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
1299
0
    return 1;
1300
1301
  /* Might want to keep the list sorted */
1302
0
  for (uint32_t i = 0; i < transaction->packfile.nr_written; i++)
1303
0
    if (oideq(&transaction->packfile.written[i]->oid, oid))
1304
0
      return 1;
1305
1306
  /* This is a new object we need to keep */
1307
0
  return 0;
1308
0
}
1309
1310
/* Lazily create backing packfile for the state */
1311
static void prepare_packfile_transaction(struct odb_transaction *transaction,
           unsigned flags)
{
  struct transaction_packfile *state = &transaction->packfile;
  /* Only needed when actually writing, and only once per pack. */
  if (!(flags & INDEX_WRITE_OBJECT) || state->f)
    return;

  state->f = create_tmp_packfile(transaction->odb->repo,
               &state->pack_tmp_name);
  reset_pack_idx_option(&state->pack_idx_opts);

  /* Pretend we are going to write only one object */
  state->offset = write_pack_header(state->f, 1);
  if (!state->offset)
    die_errno("unable to write pack header");
}
1327
1328
/*
1329
 * Read the contents from fd for size bytes, streaming it to the
1330
 * packfile in state while updating the hash in ctx. Signal a failure
1331
 * by returning a negative value when the resulting pack would exceed
1332
 * the pack size limit and this is not the first object in the pack,
1333
 * so that the caller can discard what we wrote from the current pack
1334
 * by truncating it and opening a new one. The caller will then call
1335
 * us again after rewinding the input fd.
1336
 *
1337
 * The already_hashed_to pointer is kept untouched by the caller to
1338
 * make sure we do not hash the same byte when we are called
1339
 * again. This way, the caller does not have to checkpoint its hash
1340
 * status before calling us just in case we ask it to call us again
1341
 * with a new pack.
1342
 */
1343
static int stream_blob_to_pack(struct transaction_packfile *state,
             struct git_hash_ctx *ctx, off_t *already_hashed_to,
             int fd, size_t size, const char *path,
             unsigned flags)
{
  git_zstream s;
  unsigned char ibuf[16384]; /* raw bytes read from fd */
  unsigned char obuf[16384]; /* deflated bytes headed for the pack */
  unsigned hdrlen;
  int status = Z_OK;
  int write_object = (flags & INDEX_WRITE_OBJECT);
  off_t offset = 0;

  git_deflate_init(&s, pack_compression_level);

  /* The in-pack object header precedes the deflated data in obuf. */
  hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), OBJ_BLOB, size);
  s.next_out = obuf + hdrlen;
  s.avail_out = sizeof(obuf) - hdrlen;

  while (status != Z_STREAM_END) {
    if (size && !s.avail_in) {
      size_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
      ssize_t read_result = read_in_full(fd, ibuf, rsize);
      if (read_result < 0)
        die_errno("failed to read from '%s'", path);
      if ((size_t)read_result != rsize)
        die("failed to read %u bytes from '%s'",
            (unsigned)rsize, path);
      offset += rsize;
      /*
       * Hash only bytes not already hashed by a previous
       * attempt (see the comment above this function).
       */
      if (*already_hashed_to < offset) {
        size_t hsize = offset - *already_hashed_to;
        if (rsize < hsize)
          hsize = rsize;
        if (hsize)
          git_hash_update(ctx, ibuf, hsize);
        *already_hashed_to = offset;
      }
      s.next_in = ibuf;
      s.avail_in = rsize;
      size -= rsize;
    }

    /* Finish the zlib stream once all input has been consumed. */
    status = git_deflate(&s, size ? 0 : Z_FINISH);

    if (!s.avail_out || status == Z_STREAM_END) {
      if (write_object) {
        size_t written = s.next_out - obuf;

        /* would we bust the size limit? */
        if (state->nr_written &&
            pack_size_limit_cfg &&
            pack_size_limit_cfg < state->offset + written) {
          git_deflate_abort(&s);
          return -1;
        }

        hashwrite(state->f, obuf, written);
        state->offset += written;
      }
      s.next_out = obuf;
      s.avail_out = sizeof(obuf);
    }

    switch (status) {
    case Z_OK:
    case Z_BUF_ERROR:
    case Z_STREAM_END:
      continue;
    default:
      die("unexpected deflate failure: %d", status);
    }
  }
  git_deflate_end(&s);
  return 0;
}
1418
1419
/*
 * Finalize the transaction's in-progress packfile: discard it if it
 * holds no objects, otherwise finish its trailer, write the index, and
 * move both into place. Resets the packfile state afterwards and
 * re-prepares the database so the new objects are visible. No-op when
 * no pack was ever started.
 */
static void flush_packfile_transaction(struct odb_transaction *transaction)
{
  struct transaction_packfile *state = &transaction->packfile;
  struct repository *repo = transaction->odb->repo;
  unsigned char hash[GIT_MAX_RAWSZ];
  struct strbuf packname = STRBUF_INIT;
  char *idx_tmp_name = NULL;

  if (!state->f)
    return;

  if (state->nr_written == 0) {
    /* Empty pack: just drop the temporary file. */
    close(state->f->fd);
    free_hashfile(state->f);
    unlink(state->pack_tmp_name);
    goto clear_exit;
  } else if (state->nr_written == 1) {
    /* The header's object count of 1 is already correct. */
    finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK,
          CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
  } else {
    /* Rewrite the header with the real object count. */
    int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0);
    fixup_pack_header_footer(repo->hash_algo, fd, hash, state->pack_tmp_name,
           state->nr_written, hash,
           state->offset);
    close(fd);
  }

  strbuf_addf(&packname, "%s/pack/pack-%s.",
        repo_get_object_directory(transaction->odb->repo),
        hash_to_hex_algop(hash, repo->hash_algo));

  stage_tmp_packfiles(repo, &packname, state->pack_tmp_name,
          state->written, state->nr_written, NULL,
          &state->pack_idx_opts, hash, &idx_tmp_name);
  rename_tmp_packfile_idx(repo, &packname, &idx_tmp_name);

  for (uint32_t i = 0; i < state->nr_written; i++)
    free(state->written[i]);

clear_exit:
  free(idx_tmp_name);
  free(state->pack_tmp_name);
  free(state->written);
  memset(state, 0, sizeof(*state));

  strbuf_release(&packname);
  /* Make objects we just wrote available to ourselves */
  odb_reprepare(repo->objects);
}
1468
1469
/*
1470
 * This writes the specified object to a packfile. Objects written here
1471
 * during the same transaction are written to the same packfile. The
1472
 * packfile is not flushed until the transaction is flushed. The caller
1473
 * is expected to ensure a valid transaction is setup for objects to be
1474
 * recorded to.
1475
 *
1476
 * This also bypasses the usual "convert-to-git" dance, and that is on
1477
 * purpose. We could write a streaming version of the converting
1478
 * functions and insert that before feeding the data to fast-import
1479
 * (or equivalent in-core API described above). However, that is
1480
 * somewhat complicated, as we do not know the size of the filter
1481
 * result, which we need to know beforehand when writing a git object.
1482
 * Since the primary motivation for trying to stream from the working
1483
 * tree file and to avoid mmaping it in core is to deal with large
1484
 * binary blobs, they generally do not want to get any conversion, and
1485
 * callers should avoid this code path when filters are requested.
1486
 */
1487
static int index_blob_packfile_transaction(struct odb_transaction *transaction,
             struct object_id *result_oid, int fd,
             size_t size, const char *path,
             unsigned flags)
{
  struct transaction_packfile *state = &transaction->packfile;
  off_t seekback, already_hashed_to;
  struct git_hash_ctx ctx;
  unsigned char obuf[16384];
  unsigned header_len;
  struct hashfile_checkpoint checkpoint;
  struct pack_idx_entry *idx = NULL;

  /* Remember where the input starts so we can retry after a pack split. */
  seekback = lseek(fd, 0, SEEK_CUR);
  if (seekback == (off_t)-1)
    return error("cannot find the current offset");

  /* The object name hashes the loose-style header plus the content. */
  header_len = format_object_header((char *)obuf, sizeof(obuf),
            OBJ_BLOB, size);
  transaction->odb->repo->hash_algo->init_fn(&ctx);
  git_hash_update(&ctx, obuf, header_len);

  /* Note: idx is non-NULL when we are writing */
  if ((flags & INDEX_WRITE_OBJECT) != 0) {
    CALLOC_ARRAY(idx, 1);

    prepare_packfile_transaction(transaction, flags);
    hashfile_checkpoint_init(state->f, &checkpoint);
  }

  already_hashed_to = 0;

  while (1) {
    prepare_packfile_transaction(transaction, flags);
    if (idx) {
      /* Checkpoint so a failed attempt can be truncated away. */
      hashfile_checkpoint(state->f, &checkpoint);
      idx->offset = state->offset;
      crc32_begin(state->f);
    }
    if (!stream_blob_to_pack(state, &ctx, &already_hashed_to,
           fd, size, path, flags))
      break;
    /*
     * Writing this object to the current pack will make
     * it too big; we need to truncate it, start a new
     * pack, and write into it.
     */
    if (!idx)
      BUG("should not happen");
    hashfile_truncate(state->f, &checkpoint);
    state->offset = checkpoint.offset;
    flush_packfile_transaction(transaction);
    /* NOTE(review): idx appears to leak on this error path — confirm. */
    if (lseek(fd, seekback, SEEK_SET) == (off_t)-1)
      return error("cannot seek back");
  }
  git_hash_final_oid(result_oid, &ctx);
  if (!idx)
    return 0;

  idx->crc32 = crc32_end(state->f);
  if (already_written(transaction, result_oid)) {
    /* Duplicate: roll the pack back to the checkpoint. */
    hashfile_truncate(state->f, &checkpoint);
    state->offset = checkpoint.offset;
    free(idx);
  } else {
    oidcpy(&idx->oid, result_oid);
    ALLOC_GROW(state->written,
         state->nr_written + 1,
         state->alloc_written);
    state->written[state->nr_written++] = idx;
  }
  return 0;
}
1560
1561
int index_fd(struct index_state *istate, struct object_id *oid,
1562
       int fd, struct stat *st,
1563
       enum object_type type, const char *path, unsigned flags)
1564
0
{
1565
0
  int ret;
1566
1567
  /*
1568
   * Call xsize_t() only when needed to avoid potentially unnecessary
1569
   * die() for large files.
1570
   */
1571
0
  if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(istate, path)) {
1572
0
    ret = index_stream_convert_blob(istate, oid, fd, path, flags);
1573
0
  } else if (!S_ISREG(st->st_mode)) {
1574
0
    ret = index_pipe(istate, oid, fd, type, path, flags);
1575
0
  } else if ((st->st_size >= 0 &&
1576
0
        (size_t)st->st_size <= repo_settings_get_big_file_threshold(istate->repo)) ||
1577
0
       type != OBJ_BLOB ||
1578
0
       (path && would_convert_to_git(istate, path))) {
1579
0
    ret = index_core(istate, oid, fd, xsize_t(st->st_size),
1580
0
         type, path, flags);
1581
0
  } else {
1582
0
    struct odb_transaction *transaction;
1583
1584
0
    transaction = odb_transaction_begin(the_repository->objects);
1585
0
    ret = index_blob_packfile_transaction(the_repository->objects->transaction,
1586
0
                  oid, fd,
1587
0
                  xsize_t(st->st_size),
1588
0
                  path, flags);
1589
0
    odb_transaction_commit(transaction);
1590
0
  }
1591
1592
0
  close(fd);
1593
0
  return ret;
1594
0
}
1595
1596
/*
 * Index a filesystem entry by path according to its stat type:
 * regular files are opened and indexed via index_fd(), symlinks have
 * their target hashed as a blob, and directories are treated as
 * gitlinks resolved through their checked-out HEAD. Returns 0 on
 * success, negative on error.
 */
int index_path(struct index_state *istate, struct object_id *oid,
         const char *path, struct stat *st, unsigned flags)
{
  int fd;
  struct strbuf sb = STRBUF_INIT;
  int rc = 0;

  switch (st->st_mode & S_IFMT) {
  case S_IFREG:
    fd = open(path, O_RDONLY);
    if (fd < 0)
      return error_errno("open(\"%s\")", path);
    /* index_fd() closes fd for us. */
    if (index_fd(istate, oid, fd, st, OBJ_BLOB, path, flags) < 0)
      return error(_("%s: failed to insert into database"),
             path);
    break;
  case S_IFLNK:
    if (strbuf_readlink(&sb, path, st->st_size))
      return error_errno("readlink(\"%s\")", path);
    /* The link target text itself is the blob content. */
    if (!(flags & INDEX_WRITE_OBJECT))
      hash_object_file(istate->repo->hash_algo, sb.buf, sb.len,
           OBJ_BLOB, oid);
    else if (odb_write_object(istate->repo->objects, sb.buf, sb.len, OBJ_BLOB, oid))
      rc = error(_("%s: failed to insert into database"), path);
    strbuf_release(&sb);
    break;
  case S_IFDIR:
    if (repo_resolve_gitlink_ref(istate->repo, path, "HEAD", oid))
      return error(_("'%s' does not have a commit checked out"), path);
    if (&hash_algos[oid->algo] != istate->repo->hash_algo)
      return error(_("cannot add a submodule of a different hash algorithm"));
    break;
  default:
    return error(_("%s: unsupported file type"), path);
  }
  return rc;
}
1633
1634
int read_pack_header(int fd, struct pack_header *header)
1635
0
{
1636
0
  if (read_in_full(fd, header, sizeof(*header)) != sizeof(*header))
1637
    /* "eof before pack header was fully read" */
1638
0
    return PH_ERROR_EOF;
1639
1640
0
  if (header->hdr_signature != htonl(PACK_SIGNATURE))
1641
    /* "protocol error (pack signature mismatch detected)" */
1642
0
    return PH_ERROR_PACK_SIGNATURE;
1643
0
  if (!pack_version_ok(header->hdr_version))
1644
    /* "protocol error (pack version unsupported)" */
1645
0
    return PH_ERROR_PROTOCOL;
1646
0
  return 0;
1647
0
}
1648
1649
/*
 * Iterate one loose-object fan-out subdirectory (00..ff) under
 * "path". Entries whose names look like the remainder of an object
 * name are reported to obj_cb; anything else goes to cruft_cb; after
 * the directory is exhausted, subdir_cb is invoked once. "path" is
 * used as scratch and restored to its original length before
 * returning. Stops early and returns the first non-zero callback
 * result; a missing directory is not an error.
 */
static int for_each_file_in_obj_subdir(unsigned int subdir_nr,
               struct strbuf *path,
               const struct git_hash_algo *algop,
               each_loose_object_fn obj_cb,
               each_loose_cruft_fn cruft_cb,
               each_loose_subdir_fn subdir_cb,
               void *data)
{
  size_t origlen, baselen;
  DIR *dir;
  struct dirent *de;
  int r = 0;
  struct object_id oid;

  if (subdir_nr > 0xff)
    BUG("invalid loose object subdirectory: %x", subdir_nr);

  origlen = path->len;
  strbuf_complete(path, '/');
  strbuf_addf(path, "%02x", subdir_nr);

  dir = opendir(path->buf);
  if (!dir) {
    if (errno != ENOENT)
      r = error_errno(_("unable to open %s"), path->buf);
    strbuf_setlen(path, origlen);
    return r;
  }

  /* The subdirectory name is the first byte of every oid within it. */
  oid.hash[0] = subdir_nr;
  strbuf_addch(path, '/');
  baselen = path->len;

  while ((de = readdir_skip_dot_and_dotdot(dir))) {
    size_t namelen;

    namelen = strlen(de->d_name);
    strbuf_setlen(path, baselen);
    strbuf_add(path, de->d_name, namelen);
    if (namelen == algop->hexsz - 2 &&
        !hex_to_bytes(oid.hash + 1, de->d_name,
          algop->rawsz - 1)) {
      oid_set_algo(&oid, algop);
      /* Zero-fill the unused tail of the fixed-size hash array. */
      memset(oid.hash + algop->rawsz, 0,
             GIT_MAX_RAWSZ - algop->rawsz);
      if (obj_cb) {
        r = obj_cb(&oid, path->buf, data);
        if (r)
          break;
      }
      continue;
    }

    /* Not a well-formed object name: report it as cruft. */
    if (cruft_cb) {
      r = cruft_cb(de->d_name, path->buf, data);
      if (r)
        break;
    }
  }
  closedir(dir);

  strbuf_setlen(path, baselen - 1);
  if (!r && subdir_cb)
    r = subdir_cb(subdir_nr, path->buf, data);

  strbuf_setlen(path, origlen);

  return r;
}
1718
1719
int for_each_loose_file_in_source(struct odb_source *source,
1720
          each_loose_object_fn obj_cb,
1721
          each_loose_cruft_fn cruft_cb,
1722
          each_loose_subdir_fn subdir_cb,
1723
          void *data)
1724
0
{
1725
0
  struct strbuf buf = STRBUF_INIT;
1726
0
  int r;
1727
1728
0
  strbuf_addstr(&buf, source->path);
1729
0
  for (int i = 0; i < 256; i++) {
1730
0
    r = for_each_file_in_obj_subdir(i, &buf, source->odb->repo->hash_algo,
1731
0
            obj_cb, cruft_cb, subdir_cb, data);
1732
0
    if (r)
1733
0
      break;
1734
0
  }
1735
1736
0
  strbuf_release(&buf);
1737
0
  return r;
1738
0
}
1739
1740
int for_each_loose_object(struct object_database *odb,
1741
        each_loose_object_fn cb, void *data,
1742
        enum for_each_object_flags flags)
1743
0
{
1744
0
  struct odb_source *source;
1745
1746
0
  odb_prepare_alternates(odb);
1747
0
  for (source = odb->sources; source; source = source->next) {
1748
0
    int r = for_each_loose_file_in_source(source, cb, NULL,
1749
0
                  NULL, data);
1750
0
    if (r)
1751
0
      return r;
1752
1753
0
    if (flags & FOR_EACH_OBJECT_LOCAL_ONLY)
1754
0
      break;
1755
0
  }
1756
1757
0
  return 0;
1758
0
}
1759
1760
/* Callback for odb_source_loose_cache(): "data" is the oidtree cache. */
static int append_loose_object(const struct object_id *oid,
             const char *path UNUSED,
             void *data)
{
  oidtree_insert(data, oid);
  return 0;
}
1767
1768
/*
 * Return the source's cached oidtree of loose object names, lazily
 * populating it for the fan-out subdirectory that matches the first
 * byte of "oid". A per-subdirectory bitmap records which directories
 * have already been scanned so each is read at most once.
 */
struct oidtree *odb_source_loose_cache(struct odb_source *source,
               const struct object_id *oid)
{
  int subdir_nr = oid->hash[0];
  struct strbuf buf = STRBUF_INIT;
  size_t word_bits = bitsizeof(source->loose->subdir_seen[0]);
  size_t word_index = subdir_nr / word_bits;
  size_t mask = (size_t)1u << (subdir_nr % word_bits);
  uint32_t *bitmap;

  if (subdir_nr < 0 ||
      (size_t) subdir_nr >= bitsizeof(source->loose->subdir_seen))
    BUG("subdir_nr out of range");

  bitmap = &source->loose->subdir_seen[word_index];
  /* Already scanned this subdirectory: cache is up to date. */
  if (*bitmap & mask)
    return source->loose->cache;
  if (!source->loose->cache) {
    ALLOC_ARRAY(source->loose->cache, 1);
    oidtree_init(source->loose->cache);
  }
  /* Scan the subdirectory, inserting each object name into the tree. */
  strbuf_addstr(&buf, source->path);
  for_each_file_in_obj_subdir(subdir_nr, &buf,
            source->odb->repo->hash_algo,
            append_loose_object,
            NULL, NULL,
            source->loose->cache);
  *bitmap |= mask;
  strbuf_release(&buf);
  return source->loose->cache;
}
1799
1800
/* Free the loose-object cache and forget which subdirs were scanned. */
static void odb_source_loose_clear_cache(struct odb_source_loose *loose)
{
  oidtree_clear(loose->cache);
  FREE_AND_NULL(loose->cache);
  memset(&loose->subdir_seen, 0,
         sizeof(loose->subdir_seen));
}
1807
1808
/* Invalidate the loose-object cache so it is rebuilt on next lookup. */
void odb_source_loose_reprepare(struct odb_source *source)
{
  odb_source_loose_clear_cache(source->loose);
}
1812
1813
/*
 * Stream-inflate the remainder of a loose object whose header has
 * already been consumed into "hdr", hashing the content incrementally
 * instead of holding it in memory (used for objects above the big-file
 * threshold). Verifies that exactly "size" bytes inflate cleanly, that
 * no trailing garbage follows the zlib stream, and that the content
 * hashes to "expected_oid".
 *
 * Returns 0 on success; -1 (after printing an error naming the object
 * or "path") on corruption, trailing data, or hash mismatch.
 */
static int check_stream_oid(git_zstream *stream,
			    const char *hdr,
			    unsigned long size,
			    const char *path,
			    const struct object_id *expected_oid,
			    const struct git_hash_algo *algop)
{
	struct git_hash_ctx c;
	struct object_id real_oid;
	unsigned char buf[4096];
	unsigned long total_read;
	int status = Z_OK;

	/* The object hash covers the "<type> <size>\0" header, too. */
	algop->init_fn(&c);
	git_hash_update(&c, hdr, stream->total_out);

	/*
	 * We already read some bytes into hdr, but the ones up to the NUL
	 * do not count against the object's content size.
	 */
	total_read = stream->total_out - strlen(hdr) - 1;

	/*
	 * This size comparison must be "<=" to read the final zlib packets;
	 * see the comment in unpack_loose_rest for details.
	 */
	while (total_read <= size &&
	       (status == Z_OK ||
		(status == Z_BUF_ERROR && !stream->avail_out))) {
		stream->next_out = buf;
		stream->avail_out = sizeof(buf);
		/* Never inflate past the size announced in the header. */
		if (size - total_read < stream->avail_out)
			stream->avail_out = size - total_read;
		status = git_inflate(stream, Z_FINISH);
		/* Hash whatever this round actually produced. */
		git_hash_update(&c, buf, stream->next_out - buf);
		total_read += stream->next_out - buf;
	}

	if (status != Z_STREAM_END) {
		error(_("corrupt loose object '%s'"), oid_to_hex(expected_oid));
		return -1;
	}
	/* Leftover input after Z_STREAM_END means trailing garbage. */
	if (stream->avail_in) {
		error(_("garbage at end of loose object '%s'"),
		      oid_to_hex(expected_oid));
		return -1;
	}

	git_hash_final_oid(&real_oid, &c);
	if (!oideq(expected_oid, &real_oid)) {
		error(_("hash mismatch for %s (expected %s)"), path,
		      oid_to_hex(expected_oid));
		return -1;
	}

	return 0;
}
1870
1871
/*
 * Read and fully validate the loose object file at "path".
 *
 * On success (return 0), the object's type and size are reported
 * through "oi" (oi->typep and oi->sizep are dereferenced
 * unconditionally, so the caller must have pointed them at storage).
 * For objects small enough to hold in memory, the inflated payload is
 * returned via *contents (ownership presumably passes to the caller —
 * TODO confirm against callers) and the recomputed hash via *real_oid.
 * Blobs larger than the big-file threshold are instead streamed and
 * verified by check_stream_oid(), leaving *contents untouched.
 * Returns -1 on any failure.
 */
int read_loose_object(struct repository *repo,
		      const char *path,
		      const struct object_id *expected_oid,
		      struct object_id *real_oid,
		      void **contents,
		      struct object_info *oi)
{
	int ret = -1;
	int fd;
	void *map = NULL;
	unsigned long mapsize;
	git_zstream stream;
	char hdr[MAX_HEADER_LEN];
	unsigned long *size = oi->sizep;

	/* NOTE(review): map_fd() presumably owns/closes fd — confirm. */
	fd = git_open(path);
	if (fd >= 0)
		map = map_fd(fd, path, &mapsize);
	if (!map) {
		error_errno(_("unable to mmap %s"), path);
		goto out;
	}

	/* Inflate just the "<type> <size>\0" header into hdr. */
	if (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr)) != ULHR_OK) {
		error(_("unable to unpack header of %s"), path);
		goto out_inflate;
	}

	if (parse_loose_header(hdr, oi) < 0) {
		error(_("unable to parse header of %s"), path);
		goto out_inflate;
	}

	/* A negative type means the header named an unknown type. */
	if (*oi->typep < 0) {
		error(_("unable to parse type from header '%s' of %s"),
		      hdr, path);
		goto out_inflate;
	}

	if (*oi->typep == OBJ_BLOB &&
	    *size > repo_settings_get_big_file_threshold(repo)) {
		/* Huge blob: hash while streaming, don't keep it in memory. */
		if (check_stream_oid(&stream, hdr, *size, path, expected_oid,
				     repo->hash_algo) < 0)
			goto out_inflate;
	} else {
		*contents = unpack_loose_rest(&stream, hdr, *size, expected_oid);
		if (!*contents) {
			error(_("unable to unpack contents of %s"), path);
			goto out_inflate;
		}
		/*
		 * Re-hash the inflated payload; on mismatch we fail
		 * silently here — real_oid tells the caller what the
		 * content actually hashed to.
		 */
		hash_object_file(repo->hash_algo,
				 *contents, *size,
				 *oi->typep, real_oid);
		if (!oideq(expected_oid, real_oid))
			goto out_inflate;
	}

	ret = 0; /* everything checks out */

out_inflate:
	/*
	 * NOTE(review): relies on unpack_loose_header() having set up
	 * "stream" even on failure — confirm in its implementation.
	 */
	git_inflate_end(&stream);
out:
	if (map)
		munmap(map, mapsize);
	return ret;
}
1937
1938
struct odb_transaction *object_file_transaction_begin(struct odb_source *source)
1939
0
{
1940
0
  struct object_database *odb = source->odb;
1941
1942
0
  if (odb->transaction)
1943
0
    return NULL;
1944
1945
0
  CALLOC_ARRAY(odb->transaction, 1);
1946
0
  odb->transaction->odb = odb;
1947
1948
0
  return odb->transaction;
1949
0
}
1950
1951
void object_file_transaction_commit(struct odb_transaction *transaction)
1952
0
{
1953
0
  if (!transaction)
1954
0
    return;
1955
1956
  /*
1957
   * Ensure the transaction ending matches the pending transaction.
1958
   */
1959
0
  ASSERT(transaction == transaction->odb->transaction);
1960
1961
0
  flush_loose_object_transaction(transaction);
1962
0
  flush_packfile_transaction(transaction);
1963
0
  transaction->odb->transaction = NULL;
1964
0
  free(transaction);
1965
0
}
1966
1967
struct odb_source_loose *odb_source_loose_new(struct odb_source *source)
1968
0
{
1969
0
  struct odb_source_loose *loose;
1970
0
  CALLOC_ARRAY(loose, 1);
1971
0
  loose->source = source;
1972
0
  return loose;
1973
0
}
1974
1975
void odb_source_loose_free(struct odb_source_loose *loose)
1976
0
{
1977
0
  if (!loose)
1978
0
    return;
1979
0
  odb_source_loose_clear_cache(loose);
1980
0
  loose_object_map_clear(&loose->map);
1981
0
  free(loose);
1982
0
}