Coverage Report

Created: 2026-02-14 06:27

/src/git/object-file.c
Line | Count | Source
1
/*
2
 * GIT - The information manager from hell
3
 *
4
 * Copyright (C) Linus Torvalds, 2005
5
 *
6
 * This handles basic git object files - packing, unpacking,
7
 * creation etc.
8
 */
9
10
#define USE_THE_REPOSITORY_VARIABLE
11
12
#include "git-compat-util.h"
13
#include "convert.h"
14
#include "dir.h"
15
#include "environment.h"
16
#include "fsck.h"
17
#include "gettext.h"
18
#include "hex.h"
19
#include "loose.h"
20
#include "object-file-convert.h"
21
#include "object-file.h"
22
#include "odb.h"
23
#include "odb/streaming.h"
24
#include "oidtree.h"
25
#include "pack.h"
26
#include "packfile.h"
27
#include "path.h"
28
#include "read-cache-ll.h"
29
#include "setup.h"
30
#include "tempfile.h"
31
#include "tmp-objdir.h"
32
33
/* The maximum size for an object header. */
34
#define MAX_HEADER_LEN 32
35
36
static int get_conv_flags(unsigned flags)
37
0
{
38
0
  if (flags & INDEX_RENORMALIZE)
39
0
    return CONV_EOL_RENORMALIZE;
40
0
  else if (flags & INDEX_WRITE_OBJECT)
41
0
    return global_conv_flags_eol | CONV_WRITE_OBJECT;
42
0
  else
43
0
    return 0;
44
0
}
45
46
static void fill_loose_path(struct strbuf *buf,
47
          const struct object_id *oid,
48
          const struct git_hash_algo *algop)
49
0
{
50
0
  for (size_t i = 0; i < algop->rawsz; i++) {
51
0
    static char hex[] = "0123456789abcdef";
52
0
    unsigned int val = oid->hash[i];
53
0
    strbuf_addch(buf, hex[val >> 4]);
54
0
    strbuf_addch(buf, hex[val & 0xf]);
55
0
    if (!i)
56
0
      strbuf_addch(buf, '/');
57
0
  }
58
0
}
59
60
const char *odb_loose_path(struct odb_source *source,
61
         struct strbuf *buf,
62
         const struct object_id *oid)
63
0
{
64
0
  strbuf_reset(buf);
65
0
  strbuf_addstr(buf, source->path);
66
0
  strbuf_addch(buf, '/');
67
0
  fill_loose_path(buf, oid, source->odb->repo->hash_algo);
68
0
  return buf->buf;
69
0
}
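As a hedged, standalone sketch (not part of object-file.c): fill_loose_path() and odb_loose_path() above fan loose objects out by their first hash byte, so an id whose hex form starts with "ce" lands under ".git/objects/ce/". The helper name loose_path_example and the sample blob id below are illustrative only.

#include <stdio.h>

static void loose_path_example(const unsigned char *hash, size_t rawsz,
                               const char *objdir, char *out, size_t outlen)
{
    static const char hex[] = "0123456789abcdef";
    size_t n = snprintf(out, outlen, "%s/", objdir);

    for (size_t i = 0; i < rawsz && n + 3 < outlen; i++) {
        out[n++] = hex[hash[i] >> 4];
        out[n++] = hex[hash[i] & 0xf];
        if (!i)
            out[n++] = '/';   /* split after the first byte, as fill_loose_path() does */
    }
    out[n] = '\0';
}

int main(void)
{
    /* SHA-1 blob id of "hello\n", used purely as sample input */
    const unsigned char hash[20] = {
        0xce, 0x01, 0x36, 0x25, 0x03, 0x0b, 0xa8, 0xdb, 0xa9, 0x06,
        0xf7, 0x56, 0x96, 0x7f, 0x9e, 0x9c, 0xa3, 0x94, 0x46, 0x4a,
    };
    char path[128];

    loose_path_example(hash, sizeof(hash), ".git/objects", path, sizeof(path));
    puts(path); /* .git/objects/ce/013625030ba8dba906f756967f9e9ca394464a */
    return 0;
}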
70
71
/* Returns 1 if we have successfully freshened the file, 0 otherwise. */
72
static int freshen_file(const char *fn)
73
0
{
74
0
  return !utime(fn, NULL);
75
0
}
76
77
/*
78
 * All of the check_and_freshen functions return 1 if the file exists and was
79
 * freshened (if freshening was requested), 0 otherwise. If they return
80
 * 0, you should not assume that it is safe to skip a write of the object (it
81
 * either does not exist on disk, or has a stale mtime and may be subject to
82
 * pruning).
83
 */
84
int check_and_freshen_file(const char *fn, int freshen)
85
0
{
86
0
  if (access(fn, F_OK))
87
0
    return 0;
88
0
  if (freshen && !freshen_file(fn))
89
0
    return 0;
90
0
  return 1;
91
0
}
92
93
static int check_and_freshen_source(struct odb_source *source,
94
            const struct object_id *oid,
95
            int freshen)
96
0
{
97
0
  static struct strbuf path = STRBUF_INIT;
98
0
  odb_loose_path(source, &path, oid);
99
0
  return check_and_freshen_file(path.buf, freshen);
100
0
}
101
102
int odb_source_loose_has_object(struct odb_source *source,
103
        const struct object_id *oid)
104
0
{
105
0
  return check_and_freshen_source(source, oid, 0);
106
0
}
107
108
int format_object_header(char *str, size_t size, enum object_type type,
109
       size_t objsize)
110
0
{
111
0
  const char *name = type_name(type);
112
113
0
  if (!name)
114
0
    BUG("could not get a type name for 'enum object_type' value %d", type);
115
116
0
  return xsnprintf(str, size, "%s %"PRIuMAX, name, (uintmax_t)objsize) + 1;
117
0
}
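A minimal sketch of what format_object_header() above produces, using plain snprintf() in place of git's xsnprintf(): the buffer holds "<type> <decimal size>" followed by a NUL byte, and the returned length counts that trailing NUL.

#include <stdio.h>

int main(void)
{
    char hdr[32];
    /* e.g. "blob 1234" plus the terminating NUL: 9 + 1 = 10 bytes */
    int hdrlen = snprintf(hdr, sizeof(hdr), "blob %lu", 1234UL) + 1;

    printf("%s (%d bytes including NUL)\n", hdr, hdrlen);
    return 0;
}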
118
119
int check_object_signature(struct repository *r, const struct object_id *oid,
120
         void *buf, unsigned long size,
121
         enum object_type type)
122
0
{
123
0
  const struct git_hash_algo *algo =
124
0
    oid->algo ? &hash_algos[oid->algo] : r->hash_algo;
125
0
  struct object_id real_oid;
126
127
0
  hash_object_file(algo, buf, size, type, &real_oid);
128
129
0
  return !oideq(oid, &real_oid) ? -1 : 0;
130
0
}
131
132
int stream_object_signature(struct repository *r, const struct object_id *oid)
133
0
{
134
0
  struct object_id real_oid;
135
0
  struct odb_read_stream *st;
136
0
  struct git_hash_ctx c;
137
0
  char hdr[MAX_HEADER_LEN];
138
0
  int hdrlen;
139
140
0
  st = odb_read_stream_open(r->objects, oid, NULL);
141
0
  if (!st)
142
0
    return -1;
143
144
  /* Generate the header */
145
0
  hdrlen = format_object_header(hdr, sizeof(hdr), st->type, st->size);
146
147
  /* Sha1.. */
148
0
  r->hash_algo->init_fn(&c);
149
0
  git_hash_update(&c, hdr, hdrlen);
150
0
  for (;;) {
151
0
    char buf[1024 * 16];
152
0
    ssize_t readlen = odb_read_stream_read(st, buf, sizeof(buf));
153
154
0
    if (readlen < 0) {
155
0
      odb_read_stream_close(st);
156
0
      return -1;
157
0
    }
158
0
    if (!readlen)
159
0
      break;
160
0
    git_hash_update(&c, buf, readlen);
161
0
  }
162
0
  git_hash_final_oid(&real_oid, &c);
163
0
  odb_read_stream_close(st);
164
0
  return !oideq(oid, &real_oid) ? -1 : 0;
165
0
}
166
167
/*
168
 * Find "oid" as a loose object in given source.
169
 * Returns 0 on success, negative on failure.
170
 *
171
 * The "path" out-parameter will give the path of the object we found (if any).
172
 * Note that it may point to static storage and is only valid until another
173
 * call to stat_loose_object().
174
 */
175
static int stat_loose_object(struct odb_source_loose *loose,
176
           const struct object_id *oid,
177
           struct stat *st, const char **path)
178
0
{
179
0
  static struct strbuf buf = STRBUF_INIT;
180
181
0
  *path = odb_loose_path(loose->source, &buf, oid);
182
0
  if (!lstat(*path, st))
183
0
    return 0;
184
185
0
  return -1;
186
0
}
187
188
/*
189
 * Like stat_loose_object(), but actually open the object and return the
190
 * descriptor. See the caveats on the "path" parameter above.
191
 */
192
static int open_loose_object(struct odb_source_loose *loose,
193
           const struct object_id *oid, const char **path)
194
0
{
195
0
  static struct strbuf buf = STRBUF_INIT;
196
0
  int fd;
197
198
0
  *path = odb_loose_path(loose->source, &buf, oid);
199
0
  fd = git_open(*path);
200
0
  if (fd >= 0)
201
0
    return fd;
202
203
0
  return -1;
204
0
}
205
206
static int quick_has_loose(struct odb_source_loose *loose,
207
         const struct object_id *oid)
208
0
{
209
0
  return !!oidtree_contains(odb_source_loose_cache(loose->source, oid), oid);
210
0
}
211
212
/*
213
 * Map and close the given loose object fd. The path argument is used for
214
 * error reporting.
215
 */
216
static void *map_fd(int fd, const char *path, unsigned long *size)
217
0
{
218
0
  void *map = NULL;
219
0
  struct stat st;
220
221
0
  if (!fstat(fd, &st)) {
222
0
    *size = xsize_t(st.st_size);
223
0
    if (!*size) {
224
      /* mmap() is forbidden on empty files */
225
0
      error(_("object file %s is empty"), path);
226
0
      close(fd);
227
0
      return NULL;
228
0
    }
229
0
    map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
230
0
  }
231
0
  close(fd);
232
0
  return map;
233
0
}
234
235
static void *odb_source_loose_map_object(struct odb_source *source,
236
           const struct object_id *oid,
237
           unsigned long *size)
238
0
{
239
0
  const char *p;
240
0
  int fd = open_loose_object(source->loose, oid, &p);
241
242
0
  if (fd < 0)
243
0
    return NULL;
244
0
  return map_fd(fd, p, size);
245
0
}
246
247
enum unpack_loose_header_result {
248
  ULHR_OK,
249
  ULHR_BAD,
250
  ULHR_TOO_LONG,
251
};
252
253
/**
254
 * unpack_loose_header() initializes the data stream needed to unpack
255
 * a loose object header.
256
 *
257
 * Returns:
258
 *
259
 * - ULHR_OK on success
260
 * - ULHR_BAD on error
261
 * - ULHR_TOO_LONG if the header was too long
262
 *
263
 * It will only parse up to MAX_HEADER_LEN bytes.
264
 */
265
static enum unpack_loose_header_result unpack_loose_header(git_zstream *stream,
266
                 unsigned char *map,
267
                 unsigned long mapsize,
268
                 void *buffer,
269
                 unsigned long bufsiz)
270
0
{
271
0
  int status;
272
273
  /* Get the data stream */
274
0
  memset(stream, 0, sizeof(*stream));
275
0
  stream->next_in = map;
276
0
  stream->avail_in = mapsize;
277
0
  stream->next_out = buffer;
278
0
  stream->avail_out = bufsiz;
279
280
0
  git_inflate_init(stream);
281
0
  obj_read_unlock();
282
0
  status = git_inflate(stream, 0);
283
0
  obj_read_lock();
284
0
  if (status != Z_OK && status != Z_STREAM_END)
285
0
    return ULHR_BAD;
286
287
  /*
288
   * Check if entire header is unpacked in the first iteration.
289
   */
290
0
  if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
291
0
    return ULHR_OK;
292
293
  /*
294
   * We have a header longer than MAX_HEADER_LEN.
295
   */
296
0
  return ULHR_TOO_LONG;
297
0
}
298
299
static void *unpack_loose_rest(git_zstream *stream,
300
             void *buffer, unsigned long size,
301
             const struct object_id *oid)
302
0
{
303
0
  size_t bytes = strlen(buffer) + 1, n;
304
0
  unsigned char *buf = xmallocz(size);
305
0
  int status = Z_OK;
306
307
0
  n = stream->total_out - bytes;
308
0
  if (n > size)
309
0
    n = size;
310
0
  memcpy(buf, (char *) buffer + bytes, n);
311
0
  bytes = n;
312
0
  if (bytes <= size) {
313
    /*
314
     * The above condition must be (bytes <= size), not
315
     * (bytes < size).  In other words, even though we
316
     * expect no more output and set avail_out to zero,
317
     * the input zlib stream may have bytes that express
318
     * "this concludes the stream", and we *do* want to
319
     * eat that input.
320
     *
321
     * Otherwise we would not be able to test that we
322
     * consumed all the input to reach the expected size;
323
     * we also want to check that zlib tells us that all
324
     * went well with status == Z_STREAM_END at the end.
325
     */
326
0
    stream->next_out = buf + bytes;
327
0
    stream->avail_out = size - bytes;
328
0
    while (status == Z_OK) {
329
0
      obj_read_unlock();
330
0
      status = git_inflate(stream, Z_FINISH);
331
0
      obj_read_lock();
332
0
    }
333
0
  }
334
335
0
  if (status != Z_STREAM_END) {
336
0
    error(_("corrupt loose object '%s'"), oid_to_hex(oid));
337
0
    FREE_AND_NULL(buf);
338
0
  } else if (stream->avail_in) {
339
0
    error(_("garbage at end of loose object '%s'"),
340
0
          oid_to_hex(oid));
341
0
    FREE_AND_NULL(buf);
342
0
  }
343
344
0
  return buf;
345
0
}
346
347
/*
348
 * parse_loose_header() parses the starting "<type> <len>\0" of an
349
 * object. If it doesn't follow that format -1 is returned. To check
350
 * the validity of the <type> populate the "typep" in the "struct
351
 * object_info". It will be OBJ_BAD if the object type is unknown. The
352
 * parsed <len> can be retrieved via "oi->sizep", and from there
353
 * passed to unpack_loose_rest().
354
 *
355
 * We used to just use "sscanf()", but that's actually way
356
 * too permissive for what we want to check. So do an anal
357
 * object header parse by hand.
358
 */
359
static int parse_loose_header(const char *hdr, struct object_info *oi)
360
0
{
361
0
  const char *type_buf = hdr;
362
0
  size_t size;
363
0
  int type, type_len = 0;
364
365
  /*
366
   * The type can be of any size but is followed by
367
   * a space.
368
   */
369
0
  for (;;) {
370
0
    char c = *hdr++;
371
0
    if (!c)
372
0
      return -1;
373
0
    if (c == ' ')
374
0
      break;
375
0
    type_len++;
376
0
  }
377
378
0
  type = type_from_string_gently(type_buf, type_len, 1);
379
0
  if (oi->typep)
380
0
    *oi->typep = type;
381
382
  /*
383
   * The length must follow immediately, and be in canonical
384
   * decimal format (ie "010" is not valid).
385
   */
386
0
  size = *hdr++ - '0';
387
0
  if (size > 9)
388
0
    return -1;
389
0
  if (size) {
390
0
    for (;;) {
391
0
      unsigned long c = *hdr - '0';
392
0
      if (c > 9)
393
0
        break;
394
0
      hdr++;
395
0
      size = st_add(st_mult(size, 10), c);
396
0
    }
397
0
  }
398
399
0
  if (oi->sizep)
400
0
    *oi->sizep = cast_size_t_to_ulong(size);
401
402
  /*
403
   * The length must be followed by a zero byte
404
   */
405
0
  if (*hdr)
406
0
    return -1;
407
408
  /*
409
   * The format is valid, but the type may still be bogus. The
410
   * caller needs to check its oi->typep.
411
   */
412
0
  return 0;
413
0
}
414
415
int odb_source_loose_read_object_info(struct odb_source *source,
416
              const struct object_id *oid,
417
              struct object_info *oi, int flags)
418
0
{
419
0
  int ret;
420
0
  int fd;
421
0
  unsigned long mapsize;
422
0
  const char *path;
423
0
  void *map = NULL;
424
0
  git_zstream stream, *stream_to_end = NULL;
425
0
  char hdr[MAX_HEADER_LEN];
426
0
  unsigned long size_scratch;
427
0
  enum object_type type_scratch;
428
429
  /*
430
   * If we don't care about type or size, then we don't
431
   * need to look inside the object at all. Note that we
432
   * do not optimize out the stat call, even if the
433
   * caller doesn't care about the disk-size, since our
434
   * return value implicitly indicates whether the
435
   * object even exists.
436
   */
437
0
  if (!oi || (!oi->typep && !oi->sizep && !oi->contentp)) {
438
0
    struct stat st;
439
440
0
    if ((!oi || !oi->disk_sizep) && (flags & OBJECT_INFO_QUICK)) {
441
0
      ret = quick_has_loose(source->loose, oid) ? 0 : -1;
442
0
      goto out;
443
0
    }
444
445
0
    if (stat_loose_object(source->loose, oid, &st, &path) < 0) {
446
0
      ret = -1;
447
0
      goto out;
448
0
    }
449
450
0
    if (oi && oi->disk_sizep)
451
0
      *oi->disk_sizep = st.st_size;
452
453
0
    ret = 0;
454
0
    goto out;
455
0
  }
456
457
0
  fd = open_loose_object(source->loose, oid, &path);
458
0
  if (fd < 0) {
459
0
    if (errno != ENOENT)
460
0
      error_errno(_("unable to open loose object %s"), oid_to_hex(oid));
461
0
    ret = -1;
462
0
    goto out;
463
0
  }
464
465
0
  map = map_fd(fd, path, &mapsize);
466
0
  if (!map) {
467
0
    ret = -1;
468
0
    goto out;
469
0
  }
470
471
0
  if (oi->disk_sizep)
472
0
    *oi->disk_sizep = mapsize;
473
474
0
  stream_to_end = &stream;
475
476
0
  switch (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr))) {
477
0
  case ULHR_OK:
478
0
    if (!oi->sizep)
479
0
      oi->sizep = &size_scratch;
480
0
    if (!oi->typep)
481
0
      oi->typep = &type_scratch;
482
483
0
    if (parse_loose_header(hdr, oi) < 0) {
484
0
      ret = error(_("unable to parse %s header"), oid_to_hex(oid));
485
0
      goto corrupt;
486
0
    }
487
488
0
    if (*oi->typep < 0)
489
0
      die(_("invalid object type"));
490
491
0
    if (oi->contentp) {
492
0
      *oi->contentp = unpack_loose_rest(&stream, hdr, *oi->sizep, oid);
493
0
      if (!*oi->contentp) {
494
0
        ret = -1;
495
0
        goto corrupt;
496
0
      }
497
0
    }
498
499
0
    break;
500
0
  case ULHR_BAD:
501
0
    ret = error(_("unable to unpack %s header"),
502
0
          oid_to_hex(oid));
503
0
    goto corrupt;
504
0
  case ULHR_TOO_LONG:
505
0
    ret = error(_("header for %s too long, exceeds %d bytes"),
506
0
          oid_to_hex(oid), MAX_HEADER_LEN);
507
0
    goto corrupt;
508
0
  }
509
510
0
  ret = 0;
511
512
0
corrupt:
513
0
  if (ret && (flags & OBJECT_INFO_DIE_IF_CORRUPT))
514
0
    die(_("loose object %s (stored in %s) is corrupt"),
515
0
        oid_to_hex(oid), path);
516
517
0
out:
518
0
  if (stream_to_end)
519
0
    git_inflate_end(stream_to_end);
520
0
  if (map)
521
0
    munmap(map, mapsize);
522
0
  if (oi) {
523
0
    if (oi->sizep == &size_scratch)
524
0
      oi->sizep = NULL;
525
0
    if (oi->typep == &type_scratch)
526
0
      oi->typep = NULL;
527
0
    if (oi->delta_base_oid)
528
0
      oidclr(oi->delta_base_oid, source->odb->repo->hash_algo);
529
0
    if (!ret)
530
0
      oi->whence = OI_LOOSE;
531
0
  }
532
533
0
  return ret;
534
0
}
535
536
static void hash_object_body(const struct git_hash_algo *algo, struct git_hash_ctx *c,
537
           const void *buf, unsigned long len,
538
           struct object_id *oid,
539
           char *hdr, int *hdrlen)
540
0
{
541
0
  algo->init_fn(c);
542
0
  git_hash_update(c, hdr, *hdrlen);
543
0
  git_hash_update(c, buf, len);
544
0
  git_hash_final_oid(oid, c);
545
0
}
546
547
static void write_object_file_prepare(const struct git_hash_algo *algo,
548
              const void *buf, unsigned long len,
549
              enum object_type type, struct object_id *oid,
550
              char *hdr, int *hdrlen)
551
0
{
552
0
  struct git_hash_ctx c;
553
554
  /* Generate the header */
555
0
  *hdrlen = format_object_header(hdr, *hdrlen, type, len);
556
557
  /* Sha1.. */
558
0
  hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen);
559
0
}
560
561
0
#define CHECK_COLLISION_DEST_VANISHED -2
562
563
static int check_collision(const char *source, const char *dest)
564
0
{
565
0
  char buf_source[4096], buf_dest[4096];
566
0
  int fd_source = -1, fd_dest = -1;
567
0
  int ret = 0;
568
569
0
  fd_source = open(source, O_RDONLY);
570
0
  if (fd_source < 0) {
571
0
    ret = error_errno(_("unable to open %s"), source);
572
0
    goto out;
573
0
  }
574
575
0
  fd_dest = open(dest, O_RDONLY);
576
0
  if (fd_dest < 0) {
577
0
    if (errno != ENOENT)
578
0
      ret = error_errno(_("unable to open %s"), dest);
579
0
    else
580
0
      ret = CHECK_COLLISION_DEST_VANISHED;
581
0
    goto out;
582
0
  }
583
584
0
  while (1) {
585
0
    ssize_t sz_a, sz_b;
586
587
0
    sz_a = read_in_full(fd_source, buf_source, sizeof(buf_source));
588
0
    if (sz_a < 0) {
589
0
      ret = error_errno(_("unable to read %s"), source);
590
0
      goto out;
591
0
    }
592
593
0
    sz_b = read_in_full(fd_dest, buf_dest, sizeof(buf_dest));
594
0
    if (sz_b < 0) {
595
0
      ret = error_errno(_("unable to read %s"), dest);
596
0
      goto out;
597
0
    }
598
599
0
    if (sz_a != sz_b || memcmp(buf_source, buf_dest, sz_a)) {
600
0
      ret = error(_("files '%s' and '%s' differ in contents"),
601
0
            source, dest);
602
0
      goto out;
603
0
    }
604
605
0
    if ((size_t) sz_a < sizeof(buf_source))
606
0
      break;
607
0
  }
608
609
0
out:
610
0
  if (fd_source > -1)
611
0
    close(fd_source);
612
0
  if (fd_dest > -1)
613
0
    close(fd_dest);
614
0
  return ret;
615
0
}
616
617
/*
618
 * Move the just written object into its final resting place.
619
 */
620
int finalize_object_file(struct repository *repo,
621
       const char *tmpfile, const char *filename)
622
0
{
623
0
  return finalize_object_file_flags(repo, tmpfile, filename, 0);
624
0
}
625
626
int finalize_object_file_flags(struct repository *repo,
627
             const char *tmpfile, const char *filename,
628
             enum finalize_object_file_flags flags)
629
0
{
630
0
  unsigned retries = 0;
631
0
  int ret;
632
633
0
retry:
634
0
  ret = 0;
635
636
0
  if (object_creation_mode == OBJECT_CREATION_USES_RENAMES)
637
0
    goto try_rename;
638
0
  else if (link(tmpfile, filename))
639
0
    ret = errno;
640
0
  else
641
0
    unlink_or_warn(tmpfile);
642
643
  /*
644
   * Coda hack - coda doesn't like cross-directory links,
645
   * so we fall back to a rename, which will mean that it
646
   * won't be able to check collisions, but that's not a
647
   * big deal.
648
   *
649
   * The same holds for FAT formatted media.
650
   *
651
   * When this succeeds, we just return.  We have nothing
652
   * left to unlink.
653
   */
654
0
  if (ret && ret != EEXIST) {
655
0
    struct stat st;
656
657
0
  try_rename:
658
0
    if (!stat(filename, &st))
659
0
      ret = EEXIST;
660
0
    else if (!rename(tmpfile, filename))
661
0
      goto out;
662
0
    else
663
0
      ret = errno;
664
0
  }
665
0
  if (ret) {
666
0
    if (ret != EEXIST) {
667
0
      int saved_errno = errno;
668
0
      unlink_or_warn(tmpfile);
669
0
      errno = saved_errno;
670
0
      return error_errno(_("unable to write file %s"), filename);
671
0
    }
672
0
    if (!(flags & FOF_SKIP_COLLISION_CHECK)) {
673
0
      ret = check_collision(tmpfile, filename);
674
0
      if (ret == CHECK_COLLISION_DEST_VANISHED) {
675
0
        if (retries++ > 5)
676
0
          return error(_("unable to write repeatedly vanishing file %s"),
677
0
                 filename);
678
0
        goto retry;
679
0
      }
680
0
      else if (ret)
681
0
        return -1;
682
0
    }
683
0
    unlink_or_warn(tmpfile);
684
0
  }
685
686
0
out:
687
0
  if (adjust_shared_perm(repo, filename))
688
0
    return error(_("unable to set permission to '%s'"), filename);
689
0
  return 0;
690
0
}
691
692
void hash_object_file(const struct git_hash_algo *algo, const void *buf,
693
          unsigned long len, enum object_type type,
694
          struct object_id *oid)
695
0
{
696
0
  char hdr[MAX_HEADER_LEN];
697
0
  int hdrlen = sizeof(hdr);
698
699
0
  write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
700
0
}
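Hedged illustration of the hashing scheme used by write_object_file_prepare()/hash_object_body() above: an object id is the hash of the header "<type> <size>\0" followed by the content. The sketch below reproduces that for a blob with OpenSSL's SHA-1 (an assumption for the example; object-file.c itself goes through struct git_hash_ctx and the configured hash algorithm). The content "hello\n" and the resulting digest are only a worked example.

#include <openssl/sha.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *content = "hello\n";
    char hdr[32];
    unsigned char raw[SHA_DIGEST_LENGTH];
    SHA_CTX ctx;

    /* "<type> <size>" plus NUL, as in format_object_header() */
    int hdrlen = snprintf(hdr, sizeof(hdr), "blob %zu", strlen(content)) + 1;

    SHA1_Init(&ctx);
    SHA1_Update(&ctx, hdr, hdrlen);
    SHA1_Update(&ctx, content, strlen(content));
    SHA1_Final(raw, &ctx);

    for (int i = 0; i < SHA_DIGEST_LENGTH; i++)
        printf("%02x", raw[i]);
    printf("\n"); /* ce013625030ba8dba906f756967f9e9ca394464a */
    return 0;
}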
701
702
struct transaction_packfile {
703
  char *pack_tmp_name;
704
  struct hashfile *f;
705
  off_t offset;
706
  struct pack_idx_option pack_idx_opts;
707
708
  struct pack_idx_entry **written;
709
  uint32_t alloc_written;
710
  uint32_t nr_written;
711
};
712
713
struct odb_transaction_files {
714
  struct odb_transaction base;
715
716
  struct tmp_objdir *objdir;
717
  struct transaction_packfile packfile;
718
};
719
720
static void prepare_loose_object_transaction(struct odb_transaction *base)
721
0
{
722
0
  struct odb_transaction_files *transaction = (struct odb_transaction_files *)base;
723
724
  /*
725
   * We lazily create the temporary object directory
726
   * the first time an object might be added, since
727
   * callers may not know whether any objects will be
728
   * added at the time they call odb_transaction_files_begin.
729
   */
730
0
  if (!transaction || transaction->objdir)
731
0
    return;
732
733
0
  transaction->objdir = tmp_objdir_create(base->source->odb->repo, "bulk-fsync");
734
0
  if (transaction->objdir)
735
0
    tmp_objdir_replace_primary_odb(transaction->objdir, 0);
736
0
}
737
738
static void fsync_loose_object_transaction(struct odb_transaction *base,
739
             int fd, const char *filename)
740
0
{
741
0
  struct odb_transaction_files *transaction = (struct odb_transaction_files *)base;
742
743
  /*
744
   * If we have an active ODB transaction, we issue a call that
745
   * cleans the filesystem page cache but avoids a hardware flush
746
   * command. Later on we will issue a single hardware flush
747
   * before renaming the objects to their final names as part of
748
   * flush_batch_fsync.
749
   */
750
0
  if (!transaction || !transaction->objdir ||
751
0
      git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
752
0
    if (errno == ENOSYS)
753
0
      warning(_("core.fsyncMethod = batch is unsupported on this platform"));
754
0
    fsync_or_die(fd, filename);
755
0
  }
756
0
}
757
758
/*
759
 * Cleanup after batch-mode fsync_object_files.
760
 */
761
static void flush_loose_object_transaction(struct odb_transaction_files *transaction)
762
0
{
763
0
  struct strbuf temp_path = STRBUF_INIT;
764
0
  struct tempfile *temp;
765
766
0
  if (!transaction->objdir)
767
0
    return;
768
769
  /*
770
   * Issue a full hardware flush against a temporary file to ensure
771
   * that all objects are durable before any renames occur. The code in
772
   * fsync_loose_object_transaction has already issued a writeout
773
   * request, but it has not flushed any writeback cache in the storage
774
   * hardware or any filesystem logs. This fsync call acts as a barrier
775
   * to ensure that the data in each new object file is durable before
776
   * the final name is visible.
777
   */
778
0
  strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX",
779
0
        repo_get_object_directory(transaction->base.source->odb->repo));
780
0
  temp = xmks_tempfile(temp_path.buf);
781
0
  fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
782
0
  delete_tempfile(&temp);
783
0
  strbuf_release(&temp_path);
784
785
  /*
786
   * Make the object files visible in the primary ODB after their data is
787
   * fully durable.
788
   */
789
0
  tmp_objdir_migrate(transaction->objdir);
790
0
  transaction->objdir = NULL;
791
0
}
792
793
/* Finalize a file on disk, and close it. */
794
static void close_loose_object(struct odb_source *source,
795
             int fd, const char *filename)
796
0
{
797
0
  if (source->will_destroy)
798
0
    goto out;
799
800
0
  if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
801
0
    fsync_loose_object_transaction(source->odb->transaction, fd, filename);
802
0
  else if (fsync_object_files > 0)
803
0
    fsync_or_die(fd, filename);
804
0
  else
805
0
    fsync_component_or_die(FSYNC_COMPONENT_LOOSE_OBJECT, fd,
806
0
               filename);
807
808
0
out:
809
0
  if (close(fd) != 0)
810
0
    die_errno(_("error when closing loose object file"));
811
0
}
812
813
/* Size of directory component, including the ending '/' */
814
static inline int directory_size(const char *filename)
815
0
{
816
0
  const char *s = strrchr(filename, '/');
817
0
  if (!s)
818
0
    return 0;
819
0
  return s - filename + 1;
820
0
}
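A small worked example of directory_size() above, under the assumption of a hypothetical caller: the returned length runs through the last '/' inclusive, so the temporary object file created next ends up in the same directory as the final name.

#include <stdio.h>
#include <string.h>

/* mirrors directory_size(): length of the leading directory part,
 * including the final '/', or 0 if there is no '/' at all */
static int dir_size_example(const char *filename)
{
    const char *s = strrchr(filename, '/');
    return s ? (int)(s - filename + 1) : 0;
}

int main(void)
{
    printf("%d\n", dir_size_example("objects/ce/tmp_obj_XXXXXX")); /* 11 -> "objects/ce/" */
    return 0;
}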
821
822
/*
823
 * This creates a temporary file in the same directory as the final
824
 * 'filename'
825
 *
826
 * We want to avoid cross-directory filename renames, because those
827
 * can have problems on various filesystems (FAT, NFS, Coda).
828
 */
829
static int create_tmpfile(struct repository *repo,
830
        struct strbuf *tmp, const char *filename)
831
0
{
832
0
  int fd, dirlen = directory_size(filename);
833
834
0
  strbuf_reset(tmp);
835
0
  strbuf_add(tmp, filename, dirlen);
836
0
  strbuf_addstr(tmp, "tmp_obj_XXXXXX");
837
0
  fd = git_mkstemp_mode(tmp->buf, 0444);
838
0
  if (fd < 0 && dirlen && errno == ENOENT) {
839
    /*
840
     * Make sure the directory exists; note that the contents
841
     * of the buffer are undefined after mkstemp returns an
842
     * error, so we have to rewrite the whole buffer from
843
     * scratch.
844
     */
845
0
    strbuf_reset(tmp);
846
0
    strbuf_add(tmp, filename, dirlen - 1);
847
0
    if (mkdir(tmp->buf, 0777) && errno != EEXIST)
848
0
      return -1;
849
0
    if (adjust_shared_perm(repo, tmp->buf))
850
0
      return -1;
851
852
    /* Try again */
853
0
    strbuf_addstr(tmp, "/tmp_obj_XXXXXX");
854
0
    fd = git_mkstemp_mode(tmp->buf, 0444);
855
0
  }
856
0
  return fd;
857
0
}
858
859
/**
860
 * Common steps for loose object writers to start writing loose
861
 * objects:
862
 *
863
 * - Create tmpfile for the loose object.
864
 * - Setup zlib stream for compression.
865
 * - Start to feed header to zlib stream.
866
 *
867
 * Returns a "fd", which should later be provided to
868
 * end_loose_object_common().
869
 */
870
static int start_loose_object_common(struct odb_source *source,
871
             struct strbuf *tmp_file,
872
             const char *filename, unsigned flags,
873
             git_zstream *stream,
874
             unsigned char *buf, size_t buflen,
875
             struct git_hash_ctx *c, struct git_hash_ctx *compat_c,
876
             char *hdr, int hdrlen)
877
0
{
878
0
  const struct git_hash_algo *algo = source->odb->repo->hash_algo;
879
0
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
880
0
  int fd;
881
882
0
  fd = create_tmpfile(source->odb->repo, tmp_file, filename);
883
0
  if (fd < 0) {
884
0
    if (flags & WRITE_OBJECT_SILENT)
885
0
      return -1;
886
0
    else if (errno == EACCES)
887
0
      return error(_("insufficient permission for adding "
888
0
               "an object to repository database %s"),
889
0
             source->path);
890
0
    else
891
0
      return error_errno(
892
0
        _("unable to create temporary file"));
893
0
  }
894
895
  /*  Setup zlib stream for compression */
896
0
  git_deflate_init(stream, zlib_compression_level);
897
0
  stream->next_out = buf;
898
0
  stream->avail_out = buflen;
899
0
  algo->init_fn(c);
900
0
  if (compat && compat_c)
901
0
    compat->init_fn(compat_c);
902
903
  /*  Start to feed header to zlib stream */
904
0
  stream->next_in = (unsigned char *)hdr;
905
0
  stream->avail_in = hdrlen;
906
0
  while (git_deflate(stream, 0) == Z_OK)
907
0
    ; /* nothing */
908
0
  git_hash_update(c, hdr, hdrlen);
909
0
  if (compat && compat_c)
910
0
    git_hash_update(compat_c, hdr, hdrlen);
911
912
0
  return fd;
913
0
}
914
915
/**
916
 * Common steps for the inner git_deflate() loop for writing loose
917
 * objects. Returns what git_deflate() returns.
918
 */
919
static int write_loose_object_common(struct odb_source *source,
920
             struct git_hash_ctx *c, struct git_hash_ctx *compat_c,
921
             git_zstream *stream, const int flush,
922
             unsigned char *in0, const int fd,
923
             unsigned char *compressed,
924
             const size_t compressed_len)
925
0
{
926
0
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
927
0
  int ret;
928
929
0
  ret = git_deflate(stream, flush ? Z_FINISH : 0);
930
0
  git_hash_update(c, in0, stream->next_in - in0);
931
0
  if (compat && compat_c)
932
0
    git_hash_update(compat_c, in0, stream->next_in - in0);
933
0
  if (write_in_full(fd, compressed, stream->next_out - compressed) < 0)
934
0
    die_errno(_("unable to write loose object file"));
935
0
  stream->next_out = compressed;
936
0
  stream->avail_out = compressed_len;
937
938
0
  return ret;
939
0
}
940
941
/**
942
 * Common steps for loose object writers to end writing loose objects:
943
 *
944
 * - End the compression of zlib stream.
945
 * - Get the calculated oid to "oid".
946
 */
947
static int end_loose_object_common(struct odb_source *source,
948
           struct git_hash_ctx *c, struct git_hash_ctx *compat_c,
949
           git_zstream *stream, struct object_id *oid,
950
           struct object_id *compat_oid)
951
0
{
952
0
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
953
0
  int ret;
954
955
0
  ret = git_deflate_end_gently(stream);
956
0
  if (ret != Z_OK)
957
0
    return ret;
958
0
  git_hash_final_oid(oid, c);
959
0
  if (compat && compat_c)
960
0
    git_hash_final_oid(compat_oid, compat_c);
961
962
0
  return Z_OK;
963
0
}
964
965
static int write_loose_object(struct odb_source *source,
966
            const struct object_id *oid, char *hdr,
967
            int hdrlen, const void *buf, unsigned long len,
968
            time_t mtime, unsigned flags)
969
0
{
970
0
  int fd, ret;
971
0
  unsigned char compressed[4096];
972
0
  git_zstream stream;
973
0
  struct git_hash_ctx c;
974
0
  struct object_id parano_oid;
975
0
  static struct strbuf tmp_file = STRBUF_INIT;
976
0
  static struct strbuf filename = STRBUF_INIT;
977
978
0
  if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
979
0
    prepare_loose_object_transaction(source->odb->transaction);
980
981
0
  odb_loose_path(source, &filename, oid);
982
983
0
  fd = start_loose_object_common(source, &tmp_file, filename.buf, flags,
984
0
               &stream, compressed, sizeof(compressed),
985
0
               &c, NULL, hdr, hdrlen);
986
0
  if (fd < 0)
987
0
    return -1;
988
989
  /* Then the data itself.. */
990
0
  stream.next_in = (void *)buf;
991
0
  stream.avail_in = len;
992
0
  do {
993
0
    unsigned char *in0 = stream.next_in;
994
995
0
    ret = write_loose_object_common(source, &c, NULL, &stream, 1, in0, fd,
996
0
            compressed, sizeof(compressed));
997
0
  } while (ret == Z_OK);
998
999
0
  if (ret != Z_STREAM_END)
1000
0
    die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid),
1001
0
        ret);
1002
0
  ret = end_loose_object_common(source, &c, NULL, &stream, &parano_oid, NULL);
1003
0
  if (ret != Z_OK)
1004
0
    die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
1005
0
        ret);
1006
0
  if (!oideq(oid, &parano_oid))
1007
0
    die(_("confused by unstable object source data for %s"),
1008
0
        oid_to_hex(oid));
1009
1010
0
  close_loose_object(source, fd, tmp_file.buf);
1011
1012
0
  if (mtime) {
1013
0
    struct utimbuf utb;
1014
0
    utb.actime = mtime;
1015
0
    utb.modtime = mtime;
1016
0
    if (utime(tmp_file.buf, &utb) < 0 &&
1017
0
        !(flags & WRITE_OBJECT_SILENT))
1018
0
      warning_errno(_("failed utime() on %s"), tmp_file.buf);
1019
0
  }
1020
1021
0
  return finalize_object_file_flags(source->odb->repo, tmp_file.buf, filename.buf,
1022
0
            FOF_SKIP_COLLISION_CHECK);
1023
0
}
1024
1025
int odb_source_loose_freshen_object(struct odb_source *source,
1026
            const struct object_id *oid)
1027
0
{
1028
0
  return !!check_and_freshen_source(source, oid, 1);
1029
0
}
1030
1031
int odb_source_loose_write_stream(struct odb_source *source,
1032
          struct odb_write_stream *in_stream, size_t len,
1033
          struct object_id *oid)
1034
0
{
1035
0
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
1036
0
  struct object_id compat_oid;
1037
0
  int fd, ret, err = 0, flush = 0;
1038
0
  unsigned char compressed[4096];
1039
0
  git_zstream stream;
1040
0
  struct git_hash_ctx c, compat_c;
1041
0
  struct strbuf tmp_file = STRBUF_INIT;
1042
0
  struct strbuf filename = STRBUF_INIT;
1043
0
  int dirlen;
1044
0
  char hdr[MAX_HEADER_LEN];
1045
0
  int hdrlen;
1046
1047
0
  if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
1048
0
    prepare_loose_object_transaction(source->odb->transaction);
1049
1050
  /* Since oid is not determined, save tmp file to odb path. */
1051
0
  strbuf_addf(&filename, "%s/", source->path);
1052
0
  hdrlen = format_object_header(hdr, sizeof(hdr), OBJ_BLOB, len);
1053
1054
  /*
1055
   * Common steps for write_loose_object and stream_loose_object to
1056
   * start writing loose objects:
1057
   *
1058
   *  - Create tmpfile for the loose object.
1059
   *  - Setup zlib stream for compression.
1060
   *  - Start to feed header to zlib stream.
1061
   */
1062
0
  fd = start_loose_object_common(source, &tmp_file, filename.buf, 0,
1063
0
               &stream, compressed, sizeof(compressed),
1064
0
               &c, &compat_c, hdr, hdrlen);
1065
0
  if (fd < 0) {
1066
0
    err = -1;
1067
0
    goto cleanup;
1068
0
  }
1069
1070
  /* Then the data itself.. */
1071
0
  do {
1072
0
    unsigned char *in0 = stream.next_in;
1073
1074
0
    if (!stream.avail_in && !in_stream->is_finished) {
1075
0
      const void *in = in_stream->read(in_stream, &stream.avail_in);
1076
0
      stream.next_in = (void *)in;
1077
0
      in0 = (unsigned char *)in;
1078
      /* All data has been read. */
1079
0
      if (in_stream->is_finished)
1080
0
        flush = 1;
1081
0
    }
1082
0
    ret = write_loose_object_common(source, &c, &compat_c, &stream, flush, in0, fd,
1083
0
            compressed, sizeof(compressed));
1084
    /*
1085
     * Unlike write_loose_object(), we do not have the entire
1086
     * buffer. If we get Z_BUF_ERROR due to too few input bytes,
1087
     * then we'll replenish them in the next input_stream->read()
1088
     * call when we loop.
1089
     */
1090
0
  } while (ret == Z_OK || ret == Z_BUF_ERROR);
1091
1092
0
  if (stream.total_in != len + hdrlen)
1093
0
    die(_("write stream object %ld != %"PRIuMAX), stream.total_in,
1094
0
        (uintmax_t)len + hdrlen);
1095
1096
  /*
1097
   * Common steps for write_loose_object and stream_loose_object to
1098
   * end writing loose object:
1099
   *
1100
   *  - End the compression of zlib stream.
1101
   *  - Get the calculated oid.
1102
   */
1103
0
  if (ret != Z_STREAM_END)
1104
0
    die(_("unable to stream deflate new object (%d)"), ret);
1105
0
  ret = end_loose_object_common(source, &c, &compat_c, &stream, oid, &compat_oid);
1106
0
  if (ret != Z_OK)
1107
0
    die(_("deflateEnd on stream object failed (%d)"), ret);
1108
0
  close_loose_object(source, fd, tmp_file.buf);
1109
1110
0
  if (odb_freshen_object(source->odb, oid)) {
1111
0
    unlink_or_warn(tmp_file.buf);
1112
0
    goto cleanup;
1113
0
  }
1114
0
  odb_loose_path(source, &filename, oid);
1115
1116
  /* We finally know the object path, and create the missing dir. */
1117
0
  dirlen = directory_size(filename.buf);
1118
0
  if (dirlen) {
1119
0
    struct strbuf dir = STRBUF_INIT;
1120
0
    strbuf_add(&dir, filename.buf, dirlen);
1121
1122
0
    if (safe_create_dir_in_gitdir(source->odb->repo, dir.buf) &&
1123
0
        errno != EEXIST) {
1124
0
      err = error_errno(_("unable to create directory %s"), dir.buf);
1125
0
      strbuf_release(&dir);
1126
0
      goto cleanup;
1127
0
    }
1128
0
    strbuf_release(&dir);
1129
0
  }
1130
1131
0
  err = finalize_object_file_flags(source->odb->repo, tmp_file.buf, filename.buf,
1132
0
           FOF_SKIP_COLLISION_CHECK);
1133
0
  if (!err && compat)
1134
0
    err = repo_add_loose_object_map(source, oid, &compat_oid);
1135
0
cleanup:
1136
0
  strbuf_release(&tmp_file);
1137
0
  strbuf_release(&filename);
1138
0
  return err;
1139
0
}
1140
1141
int odb_source_loose_write_object(struct odb_source *source,
1142
          const void *buf, unsigned long len,
1143
          enum object_type type, struct object_id *oid,
1144
          struct object_id *compat_oid_in, unsigned flags)
1145
0
{
1146
0
  const struct git_hash_algo *algo = source->odb->repo->hash_algo;
1147
0
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
1148
0
  struct object_id compat_oid;
1149
0
  char hdr[MAX_HEADER_LEN];
1150
0
  int hdrlen = sizeof(hdr);
1151
1152
  /* Generate compat_oid */
1153
0
  if (compat) {
1154
0
    if (compat_oid_in)
1155
0
      oidcpy(&compat_oid, compat_oid_in);
1156
0
    else if (type == OBJ_BLOB)
1157
0
      hash_object_file(compat, buf, len, type, &compat_oid);
1158
0
    else {
1159
0
      struct strbuf converted = STRBUF_INIT;
1160
0
      convert_object_file(source->odb->repo, &converted, algo, compat,
1161
0
              buf, len, type, 0);
1162
0
      hash_object_file(compat, converted.buf, converted.len,
1163
0
           type, &compat_oid);
1164
0
      strbuf_release(&converted);
1165
0
    }
1166
0
  }
1167
1168
  /* Normally if we have it in the pack then we do not bother writing
1169
   * it out into .git/objects/??/?{38} file.
1170
   */
1171
0
  write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
1172
0
  if (odb_freshen_object(source->odb, oid))
1173
0
    return 0;
1174
0
  if (write_loose_object(source, oid, hdr, hdrlen, buf, len, 0, flags))
1175
0
    return -1;
1176
0
  if (compat)
1177
0
    return repo_add_loose_object_map(source, oid, &compat_oid);
1178
0
  return 0;
1179
0
}
1180
1181
int force_object_loose(struct odb_source *source,
1182
           const struct object_id *oid, time_t mtime)
1183
0
{
1184
0
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
1185
0
  void *buf;
1186
0
  unsigned long len;
1187
0
  struct object_info oi = OBJECT_INFO_INIT;
1188
0
  struct object_id compat_oid;
1189
0
  enum object_type type;
1190
0
  char hdr[MAX_HEADER_LEN];
1191
0
  int hdrlen;
1192
0
  int ret;
1193
1194
0
  for (struct odb_source *s = source->odb->sources; s; s = s->next)
1195
0
    if (odb_source_loose_has_object(s, oid))
1196
0
      return 0;
1197
1198
0
  oi.typep = &type;
1199
0
  oi.sizep = &len;
1200
0
  oi.contentp = &buf;
1201
0
  if (odb_read_object_info_extended(source->odb, oid, &oi, 0))
1202
0
    return error(_("cannot read object for %s"), oid_to_hex(oid));
1203
0
  if (compat) {
1204
0
    if (repo_oid_to_algop(source->odb->repo, oid, compat, &compat_oid))
1205
0
      return error(_("cannot map object %s to %s"),
1206
0
             oid_to_hex(oid), compat->name);
1207
0
  }
1208
0
  hdrlen = format_object_header(hdr, sizeof(hdr), type, len);
1209
0
  ret = write_loose_object(source, oid, hdr, hdrlen, buf, len, mtime, 0);
1210
0
  if (!ret && compat)
1211
0
    ret = repo_add_loose_object_map(source, oid, &compat_oid);
1212
0
  free(buf);
1213
1214
0
  return ret;
1215
0
}
1216
1217
/*
1218
 * We can't use the normal fsck_error_function() for index_mem(),
1219
 * because we don't yet have a valid oid for it to report. Instead,
1220
 * report the minimal fsck error here, and rely on the caller to
1221
 * give more context.
1222
 */
1223
static int hash_format_check_report(struct fsck_options *opts UNUSED,
1224
            void *fsck_report UNUSED,
1225
            enum fsck_msg_type msg_type UNUSED,
1226
            enum fsck_msg_id msg_id UNUSED,
1227
            const char *message)
1228
0
{
1229
0
  error(_("object fails fsck: %s"), message);
1230
0
  return 1;
1231
0
}
1232
1233
static int index_mem(struct index_state *istate,
1234
         struct object_id *oid,
1235
         const void *buf, size_t size,
1236
         enum object_type type,
1237
         const char *path, unsigned flags)
1238
0
{
1239
0
  struct strbuf nbuf = STRBUF_INIT;
1240
0
  int ret = 0;
1241
0
  int write_object = flags & INDEX_WRITE_OBJECT;
1242
1243
0
  if (!type)
1244
0
    type = OBJ_BLOB;
1245
1246
  /*
1247
   * Convert blobs to git internal format
1248
   */
1249
0
  if ((type == OBJ_BLOB) && path) {
1250
0
    if (convert_to_git(istate, path, buf, size, &nbuf,
1251
0
           get_conv_flags(flags))) {
1252
0
      buf = nbuf.buf;
1253
0
      size = nbuf.len;
1254
0
    }
1255
0
  }
1256
0
  if (flags & INDEX_FORMAT_CHECK) {
1257
0
    struct fsck_options opts = FSCK_OPTIONS_DEFAULT;
1258
1259
0
    opts.strict = 1;
1260
0
    opts.error_func = hash_format_check_report;
1261
0
    if (fsck_buffer(null_oid(istate->repo->hash_algo), type, buf, size, &opts))
1262
0
      die(_("refusing to create malformed object"));
1263
0
    fsck_finish(&opts);
1264
0
  }
1265
1266
0
  if (write_object)
1267
0
    ret = odb_write_object(istate->repo->objects, buf, size, type, oid);
1268
0
  else
1269
0
    hash_object_file(istate->repo->hash_algo, buf, size, type, oid);
1270
1271
0
  strbuf_release(&nbuf);
1272
0
  return ret;
1273
0
}
1274
1275
static int index_stream_convert_blob(struct index_state *istate,
1276
             struct object_id *oid,
1277
             int fd,
1278
             const char *path,
1279
             unsigned flags)
1280
0
{
1281
0
  int ret = 0;
1282
0
  const int write_object = flags & INDEX_WRITE_OBJECT;
1283
0
  struct strbuf sbuf = STRBUF_INIT;
1284
1285
0
  assert(path);
1286
0
  ASSERT(would_convert_to_git_filter_fd(istate, path));
1287
1288
0
  convert_to_git_filter_fd(istate, path, fd, &sbuf,
1289
0
         get_conv_flags(flags));
1290
1291
0
  if (write_object)
1292
0
    ret = odb_write_object(istate->repo->objects, sbuf.buf, sbuf.len, OBJ_BLOB,
1293
0
               oid);
1294
0
  else
1295
0
    hash_object_file(istate->repo->hash_algo, sbuf.buf, sbuf.len, OBJ_BLOB,
1296
0
         oid);
1297
0
  strbuf_release(&sbuf);
1298
0
  return ret;
1299
0
}
1300
1301
static int index_pipe(struct index_state *istate, struct object_id *oid,
1302
          int fd, enum object_type type,
1303
          const char *path, unsigned flags)
1304
0
{
1305
0
  struct strbuf sbuf = STRBUF_INIT;
1306
0
  int ret;
1307
1308
0
  if (strbuf_read(&sbuf, fd, 4096) >= 0)
1309
0
    ret = index_mem(istate, oid, sbuf.buf, sbuf.len, type, path, flags);
1310
0
  else
1311
0
    ret = -1;
1312
0
  strbuf_release(&sbuf);
1313
0
  return ret;
1314
0
}
1315
1316
0
#define SMALL_FILE_SIZE (32*1024)
1317
1318
static int index_core(struct index_state *istate,
1319
          struct object_id *oid, int fd, size_t size,
1320
          enum object_type type, const char *path,
1321
          unsigned flags)
1322
0
{
1323
0
  int ret;
1324
1325
0
  if (!size) {
1326
0
    ret = index_mem(istate, oid, "", size, type, path, flags);
1327
0
  } else if (size <= SMALL_FILE_SIZE) {
1328
0
    char *buf = xmalloc(size);
1329
0
    ssize_t read_result = read_in_full(fd, buf, size);
1330
0
    if (read_result < 0)
1331
0
      ret = error_errno(_("read error while indexing %s"),
1332
0
            path ? path : "<unknown>");
1333
0
    else if ((size_t) read_result != size)
1334
0
      ret = error(_("short read while indexing %s"),
1335
0
            path ? path : "<unknown>");
1336
0
    else
1337
0
      ret = index_mem(istate, oid, buf, size, type, path, flags);
1338
0
    free(buf);
1339
0
  } else {
1340
0
    void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
1341
0
    ret = index_mem(istate, oid, buf, size, type, path, flags);
1342
0
    munmap(buf, size);
1343
0
  }
1344
0
  return ret;
1345
0
}
1346
1347
static int already_written(struct odb_transaction_files *transaction,
1348
         struct object_id *oid)
1349
0
{
1350
  /* The object may already exist in the repository */
1351
0
  if (odb_has_object(transaction->base.source->odb, oid,
1352
0
         HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
1353
0
    return 1;
1354
1355
  /* Might want to keep the list sorted */
1356
0
  for (uint32_t i = 0; i < transaction->packfile.nr_written; i++)
1357
0
    if (oideq(&transaction->packfile.written[i]->oid, oid))
1358
0
      return 1;
1359
1360
  /* This is a new object we need to keep */
1361
0
  return 0;
1362
0
}
1363
1364
/* Lazily create backing packfile for the state */
1365
static void prepare_packfile_transaction(struct odb_transaction_files *transaction,
1366
           unsigned flags)
1367
0
{
1368
0
  struct transaction_packfile *state = &transaction->packfile;
1369
0
  if (!(flags & INDEX_WRITE_OBJECT) || state->f)
1370
0
    return;
1371
1372
0
  state->f = create_tmp_packfile(transaction->base.source->odb->repo,
1373
0
               &state->pack_tmp_name);
1374
0
  reset_pack_idx_option(&state->pack_idx_opts);
1375
1376
  /* Pretend we are going to write only one object */
1377
0
  state->offset = write_pack_header(state->f, 1);
1378
0
  if (!state->offset)
1379
0
    die_errno("unable to write pack header");
1380
0
}
1381
1382
/*
1383
 * Read the contents from fd for size bytes, streaming it to the
1384
 * packfile in state while updating the hash in ctx. Signal a failure
1385
 * by returning a negative value when the resulting pack would exceed
1386
 * the pack size limit and this is not the first object in the pack,
1387
 * so that the caller can discard what we wrote from the current pack
1388
 * by truncating it and opening a new one. The caller will then call
1389
 * us again after rewinding the input fd.
1390
 *
1391
 * The already_hashed_to pointer is kept untouched by the caller to
1392
 * make sure we do not hash the same byte when we are called
1393
 * again. This way, the caller does not have to checkpoint its hash
1394
 * status before calling us just in case we ask it to call us again
1395
 * with a new pack.
1396
 */
1397
static int stream_blob_to_pack(struct transaction_packfile *state,
1398
             struct git_hash_ctx *ctx, off_t *already_hashed_to,
1399
             int fd, size_t size, const char *path,
1400
             unsigned flags)
1401
0
{
1402
0
  git_zstream s;
1403
0
  unsigned char ibuf[16384];
1404
0
  unsigned char obuf[16384];
1405
0
  unsigned hdrlen;
1406
0
  int status = Z_OK;
1407
0
  int write_object = (flags & INDEX_WRITE_OBJECT);
1408
0
  off_t offset = 0;
1409
1410
0
  git_deflate_init(&s, pack_compression_level);
1411
1412
0
  hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), OBJ_BLOB, size);
1413
0
  s.next_out = obuf + hdrlen;
1414
0
  s.avail_out = sizeof(obuf) - hdrlen;
1415
1416
0
  while (status != Z_STREAM_END) {
1417
0
    if (size && !s.avail_in) {
1418
0
      size_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
1419
0
      ssize_t read_result = read_in_full(fd, ibuf, rsize);
1420
0
      if (read_result < 0)
1421
0
        die_errno("failed to read from '%s'", path);
1422
0
      if ((size_t)read_result != rsize)
1423
0
        die("failed to read %u bytes from '%s'",
1424
0
            (unsigned)rsize, path);
1425
0
      offset += rsize;
1426
0
      if (*already_hashed_to < offset) {
1427
0
        size_t hsize = offset - *already_hashed_to;
1428
0
        if (rsize < hsize)
1429
0
          hsize = rsize;
1430
0
        if (hsize)
1431
0
          git_hash_update(ctx, ibuf, hsize);
1432
0
        *already_hashed_to = offset;
1433
0
      }
1434
0
      s.next_in = ibuf;
1435
0
      s.avail_in = rsize;
1436
0
      size -= rsize;
1437
0
    }
1438
1439
0
    status = git_deflate(&s, size ? 0 : Z_FINISH);
1440
1441
0
    if (!s.avail_out || status == Z_STREAM_END) {
1442
0
      if (write_object) {
1443
0
        size_t written = s.next_out - obuf;
1444
1445
        /* would we bust the size limit? */
1446
0
        if (state->nr_written &&
1447
0
            pack_size_limit_cfg &&
1448
0
            pack_size_limit_cfg < state->offset + written) {
1449
0
          git_deflate_abort(&s);
1450
0
          return -1;
1451
0
        }
1452
1453
0
        hashwrite(state->f, obuf, written);
1454
0
        state->offset += written;
1455
0
      }
1456
0
      s.next_out = obuf;
1457
0
      s.avail_out = sizeof(obuf);
1458
0
    }
1459
1460
0
    switch (status) {
1461
0
    case Z_OK:
1462
0
    case Z_BUF_ERROR:
1463
0
    case Z_STREAM_END:
1464
0
      continue;
1465
0
    default:
1466
0
      die("unexpected deflate failure: %d", status);
1467
0
    }
1468
0
  }
1469
0
  git_deflate_end(&s);
1470
0
  return 0;
1471
0
}
1472
1473
static void flush_packfile_transaction(struct odb_transaction_files *transaction)
1474
0
{
1475
0
  struct transaction_packfile *state = &transaction->packfile;
1476
0
  struct repository *repo = transaction->base.source->odb->repo;
1477
0
  unsigned char hash[GIT_MAX_RAWSZ];
1478
0
  struct strbuf packname = STRBUF_INIT;
1479
0
  char *idx_tmp_name = NULL;
1480
1481
0
  if (!state->f)
1482
0
    return;
1483
1484
0
  if (state->nr_written == 0) {
1485
0
    close(state->f->fd);
1486
0
    free_hashfile(state->f);
1487
0
    unlink(state->pack_tmp_name);
1488
0
    goto clear_exit;
1489
0
  } else if (state->nr_written == 1) {
1490
0
    finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK,
1491
0
          CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
1492
0
  } else {
1493
0
    int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0);
1494
0
    fixup_pack_header_footer(repo->hash_algo, fd, hash, state->pack_tmp_name,
1495
0
           state->nr_written, hash,
1496
0
           state->offset);
1497
0
    close(fd);
1498
0
  }
1499
1500
0
  strbuf_addf(&packname, "%s/pack/pack-%s.",
1501
0
        repo_get_object_directory(transaction->base.source->odb->repo),
1502
0
        hash_to_hex_algop(hash, repo->hash_algo));
1503
1504
0
  stage_tmp_packfiles(repo, &packname, state->pack_tmp_name,
1505
0
          state->written, state->nr_written, NULL,
1506
0
          &state->pack_idx_opts, hash, &idx_tmp_name);
1507
0
  rename_tmp_packfile_idx(repo, &packname, &idx_tmp_name);
1508
1509
0
  for (uint32_t i = 0; i < state->nr_written; i++)
1510
0
    free(state->written[i]);
1511
1512
0
clear_exit:
1513
0
  free(idx_tmp_name);
1514
0
  free(state->pack_tmp_name);
1515
0
  free(state->written);
1516
0
  memset(state, 0, sizeof(*state));
1517
1518
0
  strbuf_release(&packname);
1519
  /* Make objects we just wrote available to ourselves */
1520
0
  odb_reprepare(repo->objects);
1521
0
}
1522
1523
/*
1524
 * This writes the specified object to a packfile. Objects written here
1525
 * during the same transaction are written to the same packfile. The
1526
 * packfile is not flushed until the transaction is flushed. The caller
1527
 * is expected to ensure a valid transaction is setup for objects to be
1528
 * recorded to.
1529
 *
1530
 * This also bypasses the usual "convert-to-git" dance, and that is on
1531
 * purpose. We could write a streaming version of the converting
1532
 * functions and insert that before feeding the data to fast-import
1533
 * (or equivalent in-core API described above). However, that is
1534
 * somewhat complicated, as we do not know the size of the filter
1535
 * result, which we need to know beforehand when writing a git object.
1536
 * Since the primary motivation for trying to stream from the working
1537
 * tree file and to avoid mmaping it in core is to deal with large
1538
 * binary blobs, they generally do not want to get any conversion, and
1539
 * callers should avoid this code path when filters are requested.
1540
 */
1541
static int index_blob_packfile_transaction(struct odb_transaction_files *transaction,
1542
             struct object_id *result_oid, int fd,
1543
             size_t size, const char *path,
1544
             unsigned flags)
1545
0
{
1546
0
  struct transaction_packfile *state = &transaction->packfile;
1547
0
  off_t seekback, already_hashed_to;
1548
0
  struct git_hash_ctx ctx;
1549
0
  unsigned char obuf[16384];
1550
0
  unsigned header_len;
1551
0
  struct hashfile_checkpoint checkpoint;
1552
0
  struct pack_idx_entry *idx = NULL;
1553
1554
0
  seekback = lseek(fd, 0, SEEK_CUR);
1555
0
  if (seekback == (off_t)-1)
1556
0
    return error("cannot find the current offset");
1557
1558
0
  header_len = format_object_header((char *)obuf, sizeof(obuf),
1559
0
            OBJ_BLOB, size);
1560
0
  transaction->base.source->odb->repo->hash_algo->init_fn(&ctx);
1561
0
  git_hash_update(&ctx, obuf, header_len);
1562
1563
  /* Note: idx is non-NULL when we are writing */
1564
0
  if ((flags & INDEX_WRITE_OBJECT) != 0) {
1565
0
    CALLOC_ARRAY(idx, 1);
1566
1567
0
    prepare_packfile_transaction(transaction, flags);
1568
0
    hashfile_checkpoint_init(state->f, &checkpoint);
1569
0
  }
1570
1571
0
  already_hashed_to = 0;
1572
1573
0
  while (1) {
1574
0
    prepare_packfile_transaction(transaction, flags);
1575
0
    if (idx) {
1576
0
      hashfile_checkpoint(state->f, &checkpoint);
1577
0
      idx->offset = state->offset;
1578
0
      crc32_begin(state->f);
1579
0
    }
1580
0
    if (!stream_blob_to_pack(state, &ctx, &already_hashed_to,
1581
0
           fd, size, path, flags))
1582
0
      break;
1583
    /*
1584
     * Writing this object to the current pack will make
1585
     * it too big; we need to truncate it, start a new
1586
     * pack, and write into it.
1587
     */
1588
0
    if (!idx)
1589
0
      BUG("should not happen");
1590
0
    hashfile_truncate(state->f, &checkpoint);
1591
0
    state->offset = checkpoint.offset;
1592
0
    flush_packfile_transaction(transaction);
1593
0
    if (lseek(fd, seekback, SEEK_SET) == (off_t)-1)
1594
0
      return error("cannot seek back");
1595
0
  }
1596
0
  git_hash_final_oid(result_oid, &ctx);
1597
0
  if (!idx)
1598
0
    return 0;
1599
1600
0
  idx->crc32 = crc32_end(state->f);
1601
0
  if (already_written(transaction, result_oid)) {
1602
0
    hashfile_truncate(state->f, &checkpoint);
1603
0
    state->offset = checkpoint.offset;
1604
0
    free(idx);
1605
0
  } else {
1606
0
    oidcpy(&idx->oid, result_oid);
1607
0
    ALLOC_GROW(state->written,
1608
0
         state->nr_written + 1,
1609
0
         state->alloc_written);
1610
0
    state->written[state->nr_written++] = idx;
1611
0
  }
1612
0
  return 0;
1613
0
}
1614
1615
int index_fd(struct index_state *istate, struct object_id *oid,
1616
       int fd, struct stat *st,
1617
       enum object_type type, const char *path, unsigned flags)
1618
0
{
1619
0
  int ret;
1620
1621
  /*
1622
   * Call xsize_t() only when needed to avoid potentially unnecessary
1623
   * die() for large files.
1624
   */
1625
0
  if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(istate, path)) {
1626
0
    ret = index_stream_convert_blob(istate, oid, fd, path, flags);
1627
0
  } else if (!S_ISREG(st->st_mode)) {
1628
0
    ret = index_pipe(istate, oid, fd, type, path, flags);
1629
0
  } else if ((st->st_size >= 0 &&
1630
0
        (size_t)st->st_size <= repo_settings_get_big_file_threshold(istate->repo)) ||
1631
0
       type != OBJ_BLOB ||
1632
0
       (path && would_convert_to_git(istate, path))) {
1633
0
    ret = index_core(istate, oid, fd, xsize_t(st->st_size),
1634
0
         type, path, flags);
1635
0
  } else {
1636
0
    struct object_database *odb = the_repository->objects;
1637
0
    struct odb_transaction *transaction;
1638
1639
0
    transaction = odb_transaction_begin(odb);
1640
0
    ret = index_blob_packfile_transaction((struct odb_transaction_files *)odb->transaction,
1641
0
                  oid, fd,
1642
0
                  xsize_t(st->st_size),
1643
0
                  path, flags);
1644
0
    odb_transaction_commit(transaction);
1645
0
  }
1646
1647
0
  close(fd);
1648
0
  return ret;
1649
0
}
1650
1651
int index_path(struct index_state *istate, struct object_id *oid,
1652
         const char *path, struct stat *st, unsigned flags)
1653
0
{
1654
0
  int fd;
1655
0
  struct strbuf sb = STRBUF_INIT;
1656
0
  int rc = 0;
1657
1658
0
  switch (st->st_mode & S_IFMT) {
1659
0
  case S_IFREG:
1660
0
    fd = open(path, O_RDONLY);
1661
0
    if (fd < 0)
1662
0
      return error_errno("open(\"%s\")", path);
1663
0
    if (index_fd(istate, oid, fd, st, OBJ_BLOB, path, flags) < 0)
1664
0
      return error(_("%s: failed to insert into database"),
1665
0
             path);
1666
0
    break;
1667
0
  case S_IFLNK:
1668
0
    if (strbuf_readlink(&sb, path, st->st_size))
1669
0
      return error_errno("readlink(\"%s\")", path);
1670
0
    if (!(flags & INDEX_WRITE_OBJECT))
1671
0
      hash_object_file(istate->repo->hash_algo, sb.buf, sb.len,
1672
0
           OBJ_BLOB, oid);
1673
0
    else if (odb_write_object(istate->repo->objects, sb.buf, sb.len, OBJ_BLOB, oid))
1674
0
      rc = error(_("%s: failed to insert into database"), path);
1675
0
    strbuf_release(&sb);
1676
0
    break;
1677
0
  case S_IFDIR:
1678
0
    if (repo_resolve_gitlink_ref(istate->repo, path, "HEAD", oid))
1679
0
      return error(_("'%s' does not have a commit checked out"), path);
1680
0
    if (&hash_algos[oid->algo] != istate->repo->hash_algo)
1681
0
      return error(_("cannot add a submodule of a different hash algorithm"));
1682
0
    break;
1683
0
  default:
1684
0
    return error(_("%s: unsupported file type"), path);
1685
0
  }
1686
0
  return rc;
1687
0
}
1688
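In the S_IFLNK branch of index_path() above, the blob content of a symbolic link is simply its target string, obtained with strbuf_readlink() and then either hashed or written to the object database. A trivial standalone equivalent using plain readlink() is sketched below.

/*
 * Illustrative sketch only: a symlink's object content is its target.
 */
#include <unistd.h>
#include <stdio.h>
#include <limits.h>

int main(int argc, char **argv)
{
	char target[PATH_MAX];
	ssize_t len;

	if (argc < 2)
		return 1;
	len = readlink(argv[1], target, sizeof(target) - 1);
	if (len < 0) {
		perror("readlink");
		return 1;
	}
	target[len] = '\0';
	/* This string is what would be hashed (or written) as the blob. */
	printf("blob content: %s\n", target);
	return 0;
}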
1689
int read_pack_header(int fd, struct pack_header *header)
1690
0
{
1691
0
  if (read_in_full(fd, header, sizeof(*header)) != sizeof(*header))
1692
    /* "eof before pack header was fully read" */
1693
0
    return PH_ERROR_EOF;
1694
1695
0
  if (header->hdr_signature != htonl(PACK_SIGNATURE))
1696
    /* "protocol error (pack signature mismatch detected)" */
1697
0
    return PH_ERROR_PACK_SIGNATURE;
1698
0
  if (!pack_version_ok(header->hdr_version))
1699
    /* "protocol error (pack version unsupported)" */
1700
0
    return PH_ERROR_PROTOCOL;
1701
0
  return 0;
1702
0
}
1703
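read_pack_header() above validates the fixed 12-byte header that starts every packfile: the literal bytes "PACK", a 4-byte version in network byte order (versions 2 and 3 are accepted), and a 4-byte object count. A self-contained checker of the same layout is sketched below; demo_pack_header is an invented name, not Git's struct pack_header.

/*
 * Illustrative sketch only: parse the 12-byte pack header with plain
 * read() and ntohl().
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

struct demo_pack_header {
	uint32_t signature;  /* the bytes 'P' 'A' 'C' 'K' */
	uint32_t version;    /* htonl(2) or htonl(3) */
	uint32_t nr_objects; /* htonl(count) */
};

int main(int argc, char **argv)
{
	struct demo_pack_header hdr;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;                                   /* eof before header */
	if (memcmp(&hdr.signature, "PACK", 4))
		return 1;                                   /* signature mismatch */
	if (ntohl(hdr.version) != 2 && ntohl(hdr.version) != 3)
		return 1;                                   /* unsupported version */
	printf("%u objects\n", (unsigned)ntohl(hdr.nr_objects));
	close(fd);
	return 0;
}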
1704
static int for_each_file_in_obj_subdir(unsigned int subdir_nr,
1705
               struct strbuf *path,
1706
               const struct git_hash_algo *algop,
1707
               each_loose_object_fn obj_cb,
1708
               each_loose_cruft_fn cruft_cb,
1709
               each_loose_subdir_fn subdir_cb,
1710
               void *data)
1711
0
{
1712
0
  size_t origlen, baselen;
1713
0
  DIR *dir;
1714
0
  struct dirent *de;
1715
0
  int r = 0;
1716
0
  struct object_id oid;
1717
1718
0
  if (subdir_nr > 0xff)
1719
0
    BUG("invalid loose object subdirectory: %x", subdir_nr);
1720
1721
0
  origlen = path->len;
1722
0
  strbuf_complete(path, '/');
1723
0
  strbuf_addf(path, "%02x", subdir_nr);
1724
1725
0
  dir = opendir(path->buf);
1726
0
  if (!dir) {
1727
0
    if (errno != ENOENT)
1728
0
      r = error_errno(_("unable to open %s"), path->buf);
1729
0
    strbuf_setlen(path, origlen);
1730
0
    return r;
1731
0
  }
1732
1733
0
  oid.hash[0] = subdir_nr;
1734
0
  strbuf_addch(path, '/');
1735
0
  baselen = path->len;
1736
1737
0
  while ((de = readdir_skip_dot_and_dotdot(dir))) {
1738
0
    size_t namelen;
1739
1740
0
    namelen = strlen(de->d_name);
1741
0
    strbuf_setlen(path, baselen);
1742
0
    strbuf_add(path, de->d_name, namelen);
1743
0
    if (namelen == algop->hexsz - 2 &&
1744
0
        !hex_to_bytes(oid.hash + 1, de->d_name,
1745
0
          algop->rawsz - 1)) {
1746
0
      oid_set_algo(&oid, algop);
1747
0
      memset(oid.hash + algop->rawsz, 0,
1748
0
             GIT_MAX_RAWSZ - algop->rawsz);
1749
0
      if (obj_cb) {
1750
0
        r = obj_cb(&oid, path->buf, data);
1751
0
        if (r)
1752
0
          break;
1753
0
      }
1754
0
      continue;
1755
0
    }
1756
1757
0
    if (cruft_cb) {
1758
0
      r = cruft_cb(de->d_name, path->buf, data);
1759
0
      if (r)
1760
0
        break;
1761
0
    }
1762
0
  }
1763
0
  closedir(dir);
1764
1765
0
  strbuf_setlen(path, baselen - 1);
1766
0
  if (!r && subdir_cb)
1767
0
    r = subdir_cb(subdir_nr, path->buf, data);
1768
1769
0
  strbuf_setlen(path, origlen);
1770
1771
0
  return r;
1772
0
}
1773
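for_each_file_in_obj_subdir() above walks the loose-object fan-out: the first byte of the object id names one of 256 subdirectories ("%02x"), and the file name carries the remaining hexsz - 2 hex digits, which hex_to_bytes() folds back into the oid. The sketch below shows that path layout and its inverse, assuming SHA-1 (40 hex digits) and using the well-known empty-blob id as sample data.

/*
 * Illustrative sketch only: loose objects live at
 * objects/<2-hex-digit subdir>/<remaining hex digits>.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *hex = "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"; /* empty blob */
	char path[64];

	/* first byte selects one of the 256 subdirectories */
	snprintf(path, sizeof(path), ".git/objects/%.2s/%s", hex, hex + 2);
	printf("%s\n", path);

	/* and the reverse: subdir name + file name give back the id */
	char rebuilt[41];
	memcpy(rebuilt, path + strlen(".git/objects/"), 2);
	memcpy(rebuilt + 2, path + strlen(".git/objects/") + 3, 38);
	rebuilt[40] = '\0';
	printf("match: %d\n", !strcmp(rebuilt, hex));
	return 0;
}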
1774
int for_each_loose_file_in_source(struct odb_source *source,
1775
          each_loose_object_fn obj_cb,
1776
          each_loose_cruft_fn cruft_cb,
1777
          each_loose_subdir_fn subdir_cb,
1778
          void *data)
1779
0
{
1780
0
  struct strbuf buf = STRBUF_INIT;
1781
0
  int r;
1782
1783
0
  strbuf_addstr(&buf, source->path);
1784
0
  for (int i = 0; i < 256; i++) {
1785
0
    r = for_each_file_in_obj_subdir(i, &buf, source->odb->repo->hash_algo,
1786
0
            obj_cb, cruft_cb, subdir_cb, data);
1787
0
    if (r)
1788
0
      break;
1789
0
  }
1790
1791
0
  strbuf_release(&buf);
1792
0
  return r;
1793
0
}
1794
1795
int for_each_loose_object(struct object_database *odb,
1796
        each_loose_object_fn cb, void *data,
1797
        enum for_each_object_flags flags)
1798
0
{
1799
0
  struct odb_source *source;
1800
1801
0
  odb_prepare_alternates(odb);
1802
0
  for (source = odb->sources; source; source = source->next) {
1803
0
    int r = for_each_loose_file_in_source(source, cb, NULL,
1804
0
                  NULL, data);
1805
0
    if (r)
1806
0
      return r;
1807
1808
0
    if (flags & FOR_EACH_OBJECT_LOCAL_ONLY)
1809
0
      break;
1810
0
  }
1811
1812
0
  return 0;
1813
0
}
1814
1815
static int append_loose_object(const struct object_id *oid,
1816
             const char *path UNUSED,
1817
             void *data)
1818
0
{
1819
0
  oidtree_insert(data, oid);
1820
0
  return 0;
1821
0
}
1822
1823
struct oidtree *odb_source_loose_cache(struct odb_source *source,
1824
               const struct object_id *oid)
1825
0
{
1826
0
  int subdir_nr = oid->hash[0];
1827
0
  struct strbuf buf = STRBUF_INIT;
1828
0
  size_t word_bits = bitsizeof(source->loose->subdir_seen[0]);
1829
0
  size_t word_index = subdir_nr / word_bits;
1830
0
  size_t mask = (size_t)1u << (subdir_nr % word_bits);
1831
0
  uint32_t *bitmap;
1832
1833
0
  if (subdir_nr < 0 ||
1834
0
      (size_t) subdir_nr >= bitsizeof(source->loose->subdir_seen))
1835
0
    BUG("subdir_nr out of range");
1836
1837
0
  bitmap = &source->loose->subdir_seen[word_index];
1838
0
  if (*bitmap & mask)
1839
0
    return source->loose->cache;
1840
0
  if (!source->loose->cache) {
1841
0
    ALLOC_ARRAY(source->loose->cache, 1);
1842
0
    oidtree_init(source->loose->cache);
1843
0
  }
1844
0
  strbuf_addstr(&buf, source->path);
1845
0
  for_each_file_in_obj_subdir(subdir_nr, &buf,
1846
0
            source->odb->repo->hash_algo,
1847
0
            append_loose_object,
1848
0
            NULL, NULL,
1849
0
            source->loose->cache);
1850
0
  *bitmap |= mask;
1851
0
  strbuf_release(&buf);
1852
0
  return source->loose->cache;
1853
0
}
1854
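odb_source_loose_cache() above remembers which of the 256 subdirectories it has already scanned with a small bitmap: subdir_nr / word_bits picks the uint32_t word and 1 << (subdir_nr % word_bits) the bit within it. The same arithmetic in isolation, as a minimal sketch:

/*
 * Illustrative sketch only: a 256-bit "subdir seen" bitmap held in
 * eight 32-bit words, mirroring the word/mask math above.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t seen[8];	/* 8 * 32 = 256 bits, one per subdir */

static int test_and_set_subdir(unsigned subdir_nr)
{
	unsigned word_bits = 32;
	unsigned word_index = subdir_nr / word_bits;
	uint32_t mask = (uint32_t)1 << (subdir_nr % word_bits);
	int was_set = !!(seen[word_index] & mask);

	seen[word_index] |= mask;
	return was_set;
}

int main(void)
{
	printf("%d\n", test_and_set_subdir(0xe6)); /* 0: first scan of this subdir */
	printf("%d\n", test_and_set_subdir(0xe6)); /* 1: already cached, skip rescan */
	return 0;
}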
1855
static void odb_source_loose_clear_cache(struct odb_source_loose *loose)
1856
0
{
1857
0
  oidtree_clear(loose->cache);
1858
0
  FREE_AND_NULL(loose->cache);
1859
0
  memset(&loose->subdir_seen, 0,
1860
0
         sizeof(loose->subdir_seen));
1861
0
}
1862
1863
void odb_source_loose_reprepare(struct odb_source *source)
1864
0
{
1865
0
  odb_source_loose_clear_cache(source->loose);
1866
0
}
1867
1868
static int check_stream_oid(git_zstream *stream,
1869
          const char *hdr,
1870
          unsigned long size,
1871
          const char *path,
1872
          const struct object_id *expected_oid,
1873
          const struct git_hash_algo *algop)
1874
0
{
1875
0
  struct git_hash_ctx c;
1876
0
  struct object_id real_oid;
1877
0
  unsigned char buf[4096];
1878
0
  unsigned long total_read;
1879
0
  int status = Z_OK;
1880
1881
0
  algop->init_fn(&c);
1882
0
  git_hash_update(&c, hdr, stream->total_out);
1883
1884
  /*
1885
   * We already read some bytes into hdr, but the ones up to the NUL
1886
   * do not count against the object's content size.
1887
   */
1888
0
  total_read = stream->total_out - strlen(hdr) - 1;
1889
1890
  /*
1891
   * This size comparison must be "<=" to read the final zlib packets;
1892
   * see the comment in unpack_loose_rest for details.
1893
   */
1894
0
  while (total_read <= size &&
1895
0
         (status == Z_OK ||
1896
0
    (status == Z_BUF_ERROR && !stream->avail_out))) {
1897
0
    stream->next_out = buf;
1898
0
    stream->avail_out = sizeof(buf);
1899
0
    if (size - total_read < stream->avail_out)
1900
0
      stream->avail_out = size - total_read;
1901
0
    status = git_inflate(stream, Z_FINISH);
1902
0
    git_hash_update(&c, buf, stream->next_out - buf);
1903
0
    total_read += stream->next_out - buf;
1904
0
  }
1905
1906
0
  if (status != Z_STREAM_END) {
1907
0
    error(_("corrupt loose object '%s'"), oid_to_hex(expected_oid));
1908
0
    return -1;
1909
0
  }
1910
0
  if (stream->avail_in) {
1911
0
    error(_("garbage at end of loose object '%s'"),
1912
0
          oid_to_hex(expected_oid));
1913
0
    return -1;
1914
0
  }
1915
1916
0
  git_hash_final_oid(&real_oid, &c);
1917
0
  if (!oideq(expected_oid, &real_oid)) {
1918
0
    error(_("hash mismatch for %s (expected %s)"), path,
1919
0
          oid_to_hex(expected_oid));
1920
0
    return -1;
1921
0
  }
1922
1923
0
  return 0;
1924
0
}
1925
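check_stream_oid() above verifies large blobs without holding them in memory: it repeatedly inflates into a 4 KiB buffer, feeds each chunk to the incremental hash, and tracks the running total against the declared size. The sketch below mirrors only that chunked, constant-memory pattern with plain stdio; the running XOR "digest" is just a stand-in for git_hash_update()/git_hash_final_oid().

/*
 * Illustrative sketch only: consume a payload in fixed-size chunks,
 * feeding each chunk to an incremental digest, instead of loading the
 * whole (possibly huge) blob at once.
 */
#include <stdio.h>

static unsigned char digest_update(unsigned char d, const unsigned char *buf, size_t n)
{
	for (size_t i = 0; i < n; i++)
		d ^= buf[i];	/* placeholder for a real hash update */
	return d;
}

int main(int argc, char **argv)
{
	unsigned char buf[4096], digest = 0;
	unsigned long total = 0;
	size_t n;
	FILE *fp;

	if (argc < 2 || !(fp = fopen(argv[1], "rb")))
		return 1;
	while ((n = fread(buf, 1, sizeof(buf), fp)) > 0) {
		digest = digest_update(digest, buf, n);
		total += n;	/* compared against the declared size at the end */
	}
	printf("read %lu bytes, digest %02x\n", total, digest);
	fclose(fp);
	return 0;
}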
1926
int read_loose_object(struct repository *repo,
1927
          const char *path,
1928
          const struct object_id *expected_oid,
1929
          struct object_id *real_oid,
1930
          void **contents,
1931
          struct object_info *oi)
1932
0
{
1933
0
  int ret = -1;
1934
0
  int fd;
1935
0
  void *map = NULL;
1936
0
  unsigned long mapsize;
1937
0
  git_zstream stream;
1938
0
  char hdr[MAX_HEADER_LEN];
1939
0
  unsigned long *size = oi->sizep;
1940
1941
0
  fd = git_open(path);
1942
0
  if (fd >= 0)
1943
0
    map = map_fd(fd, path, &mapsize);
1944
0
  if (!map) {
1945
0
    error_errno(_("unable to mmap %s"), path);
1946
0
    goto out;
1947
0
  }
1948
1949
0
  if (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr)) != ULHR_OK) {
1950
0
    error(_("unable to unpack header of %s"), path);
1951
0
    goto out_inflate;
1952
0
  }
1953
1954
0
  if (parse_loose_header(hdr, oi) < 0) {
1955
0
    error(_("unable to parse header of %s"), path);
1956
0
    goto out_inflate;
1957
0
  }
1958
1959
0
  if (*oi->typep < 0) {
1960
0
    error(_("unable to parse type from header '%s' of %s"),
1961
0
          hdr, path);
1962
0
    goto out_inflate;
1963
0
  }
1964
1965
0
  if (*oi->typep == OBJ_BLOB &&
1966
0
      *size > repo_settings_get_big_file_threshold(repo)) {
1967
0
    if (check_stream_oid(&stream, hdr, *size, path, expected_oid,
1968
0
             repo->hash_algo) < 0)
1969
0
      goto out_inflate;
1970
0
  } else {
1971
0
    *contents = unpack_loose_rest(&stream, hdr, *size, expected_oid);
1972
0
    if (!*contents) {
1973
0
      error(_("unable to unpack contents of %s"), path);
1974
0
      goto out_inflate;
1975
0
    }
1976
0
    hash_object_file(repo->hash_algo,
1977
0
         *contents, *size,
1978
0
         *oi->typep, real_oid);
1979
0
    if (!oideq(expected_oid, real_oid))
1980
0
      goto out_inflate;
1981
0
  }
1982
1983
0
  ret = 0; /* everything checks out */
1984
1985
0
out_inflate:
1986
0
  git_inflate_end(&stream);
1987
0
out:
1988
0
  if (map)
1989
0
    munmap(map, mapsize);
1990
0
  return ret;
1991
0
}
1992
1993
static void odb_transaction_files_commit(struct odb_transaction *base)
1994
0
{
1995
0
  struct odb_transaction_files *transaction = (struct odb_transaction_files *)base;
1996
1997
0
  flush_loose_object_transaction(transaction);
1998
0
  flush_packfile_transaction(transaction);
1999
0
}
2000
2001
struct odb_transaction *odb_transaction_files_begin(struct odb_source *source)
2002
0
{
2003
0
  struct odb_transaction_files *transaction;
2004
0
  struct object_database *odb = source->odb;
2005
2006
0
  if (odb->transaction)
2007
0
    return NULL;
2008
2009
0
  transaction = xcalloc(1, sizeof(*transaction));
2010
0
  transaction->base.source = source;
2011
0
  transaction->base.commit = odb_transaction_files_commit;
2012
2013
0
  return &transaction->base;
2014
0
}
2015
2016
struct odb_source_loose *odb_source_loose_new(struct odb_source *source)
2017
0
{
2018
0
  struct odb_source_loose *loose;
2019
0
  CALLOC_ARRAY(loose, 1);
2020
0
  loose->source = source;
2021
0
  return loose;
2022
0
}
2023
2024
void odb_source_loose_free(struct odb_source_loose *loose)
2025
0
{
2026
0
  if (!loose)
2027
0
    return;
2028
0
  odb_source_loose_clear_cache(loose);
2029
0
  loose_object_map_clear(&loose->map);
2030
0
  free(loose);
2031
0
}
2032
2033
struct odb_loose_read_stream {
2034
  struct odb_read_stream base;
2035
  git_zstream z;
2036
  enum {
2037
    ODB_LOOSE_READ_STREAM_INUSE,
2038
    ODB_LOOSE_READ_STREAM_DONE,
2039
    ODB_LOOSE_READ_STREAM_ERROR,
2040
  } z_state;
2041
  void *mapped;
2042
  unsigned long mapsize;
2043
  char hdr[32];
2044
  int hdr_avail;
2045
  int hdr_used;
2046
};
2047
2048
static ssize_t read_istream_loose(struct odb_read_stream *_st, char *buf, size_t sz)
2049
0
{
2050
0
  struct odb_loose_read_stream *st = (struct odb_loose_read_stream *)_st;
2051
0
  size_t total_read = 0;
2052
2053
0
  switch (st->z_state) {
2054
0
  case ODB_LOOSE_READ_STREAM_DONE:
2055
0
    return 0;
2056
0
  case ODB_LOOSE_READ_STREAM_ERROR:
2057
0
    return -1;
2058
0
  default:
2059
0
    break;
2060
0
  }
2061
2062
0
  if (st->hdr_used < st->hdr_avail) {
2063
0
    size_t to_copy = st->hdr_avail - st->hdr_used;
2064
0
    if (sz < to_copy)
2065
0
      to_copy = sz;
2066
0
    memcpy(buf, st->hdr + st->hdr_used, to_copy);
2067
0
    st->hdr_used += to_copy;
2068
0
    total_read += to_copy;
2069
0
  }
2070
2071
0
  while (total_read < sz) {
2072
0
    int status;
2073
2074
0
    st->z.next_out = (unsigned char *)buf + total_read;
2075
0
    st->z.avail_out = sz - total_read;
2076
0
    status = git_inflate(&st->z, Z_FINISH);
2077
2078
0
    total_read = st->z.next_out - (unsigned char *)buf;
2079
2080
0
    if (status == Z_STREAM_END) {
2081
0
      git_inflate_end(&st->z);
2082
0
      st->z_state = ODB_LOOSE_READ_STREAM_DONE;
2083
0
      break;
2084
0
    }
2085
0
    if (status != Z_OK && (status != Z_BUF_ERROR || total_read < sz)) {
2086
0
      git_inflate_end(&st->z);
2087
0
      st->z_state = ODB_LOOSE_READ_STREAM_ERROR;
2088
0
      return -1;
2089
0
    }
2090
0
  }
2091
0
  return total_read;
2092
0
}
2093
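read_istream_loose() above has to serve two sources in order: unpack_loose_header() inevitably inflates a few content bytes past the header NUL into hdr, so the leftovers between hdr_used and hdr_avail are copied out first, and only then does git_inflate() continue on the mapped data. Below is a standalone sketch of that drain-the-buffer-then-read pattern, with a plain FILE * standing in for the zlib stream; all demo_* names are invented.

/*
 * Illustrative sketch only: serve already-buffered bytes before
 * reading from the underlying source.
 */
#include <stdio.h>
#include <string.h>

struct demo_stream {
	char hdr[32];
	int hdr_used, hdr_avail;
	FILE *fp;
};

static size_t demo_read(struct demo_stream *st, char *buf, size_t sz)
{
	size_t total = 0;

	if (st->hdr_used < st->hdr_avail) {		/* leftover buffered bytes */
		size_t n = st->hdr_avail - st->hdr_used;
		if (n > sz)
			n = sz;
		memcpy(buf, st->hdr + st->hdr_used, n);
		st->hdr_used += n;
		total += n;
	}
	if (total < sz)					/* then the underlying source */
		total += fread(buf + total, 1, sz - total, st->fp);
	return total;
}

int main(void)
{
	/* header "blob 5\0" plus two content bytes already inflated */
	struct demo_stream st = { "blob 5\0he", 7, 9, tmpfile() };
	char out[16];
	size_t n;

	if (!st.fp)
		return 1;
	fputs("llo", st.fp);
	rewind(st.fp);
	n = demo_read(&st, out, sizeof(out));
	printf("%.*s\n", (int)n, out);	/* prints "hello" */
	fclose(st.fp);
	return 0;
}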
2094
static int close_istream_loose(struct odb_read_stream *_st)
2095
0
{
2096
0
  struct odb_loose_read_stream *st = (struct odb_loose_read_stream *)_st;
2097
0
  if (st->z_state == ODB_LOOSE_READ_STREAM_INUSE)
2098
0
    git_inflate_end(&st->z);
2099
0
  munmap(st->mapped, st->mapsize);
2100
0
  return 0;
2101
0
}
2102
2103
int odb_source_loose_read_object_stream(struct odb_read_stream **out,
2104
          struct odb_source *source,
2105
          const struct object_id *oid)
2106
0
{
2107
0
  struct object_info oi = OBJECT_INFO_INIT;
2108
0
  struct odb_loose_read_stream *st;
2109
0
  unsigned long mapsize;
2110
0
  void *mapped;
2111
2112
0
  mapped = odb_source_loose_map_object(source, oid, &mapsize);
2113
0
  if (!mapped)
2114
0
    return -1;
2115
2116
  /*
2117
   * Note: we must allocate this structure early even though we may still
2118
   * fail. This is because we need to initialize the zlib stream, and it
2119
   * is not possible to copy the stream around after the fact because it
2120
   * has self-referencing pointers.
2121
   */
2122
0
  CALLOC_ARRAY(st, 1);
2123
2124
0
  switch (unpack_loose_header(&st->z, mapped, mapsize, st->hdr,
2125
0
            sizeof(st->hdr))) {
2126
0
  case ULHR_OK:
2127
0
    break;
2128
0
  case ULHR_BAD:
2129
0
  case ULHR_TOO_LONG:
2130
0
    goto error;
2131
0
  }
2132
2133
0
  oi.sizep = &st->base.size;
2134
0
  oi.typep = &st->base.type;
2135
2136
0
  if (parse_loose_header(st->hdr, &oi) < 0 || st->base.type < 0)
2137
0
    goto error;
2138
2139
0
  st->mapped = mapped;
2140
0
  st->mapsize = mapsize;
2141
0
  st->hdr_used = strlen(st->hdr) + 1;
2142
0
  st->hdr_avail = st->z.total_out;
2143
0
  st->z_state = ODB_LOOSE_READ_STREAM_INUSE;
2144
0
  st->base.close = close_istream_loose;
2145
0
  st->base.read = read_istream_loose;
2146
2147
0
  *out = &st->base;
2148
2149
0
  return 0;
2150
0
error:
2151
0
  git_inflate_end(&st->z);
2152
0
  munmap(st->mapped, st->mapsize);
2153
0
  free(st);
2154
0
  return -1;
2155
0
}
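odb_source_loose_read_object_stream() above wires backend-specific hooks into a generic odb_read_stream base whose callers only ever invoke the read and close function pointers. The sketch below shows the same embedded-base pattern with a trivial in-memory backend instead of the zlib-backed one; the demo_* names are invented for this illustration and are not Git's API.

/*
 * Illustrative sketch only: a base struct with function pointers,
 * embedded first in a concrete backend so it can be downcast.
 */
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_read_stream {
	ssize_t (*read)(struct demo_read_stream *st, char *buf, size_t sz);
	int (*close)(struct demo_read_stream *st);
	unsigned long size;
};

struct demo_mem_stream {
	struct demo_read_stream base;	/* must be first: allows downcasting */
	const char *data;
	size_t pos;
};

static ssize_t mem_read(struct demo_read_stream *base, char *buf, size_t sz)
{
	struct demo_mem_stream *st = (struct demo_mem_stream *)base;
	size_t left = st->base.size - st->pos;

	if (sz > left)
		sz = left;
	memcpy(buf, st->data + st->pos, sz);
	st->pos += sz;
	return sz;
}

static int mem_close(struct demo_read_stream *base)
{
	free(base);
	return 0;
}

int main(void)
{
	struct demo_mem_stream *st = calloc(1, sizeof(*st));
	char buf[4];
	ssize_t n;

	if (!st)
		return 1;
	st->data = "streamed object payload";
	st->base.size = strlen(st->data);
	st->base.read = mem_read;
	st->base.close = mem_close;

	/* callers only ever see the base interface */
	struct demo_read_stream *s = &st->base;
	while ((n = s->read(s, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	putchar('\n');
	return s->close(s);
}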