Coverage Report

Created: 2026-01-09 07:10

/src/git/object-file.c
Every executable line below has an execution count of 0; the file is entirely uncovered. Source listing follows.
/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 *
 * This handles basic git object files - packing, unpacking,
 * creation etc.
 */

#define USE_THE_REPOSITORY_VARIABLE

#include "git-compat-util.h"
#include "convert.h"
#include "dir.h"
#include "environment.h"
#include "fsck.h"
#include "gettext.h"
#include "hex.h"
#include "loose.h"
#include "object-file-convert.h"
#include "object-file.h"
#include "odb.h"
#include "odb/streaming.h"
#include "oidtree.h"
#include "pack.h"
#include "packfile.h"
#include "path.h"
#include "read-cache-ll.h"
#include "setup.h"
#include "tempfile.h"
#include "tmp-objdir.h"

/* The maximum size for an object header. */
#define MAX_HEADER_LEN 32

static int get_conv_flags(unsigned flags)
{
  if (flags & INDEX_RENORMALIZE)
    return CONV_EOL_RENORMALIZE;
  else if (flags & INDEX_WRITE_OBJECT)
    return global_conv_flags_eol | CONV_WRITE_OBJECT;
  else
    return 0;
}

static void fill_loose_path(struct strbuf *buf,
          const struct object_id *oid,
          const struct git_hash_algo *algop)
{
  for (size_t i = 0; i < algop->rawsz; i++) {
    static char hex[] = "0123456789abcdef";
    unsigned int val = oid->hash[i];
    strbuf_addch(buf, hex[val >> 4]);
    strbuf_addch(buf, hex[val & 0xf]);
    if (!i)
      strbuf_addch(buf, '/');
  }
}

const char *odb_loose_path(struct odb_source *source,
         struct strbuf *buf,
         const struct object_id *oid)
{
  strbuf_reset(buf);
  strbuf_addstr(buf, source->path);
  strbuf_addch(buf, '/');
  fill_loose_path(buf, oid, source->odb->repo->hash_algo);
  return buf->buf;
}

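/*
 * Illustrative sketch (hypothetical, not part of object-file.c):
 * fill_loose_path() fans the raw hash out as two hex digits, a '/', and the
 * remaining hex digits, so odb_loose_path() produces paths of the form
 * "<source->path>/3b/18e5..." (38 more digits for SHA-1, 62 for SHA-256).
 */
static const char *example_loose_path(struct odb_source *source,
              const struct object_id *oid)
{
  static struct strbuf buf = STRBUF_INIT;
  /* e.g. ".git/objects/3b/<remaining hex digits>" */
  return odb_loose_path(source, &buf, oid);
}
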
/* Returns 1 if we have successfully freshened the file, 0 otherwise. */
static int freshen_file(const char *fn)
{
  return !utime(fn, NULL);
}

/*
 * All of the check_and_freshen functions return 1 if the file exists and was
 * freshened (if freshening was requested), 0 otherwise. If they return
 * 0, you should not assume that it is safe to skip a write of the object (it
 * either does not exist on disk, or has a stale mtime and may be subject to
 * pruning).
 */
int check_and_freshen_file(const char *fn, int freshen)
{
  if (access(fn, F_OK))
    return 0;
  if (freshen && !freshen_file(fn))
    return 0;
  return 1;
}

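/*
 * Hypothetical caller sketch (not part of object-file.c): per the contract
 * above, a writer may skip re-writing an object only when the existing loose
 * file both exists and had its mtime refreshed, so that it is not pruned
 * shortly after the write is skipped.
 */
static int example_can_skip_write(const char *loose_path)
{
  /* 1: file exists and was freshened; 0: write the object anyway */
  return check_and_freshen_file(loose_path, 1);
}
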
static int check_and_freshen_source(struct odb_source *source,
            const struct object_id *oid,
            int freshen)
{
  static struct strbuf path = STRBUF_INIT;
  odb_loose_path(source, &path, oid);
  return check_and_freshen_file(path.buf, freshen);
}

int odb_source_loose_has_object(struct odb_source *source,
        const struct object_id *oid)
{
  return check_and_freshen_source(source, oid, 0);
}

int format_object_header(char *str, size_t size, enum object_type type,
       size_t objsize)
{
  const char *name = type_name(type);

  if (!name)
    BUG("could not get a type name for 'enum object_type' value %d", type);

  return xsnprintf(str, size, "%s %"PRIuMAX, name, (uintmax_t)objsize) + 1;
}

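/*
 * Worked example (hypothetical, not part of object-file.c): the header is
 * "<type> <decimal size>" plus a trailing NUL, and the returned length
 * includes that NUL byte.
 */
static void example_format_object_header(void)
{
  char hdr[MAX_HEADER_LEN];
  int hdrlen = format_object_header(hdr, sizeof(hdr), OBJ_BLOB, 12);
  /* hdr now holds "blob 12" followed by '\0'; hdrlen == 8 */
  (void)hdrlen;
}
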
int check_object_signature(struct repository *r, const struct object_id *oid,
         void *buf, unsigned long size,
         enum object_type type)
{
  const struct git_hash_algo *algo =
    oid->algo ? &hash_algos[oid->algo] : r->hash_algo;
  struct object_id real_oid;

  hash_object_file(algo, buf, size, type, &real_oid);

  return !oideq(oid, &real_oid) ? -1 : 0;
}

int stream_object_signature(struct repository *r, const struct object_id *oid)
{
  struct object_id real_oid;
  struct odb_read_stream *st;
  struct git_hash_ctx c;
  char hdr[MAX_HEADER_LEN];
  int hdrlen;

  st = odb_read_stream_open(r->objects, oid, NULL);
  if (!st)
    return -1;

  /* Generate the header */
  hdrlen = format_object_header(hdr, sizeof(hdr), st->type, st->size);

  /* Sha1.. */
  r->hash_algo->init_fn(&c);
  git_hash_update(&c, hdr, hdrlen);
  for (;;) {
    char buf[1024 * 16];
    ssize_t readlen = odb_read_stream_read(st, buf, sizeof(buf));

    if (readlen < 0) {
      odb_read_stream_close(st);
      return -1;
    }
    if (!readlen)
      break;
    git_hash_update(&c, buf, readlen);
  }
  git_hash_final_oid(&real_oid, &c);
  odb_read_stream_close(st);
  return !oideq(oid, &real_oid) ? -1 : 0;
}

/*
 * Find "oid" as a loose object in given source.
 * Returns 0 on success, negative on failure.
 *
 * The "path" out-parameter will give the path of the object we found (if any).
 * Note that it may point to static storage and is only valid until another
 * call to stat_loose_object().
 */
static int stat_loose_object(struct odb_source_loose *loose,
           const struct object_id *oid,
           struct stat *st, const char **path)
{
  static struct strbuf buf = STRBUF_INIT;

  *path = odb_loose_path(loose->source, &buf, oid);
  if (!lstat(*path, st))
    return 0;

  return -1;
}

/*
 * Like stat_loose_object(), but actually open the object and return the
 * descriptor. See the caveats on the "path" parameter above.
 */
static int open_loose_object(struct odb_source_loose *loose,
           const struct object_id *oid, const char **path)
{
  static struct strbuf buf = STRBUF_INIT;
  int fd;

  *path = odb_loose_path(loose->source, &buf, oid);
  fd = git_open(*path);
  if (fd >= 0)
    return fd;

  return -1;
}

static int quick_has_loose(struct odb_source_loose *loose,
         const struct object_id *oid)
{
  return !!oidtree_contains(odb_source_loose_cache(loose->source, oid), oid);
}

/*
 * Map and close the given loose object fd. The path argument is used for
 * error reporting.
 */
static void *map_fd(int fd, const char *path, unsigned long *size)
{
  void *map = NULL;
  struct stat st;

  if (!fstat(fd, &st)) {
    *size = xsize_t(st.st_size);
    if (!*size) {
      /* mmap() is forbidden on empty files */
      error(_("object file %s is empty"), path);
      close(fd);
      return NULL;
    }
    map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
  }
  close(fd);
  return map;
}

static void *odb_source_loose_map_object(struct odb_source *source,
           const struct object_id *oid,
           unsigned long *size)
{
  const char *p;
  int fd = open_loose_object(source->loose, oid, &p);

  if (fd < 0)
    return NULL;
  return map_fd(fd, p, size);
}

enum unpack_loose_header_result {
  ULHR_OK,
  ULHR_BAD,
  ULHR_TOO_LONG,
};

/**
 * unpack_loose_header() initializes the data stream needed to unpack
 * a loose object header.
 *
 * Returns:
 *
 * - ULHR_OK on success
 * - ULHR_BAD on error
 * - ULHR_TOO_LONG if the header was too long
 *
 * It will only parse up to MAX_HEADER_LEN bytes.
 */
static enum unpack_loose_header_result unpack_loose_header(git_zstream *stream,
                 unsigned char *map,
                 unsigned long mapsize,
                 void *buffer,
                 unsigned long bufsiz)
{
  int status;

  /* Get the data stream */
  memset(stream, 0, sizeof(*stream));
  stream->next_in = map;
  stream->avail_in = mapsize;
  stream->next_out = buffer;
  stream->avail_out = bufsiz;

  git_inflate_init(stream);
  obj_read_unlock();
  status = git_inflate(stream, 0);
  obj_read_lock();
  if (status != Z_OK && status != Z_STREAM_END)
    return ULHR_BAD;

  /*
   * Check if entire header is unpacked in the first iteration.
   */
  if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
    return ULHR_OK;

  /*
   * We have a header longer than MAX_HEADER_LEN.
   */
  return ULHR_TOO_LONG;
}

static void *unpack_loose_rest(git_zstream *stream,
             void *buffer, unsigned long size,
             const struct object_id *oid)
{
  size_t bytes = strlen(buffer) + 1, n;
  unsigned char *buf = xmallocz(size);
  int status = Z_OK;

  n = stream->total_out - bytes;
  if (n > size)
    n = size;
  memcpy(buf, (char *) buffer + bytes, n);
  bytes = n;
  if (bytes <= size) {
    /*
     * The above condition must be (bytes <= size), not
     * (bytes < size).  In other words, even though we
     * expect no more output and set avail_out to zero,
     * the input zlib stream may have bytes that express
     * "this concludes the stream", and we *do* want to
     * eat that input.
     *
     * Otherwise we would not be able to test that we
     * consumed all the input to reach the expected size;
     * we also want to check that zlib tells us that all
     * went well with status == Z_STREAM_END at the end.
     */
    stream->next_out = buf + bytes;
    stream->avail_out = size - bytes;
    while (status == Z_OK) {
      obj_read_unlock();
      status = git_inflate(stream, Z_FINISH);
      obj_read_lock();
    }
  }

  if (status != Z_STREAM_END) {
    error(_("corrupt loose object '%s'"), oid_to_hex(oid));
    FREE_AND_NULL(buf);
  } else if (stream->avail_in) {
    error(_("garbage at end of loose object '%s'"),
          oid_to_hex(oid));
    FREE_AND_NULL(buf);
  }

  return buf;
}

/*
 * parse_loose_header() parses the starting "<type> <len>\0" of an
 * object. If it doesn't follow that format -1 is returned. To check
 * the validity of the <type> populate the "typep" in the "struct
 * object_info". It will be OBJ_BAD if the object type is unknown. The
 * parsed <len> can be retrieved via "oi->sizep", and from there
 * passed to unpack_loose_rest().
 *
 * We used to just use "sscanf()", but that's actually way
 * too permissive for what we want to check. So do an anal
 * object header parse by hand.
 */
static int parse_loose_header(const char *hdr, struct object_info *oi)
{
  const char *type_buf = hdr;
  size_t size;
  int type, type_len = 0;

  /*
   * The type can be of any size but is followed by
   * a space.
   */
  for (;;) {
    char c = *hdr++;
    if (!c)
      return -1;
    if (c == ' ')
      break;
    type_len++;
  }

  type = type_from_string_gently(type_buf, type_len, 1);
  if (oi->typep)
    *oi->typep = type;

  /*
   * The length must follow immediately, and be in canonical
   * decimal format (ie "010" is not valid).
   */
  size = *hdr++ - '0';
  if (size > 9)
    return -1;
  if (size) {
    for (;;) {
      unsigned long c = *hdr - '0';
      if (c > 9)
        break;
      hdr++;
      size = st_add(st_mult(size, 10), c);
    }
  }

  if (oi->sizep)
    *oi->sizep = cast_size_t_to_ulong(size);

  /*
   * The length must be followed by a zero byte
   */
  if (*hdr)
    return -1;

  /*
   * The format is valid, but the type may still be bogus. The
   * Caller needs to check its oi->typep.
   */
  return 0;
}

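/*
 * Worked example (hypothetical, not part of object-file.c): a loose blob of
 * 1234 bytes starts with the header "blob 1234\0"; parse_loose_header() sets
 * *oi->typep to OBJ_BLOB and *oi->sizep to 1234 and returns 0. Non-canonical
 * lengths such as "blob 0123\0", or a missing space, make it return -1.
 */
static int example_parse_loose_header(struct object_info *oi)
{
  return parse_loose_header("blob 1234", oi);
}
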
int odb_source_loose_read_object_info(struct odb_source *source,
              const struct object_id *oid,
              struct object_info *oi, int flags)
{
  int status = 0;
  int fd;
  unsigned long mapsize;
  const char *path;
  void *map;
  git_zstream stream;
  char hdr[MAX_HEADER_LEN];
  unsigned long size_scratch;
  enum object_type type_scratch;

  if (oi && oi->delta_base_oid)
    oidclr(oi->delta_base_oid, source->odb->repo->hash_algo);

  /*
   * If we don't care about type or size, then we don't
   * need to look inside the object at all. Note that we
   * do not optimize out the stat call, even if the
   * caller doesn't care about the disk-size, since our
   * return value implicitly indicates whether the
   * object even exists.
   */
  if (!oi || (!oi->typep && !oi->sizep && !oi->contentp)) {
    struct stat st;
    if ((!oi || !oi->disk_sizep) && (flags & OBJECT_INFO_QUICK))
      return quick_has_loose(source->loose, oid) ? 0 : -1;
    if (stat_loose_object(source->loose, oid, &st, &path) < 0)
      return -1;
    if (oi && oi->disk_sizep)
      *oi->disk_sizep = st.st_size;
    return 0;
  }

  fd = open_loose_object(source->loose, oid, &path);
  if (fd < 0) {
    if (errno != ENOENT)
      error_errno(_("unable to open loose object %s"), oid_to_hex(oid));
    return -1;
  }
  map = map_fd(fd, path, &mapsize);
  if (!map)
    return -1;

  if (!oi->sizep)
    oi->sizep = &size_scratch;
  if (!oi->typep)
    oi->typep = &type_scratch;

  if (oi->disk_sizep)
    *oi->disk_sizep = mapsize;

  switch (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr))) {
  case ULHR_OK:
    if (parse_loose_header(hdr, oi) < 0)
      status = error(_("unable to parse %s header"), oid_to_hex(oid));
    else if (*oi->typep < 0)
      die(_("invalid object type"));

    if (!oi->contentp)
      break;
    *oi->contentp = unpack_loose_rest(&stream, hdr, *oi->sizep, oid);
    if (*oi->contentp)
      goto cleanup;

    status = -1;
    break;
  case ULHR_BAD:
    status = error(_("unable to unpack %s header"),
             oid_to_hex(oid));
    break;
  case ULHR_TOO_LONG:
    status = error(_("header for %s too long, exceeds %d bytes"),
             oid_to_hex(oid), MAX_HEADER_LEN);
    break;
  }

  if (status && (flags & OBJECT_INFO_DIE_IF_CORRUPT))
    die(_("loose object %s (stored in %s) is corrupt"),
        oid_to_hex(oid), path);

cleanup:
  git_inflate_end(&stream);
  munmap(map, mapsize);
  if (oi->sizep == &size_scratch)
    oi->sizep = NULL;
  if (oi->typep == &type_scratch)
    oi->typep = NULL;
  oi->whence = OI_LOOSE;
  return status;
}

static void hash_object_body(const struct git_hash_algo *algo, struct git_hash_ctx *c,
           const void *buf, unsigned long len,
           struct object_id *oid,
           char *hdr, int *hdrlen)
{
  algo->init_fn(c);
  git_hash_update(c, hdr, *hdrlen);
  git_hash_update(c, buf, len);
  git_hash_final_oid(oid, c);
}

static void write_object_file_prepare(const struct git_hash_algo *algo,
              const void *buf, unsigned long len,
              enum object_type type, struct object_id *oid,
              char *hdr, int *hdrlen)
{
  struct git_hash_ctx c;

  /* Generate the header */
  *hdrlen = format_object_header(hdr, *hdrlen, type, len);

  /* Sha1.. */
  hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen);
}

#define CHECK_COLLISION_DEST_VANISHED -2

static int check_collision(const char *source, const char *dest)
{
  char buf_source[4096], buf_dest[4096];
  int fd_source = -1, fd_dest = -1;
  int ret = 0;

  fd_source = open(source, O_RDONLY);
  if (fd_source < 0) {
    ret = error_errno(_("unable to open %s"), source);
    goto out;
  }

  fd_dest = open(dest, O_RDONLY);
  if (fd_dest < 0) {
    if (errno != ENOENT)
      ret = error_errno(_("unable to open %s"), dest);
    else
      ret = CHECK_COLLISION_DEST_VANISHED;
    goto out;
  }

  while (1) {
    ssize_t sz_a, sz_b;

    sz_a = read_in_full(fd_source, buf_source, sizeof(buf_source));
    if (sz_a < 0) {
      ret = error_errno(_("unable to read %s"), source);
      goto out;
    }

    sz_b = read_in_full(fd_dest, buf_dest, sizeof(buf_dest));
    if (sz_b < 0) {
      ret = error_errno(_("unable to read %s"), dest);
      goto out;
    }

    if (sz_a != sz_b || memcmp(buf_source, buf_dest, sz_a)) {
      ret = error(_("files '%s' and '%s' differ in contents"),
            source, dest);
      goto out;
    }

    if ((size_t) sz_a < sizeof(buf_source))
      break;
  }

out:
  if (fd_source > -1)
    close(fd_source);
  if (fd_dest > -1)
    close(fd_dest);
  return ret;
}

/*
 * Move the just written object into its final resting place.
 */
int finalize_object_file(struct repository *repo,
       const char *tmpfile, const char *filename)
{
  return finalize_object_file_flags(repo, tmpfile, filename, 0);
}

int finalize_object_file_flags(struct repository *repo,
             const char *tmpfile, const char *filename,
             enum finalize_object_file_flags flags)
{
  unsigned retries = 0;
  int ret;

retry:
  ret = 0;

  if (object_creation_mode == OBJECT_CREATION_USES_RENAMES)
    goto try_rename;
  else if (link(tmpfile, filename))
    ret = errno;
  else
    unlink_or_warn(tmpfile);

  /*
   * Coda hack - coda doesn't like cross-directory links,
   * so we fall back to a rename, which will mean that it
   * won't be able to check collisions, but that's not a
   * big deal.
   *
   * The same holds for FAT formatted media.
   *
   * When this succeeds, we just return.  We have nothing
   * left to unlink.
   */
  if (ret && ret != EEXIST) {
    struct stat st;

  try_rename:
    if (!stat(filename, &st))
      ret = EEXIST;
    else if (!rename(tmpfile, filename))
      goto out;
    else
      ret = errno;
  }
  if (ret) {
    if (ret != EEXIST) {
      int saved_errno = errno;
      unlink_or_warn(tmpfile);
      errno = saved_errno;
      return error_errno(_("unable to write file %s"), filename);
    }
    if (!(flags & FOF_SKIP_COLLISION_CHECK)) {
      ret = check_collision(tmpfile, filename);
      if (ret == CHECK_COLLISION_DEST_VANISHED) {
        if (retries++ > 5)
          return error(_("unable to write repeatedly vanishing file %s"),
                 filename);
        goto retry;
      }
      else if (ret)
        return -1;
    }
    unlink_or_warn(tmpfile);
  }

out:
  if (adjust_shared_perm(repo, filename))
    return error(_("unable to set permission to '%s'"), filename);
  return 0;
}

void hash_object_file(const struct git_hash_algo *algo, const void *buf,
          unsigned long len, enum object_type type,
          struct object_id *oid)
{
  char hdr[MAX_HEADER_LEN];
  int hdrlen = sizeof(hdr);

  write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
}

struct transaction_packfile {
  char *pack_tmp_name;
  struct hashfile *f;
  off_t offset;
  struct pack_idx_option pack_idx_opts;

  struct pack_idx_entry **written;
  uint32_t alloc_written;
  uint32_t nr_written;
};

struct odb_transaction {
  struct object_database *odb;

  struct tmp_objdir *objdir;
  struct transaction_packfile packfile;
};

static void prepare_loose_object_transaction(struct odb_transaction *transaction)
{
  /*
   * We lazily create the temporary object directory
   * the first time an object might be added, since
   * callers may not know whether any objects will be
   * added at the time they call object_file_transaction_begin.
   */
  if (!transaction || transaction->objdir)
    return;

  transaction->objdir = tmp_objdir_create(transaction->odb->repo, "bulk-fsync");
  if (transaction->objdir)
    tmp_objdir_replace_primary_odb(transaction->objdir, 0);
}

static void fsync_loose_object_transaction(struct odb_transaction *transaction,
             int fd, const char *filename)
{
  /*
   * If we have an active ODB transaction, we issue a call that
   * cleans the filesystem page cache but avoids a hardware flush
   * command. Later on we will issue a single hardware flush
   * before renaming the objects to their final names as part of
   * flush_batch_fsync.
   */
  if (!transaction || !transaction->objdir ||
      git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
    if (errno == ENOSYS)
      warning(_("core.fsyncMethod = batch is unsupported on this platform"));
    fsync_or_die(fd, filename);
  }
}

/*
 * Cleanup after batch-mode fsync_object_files.
 */
static void flush_loose_object_transaction(struct odb_transaction *transaction)
{
  struct strbuf temp_path = STRBUF_INIT;
  struct tempfile *temp;

  if (!transaction->objdir)
    return;

  /*
   * Issue a full hardware flush against a temporary file to ensure
   * that all objects are durable before any renames occur. The code in
   * fsync_loose_object_transaction has already issued a writeout
   * request, but it has not flushed any writeback cache in the storage
   * hardware or any filesystem logs. This fsync call acts as a barrier
   * to ensure that the data in each new object file is durable before
   * the final name is visible.
   */
  strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX",
        repo_get_object_directory(transaction->odb->repo));
  temp = xmks_tempfile(temp_path.buf);
  fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
  delete_tempfile(&temp);
  strbuf_release(&temp_path);

  /*
   * Make the object files visible in the primary ODB after their data is
   * fully durable.
   */
  tmp_objdir_migrate(transaction->objdir);
  transaction->objdir = NULL;
}

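/*
 * Usage note (assumption, not part of object-file.c): the writeout-only
 * syncs plus the single flush in flush_loose_object_transaction() are what
 * back batched object fsyncing, roughly under a configuration such as
 *
 *   git config core.fsyncMethod batch
 *   git config core.fsync loose-object
 *
 * where each loose object only triggers a pagecache writeback request and
 * one hardware flush is issued before the tmp-objdir is migrated.
 */
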
/* Finalize a file on disk, and close it. */
static void close_loose_object(struct odb_source *source,
             int fd, const char *filename)
{
  if (source->will_destroy)
    goto out;

  if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
    fsync_loose_object_transaction(source->odb->transaction, fd, filename);
  else if (fsync_object_files > 0)
    fsync_or_die(fd, filename);
  else
    fsync_component_or_die(FSYNC_COMPONENT_LOOSE_OBJECT, fd,
               filename);

out:
  if (close(fd) != 0)
    die_errno(_("error when closing loose object file"));
}

/* Size of directory component, including the ending '/' */
static inline int directory_size(const char *filename)
{
  const char *s = strrchr(filename, '/');
  if (!s)
    return 0;
  return s - filename + 1;
}

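/*
 * Worked example (hypothetical, not part of object-file.c):
 * directory_size("ab/tmp_obj_XXXXXX") is 3, the length of "ab/", while a
 * bare filename yields 0, in which case create_tmpfile() below places the
 * temporary file relative to the current directory.
 */
static void example_directory_size(void)
{
  ASSERT(directory_size("ab/tmp_obj_XXXXXX") == 3);
  ASSERT(directory_size("tmp_obj_XXXXXX") == 0);
}
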
/*
 * This creates a temporary file in the same directory as the final
 * 'filename'
 *
 * We want to avoid cross-directory filename renames, because those
 * can have problems on various filesystems (FAT, NFS, Coda).
 */
static int create_tmpfile(struct repository *repo,
        struct strbuf *tmp, const char *filename)
{
  int fd, dirlen = directory_size(filename);

  strbuf_reset(tmp);
  strbuf_add(tmp, filename, dirlen);
  strbuf_addstr(tmp, "tmp_obj_XXXXXX");
  fd = git_mkstemp_mode(tmp->buf, 0444);
  if (fd < 0 && dirlen && errno == ENOENT) {
    /*
     * Make sure the directory exists; note that the contents
     * of the buffer are undefined after mkstemp returns an
     * error, so we have to rewrite the whole buffer from
     * scratch.
     */
    strbuf_reset(tmp);
    strbuf_add(tmp, filename, dirlen - 1);
    if (mkdir(tmp->buf, 0777) && errno != EEXIST)
      return -1;
    if (adjust_shared_perm(repo, tmp->buf))
      return -1;

    /* Try again */
    strbuf_addstr(tmp, "/tmp_obj_XXXXXX");
    fd = git_mkstemp_mode(tmp->buf, 0444);
  }
  return fd;
}

/**
 * Common steps for loose object writers to start writing loose
 * objects:
 *
 * - Create tmpfile for the loose object.
 * - Setup zlib stream for compression.
 * - Start to feed header to zlib stream.
 *
 * Returns a "fd", which should later be provided to
 * end_loose_object_common().
 */
static int start_loose_object_common(struct odb_source *source,
             struct strbuf *tmp_file,
             const char *filename, unsigned flags,
             git_zstream *stream,
             unsigned char *buf, size_t buflen,
             struct git_hash_ctx *c, struct git_hash_ctx *compat_c,
             char *hdr, int hdrlen)
{
  const struct git_hash_algo *algo = source->odb->repo->hash_algo;
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
  int fd;

  fd = create_tmpfile(source->odb->repo, tmp_file, filename);
  if (fd < 0) {
    if (flags & WRITE_OBJECT_SILENT)
      return -1;
    else if (errno == EACCES)
      return error(_("insufficient permission for adding "
               "an object to repository database %s"),
             source->path);
    else
      return error_errno(
        _("unable to create temporary file"));
  }

  /*  Setup zlib stream for compression */
  git_deflate_init(stream, zlib_compression_level);
  stream->next_out = buf;
  stream->avail_out = buflen;
  algo->init_fn(c);
  if (compat && compat_c)
    compat->init_fn(compat_c);

  /*  Start to feed header to zlib stream */
  stream->next_in = (unsigned char *)hdr;
  stream->avail_in = hdrlen;
  while (git_deflate(stream, 0) == Z_OK)
    ; /* nothing */
  git_hash_update(c, hdr, hdrlen);
  if (compat && compat_c)
    git_hash_update(compat_c, hdr, hdrlen);

  return fd;
}

/**
 * Common steps for the inner git_deflate() loop for writing loose
 * objects. Returns what git_deflate() returns.
 */
static int write_loose_object_common(struct odb_source *source,
             struct git_hash_ctx *c, struct git_hash_ctx *compat_c,
             git_zstream *stream, const int flush,
             unsigned char *in0, const int fd,
             unsigned char *compressed,
             const size_t compressed_len)
{
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
  int ret;

  ret = git_deflate(stream, flush ? Z_FINISH : 0);
  git_hash_update(c, in0, stream->next_in - in0);
  if (compat && compat_c)
    git_hash_update(compat_c, in0, stream->next_in - in0);
  if (write_in_full(fd, compressed, stream->next_out - compressed) < 0)
    die_errno(_("unable to write loose object file"));
  stream->next_out = compressed;
  stream->avail_out = compressed_len;

  return ret;
}

/**
 * Common steps for loose object writers to end writing loose objects:
 *
 * - End the compression of zlib stream.
 * - Get the calculated oid to "oid".
 */
static int end_loose_object_common(struct odb_source *source,
           struct git_hash_ctx *c, struct git_hash_ctx *compat_c,
           git_zstream *stream, struct object_id *oid,
           struct object_id *compat_oid)
{
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
  int ret;

  ret = git_deflate_end_gently(stream);
  if (ret != Z_OK)
    return ret;
  git_hash_final_oid(oid, c);
  if (compat && compat_c)
    git_hash_final_oid(compat_oid, compat_c);

  return Z_OK;
}

static int write_loose_object(struct odb_source *source,
            const struct object_id *oid, char *hdr,
            int hdrlen, const void *buf, unsigned long len,
            time_t mtime, unsigned flags)
{
  int fd, ret;
  unsigned char compressed[4096];
  git_zstream stream;
  struct git_hash_ctx c;
  struct object_id parano_oid;
  static struct strbuf tmp_file = STRBUF_INIT;
  static struct strbuf filename = STRBUF_INIT;

  if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
    prepare_loose_object_transaction(source->odb->transaction);

  odb_loose_path(source, &filename, oid);

  fd = start_loose_object_common(source, &tmp_file, filename.buf, flags,
               &stream, compressed, sizeof(compressed),
               &c, NULL, hdr, hdrlen);
  if (fd < 0)
    return -1;

  /* Then the data itself.. */
  stream.next_in = (void *)buf;
  stream.avail_in = len;
  do {
    unsigned char *in0 = stream.next_in;

    ret = write_loose_object_common(source, &c, NULL, &stream, 1, in0, fd,
            compressed, sizeof(compressed));
  } while (ret == Z_OK);

  if (ret != Z_STREAM_END)
    die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid),
        ret);
  ret = end_loose_object_common(source, &c, NULL, &stream, &parano_oid, NULL);
  if (ret != Z_OK)
    die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
        ret);
  if (!oideq(oid, &parano_oid))
    die(_("confused by unstable object source data for %s"),
        oid_to_hex(oid));

  close_loose_object(source, fd, tmp_file.buf);

  if (mtime) {
    struct utimbuf utb;
    utb.actime = mtime;
    utb.modtime = mtime;
    if (utime(tmp_file.buf, &utb) < 0 &&
        !(flags & WRITE_OBJECT_SILENT))
      warning_errno(_("failed utime() on %s"), tmp_file.buf);
  }

  return finalize_object_file_flags(source->odb->repo, tmp_file.buf, filename.buf,
            FOF_SKIP_COLLISION_CHECK);
}

int odb_source_loose_freshen_object(struct odb_source *source,
            const struct object_id *oid)
{
  return !!check_and_freshen_source(source, oid, 1);
}

int odb_source_loose_write_stream(struct odb_source *source,
          struct odb_write_stream *in_stream, size_t len,
          struct object_id *oid)
{
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
  struct object_id compat_oid;
  int fd, ret, err = 0, flush = 0;
  unsigned char compressed[4096];
  git_zstream stream;
  struct git_hash_ctx c, compat_c;
  struct strbuf tmp_file = STRBUF_INIT;
  struct strbuf filename = STRBUF_INIT;
  int dirlen;
  char hdr[MAX_HEADER_LEN];
  int hdrlen;

  if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
    prepare_loose_object_transaction(source->odb->transaction);

  /* Since oid is not determined, save tmp file to odb path. */
  strbuf_addf(&filename, "%s/", source->path);
  hdrlen = format_object_header(hdr, sizeof(hdr), OBJ_BLOB, len);

  /*
   * Common steps for write_loose_object and stream_loose_object to
   * start writing loose objects:
   *
   *  - Create tmpfile for the loose object.
   *  - Setup zlib stream for compression.
   *  - Start to feed header to zlib stream.
   */
  fd = start_loose_object_common(source, &tmp_file, filename.buf, 0,
               &stream, compressed, sizeof(compressed),
               &c, &compat_c, hdr, hdrlen);
  if (fd < 0) {
    err = -1;
    goto cleanup;
  }

  /* Then the data itself.. */
  do {
    unsigned char *in0 = stream.next_in;

    if (!stream.avail_in && !in_stream->is_finished) {
      const void *in = in_stream->read(in_stream, &stream.avail_in);
      stream.next_in = (void *)in;
      in0 = (unsigned char *)in;
      /* All data has been read. */
      if (in_stream->is_finished)
        flush = 1;
    }
    ret = write_loose_object_common(source, &c, &compat_c, &stream, flush, in0, fd,
            compressed, sizeof(compressed));
    /*
     * Unlike write_loose_object(), we do not have the entire
     * buffer. If we get Z_BUF_ERROR due to too few input bytes,
     * then we'll replenish them in the next input_stream->read()
     * call when we loop.
     */
  } while (ret == Z_OK || ret == Z_BUF_ERROR);

  if (stream.total_in != len + hdrlen)
    die(_("write stream object %ld != %"PRIuMAX), stream.total_in,
        (uintmax_t)len + hdrlen);

  /*
   * Common steps for write_loose_object and stream_loose_object to
   * end writing loose object:
   *
   *  - End the compression of zlib stream.
   *  - Get the calculated oid.
   */
  if (ret != Z_STREAM_END)
    die(_("unable to stream deflate new object (%d)"), ret);
  ret = end_loose_object_common(source, &c, &compat_c, &stream, oid, &compat_oid);
  if (ret != Z_OK)
    die(_("deflateEnd on stream object failed (%d)"), ret);
  close_loose_object(source, fd, tmp_file.buf);

  if (odb_freshen_object(source->odb, oid)) {
    unlink_or_warn(tmp_file.buf);
    goto cleanup;
  }
  odb_loose_path(source, &filename, oid);

  /* We finally know the object path, and create the missing dir. */
  dirlen = directory_size(filename.buf);
  if (dirlen) {
    struct strbuf dir = STRBUF_INIT;
    strbuf_add(&dir, filename.buf, dirlen);

    if (safe_create_dir_in_gitdir(source->odb->repo, dir.buf) &&
        errno != EEXIST) {
      err = error_errno(_("unable to create directory %s"), dir.buf);
      strbuf_release(&dir);
      goto cleanup;
    }
    strbuf_release(&dir);
  }

  err = finalize_object_file_flags(source->odb->repo, tmp_file.buf, filename.buf,
           FOF_SKIP_COLLISION_CHECK);
  if (!err && compat)
    err = repo_add_loose_object_map(source, oid, &compat_oid);
cleanup:
  strbuf_release(&tmp_file);
  strbuf_release(&filename);
  return err;
}

int odb_source_loose_write_object(struct odb_source *source,
          const void *buf, unsigned long len,
          enum object_type type, struct object_id *oid,
          struct object_id *compat_oid_in, unsigned flags)
{
  const struct git_hash_algo *algo = source->odb->repo->hash_algo;
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
  struct object_id compat_oid;
  char hdr[MAX_HEADER_LEN];
  int hdrlen = sizeof(hdr);

  /* Generate compat_oid */
  if (compat) {
    if (compat_oid_in)
      oidcpy(&compat_oid, compat_oid_in);
    else if (type == OBJ_BLOB)
      hash_object_file(compat, buf, len, type, &compat_oid);
    else {
      struct strbuf converted = STRBUF_INIT;
      convert_object_file(source->odb->repo, &converted, algo, compat,
              buf, len, type, 0);
      hash_object_file(compat, converted.buf, converted.len,
           type, &compat_oid);
      strbuf_release(&converted);
    }
  }

  /* Normally if we have it in the pack then we do not bother writing
   * it out into .git/objects/??/?{38} file.
   */
  write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
  if (odb_freshen_object(source->odb, oid))
    return 0;
  if (write_loose_object(source, oid, hdr, hdrlen, buf, len, 0, flags))
    return -1;
  if (compat)
    return repo_add_loose_object_map(source, oid, &compat_oid);
  return 0;
}

int force_object_loose(struct odb_source *source,
           const struct object_id *oid, time_t mtime)
{
  const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo;
  void *buf;
  unsigned long len;
  struct object_info oi = OBJECT_INFO_INIT;
  struct object_id compat_oid;
  enum object_type type;
  char hdr[MAX_HEADER_LEN];
  int hdrlen;
  int ret;

  for (struct odb_source *s = source->odb->sources; s; s = s->next)
    if (odb_source_loose_has_object(s, oid))
      return 0;

  oi.typep = &type;
  oi.sizep = &len;
  oi.contentp = &buf;
  if (odb_read_object_info_extended(source->odb, oid, &oi, 0))
    return error(_("cannot read object for %s"), oid_to_hex(oid));
  if (compat) {
    if (repo_oid_to_algop(source->odb->repo, oid, compat, &compat_oid))
      return error(_("cannot map object %s to %s"),
             oid_to_hex(oid), compat->name);
  }
  hdrlen = format_object_header(hdr, sizeof(hdr), type, len);
  ret = write_loose_object(source, oid, hdr, hdrlen, buf, len, mtime, 0);
  if (!ret && compat)
    ret = repo_add_loose_object_map(source, oid, &compat_oid);
  free(buf);

  return ret;
}

/*
 * We can't use the normal fsck_error_function() for index_mem(),
 * because we don't yet have a valid oid for it to report. Instead,
 * report the minimal fsck error here, and rely on the caller to
 * give more context.
 */
static int hash_format_check_report(struct fsck_options *opts UNUSED,
            void *fsck_report UNUSED,
            enum fsck_msg_type msg_type UNUSED,
            enum fsck_msg_id msg_id UNUSED,
            const char *message)
{
  error(_("object fails fsck: %s"), message);
  return 1;
}

static int index_mem(struct index_state *istate,
         struct object_id *oid,
         const void *buf, size_t size,
         enum object_type type,
         const char *path, unsigned flags)
{
  struct strbuf nbuf = STRBUF_INIT;
  int ret = 0;
  int write_object = flags & INDEX_WRITE_OBJECT;

  if (!type)
    type = OBJ_BLOB;

  /*
   * Convert blobs to git internal format
   */
  if ((type == OBJ_BLOB) && path) {
    if (convert_to_git(istate, path, buf, size, &nbuf,
           get_conv_flags(flags))) {
      buf = nbuf.buf;
      size = nbuf.len;
    }
  }
  if (flags & INDEX_FORMAT_CHECK) {
    struct fsck_options opts = FSCK_OPTIONS_DEFAULT;

    opts.strict = 1;
    opts.error_func = hash_format_check_report;
    if (fsck_buffer(null_oid(istate->repo->hash_algo), type, buf, size, &opts))
      die(_("refusing to create malformed object"));
    fsck_finish(&opts);
  }

  if (write_object)
    ret = odb_write_object(istate->repo->objects, buf, size, type, oid);
  else
    hash_object_file(istate->repo->hash_algo, buf, size, type, oid);

  strbuf_release(&nbuf);
  return ret;
}

static int index_stream_convert_blob(struct index_state *istate,
             struct object_id *oid,
             int fd,
             const char *path,
             unsigned flags)
{
  int ret = 0;
  const int write_object = flags & INDEX_WRITE_OBJECT;
  struct strbuf sbuf = STRBUF_INIT;

  assert(path);
  ASSERT(would_convert_to_git_filter_fd(istate, path));

  convert_to_git_filter_fd(istate, path, fd, &sbuf,
         get_conv_flags(flags));

  if (write_object)
    ret = odb_write_object(istate->repo->objects, sbuf.buf, sbuf.len, OBJ_BLOB,
               oid);
  else
    hash_object_file(istate->repo->hash_algo, sbuf.buf, sbuf.len, OBJ_BLOB,
         oid);
  strbuf_release(&sbuf);
  return ret;
}

static int index_pipe(struct index_state *istate, struct object_id *oid,
          int fd, enum object_type type,
          const char *path, unsigned flags)
{
  struct strbuf sbuf = STRBUF_INIT;
  int ret;

  if (strbuf_read(&sbuf, fd, 4096) >= 0)
    ret = index_mem(istate, oid, sbuf.buf, sbuf.len, type, path, flags);
  else
    ret = -1;
  strbuf_release(&sbuf);
  return ret;
}

#define SMALL_FILE_SIZE (32*1024)

static int index_core(struct index_state *istate,
          struct object_id *oid, int fd, size_t size,
          enum object_type type, const char *path,
          unsigned flags)
{
  int ret;

  if (!size) {
    ret = index_mem(istate, oid, "", size, type, path, flags);
  } else if (size <= SMALL_FILE_SIZE) {
    char *buf = xmalloc(size);
    ssize_t read_result = read_in_full(fd, buf, size);
    if (read_result < 0)
      ret = error_errno(_("read error while indexing %s"),
            path ? path : "<unknown>");
    else if ((size_t) read_result != size)
      ret = error(_("short read while indexing %s"),
            path ? path : "<unknown>");
    else
      ret = index_mem(istate, oid, buf, size, type, path, flags);
    free(buf);
  } else {
    void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
    ret = index_mem(istate, oid, buf, size, type, path, flags);
    munmap(buf, size);
  }
  return ret;
}

static int already_written(struct odb_transaction *transaction,
         struct object_id *oid)
{
  /* The object may already exist in the repository */
  if (odb_has_object(transaction->odb, oid,
         HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
    return 1;

  /* Might want to keep the list sorted */
  for (uint32_t i = 0; i < transaction->packfile.nr_written; i++)
    if (oideq(&transaction->packfile.written[i]->oid, oid))
      return 1;

  /* This is a new object we need to keep */
  return 0;
}

/* Lazily create backing packfile for the state */
static void prepare_packfile_transaction(struct odb_transaction *transaction,
           unsigned flags)
{
  struct transaction_packfile *state = &transaction->packfile;
  if (!(flags & INDEX_WRITE_OBJECT) || state->f)
    return;

  state->f = create_tmp_packfile(transaction->odb->repo,
               &state->pack_tmp_name);
  reset_pack_idx_option(&state->pack_idx_opts);

  /* Pretend we are going to write only one object */
  state->offset = write_pack_header(state->f, 1);
  if (!state->offset)
    die_errno("unable to write pack header");
}

/*
 * Read the contents from fd for size bytes, streaming it to the
 * packfile in state while updating the hash in ctx. Signal a failure
 * by returning a negative value when the resulting pack would exceed
 * the pack size limit and this is not the first object in the pack,
 * so that the caller can discard what we wrote from the current pack
 * by truncating it and opening a new one. The caller will then call
 * us again after rewinding the input fd.
 *
 * The already_hashed_to pointer is kept untouched by the caller to
 * make sure we do not hash the same byte when we are called
 * again. This way, the caller does not have to checkpoint its hash
 * status before calling us just in case we ask it to call us again
 * with a new pack.
 */
static int stream_blob_to_pack(struct transaction_packfile *state,
             struct git_hash_ctx *ctx, off_t *already_hashed_to,
             int fd, size_t size, const char *path,
             unsigned flags)
{
  git_zstream s;
  unsigned char ibuf[16384];
  unsigned char obuf[16384];
  unsigned hdrlen;
  int status = Z_OK;
  int write_object = (flags & INDEX_WRITE_OBJECT);
  off_t offset = 0;

  git_deflate_init(&s, pack_compression_level);

  hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), OBJ_BLOB, size);
  s.next_out = obuf + hdrlen;
  s.avail_out = sizeof(obuf) - hdrlen;

  while (status != Z_STREAM_END) {
    if (size && !s.avail_in) {
      size_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
      ssize_t read_result = read_in_full(fd, ibuf, rsize);
      if (read_result < 0)
        die_errno("failed to read from '%s'", path);
      if ((size_t)read_result != rsize)
        die("failed to read %u bytes from '%s'",
            (unsigned)rsize, path);
      offset += rsize;
      if (*already_hashed_to < offset) {
        size_t hsize = offset - *already_hashed_to;
        if (rsize < hsize)
          hsize = rsize;
        if (hsize)
          git_hash_update(ctx, ibuf, hsize);
        *already_hashed_to = offset;
      }
      s.next_in = ibuf;
      s.avail_in = rsize;
      size -= rsize;
    }

    status = git_deflate(&s, size ? 0 : Z_FINISH);

    if (!s.avail_out || status == Z_STREAM_END) {
      if (write_object) {
        size_t written = s.next_out - obuf;

        /* would we bust the size limit? */
        if (state->nr_written &&
            pack_size_limit_cfg &&
            pack_size_limit_cfg < state->offset + written) {
          git_deflate_abort(&s);
          return -1;
        }

        hashwrite(state->f, obuf, written);
        state->offset += written;
      }
      s.next_out = obuf;
      s.avail_out = sizeof(obuf);
    }

    switch (status) {
    case Z_OK:
    case Z_BUF_ERROR:
    case Z_STREAM_END:
      continue;
    default:
      die("unexpected deflate failure: %d", status);
    }
  }
  git_deflate_end(&s);
  return 0;
}

1442
static void flush_packfile_transaction(struct odb_transaction *transaction)
1443
0
{
1444
0
  struct transaction_packfile *state = &transaction->packfile;
1445
0
  struct repository *repo = transaction->odb->repo;
1446
0
  unsigned char hash[GIT_MAX_RAWSZ];
1447
0
  struct strbuf packname = STRBUF_INIT;
1448
0
  char *idx_tmp_name = NULL;
1449
1450
0
  if (!state->f)
1451
0
    return;
1452
1453
0
  if (state->nr_written == 0) {
1454
0
    close(state->f->fd);
1455
0
    free_hashfile(state->f);
1456
0
    unlink(state->pack_tmp_name);
1457
0
    goto clear_exit;
1458
0
  } else if (state->nr_written == 1) {
1459
0
    finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK,
1460
0
          CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
1461
0
  } else {
1462
0
    int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0);
1463
0
    fixup_pack_header_footer(repo->hash_algo, fd, hash, state->pack_tmp_name,
1464
0
           state->nr_written, hash,
1465
0
           state->offset);
1466
0
    close(fd);
1467
0
  }
1468
1469
0
  strbuf_addf(&packname, "%s/pack/pack-%s.",
1470
0
        repo_get_object_directory(transaction->odb->repo),
1471
0
        hash_to_hex_algop(hash, repo->hash_algo));
1472
1473
0
  stage_tmp_packfiles(repo, &packname, state->pack_tmp_name,
1474
0
          state->written, state->nr_written, NULL,
1475
0
          &state->pack_idx_opts, hash, &idx_tmp_name);
1476
0
  rename_tmp_packfile_idx(repo, &packname, &idx_tmp_name);
1477
1478
0
  for (uint32_t i = 0; i < state->nr_written; i++)
1479
0
    free(state->written[i]);
1480
1481
0
clear_exit:
1482
0
  free(idx_tmp_name);
1483
0
  free(state->pack_tmp_name);
1484
0
  free(state->written);
1485
0
  memset(state, 0, sizeof(*state));
1486
1487
0
  strbuf_release(&packname);
1488
  /* Make objects we just wrote available to ourselves */
1489
0
  odb_reprepare(repo->objects);
1490
0
}
1491
1492
/*
1493
 * This writes the specified object to a packfile. Objects written here
1494
 * during the same transaction are written to the same packfile. The
1495
 * packfile is not flushed until the transaction is flushed. The caller
1496
 * is expected to ensure a valid transaction is setup for objects to be
1497
 * recorded to.
1498
 *
1499
 * This also bypasses the usual "convert-to-git" dance, and that is on
1500
 * purpose. We could write a streaming version of the converting
1501
 * functions and insert that before feeding the data to fast-import
1502
 * (or equivalent in-core API described above). However, that is
1503
 * somewhat complicated, as we do not know the size of the filter
1504
 * result, which we need to know beforehand when writing a git object.
1505
 * Since the primary motivation for trying to stream from the working
1506
 * tree file and to avoid mmaping it in core is to deal with large
1507
 * binary blobs, they generally do not want to get any conversion, and
1508
 * callers should avoid this code path when filters are requested.
1509
 */
1510
static int index_blob_packfile_transaction(struct odb_transaction *transaction,
1511
             struct object_id *result_oid, int fd,
1512
             size_t size, const char *path,
1513
             unsigned flags)
1514
0
{
1515
0
  struct transaction_packfile *state = &transaction->packfile;
1516
0
  off_t seekback, already_hashed_to;
1517
0
  struct git_hash_ctx ctx;
1518
0
  unsigned char obuf[16384];
1519
0
  unsigned header_len;
1520
0
  struct hashfile_checkpoint checkpoint;
1521
0
  struct pack_idx_entry *idx = NULL;
1522
1523
0
  seekback = lseek(fd, 0, SEEK_CUR);
1524
0
  if (seekback == (off_t)-1)
1525
0
    return error("cannot find the current offset");
1526
1527
0
  header_len = format_object_header((char *)obuf, sizeof(obuf),
1528
0
            OBJ_BLOB, size);
1529
0
  transaction->odb->repo->hash_algo->init_fn(&ctx);
1530
0
  git_hash_update(&ctx, obuf, header_len);
1531
1532
  /* Note: idx is non-NULL when we are writing */
1533
0
  if ((flags & INDEX_WRITE_OBJECT) != 0) {
1534
0
    CALLOC_ARRAY(idx, 1);
1535
1536
0
    prepare_packfile_transaction(transaction, flags);
1537
0
    hashfile_checkpoint_init(state->f, &checkpoint);
1538
0
  }
1539
1540
0
  already_hashed_to = 0;
1541
1542
0
  while (1) {
1543
0
    prepare_packfile_transaction(transaction, flags);
1544
0
    if (idx) {
1545
0
      hashfile_checkpoint(state->f, &checkpoint);
1546
0
      idx->offset = state->offset;
1547
0
      crc32_begin(state->f);
1548
0
    }
1549
0
    if (!stream_blob_to_pack(state, &ctx, &already_hashed_to,
1550
0
           fd, size, path, flags))
1551
0
      break;
1552
    /*
1553
     * Writing this object to the current pack will make
1554
     * it too big; we need to truncate it, start a new
1555
     * pack, and write into it.
1556
     */
1557
0
    if (!idx)
1558
0
      BUG("should not happen");
1559
0
    hashfile_truncate(state->f, &checkpoint);
1560
0
    state->offset = checkpoint.offset;
1561
0
    flush_packfile_transaction(transaction);
1562
0
    if (lseek(fd, seekback, SEEK_SET) == (off_t)-1)
1563
0
      return error("cannot seek back");
1564
0
  }
1565
0
  git_hash_final_oid(result_oid, &ctx);
1566
0
  if (!idx)
1567
0
    return 0;
1568
1569
0
  idx->crc32 = crc32_end(state->f);
1570
0
  if (already_written(transaction, result_oid)) {
1571
0
    hashfile_truncate(state->f, &checkpoint);
1572
0
    state->offset = checkpoint.offset;
1573
0
    free(idx);
1574
0
  } else {
1575
0
    oidcpy(&idx->oid, result_oid);
1576
0
    ALLOC_GROW(state->written,
1577
0
         state->nr_written + 1,
1578
0
         state->alloc_written);
1579
0
    state->written[state->nr_written++] = idx;
1580
0
  }
1581
0
  return 0;
1582
0
}
1583
1584
int index_fd(struct index_state *istate, struct object_id *oid,
1585
       int fd, struct stat *st,
1586
       enum object_type type, const char *path, unsigned flags)
1587
0
{
1588
0
  int ret;
1589
1590
  /*
1591
   * Call xsize_t() only when needed to avoid potentially unnecessary
1592
   * die() for large files.
1593
   */
1594
0
  if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(istate, path)) {
1595
0
    ret = index_stream_convert_blob(istate, oid, fd, path, flags);
1596
0
  } else if (!S_ISREG(st->st_mode)) {
1597
0
    ret = index_pipe(istate, oid, fd, type, path, flags);
1598
0
  } else if ((st->st_size >= 0 &&
1599
0
        (size_t)st->st_size <= repo_settings_get_big_file_threshold(istate->repo)) ||
1600
0
       type != OBJ_BLOB ||
1601
0
       (path && would_convert_to_git(istate, path))) {
1602
0
    ret = index_core(istate, oid, fd, xsize_t(st->st_size),
1603
0
         type, path, flags);
1604
0
  } else {
1605
0
    struct odb_transaction *transaction;
1606
1607
0
    transaction = odb_transaction_begin(the_repository->objects);
1608
0
    ret = index_blob_packfile_transaction(the_repository->objects->transaction,
1609
0
                  oid, fd,
1610
0
                  xsize_t(st->st_size),
1611
0
                  path, flags);
1612
0
    odb_transaction_commit(transaction);
1613
0
  }
1614
1615
0
  close(fd);
1616
0
  return ret;
1617
0
}
1618
1619
int index_path(struct index_state *istate, struct object_id *oid,
1620
         const char *path, struct stat *st, unsigned flags)
1621
0
{
1622
0
  int fd;
1623
0
  struct strbuf sb = STRBUF_INIT;
1624
0
  int rc = 0;
1625
1626
0
  switch (st->st_mode & S_IFMT) {
1627
0
  case S_IFREG:
1628
0
    fd = open(path, O_RDONLY);
1629
0
    if (fd < 0)
1630
0
      return error_errno("open(\"%s\")", path);
1631
0
    if (index_fd(istate, oid, fd, st, OBJ_BLOB, path, flags) < 0)
1632
0
      return error(_("%s: failed to insert into database"),
1633
0
             path);
1634
0
    break;
1635
0
  case S_IFLNK:
1636
0
    if (strbuf_readlink(&sb, path, st->st_size))
1637
0
      return error_errno("readlink(\"%s\")", path);
1638
0
    if (!(flags & INDEX_WRITE_OBJECT))
1639
0
      hash_object_file(istate->repo->hash_algo, sb.buf, sb.len,
1640
0
           OBJ_BLOB, oid);
1641
0
    else if (odb_write_object(istate->repo->objects, sb.buf, sb.len, OBJ_BLOB, oid))
1642
0
      rc = error(_("%s: failed to insert into database"), path);
1643
0
    strbuf_release(&sb);
1644
0
    break;
1645
0
  case S_IFDIR:
1646
0
    if (repo_resolve_gitlink_ref(istate->repo, path, "HEAD", oid))
1647
0
      return error(_("'%s' does not have a commit checked out"), path);
1648
0
    if (&hash_algos[oid->algo] != istate->repo->hash_algo)
1649
0
      return error(_("cannot add a submodule of a different hash algorithm"));
1650
0
    break;
1651
0
  default:
1652
0
    return error(_("%s: unsupported file type"), path);
1653
0
  }
1654
0
  return rc;
1655
0
}
1656
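
A usage sketch for index_path(): hash a work-tree entry into the object database and print the resulting ID. It assumes the same includes as this file and an initialized index_state, and keeps error handling to a minimum.

static int hash_and_store_path(struct index_state *istate, const char *path)
{
	struct object_id oid;
	struct stat st;

	if (lstat(path, &st) < 0)
		return error_errno("lstat(\"%s\")", path);
	if (index_path(istate, &oid, path, &st, INDEX_WRITE_OBJECT) < 0)
		return -1;
	printf("%s\n", oid_to_hex(&oid));
	return 0;
}
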
1657
int read_pack_header(int fd, struct pack_header *header)
1658
0
{
1659
0
  if (read_in_full(fd, header, sizeof(*header)) != sizeof(*header))
1660
    /* "eof before pack header was fully read" */
1661
0
    return PH_ERROR_EOF;
1662
1663
0
  if (header->hdr_signature != htonl(PACK_SIGNATURE))
1664
    /* "protocol error (pack signature mismatch detected)" */
1665
0
    return PH_ERROR_PACK_SIGNATURE;
1666
0
  if (!pack_version_ok(header->hdr_version))
1667
    /* "protocol error (pack version unsupported)" */
1668
0
    return PH_ERROR_PROTOCOL;
1669
0
  return 0;
1670
0
}
1671
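
A sketch of driving read_pack_header() and mapping its PH_ERROR_* results back to the messages quoted in the comments above, assuming struct pack_header exposes the usual hdr_version/hdr_entries fields from pack.h (both stored in network byte order).

static int report_pack_header(int fd)
{
	struct pack_header hdr;

	switch (read_pack_header(fd, &hdr)) {
	case 0:
		printf("pack v%"PRIu32" with %"PRIu32" objects\n",
		       ntohl(hdr.hdr_version), ntohl(hdr.hdr_entries));
		return 0;
	case PH_ERROR_EOF:
		return error(_("eof before pack header was fully read"));
	case PH_ERROR_PACK_SIGNATURE:
		return error(_("protocol error (pack signature mismatch detected)"));
	default:
		return error(_("protocol error (pack version unsupported)"));
	}
}
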
1672
static int for_each_file_in_obj_subdir(unsigned int subdir_nr,
1673
               struct strbuf *path,
1674
               const struct git_hash_algo *algop,
1675
               each_loose_object_fn obj_cb,
1676
               each_loose_cruft_fn cruft_cb,
1677
               each_loose_subdir_fn subdir_cb,
1678
               void *data)
1679
0
{
1680
0
  size_t origlen, baselen;
1681
0
  DIR *dir;
1682
0
  struct dirent *de;
1683
0
  int r = 0;
1684
0
  struct object_id oid;
1685
1686
0
  if (subdir_nr > 0xff)
1687
0
    BUG("invalid loose object subdirectory: %x", subdir_nr);
1688
1689
0
  origlen = path->len;
1690
0
  strbuf_complete(path, '/');
1691
0
  strbuf_addf(path, "%02x", subdir_nr);
1692
1693
0
  dir = opendir(path->buf);
1694
0
  if (!dir) {
1695
0
    if (errno != ENOENT)
1696
0
      r = error_errno(_("unable to open %s"), path->buf);
1697
0
    strbuf_setlen(path, origlen);
1698
0
    return r;
1699
0
  }
1700
1701
0
  oid.hash[0] = subdir_nr;
1702
0
  strbuf_addch(path, '/');
1703
0
  baselen = path->len;
1704
1705
0
  while ((de = readdir_skip_dot_and_dotdot(dir))) {
1706
0
    size_t namelen;
1707
1708
0
    namelen = strlen(de->d_name);
1709
0
    strbuf_setlen(path, baselen);
1710
0
    strbuf_add(path, de->d_name, namelen);
1711
0
    if (namelen == algop->hexsz - 2 &&
1712
0
        !hex_to_bytes(oid.hash + 1, de->d_name,
1713
0
          algop->rawsz - 1)) {
1714
0
      oid_set_algo(&oid, algop);
1715
0
      memset(oid.hash + algop->rawsz, 0,
1716
0
             GIT_MAX_RAWSZ - algop->rawsz);
1717
0
      if (obj_cb) {
1718
0
        r = obj_cb(&oid, path->buf, data);
1719
0
        if (r)
1720
0
          break;
1721
0
      }
1722
0
      continue;
1723
0
    }
1724
1725
0
    if (cruft_cb) {
1726
0
      r = cruft_cb(de->d_name, path->buf, data);
1727
0
      if (r)
1728
0
        break;
1729
0
    }
1730
0
  }
1731
0
  closedir(dir);
1732
1733
0
  strbuf_setlen(path, baselen - 1);
1734
0
  if (!r && subdir_cb)
1735
0
    r = subdir_cb(subdir_nr, path->buf, data);
1736
1737
0
  strbuf_setlen(path, origlen);
1738
1739
0
  return r;
1740
0
}
1741
1742
int for_each_loose_file_in_source(struct odb_source *source,
1743
          each_loose_object_fn obj_cb,
1744
          each_loose_cruft_fn cruft_cb,
1745
          each_loose_subdir_fn subdir_cb,
1746
          void *data)
1747
0
{
1748
0
  struct strbuf buf = STRBUF_INIT;
1749
0
  int r;
1750
1751
0
  strbuf_addstr(&buf, source->path);
1752
0
  for (int i = 0; i < 256; i++) {
1753
0
    r = for_each_file_in_obj_subdir(i, &buf, source->odb->repo->hash_algo,
1754
0
            obj_cb, cruft_cb, subdir_cb, data);
1755
0
    if (r)
1756
0
      break;
1757
0
  }
1758
1759
0
  strbuf_release(&buf);
1760
0
  return r;
1761
0
}
1762
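
A sketch of the cruft callback of for_each_loose_file_in_source(): warn about files in the loose-object subdirectories whose names do not parse as object hashes. The callback signature is inferred from the call site in for_each_file_in_obj_subdir() above; warning() is the usual usage.h helper.

static int report_cruft(const char *basename, const char *path,
			void *data UNUSED)
{
	warning(_("unexpected file in object directory: %s (%s)"),
		basename, path);
	return 0; /* keep scanning */
}

static int scan_source_for_cruft(struct odb_source *source)
{
	return for_each_loose_file_in_source(source, NULL, report_cruft,
					     NULL, NULL);
}
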
1763
int for_each_loose_object(struct object_database *odb,
1764
        each_loose_object_fn cb, void *data,
1765
        enum for_each_object_flags flags)
1766
0
{
1767
0
  struct odb_source *source;
1768
1769
0
  odb_prepare_alternates(odb);
1770
0
  for (source = odb->sources; source; source = source->next) {
1771
0
    int r = for_each_loose_file_in_source(source, cb, NULL,
1772
0
                  NULL, data);
1773
0
    if (r)
1774
0
      return r;
1775
1776
0
    if (flags & FOR_EACH_OBJECT_LOCAL_ONLY)
1777
0
      break;
1778
0
  }
1779
1780
0
  return 0;
1781
0
}
1782
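
A usage sketch for for_each_loose_object(): count the loose objects reachable through an object database, restricted to the local source via FOR_EACH_OBJECT_LOCAL_ONLY. It assumes the same includes as this file.

static int count_loose_cb(const struct object_id *oid UNUSED,
			  const char *path UNUSED, void *data)
{
	(*(size_t *)data)++;
	return 0; /* nonzero would stop the iteration */
}

static size_t count_local_loose_objects(struct object_database *odb)
{
	size_t nr = 0;

	for_each_loose_object(odb, count_loose_cb, &nr,
			      FOR_EACH_OBJECT_LOCAL_ONLY);
	return nr;
}
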
1783
static int append_loose_object(const struct object_id *oid,
1784
             const char *path UNUSED,
1785
             void *data)
1786
0
{
1787
0
  oidtree_insert(data, oid);
1788
0
  return 0;
1789
0
}
1790
1791
struct oidtree *odb_source_loose_cache(struct odb_source *source,
1792
               const struct object_id *oid)
1793
0
{
1794
0
  int subdir_nr = oid->hash[0];
1795
0
  struct strbuf buf = STRBUF_INIT;
1796
0
  size_t word_bits = bitsizeof(source->loose->subdir_seen[0]);
1797
0
  size_t word_index = subdir_nr / word_bits;
1798
0
  size_t mask = (size_t)1u << (subdir_nr % word_bits);
1799
0
  uint32_t *bitmap;
1800
1801
0
  if (subdir_nr < 0 ||
1802
0
      (size_t) subdir_nr >= bitsizeof(source->loose->subdir_seen))
1803
0
    BUG("subdir_nr out of range");
1804
1805
0
  bitmap = &source->loose->subdir_seen[word_index];
1806
0
  if (*bitmap & mask)
1807
0
    return source->loose->cache;
1808
0
  if (!source->loose->cache) {
1809
0
    ALLOC_ARRAY(source->loose->cache, 1);
1810
0
    oidtree_init(source->loose->cache);
1811
0
  }
1812
0
  strbuf_addstr(&buf, source->path);
1813
0
  for_each_file_in_obj_subdir(subdir_nr, &buf,
1814
0
            source->odb->repo->hash_algo,
1815
0
            append_loose_object,
1816
0
            NULL, NULL,
1817
0
            source->loose->cache);
1818
0
  *bitmap |= mask;
1819
0
  strbuf_release(&buf);
1820
0
  return source->loose->cache;
1821
0
}
1822
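
A sketch of consulting the per-subdirectory loose cache built above instead of probing the filesystem on every lookup; oidtree_contains() is assumed to be the membership test declared in oidtree.h, which this file already includes.

static int loose_object_in_cache(struct odb_source *source,
				 const struct object_id *oid)
{
	/* Populates the cache for oid's first byte on the first call. */
	struct oidtree *cache = odb_source_loose_cache(source, oid);

	return oidtree_contains(cache, oid);
}
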
1823
static void odb_source_loose_clear_cache(struct odb_source_loose *loose)
1824
0
{
1825
0
  oidtree_clear(loose->cache);
1826
0
  FREE_AND_NULL(loose->cache);
1827
0
  memset(&loose->subdir_seen, 0,
1828
0
         sizeof(loose->subdir_seen));
1829
0
}
1830
1831
void odb_source_loose_reprepare(struct odb_source *source)
1832
0
{
1833
0
  odb_source_loose_clear_cache(source->loose);
1834
0
}
1835
1836
static int check_stream_oid(git_zstream *stream,
1837
          const char *hdr,
1838
          unsigned long size,
1839
          const char *path,
1840
          const struct object_id *expected_oid,
1841
          const struct git_hash_algo *algop)
1842
0
{
1843
0
  struct git_hash_ctx c;
1844
0
  struct object_id real_oid;
1845
0
  unsigned char buf[4096];
1846
0
  unsigned long total_read;
1847
0
  int status = Z_OK;
1848
1849
0
  algop->init_fn(&c);
1850
0
  git_hash_update(&c, hdr, stream->total_out);
1851
1852
  /*
1853
   * We already read some bytes into hdr, but the ones up to the NUL
1854
   * do not count against the object's content size.
1855
   */
1856
0
  total_read = stream->total_out - strlen(hdr) - 1;
1857
1858
  /*
1859
   * This size comparison must be "<=" to read the final zlib packets;
1860
   * see the comment in unpack_loose_rest for details.
1861
   */
1862
0
  while (total_read <= size &&
1863
0
         (status == Z_OK ||
1864
0
    (status == Z_BUF_ERROR && !stream->avail_out))) {
1865
0
    stream->next_out = buf;
1866
0
    stream->avail_out = sizeof(buf);
1867
0
    if (size - total_read < stream->avail_out)
1868
0
      stream->avail_out = size - total_read;
1869
0
    status = git_inflate(stream, Z_FINISH);
1870
0
    git_hash_update(&c, buf, stream->next_out - buf);
1871
0
    total_read += stream->next_out - buf;
1872
0
  }
1873
1874
0
  if (status != Z_STREAM_END) {
1875
0
    error(_("corrupt loose object '%s'"), oid_to_hex(expected_oid));
1876
0
    return -1;
1877
0
  }
1878
0
  if (stream->avail_in) {
1879
0
    error(_("garbage at end of loose object '%s'"),
1880
0
          oid_to_hex(expected_oid));
1881
0
    return -1;
1882
0
  }
1883
1884
0
  git_hash_final_oid(&real_oid, &c);
1885
0
  if (!oideq(expected_oid, &real_oid)) {
1886
0
    error(_("hash mismatch for %s (expected %s)"), path,
1887
0
          oid_to_hex(expected_oid));
1888
0
    return -1;
1889
0
  }
1890
1891
0
  return 0;
1892
0
}
1893
1894
int read_loose_object(struct repository *repo,
1895
          const char *path,
1896
          const struct object_id *expected_oid,
1897
          struct object_id *real_oid,
1898
          void **contents,
1899
          struct object_info *oi)
1900
0
{
1901
0
  int ret = -1;
1902
0
  int fd;
1903
0
  void *map = NULL;
1904
0
  unsigned long mapsize;
1905
0
  git_zstream stream;
1906
0
  char hdr[MAX_HEADER_LEN];
1907
0
  unsigned long *size = oi->sizep;
1908
1909
0
  fd = git_open(path);
1910
0
  if (fd >= 0)
1911
0
    map = map_fd(fd, path, &mapsize);
1912
0
  if (!map) {
1913
0
    error_errno(_("unable to mmap %s"), path);
1914
0
    goto out;
1915
0
  }
1916
1917
0
  if (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr)) != ULHR_OK) {
1918
0
    error(_("unable to unpack header of %s"), path);
1919
0
    goto out_inflate;
1920
0
  }
1921
1922
0
  if (parse_loose_header(hdr, oi) < 0) {
1923
0
    error(_("unable to parse header of %s"), path);
1924
0
    goto out_inflate;
1925
0
  }
1926
1927
0
  if (*oi->typep < 0) {
1928
0
    error(_("unable to parse type from header '%s' of %s"),
1929
0
          hdr, path);
1930
0
    goto out_inflate;
1931
0
  }
1932
1933
0
  if (*oi->typep == OBJ_BLOB &&
1934
0
      *size > repo_settings_get_big_file_threshold(repo)) {
1935
0
    if (check_stream_oid(&stream, hdr, *size, path, expected_oid,
1936
0
             repo->hash_algo) < 0)
1937
0
      goto out_inflate;
1938
0
  } else {
1939
0
    *contents = unpack_loose_rest(&stream, hdr, *size, expected_oid);
1940
0
    if (!*contents) {
1941
0
      error(_("unable to unpack contents of %s"), path);
1942
0
      goto out_inflate;
1943
0
    }
1944
0
    hash_object_file(repo->hash_algo,
1945
0
         *contents, *size,
1946
0
         *oi->typep, real_oid);
1947
0
    if (!oideq(expected_oid, real_oid))
1948
0
      goto out_inflate;
1949
0
  }
1950
1951
0
  ret = 0; /* everything checks out */
1952
1953
0
out_inflate:
1954
0
  git_inflate_end(&stream);
1955
0
out:
1956
0
  if (map)
1957
0
    munmap(map, mapsize);
1958
0
  return ret;
1959
0
}
1960
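
A fsck-style usage sketch for read_loose_object(): verify that a loose object file matches the ID it is filed under, then discard the inflated contents. It assumes the same includes as this file; both typep and sizep must be provided, since the function dereferences them.

static int verify_loose_file(struct repository *repo, const char *path,
			     const struct object_id *expected_oid)
{
	struct object_info oi = OBJECT_INFO_INIT;
	struct object_id real_oid;
	enum object_type type;
	unsigned long size;
	void *contents = NULL;
	int ret;

	oi.typep = &type;
	oi.sizep = &size;

	/* Returns 0 only if the header parses and the hash matches. */
	ret = read_loose_object(repo, path, expected_oid, &real_oid,
				&contents, &oi);
	free(contents); /* stays NULL on the streaming (big blob) path */
	return ret;
}
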
1961
struct odb_transaction *object_file_transaction_begin(struct odb_source *source)
1962
0
{
1963
0
  struct object_database *odb = source->odb;
1964
1965
0
  if (odb->transaction)
1966
0
    return NULL;
1967
1968
0
  CALLOC_ARRAY(odb->transaction, 1);
1969
0
  odb->transaction->odb = odb;
1970
1971
0
  return odb->transaction;
1972
0
}
1973
1974
void object_file_transaction_commit(struct odb_transaction *transaction)
1975
0
{
1976
0
  if (!transaction)
1977
0
    return;
1978
1979
  /*
1980
   * Ensure the transaction being ended matches the pending transaction.
1981
   */
1982
0
  ASSERT(transaction == transaction->odb->transaction);
1983
1984
0
  flush_loose_object_transaction(transaction);
1985
0
  flush_packfile_transaction(transaction);
1986
0
  transaction->odb->transaction = NULL;
1987
0
  free(transaction);
1988
0
}
1989
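
A usage sketch for the object-database transaction API used above: batch several writes inside one transaction so that loose and packed output is flushed once at commit time. odb_transaction_begin(), odb_transaction_commit() and odb_write_object() are called with the same signatures seen earlier in this file.

static int write_blobs_in_one_transaction(struct repository *repo,
					  struct strbuf *bufs, size_t nr,
					  struct object_id *oids)
{
	struct odb_transaction *transaction;
	int ret = 0;

	transaction = odb_transaction_begin(repo->objects);
	for (size_t i = 0; i < nr; i++)
		if (odb_write_object(repo->objects, bufs[i].buf, bufs[i].len,
				     OBJ_BLOB, &oids[i]))
			ret = -1; /* keep going; report failure at the end */
	odb_transaction_commit(transaction);

	return ret;
}
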
1990
struct odb_source_loose *odb_source_loose_new(struct odb_source *source)
1991
0
{
1992
0
  struct odb_source_loose *loose;
1993
0
  CALLOC_ARRAY(loose, 1);
1994
0
  loose->source = source;
1995
0
  return loose;
1996
0
}
1997
1998
void odb_source_loose_free(struct odb_source_loose *loose)
1999
0
{
2000
0
  if (!loose)
2001
0
    return;
2002
0
  odb_source_loose_clear_cache(loose);
2003
0
  loose_object_map_clear(&loose->map);
2004
0
  free(loose);
2005
0
}
2006
2007
struct odb_loose_read_stream {
2008
  struct odb_read_stream base;
2009
  git_zstream z;
2010
  enum {
2011
    ODB_LOOSE_READ_STREAM_INUSE,
2012
    ODB_LOOSE_READ_STREAM_DONE,
2013
    ODB_LOOSE_READ_STREAM_ERROR,
2014
  } z_state;
2015
  void *mapped;
2016
  unsigned long mapsize;
2017
  char hdr[32];
2018
  int hdr_avail;
2019
  int hdr_used;
2020
};
2021
2022
static ssize_t read_istream_loose(struct odb_read_stream *_st, char *buf, size_t sz)
2023
0
{
2024
0
  struct odb_loose_read_stream *st = (struct odb_loose_read_stream *)_st;
2025
0
  size_t total_read = 0;
2026
2027
0
  switch (st->z_state) {
2028
0
  case ODB_LOOSE_READ_STREAM_DONE:
2029
0
    return 0;
2030
0
  case ODB_LOOSE_READ_STREAM_ERROR:
2031
0
    return -1;
2032
0
  default:
2033
0
    break;
2034
0
  }
2035
2036
0
  if (st->hdr_used < st->hdr_avail) {
2037
0
    size_t to_copy = st->hdr_avail - st->hdr_used;
2038
0
    if (sz < to_copy)
2039
0
      to_copy = sz;
2040
0
    memcpy(buf, st->hdr + st->hdr_used, to_copy);
2041
0
    st->hdr_used += to_copy;
2042
0
    total_read += to_copy;
2043
0
  }
2044
2045
0
  while (total_read < sz) {
2046
0
    int status;
2047
2048
0
    st->z.next_out = (unsigned char *)buf + total_read;
2049
0
    st->z.avail_out = sz - total_read;
2050
0
    status = git_inflate(&st->z, Z_FINISH);
2051
2052
0
    total_read = st->z.next_out - (unsigned char *)buf;
2053
2054
0
    if (status == Z_STREAM_END) {
2055
0
      git_inflate_end(&st->z);
2056
0
      st->z_state = ODB_LOOSE_READ_STREAM_DONE;
2057
0
      break;
2058
0
    }
2059
0
    if (status != Z_OK && (status != Z_BUF_ERROR || total_read < sz)) {
2060
0
      git_inflate_end(&st->z);
2061
0
      st->z_state = ODB_LOOSE_READ_STREAM_ERROR;
2062
0
      return -1;
2063
0
    }
2064
0
  }
2065
0
  return total_read;
2066
0
}
2067
2068
static int close_istream_loose(struct odb_read_stream *_st)
2069
0
{
2070
0
  struct odb_loose_read_stream *st = (struct odb_loose_read_stream *)_st;
2071
0
  if (st->z_state == ODB_LOOSE_READ_STREAM_INUSE)
2072
0
    git_inflate_end(&st->z);
2073
0
  munmap(st->mapped, st->mapsize);
2074
0
  return 0;
2075
0
}
2076
2077
int odb_source_loose_read_object_stream(struct odb_read_stream **out,
2078
          struct odb_source *source,
2079
          const struct object_id *oid)
2080
0
{
2081
0
  struct object_info oi = OBJECT_INFO_INIT;
2082
0
  struct odb_loose_read_stream *st;
2083
0
  unsigned long mapsize;
2084
0
  void *mapped;
2085
2086
0
  mapped = odb_source_loose_map_object(source, oid, &mapsize);
2087
0
  if (!mapped)
2088
0
    return -1;
2089
2090
  /*
2091
   * Note: we must allocate this structure early even though we may still
2092
   * fail. This is because we need to initialize the zlib stream, and it
2093
   * is not possible to copy the stream around after the fact because it
2094
   * has self-referencing pointers.
2095
   */
2096
0
  CALLOC_ARRAY(st, 1);
2097
2098
0
  switch (unpack_loose_header(&st->z, mapped, mapsize, st->hdr,
2099
0
            sizeof(st->hdr))) {
2100
0
  case ULHR_OK:
2101
0
    break;
2102
0
  case ULHR_BAD:
2103
0
  case ULHR_TOO_LONG:
2104
0
    goto error;
2105
0
  }
2106
2107
0
  oi.sizep = &st->base.size;
2108
0
  oi.typep = &st->base.type;
2109
2110
0
  if (parse_loose_header(st->hdr, &oi) < 0 || st->base.type < 0)
2111
0
    goto error;
2112
2113
0
  st->mapped = mapped;
2114
0
  st->mapsize = mapsize;
2115
0
  st->hdr_used = strlen(st->hdr) + 1;
2116
0
  st->hdr_avail = st->z.total_out;
2117
0
  st->z_state = ODB_LOOSE_READ_STREAM_INUSE;
2118
0
  st->base.close = close_istream_loose;
2119
0
  st->base.read = read_istream_loose;
2120
2121
0
  *out = &st->base;
2122
2123
0
  return 0;
2124
0
error:
2125
0
  git_inflate_end(&st->z);
2126
0
  munmap(st->mapped, st->mapsize);
2127
0
  free(st);
2128
0
  return -1;
2129
0
}
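
To close out, a sketch of driving the loose-object read stream set up by odb_source_loose_read_object_stream(): copy the object payload to stdout in fixed-size chunks. It calls the read/close hooks installed above directly and frees the stream itself, since close_istream_loose() does not; in-tree callers would normally go through the generic odb/streaming.h wrappers instead.

static int cat_loose_object(struct odb_source *source,
			    const struct object_id *oid)
{
	struct odb_read_stream *st;
	char buf[16384];
	ssize_t readlen;
	int ret = 0;

	if (odb_source_loose_read_object_stream(&st, source, oid) < 0)
		return error(_("cannot stream loose object %s"),
			     oid_to_hex(oid));

	while ((readlen = st->read(st, buf, sizeof(buf))) > 0) {
		if (write_in_full(1, buf, readlen) < 0) {
			ret = error_errno(_("unable to write to stdout"));
			break;
		}
	}
	if (readlen < 0)
		ret = error(_("error while inflating %s"), oid_to_hex(oid));

	st->close(st);
	free(st); /* allocated with CALLOC_ARRAY() above, not freed by close */
	return ret;
}
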