Coverage Report

Created: 2024-09-16 06:12

/src/git/xdiff/xdiffi.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 *  LibXDiff by Davide Libenzi ( File Differential Library )
3
 *  Copyright (C) 2003  Davide Libenzi
4
 *
5
 *  This library is free software; you can redistribute it and/or
6
 *  modify it under the terms of the GNU Lesser General Public
7
 *  License as published by the Free Software Foundation; either
8
 *  version 2.1 of the License, or (at your option) any later version.
9
 *
10
 *  This library is distributed in the hope that it will be useful,
11
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13
 *  Lesser General Public License for more details.
14
 *
15
 *  You should have received a copy of the GNU Lesser General Public
16
 *  License along with this library; if not, see
17
 *  <http://www.gnu.org/licenses/>.
18
 *
19
 *  Davide Libenzi <davidel@xmailserver.org>
20
 *
21
 */
22
23
#include "xinclude.h"
24
25
12.6k
#define XDL_MAX_COST_MIN 256
26
6.34k
#define XDL_HEUR_MIN_COST 256
27
0
#define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1)
28
6.34k
#define XDL_SNAKE_CNT 20
29
0
#define XDL_K_HEUR 4
30
31
typedef struct s_xdpsplit {
32
  long i1, i2;
33
  int min_lo, min_hi;
34
} xdpsplit_t;
35
36
/*
37
 * See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers.
38
 * Basically considers a "box" (off1, off2, lim1, lim2) and scan from both
39
 * the forward diagonal starting from (off1, off2) and the backward diagonal
40
 * starting from (lim1, lim2). If the K values on the same diagonal crosses
41
 * returns the furthest point of reach. We might encounter expensive edge cases
42
 * using this algorithm, so a little bit of heuristic is needed to cut the
43
 * search and to return a suboptimal point.
44
 */
45
static long xdl_split(unsigned long const *ha1, long off1, long lim1,
46
          unsigned long const *ha2, long off2, long lim2,
47
          long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl,
48
0
          xdalgoenv_t *xenv) {
49
0
  long dmin = off1 - lim2, dmax = lim1 - off2;
50
0
  long fmid = off1 - off2, bmid = lim1 - lim2;
51
0
  long odd = (fmid - bmid) & 1;
52
0
  long fmin = fmid, fmax = fmid;
53
0
  long bmin = bmid, bmax = bmid;
54
0
  long ec, d, i1, i2, prev1, best, dd, v, k;
55
56
  /*
57
   * Set initial diagonal values for both forward and backward path.
58
   */
59
0
  kvdf[fmid] = off1;
60
0
  kvdb[bmid] = lim1;
61
62
0
  for (ec = 1;; ec++) {
63
0
    int got_snake = 0;
64
65
    /*
66
     * We need to extend the diagonal "domain" by one. If the next
67
     * values exits the box boundaries we need to change it in the
68
     * opposite direction because (max - min) must be a power of
69
     * two.
70
     *
71
     * Also we initialize the external K value to -1 so that we can
72
     * avoid extra conditions in the check inside the core loop.
73
     */
74
0
    if (fmin > dmin)
75
0
      kvdf[--fmin - 1] = -1;
76
0
    else
77
0
      ++fmin;
78
0
    if (fmax < dmax)
79
0
      kvdf[++fmax + 1] = -1;
80
0
    else
81
0
      --fmax;
82
83
0
    for (d = fmax; d >= fmin; d -= 2) {
84
0
      if (kvdf[d - 1] >= kvdf[d + 1])
85
0
        i1 = kvdf[d - 1] + 1;
86
0
      else
87
0
        i1 = kvdf[d + 1];
88
0
      prev1 = i1;
89
0
      i2 = i1 - d;
90
0
      for (; i1 < lim1 && i2 < lim2 && ha1[i1] == ha2[i2]; i1++, i2++);
91
0
      if (i1 - prev1 > xenv->snake_cnt)
92
0
        got_snake = 1;
93
0
      kvdf[d] = i1;
94
0
      if (odd && bmin <= d && d <= bmax && kvdb[d] <= i1) {
95
0
        spl->i1 = i1;
96
0
        spl->i2 = i2;
97
0
        spl->min_lo = spl->min_hi = 1;
98
0
        return ec;
99
0
      }
100
0
    }
101
102
    /*
103
     * We need to extend the diagonal "domain" by one. If the next
104
     * values exits the box boundaries we need to change it in the
105
     * opposite direction because (max - min) must be a power of
106
     * two.
107
     *
108
     * Also we initialize the external K value to -1 so that we can
109
     * avoid extra conditions in the check inside the core loop.
110
     */
111
0
    if (bmin > dmin)
112
0
      kvdb[--bmin - 1] = XDL_LINE_MAX;
113
0
    else
114
0
      ++bmin;
115
0
    if (bmax < dmax)
116
0
      kvdb[++bmax + 1] = XDL_LINE_MAX;
117
0
    else
118
0
      --bmax;
119
120
0
    for (d = bmax; d >= bmin; d -= 2) {
121
0
      if (kvdb[d - 1] < kvdb[d + 1])
122
0
        i1 = kvdb[d - 1];
123
0
      else
124
0
        i1 = kvdb[d + 1] - 1;
125
0
      prev1 = i1;
126
0
      i2 = i1 - d;
127
0
      for (; i1 > off1 && i2 > off2 && ha1[i1 - 1] == ha2[i2 - 1]; i1--, i2--);
128
0
      if (prev1 - i1 > xenv->snake_cnt)
129
0
        got_snake = 1;
130
0
      kvdb[d] = i1;
131
0
      if (!odd && fmin <= d && d <= fmax && i1 <= kvdf[d]) {
132
0
        spl->i1 = i1;
133
0
        spl->i2 = i2;
134
0
        spl->min_lo = spl->min_hi = 1;
135
0
        return ec;
136
0
      }
137
0
    }
138
139
0
    if (need_min)
140
0
      continue;
141
142
    /*
143
     * If the edit cost is above the heuristic trigger and if
144
     * we got a good snake, we sample current diagonals to see
145
     * if some of them have reached an "interesting" path. Our
146
     * measure is a function of the distance from the diagonal
147
     * corner (i1 + i2) penalized with the distance from the
148
     * mid diagonal itself. If this value is above the current
149
     * edit cost times a magic factor (XDL_K_HEUR) we consider
150
     * it interesting.
151
     */
152
0
    if (got_snake && ec > xenv->heur_min) {
153
0
      for (best = 0, d = fmax; d >= fmin; d -= 2) {
154
0
        dd = d > fmid ? d - fmid: fmid - d;
155
0
        i1 = kvdf[d];
156
0
        i2 = i1 - d;
157
0
        v = (i1 - off1) + (i2 - off2) - dd;
158
159
0
        if (v > XDL_K_HEUR * ec && v > best &&
160
0
            off1 + xenv->snake_cnt <= i1 && i1 < lim1 &&
161
0
            off2 + xenv->snake_cnt <= i2 && i2 < lim2) {
162
0
          for (k = 1; ha1[i1 - k] == ha2[i2 - k]; k++)
163
0
            if (k == xenv->snake_cnt) {
164
0
              best = v;
165
0
              spl->i1 = i1;
166
0
              spl->i2 = i2;
167
0
              break;
168
0
            }
169
0
        }
170
0
      }
171
0
      if (best > 0) {
172
0
        spl->min_lo = 1;
173
0
        spl->min_hi = 0;
174
0
        return ec;
175
0
      }
176
177
0
      for (best = 0, d = bmax; d >= bmin; d -= 2) {
178
0
        dd = d > bmid ? d - bmid: bmid - d;
179
0
        i1 = kvdb[d];
180
0
        i2 = i1 - d;
181
0
        v = (lim1 - i1) + (lim2 - i2) - dd;
182
183
0
        if (v > XDL_K_HEUR * ec && v > best &&
184
0
            off1 < i1 && i1 <= lim1 - xenv->snake_cnt &&
185
0
            off2 < i2 && i2 <= lim2 - xenv->snake_cnt) {
186
0
          for (k = 0; ha1[i1 + k] == ha2[i2 + k]; k++)
187
0
            if (k == xenv->snake_cnt - 1) {
188
0
              best = v;
189
0
              spl->i1 = i1;
190
0
              spl->i2 = i2;
191
0
              break;
192
0
            }
193
0
        }
194
0
      }
195
0
      if (best > 0) {
196
0
        spl->min_lo = 0;
197
0
        spl->min_hi = 1;
198
0
        return ec;
199
0
      }
200
0
    }
201
202
    /*
203
     * Enough is enough. We spent too much time here and now we
204
     * collect the furthest reaching path using the (i1 + i2)
205
     * measure.
206
     */
207
0
    if (ec >= xenv->mxcost) {
208
0
      long fbest, fbest1, bbest, bbest1;
209
210
0
      fbest = fbest1 = -1;
211
0
      for (d = fmax; d >= fmin; d -= 2) {
212
0
        i1 = XDL_MIN(kvdf[d], lim1);
213
0
        i2 = i1 - d;
214
0
        if (lim2 < i2)
215
0
          i1 = lim2 + d, i2 = lim2;
216
0
        if (fbest < i1 + i2) {
217
0
          fbest = i1 + i2;
218
0
          fbest1 = i1;
219
0
        }
220
0
      }
221
222
0
      bbest = bbest1 = XDL_LINE_MAX;
223
0
      for (d = bmax; d >= bmin; d -= 2) {
224
0
        i1 = XDL_MAX(off1, kvdb[d]);
225
0
        i2 = i1 - d;
226
0
        if (i2 < off2)
227
0
          i1 = off2 + d, i2 = off2;
228
0
        if (i1 + i2 < bbest) {
229
0
          bbest = i1 + i2;
230
0
          bbest1 = i1;
231
0
        }
232
0
      }
233
234
0
      if ((lim1 + lim2) - bbest < fbest - (off1 + off2)) {
235
0
        spl->i1 = fbest1;
236
0
        spl->i2 = fbest - fbest1;
237
0
        spl->min_lo = 1;
238
0
        spl->min_hi = 0;
239
0
      } else {
240
0
        spl->i1 = bbest1;
241
0
        spl->i2 = bbest - bbest1;
242
0
        spl->min_lo = 0;
243
0
        spl->min_hi = 1;
244
0
      }
245
0
      return ec;
246
0
    }
247
0
  }
248
0
}
249
250
251
/*
252
 * Rule: "Divide et Impera" (divide & conquer). Recursively split the box in
253
 * sub-boxes by calling the box splitting function. Note that the real job
254
 * (marking changed lines) is done in the two boundary reaching checks.
255
 */
256
int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
257
     diffdata_t *dd2, long off2, long lim2,
258
6.34k
     long *kvdf, long *kvdb, int need_min, xdalgoenv_t *xenv) {
259
6.34k
  unsigned long const *ha1 = dd1->ha, *ha2 = dd2->ha;
260
261
  /*
262
   * Shrink the box by walking through each diagonal snake (SW and NE).
263
   */
264
6.34k
  for (; off1 < lim1 && off2 < lim2 && ha1[off1] == ha2[off2]; off1++, off2++);
265
6.34k
  for (; off1 < lim1 && off2 < lim2 && ha1[lim1 - 1] == ha2[lim2 - 1]; lim1--, lim2--);
266
267
  /*
268
   * If one dimension is empty, then all records on the other one must
269
   * be obviously changed.
270
   */
271
6.34k
  if (off1 == lim1) {
272
6.34k
    char *rchg2 = dd2->rchg;
273
6.34k
    long *rindex2 = dd2->rindex;
274
275
6.34k
    for (; off2 < lim2; off2++)
276
0
      rchg2[rindex2[off2]] = 1;
277
6.34k
  } else if (off2 == lim2) {
278
0
    char *rchg1 = dd1->rchg;
279
0
    long *rindex1 = dd1->rindex;
280
281
0
    for (; off1 < lim1; off1++)
282
0
      rchg1[rindex1[off1]] = 1;
283
0
  } else {
284
0
    xdpsplit_t spl;
285
0
    spl.i1 = spl.i2 = 0;
286
287
    /*
288
     * Divide ...
289
     */
290
0
    if (xdl_split(ha1, off1, lim1, ha2, off2, lim2, kvdf, kvdb,
291
0
            need_min, &spl, xenv) < 0) {
292
293
0
      return -1;
294
0
    }
295
296
    /*
297
     * ... et Impera.
298
     */
299
0
    if (xdl_recs_cmp(dd1, off1, spl.i1, dd2, off2, spl.i2,
300
0
         kvdf, kvdb, spl.min_lo, xenv) < 0 ||
301
0
        xdl_recs_cmp(dd1, spl.i1, lim1, dd2, spl.i2, lim2,
302
0
         kvdf, kvdb, spl.min_hi, xenv) < 0) {
303
304
0
      return -1;
305
0
    }
306
0
  }
307
308
6.34k
  return 0;
309
6.34k
}
310
311
312
int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
313
6.34k
    xdfenv_t *xe) {
314
6.34k
  long ndiags;
315
6.34k
  long *kvd, *kvdf, *kvdb;
316
6.34k
  xdalgoenv_t xenv;
317
6.34k
  diffdata_t dd1, dd2;
318
6.34k
  int res;
319
320
6.34k
  if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0)
321
0
    return -1;
322
323
6.34k
  if (XDF_DIFF_ALG(xpp->flags) == XDF_PATIENCE_DIFF) {
324
0
    res = xdl_do_patience_diff(xpp, xe);
325
0
    goto out;
326
0
  }
327
328
6.34k
  if (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF) {
329
0
    res = xdl_do_histogram_diff(xpp, xe);
330
0
    goto out;
331
0
  }
332
333
  /*
334
   * Allocate and setup K vectors to be used by the differential
335
   * algorithm.
336
   *
337
   * One is to store the forward path and one to store the backward path.
338
   */
339
6.34k
  ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3;
340
6.34k
  if (!XDL_ALLOC_ARRAY(kvd, 2 * ndiags + 2)) {
341
342
0
    xdl_free_env(xe);
343
0
    return -1;
344
0
  }
345
6.34k
  kvdf = kvd;
346
6.34k
  kvdb = kvdf + ndiags;
347
6.34k
  kvdf += xe->xdf2.nreff + 1;
348
6.34k
  kvdb += xe->xdf2.nreff + 1;
349
350
6.34k
  xenv.mxcost = xdl_bogosqrt(ndiags);
351
6.34k
  if (xenv.mxcost < XDL_MAX_COST_MIN)
352
6.34k
    xenv.mxcost = XDL_MAX_COST_MIN;
353
6.34k
  xenv.snake_cnt = XDL_SNAKE_CNT;
354
6.34k
  xenv.heur_min = XDL_HEUR_MIN_COST;
355
356
6.34k
  dd1.nrec = xe->xdf1.nreff;
357
6.34k
  dd1.ha = xe->xdf1.ha;
358
6.34k
  dd1.rchg = xe->xdf1.rchg;
359
6.34k
  dd1.rindex = xe->xdf1.rindex;
360
6.34k
  dd2.nrec = xe->xdf2.nreff;
361
6.34k
  dd2.ha = xe->xdf2.ha;
362
6.34k
  dd2.rchg = xe->xdf2.rchg;
363
6.34k
  dd2.rindex = xe->xdf2.rindex;
364
365
6.34k
  res = xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
366
6.34k
         kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0,
367
6.34k
         &xenv);
368
6.34k
  xdl_free(kvd);
369
6.34k
 out:
370
6.34k
  if (res < 0)
371
0
    xdl_free_env(xe);
372
373
6.34k
  return res;
374
6.34k
}
375
376
377
6.34k
static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2) {
378
6.34k
  xdchange_t *xch;
379
380
6.34k
  if (!(xch = (xdchange_t *) xdl_malloc(sizeof(xdchange_t))))
381
0
    return NULL;
382
383
6.34k
  xch->next = xscr;
384
6.34k
  xch->i1 = i1;
385
6.34k
  xch->i2 = i2;
386
6.34k
  xch->chg1 = chg1;
387
6.34k
  xch->chg2 = chg2;
388
6.34k
  xch->ignore = 0;
389
390
6.34k
  return xch;
391
6.34k
}
392
393
394
static int recs_match(xrecord_t *rec1, xrecord_t *rec2)
395
0
{
396
0
  return (rec1->ha == rec2->ha);
397
0
}
398
399
/*
400
 * If a line is indented more than this, get_indent() just returns this value.
401
 * This avoids having to do absurd amounts of work for data that are not
402
 * human-readable text, and also ensures that the output of get_indent fits
403
 * within an int.
404
 */
405
0
#define MAX_INDENT 200
406
407
/*
408
 * Return the amount of indentation of the specified line, treating TAB as 8
409
 * columns. Return -1 if line is empty or contains only whitespace. Clamp the
410
 * output value at MAX_INDENT.
411
 */
412
static int get_indent(xrecord_t *rec)
413
0
{
414
0
  long i;
415
0
  int ret = 0;
416
417
0
  for (i = 0; i < rec->size; i++) {
418
0
    char c = rec->ptr[i];
419
420
0
    if (!XDL_ISSPACE(c))
421
0
      return ret;
422
0
    else if (c == ' ')
423
0
      ret += 1;
424
0
    else if (c == '\t')
425
0
      ret += 8 - ret % 8;
426
    /* ignore other whitespace characters */
427
428
0
    if (ret >= MAX_INDENT)
429
0
      return MAX_INDENT;
430
0
  }
431
432
  /* The line contains only whitespace. */
433
0
  return -1;
434
0
}
435
436
/*
437
 * If more than this number of consecutive blank rows are found, just return
438
 * this value. This avoids requiring O(N^2) work for pathological cases, and
439
 * also ensures that the output of score_split fits in an int.
440
 */
441
0
#define MAX_BLANKS 20
442
443
/* Characteristics measured about a hypothetical split position. */
444
struct split_measurement {
445
  /*
446
   * Is the split at the end of the file (aside from any blank lines)?
447
   */
448
  int end_of_file;
449
450
  /*
451
   * How much is the line immediately following the split indented (or -1
452
   * if the line is blank):
453
   */
454
  int indent;
455
456
  /*
457
   * How many consecutive lines above the split are blank?
458
   */
459
  int pre_blank;
460
461
  /*
462
   * How much is the nearest non-blank line above the split indented (or
463
   * -1 if there is no such line)?
464
   */
465
  int pre_indent;
466
467
  /*
468
   * How many lines after the line following the split are blank?
469
   */
470
  int post_blank;
471
472
  /*
473
   * How much is the nearest non-blank line after the line following the
474
   * split indented (or -1 if there is no such line)?
475
   */
476
  int post_indent;
477
};
478
479
struct split_score {
480
  /* The effective indent of this split (smaller is preferred). */
481
  int effective_indent;
482
483
  /* Penalty for this split (smaller is preferred). */
484
  int penalty;
485
};
486
487
/*
488
 * Fill m with information about a hypothetical split of xdf above line split.
489
 */
490
static void measure_split(const xdfile_t *xdf, long split,
491
        struct split_measurement *m)
492
0
{
493
0
  long i;
494
495
0
  if (split >= xdf->nrec) {
496
0
    m->end_of_file = 1;
497
0
    m->indent = -1;
498
0
  } else {
499
0
    m->end_of_file = 0;
500
0
    m->indent = get_indent(xdf->recs[split]);
501
0
  }
502
503
0
  m->pre_blank = 0;
504
0
  m->pre_indent = -1;
505
0
  for (i = split - 1; i >= 0; i--) {
506
0
    m->pre_indent = get_indent(xdf->recs[i]);
507
0
    if (m->pre_indent != -1)
508
0
      break;
509
0
    m->pre_blank += 1;
510
0
    if (m->pre_blank == MAX_BLANKS) {
511
0
      m->pre_indent = 0;
512
0
      break;
513
0
    }
514
0
  }
515
516
0
  m->post_blank = 0;
517
0
  m->post_indent = -1;
518
0
  for (i = split + 1; i < xdf->nrec; i++) {
519
0
    m->post_indent = get_indent(xdf->recs[i]);
520
0
    if (m->post_indent != -1)
521
0
      break;
522
0
    m->post_blank += 1;
523
0
    if (m->post_blank == MAX_BLANKS) {
524
0
      m->post_indent = 0;
525
0
      break;
526
0
    }
527
0
  }
528
0
}
529
530
/*
531
 * The empirically-determined weight factors used by score_split() below.
532
 * Larger values means that the position is a less favorable place to split.
533
 *
534
 * Note that scores are only ever compared against each other, so multiplying
535
 * all of these weight/penalty values by the same factor wouldn't change the
536
 * heuristic's behavior. Still, we need to set that arbitrary scale *somehow*.
537
 * In practice, these numbers are chosen to be large enough that they can be
538
 * adjusted relative to each other with sufficient precision despite using
539
 * integer math.
540
 */
541
542
/* Penalty if there are no non-blank lines before the split */
543
0
#define START_OF_FILE_PENALTY 1
544
545
/* Penalty if there are no non-blank lines after the split */
546
0
#define END_OF_FILE_PENALTY 21
547
548
/* Multiplier for the number of blank lines around the split */
549
0
#define TOTAL_BLANK_WEIGHT (-30)
550
551
/* Multiplier for the number of blank lines after the split */
552
0
#define POST_BLANK_WEIGHT 6
553
554
/*
555
 * Penalties applied if the line is indented more than its predecessor
556
 */
557
0
#define RELATIVE_INDENT_PENALTY (-4)
558
0
#define RELATIVE_INDENT_WITH_BLANK_PENALTY 10
559
560
/*
561
 * Penalties applied if the line is indented less than both its predecessor and
562
 * its successor
563
 */
564
0
#define RELATIVE_OUTDENT_PENALTY 24
565
0
#define RELATIVE_OUTDENT_WITH_BLANK_PENALTY 17
566
567
/*
568
 * Penalties applied if the line is indented less than its predecessor but not
569
 * less than its successor
570
 */
571
0
#define RELATIVE_DEDENT_PENALTY 23
572
0
#define RELATIVE_DEDENT_WITH_BLANK_PENALTY 17
573
574
/*
575
 * We only consider whether the sum of the effective indents for splits are
576
 * less than (-1), equal to (0), or greater than (+1) each other. The resulting
577
 * value is multiplied by the following weight and combined with the penalty to
578
 * determine the better of two scores.
579
 */
580
0
#define INDENT_WEIGHT 60
581
582
/*
583
 * How far do we slide a hunk at most?
584
 */
585
0
#define INDENT_HEURISTIC_MAX_SLIDING 100
586
587
/*
588
 * Compute a badness score for the hypothetical split whose measurements are
589
 * stored in m. The weight factors were determined empirically using the tools
590
 * and corpus described in
591
 *
592
 *     https://github.com/mhagger/diff-slider-tools
593
 *
594
 * Also see that project if you want to improve the weights based on, for
595
 * example, a larger or more diverse corpus.
596
 */
597
static void score_add_split(const struct split_measurement *m, struct split_score *s)
598
0
{
599
  /*
600
   * A place to accumulate penalty factors (positive makes this index more
601
   * favored):
602
   */
603
0
  int post_blank, total_blank, indent, any_blanks;
604
605
0
  if (m->pre_indent == -1 && m->pre_blank == 0)
606
0
    s->penalty += START_OF_FILE_PENALTY;
607
608
0
  if (m->end_of_file)
609
0
    s->penalty += END_OF_FILE_PENALTY;
610
611
  /*
612
   * Set post_blank to the number of blank lines following the split,
613
   * including the line immediately after the split:
614
   */
615
0
  post_blank = (m->indent == -1) ? 1 + m->post_blank : 0;
616
0
  total_blank = m->pre_blank + post_blank;
617
618
  /* Penalties based on nearby blank lines: */
619
0
  s->penalty += TOTAL_BLANK_WEIGHT * total_blank;
620
0
  s->penalty += POST_BLANK_WEIGHT * post_blank;
621
622
0
  if (m->indent != -1)
623
0
    indent = m->indent;
624
0
  else
625
0
    indent = m->post_indent;
626
627
0
  any_blanks = (total_blank != 0);
628
629
  /* Note that the effective indent is -1 at the end of the file: */
630
0
  s->effective_indent += indent;
631
632
0
  if (indent == -1) {
633
    /* No additional adjustments needed. */
634
0
  } else if (m->pre_indent == -1) {
635
    /* No additional adjustments needed. */
636
0
  } else if (indent > m->pre_indent) {
637
    /*
638
     * The line is indented more than its predecessor.
639
     */
640
0
    s->penalty += any_blanks ?
641
0
      RELATIVE_INDENT_WITH_BLANK_PENALTY :
642
0
      RELATIVE_INDENT_PENALTY;
643
0
  } else if (indent == m->pre_indent) {
644
    /*
645
     * The line has the same indentation level as its predecessor.
646
     * No additional adjustments needed.
647
     */
648
0
  } else {
649
    /*
650
     * The line is indented less than its predecessor. It could be
651
     * the block terminator of the previous block, but it could
652
     * also be the start of a new block (e.g., an "else" block, or
653
     * maybe the previous block didn't have a block terminator).
654
     * Try to distinguish those cases based on what comes next:
655
     */
656
0
    if (m->post_indent != -1 && m->post_indent > indent) {
657
      /*
658
       * The following line is indented more. So it is likely
659
       * that this line is the start of a block.
660
       */
661
0
      s->penalty += any_blanks ?
662
0
        RELATIVE_OUTDENT_WITH_BLANK_PENALTY :
663
0
        RELATIVE_OUTDENT_PENALTY;
664
0
    } else {
665
      /*
666
       * That was probably the end of a block.
667
       */
668
0
      s->penalty += any_blanks ?
669
0
        RELATIVE_DEDENT_WITH_BLANK_PENALTY :
670
0
        RELATIVE_DEDENT_PENALTY;
671
0
    }
672
0
  }
673
0
}
674
675
static int score_cmp(struct split_score *s1, struct split_score *s2)
676
0
{
677
  /* -1 if s1.effective_indent < s2->effective_indent, etc. */
678
0
  int cmp_indents = ((s1->effective_indent > s2->effective_indent) -
679
0
         (s1->effective_indent < s2->effective_indent));
680
681
0
  return INDENT_WEIGHT * cmp_indents + (s1->penalty - s2->penalty);
682
0
}
683
684
/*
685
 * Represent a group of changed lines in an xdfile_t (i.e., a contiguous group
686
 * of lines that was inserted or deleted from the corresponding version of the
687
 * file). We consider there to be such a group at the beginning of the file, at
688
 * the end of the file, and between any two unchanged lines, though most such
689
 * groups will usually be empty.
690
 *
691
 * If the first line in a group is equal to the line following the group, then
692
 * the group can be slid down. Similarly, if the last line in a group is equal
693
 * to the line preceding the group, then the group can be slid up. See
694
 * group_slide_down() and group_slide_up().
695
 *
696
 * Note that loops that are testing for changed lines in xdf->rchg do not need
697
 * index bounding since the array is prepared with a zero at position -1 and N.
698
 */
699
struct xdlgroup {
700
  /*
701
   * The index of the first changed line in the group, or the index of
702
   * the unchanged line above which the (empty) group is located.
703
   */
704
  long start;
705
706
  /*
707
   * The index of the first unchanged line after the group. For an empty
708
   * group, end is equal to start.
709
   */
710
  long end;
711
};
712
713
/*
714
 * Initialize g to point at the first group in xdf.
715
 */
716
static void group_init(xdfile_t *xdf, struct xdlgroup *g)
717
25.3k
{
718
25.3k
  g->start = g->end = 0;
719
53.1k
  while (xdf->rchg[g->end])
720
27.7k
    g->end++;
721
25.3k
}
722
723
/*
724
 * Move g to describe the next (possibly empty) group in xdf and return 0. If g
725
 * is already at the end of the file, do nothing and return -1.
726
 */
727
static inline int group_next(xdfile_t *xdf, struct xdlgroup *g)
728
25.3k
{
729
25.3k
  if (g->end == xdf->nrec)
730
25.3k
    return -1;
731
732
0
  g->start = g->end + 1;
733
0
  for (g->end = g->start; xdf->rchg[g->end]; g->end++)
734
0
    ;
735
736
0
  return 0;
737
25.3k
}
738
739
/*
740
 * Move g to describe the previous (possibly empty) group in xdf and return 0.
741
 * If g is already at the beginning of the file, do nothing and return -1.
742
 */
743
static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
744
0
{
745
0
  if (g->start == 0)
746
0
    return -1;
747
748
0
  g->end = g->start - 1;
749
0
  for (g->start = g->end; xdf->rchg[g->start - 1]; g->start--)
750
0
    ;
751
752
0
  return 0;
753
0
}
754
755
/*
756
 * If g can be slid toward the end of the file, do so, and if it bumps into a
757
 * following group, expand this group to include it. Return 0 on success or -1
758
 * if g cannot be slid down.
759
 */
760
static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g)
761
6.34k
{
762
6.34k
  if (g->end < xdf->nrec &&
763
6.34k
      recs_match(xdf->recs[g->start], xdf->recs[g->end])) {
764
0
    xdf->rchg[g->start++] = 0;
765
0
    xdf->rchg[g->end++] = 1;
766
767
0
    while (xdf->rchg[g->end])
768
0
      g->end++;
769
770
0
    return 0;
771
6.34k
  } else {
772
6.34k
    return -1;
773
6.34k
  }
774
6.34k
}
775
776
/*
777
 * If g can be slid toward the beginning of the file, do so, and if it bumps
778
 * into a previous group, expand this group to include it. Return 0 on success
779
 * or -1 if g cannot be slid up.
780
 */
781
static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g)
782
6.34k
{
783
6.34k
  if (g->start > 0 &&
784
6.34k
      recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1])) {
785
0
    xdf->rchg[--g->start] = 1;
786
0
    xdf->rchg[--g->end] = 0;
787
788
0
    while (xdf->rchg[g->start - 1])
789
0
      g->start--;
790
791
0
    return 0;
792
6.34k
  } else {
793
6.34k
    return -1;
794
6.34k
  }
795
6.34k
}
796
797
/*
798
 * Move back and forward change groups for a consistent and pretty diff output.
799
 * This also helps in finding joinable change groups and reducing the diff
800
 * size.
801
 */
802
12.6k
int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
803
12.6k
  struct xdlgroup g, go;
804
12.6k
  long earliest_end, end_matching_other;
805
12.6k
  long groupsize;
806
807
12.6k
  group_init(xdf, &g);
808
12.6k
  group_init(xdfo, &go);
809
810
12.6k
  while (1) {
811
    /*
812
     * If the group is empty in the to-be-compacted file, skip it:
813
     */
814
12.6k
    if (g.end == g.start)
815
6.34k
      goto next;
816
817
    /*
818
     * Now shift the change up and then down as far as possible in
819
     * each direction. If it bumps into any other changes, merge
820
     * them.
821
     */
822
6.34k
    do {
823
6.34k
      groupsize = g.end - g.start;
824
825
      /*
826
       * Keep track of the last "end" index that causes this
827
       * group to align with a group of changed lines in the
828
       * other file. -1 indicates that we haven't found such
829
       * a match yet:
830
       */
831
6.34k
      end_matching_other = -1;
832
833
      /* Shift the group backward as much as possible: */
834
6.34k
      while (!group_slide_up(xdf, &g))
835
0
        if (group_previous(xdfo, &go))
836
0
          BUG("group sync broken sliding up");
837
838
      /*
839
       * This is this highest that this group can be shifted.
840
       * Record its end index:
841
       */
842
6.34k
      earliest_end = g.end;
843
844
6.34k
      if (go.end > go.start)
845
0
        end_matching_other = g.end;
846
847
      /* Now shift the group forward as far as possible: */
848
6.34k
      while (1) {
849
6.34k
        if (group_slide_down(xdf, &g))
850
6.34k
          break;
851
0
        if (group_next(xdfo, &go))
852
0
          BUG("group sync broken sliding down");
853
854
0
        if (go.end > go.start)
855
0
          end_matching_other = g.end;
856
0
      }
857
6.34k
    } while (groupsize != g.end - g.start);
858
859
    /*
860
     * If the group can be shifted, then we can possibly use this
861
     * freedom to produce a more intuitive diff.
862
     *
863
     * The group is currently shifted as far down as possible, so
864
     * the heuristics below only have to handle upwards shifts.
865
     */
866
867
6.34k
    if (g.end == earliest_end) {
868
      /* no shifting was possible */
869
6.34k
    } else if (end_matching_other != -1) {
870
      /*
871
       * Move the possibly merged group of changes back to
872
       * line up with the last group of changes from the
873
       * other file that it can align with.
874
       */
875
0
      while (go.end == go.start) {
876
0
        if (group_slide_up(xdf, &g))
877
0
          BUG("match disappeared");
878
0
        if (group_previous(xdfo, &go))
879
0
          BUG("group sync broken sliding to match");
880
0
      }
881
0
    } else if (flags & XDF_INDENT_HEURISTIC) {
882
      /*
883
       * Indent heuristic: a group of pure add/delete lines
884
       * implies two splits, one between the end of the
885
       * "before" context and the start of the group, and
886
       * another between the end of the group and the
887
       * beginning of the "after" context. Some splits are
888
       * aesthetically better and some are worse. We compute
889
       * a badness "score" for each split, and add the scores
890
       * for the two splits to define a "score" for each
891
       * position that the group can be shifted to. Then we
892
       * pick the shift with the lowest score.
893
       */
894
0
      long shift, best_shift = -1;
895
0
      struct split_score best_score;
896
897
0
      shift = earliest_end;
898
0
      if (g.end - groupsize - 1 > shift)
899
0
        shift = g.end - groupsize - 1;
900
0
      if (g.end - INDENT_HEURISTIC_MAX_SLIDING > shift)
901
0
        shift = g.end - INDENT_HEURISTIC_MAX_SLIDING;
902
0
      for (; shift <= g.end; shift++) {
903
0
        struct split_measurement m;
904
0
        struct split_score score = {0, 0};
905
906
0
        measure_split(xdf, shift, &m);
907
0
        score_add_split(&m, &score);
908
0
        measure_split(xdf, shift - groupsize, &m);
909
0
        score_add_split(&m, &score);
910
0
        if (best_shift == -1 ||
911
0
            score_cmp(&score, &best_score) <= 0) {
912
0
          best_score.effective_indent = score.effective_indent;
913
0
          best_score.penalty = score.penalty;
914
0
          best_shift = shift;
915
0
        }
916
0
      }
917
918
0
      while (g.end > best_shift) {
919
0
        if (group_slide_up(xdf, &g))
920
0
          BUG("best shift unreached");
921
0
        if (group_previous(xdfo, &go))
922
0
          BUG("group sync broken sliding to blank line");
923
0
      }
924
0
    }
925
926
12.6k
  next:
927
    /* Move past the just-processed group: */
928
12.6k
    if (group_next(xdf, &g))
929
12.6k
      break;
930
0
    if (group_next(xdfo, &go))
931
0
      BUG("group sync broken moving to next group");
932
0
  }
933
934
12.6k
  if (!group_next(xdfo, &go))
935
0
    BUG("group sync broken at end of file");
936
937
12.6k
  return 0;
938
12.6k
}
939
940
941
6.34k
int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
942
6.34k
  xdchange_t *cscr = NULL, *xch;
943
6.34k
  char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
944
6.34k
  long i1, i2, l1, l2;
945
946
  /*
947
   * Trivial. Collects "groups" of changes and creates an edit script.
948
   */
949
12.6k
  for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--)
950
6.34k
    if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
951
6.34k
      for (l1 = i1; rchg1[i1 - 1]; i1--);
952
20.2k
      for (l2 = i2; rchg2[i2 - 1]; i2--);
953
954
6.34k
      if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
955
0
        xdl_free_script(cscr);
956
0
        return -1;
957
0
      }
958
6.34k
      cscr = xch;
959
6.34k
    }
960
961
6.34k
  *xscr = cscr;
962
963
6.34k
  return 0;
964
6.34k
}
965
966
967
6.34k
void xdl_free_script(xdchange_t *xscr) {
968
6.34k
  xdchange_t *xch;
969
970
12.6k
  while ((xch = xscr) != NULL) {
971
6.34k
    xscr = xscr->next;
972
6.34k
    xdl_free(xch);
973
6.34k
  }
974
6.34k
}
975
976
static int xdl_call_hunk_func(xdfenv_t *xe UNUSED, xdchange_t *xscr, xdemitcb_t *ecb,
977
            xdemitconf_t const *xecfg)
978
0
{
979
0
  xdchange_t *xch, *xche;
980
981
0
  for (xch = xscr; xch; xch = xche->next) {
982
0
    xche = xdl_get_hunk(&xch, xecfg);
983
0
    if (!xch)
984
0
      break;
985
0
    if (xecfg->hunk_func(xch->i1, xche->i1 + xche->chg1 - xch->i1,
986
0
             xch->i2, xche->i2 + xche->chg2 - xch->i2,
987
0
             ecb->priv) < 0)
988
0
      return -1;
989
0
  }
990
0
  return 0;
991
0
}
992
993
static void xdl_mark_ignorable_lines(xdchange_t *xscr, xdfenv_t *xe, long flags)
994
0
{
995
0
  xdchange_t *xch;
996
997
0
  for (xch = xscr; xch; xch = xch->next) {
998
0
    int ignore = 1;
999
0
    xrecord_t **rec;
1000
0
    long i;
1001
1002
0
    rec = &xe->xdf1.recs[xch->i1];
1003
0
    for (i = 0; i < xch->chg1 && ignore; i++)
1004
0
      ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags);
1005
1006
0
    rec = &xe->xdf2.recs[xch->i2];
1007
0
    for (i = 0; i < xch->chg2 && ignore; i++)
1008
0
      ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags);
1009
1010
0
    xch->ignore = ignore;
1011
0
  }
1012
0
}
1013
1014
0
static int record_matches_regex(xrecord_t *rec, xpparam_t const *xpp) {
1015
0
  regmatch_t regmatch;
1016
0
  int i;
1017
1018
0
  for (i = 0; i < xpp->ignore_regex_nr; i++)
1019
0
    if (!regexec_buf(xpp->ignore_regex[i], rec->ptr, rec->size, 1,
1020
0
         &regmatch, 0))
1021
0
      return 1;
1022
1023
0
  return 0;
1024
0
}
1025
1026
static void xdl_mark_ignorable_regex(xdchange_t *xscr, const xdfenv_t *xe,
1027
             xpparam_t const *xpp)
1028
0
{
1029
0
  xdchange_t *xch;
1030
1031
0
  for (xch = xscr; xch; xch = xch->next) {
1032
0
    xrecord_t **rec;
1033
0
    int ignore = 1;
1034
0
    long i;
1035
1036
    /*
1037
     * Do not override --ignore-blank-lines.
1038
     */
1039
0
    if (xch->ignore)
1040
0
      continue;
1041
1042
0
    rec = &xe->xdf1.recs[xch->i1];
1043
0
    for (i = 0; i < xch->chg1 && ignore; i++)
1044
0
      ignore = record_matches_regex(rec[i], xpp);
1045
1046
0
    rec = &xe->xdf2.recs[xch->i2];
1047
0
    for (i = 0; i < xch->chg2 && ignore; i++)
1048
0
      ignore = record_matches_regex(rec[i], xpp);
1049
1050
0
    xch->ignore = ignore;
1051
0
  }
1052
0
}
1053
1054
int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
1055
6.34k
       xdemitconf_t const *xecfg, xdemitcb_t *ecb) {
1056
6.34k
  xdchange_t *xscr;
1057
6.34k
  xdfenv_t xe;
1058
6.34k
  emit_func_t ef = xecfg->hunk_func ? xdl_call_hunk_func : xdl_emit_diff;
1059
1060
6.34k
  if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) {
1061
1062
0
    return -1;
1063
0
  }
1064
6.34k
  if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
1065
6.34k
      xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
1066
6.34k
      xdl_build_script(&xe, &xscr) < 0) {
1067
1068
0
    xdl_free_env(&xe);
1069
0
    return -1;
1070
0
  }
1071
6.34k
  if (xscr) {
1072
6.34k
    if (xpp->flags & XDF_IGNORE_BLANK_LINES)
1073
0
      xdl_mark_ignorable_lines(xscr, &xe, xpp->flags);
1074
1075
6.34k
    if (xpp->ignore_regex)
1076
0
      xdl_mark_ignorable_regex(xscr, &xe, xpp);
1077
1078
6.34k
    if (ef(&xe, xscr, ecb, xecfg) < 0) {
1079
1080
0
      xdl_free_script(xscr);
1081
0
      xdl_free_env(&xe);
1082
0
      return -1;
1083
0
    }
1084
6.34k
    xdl_free_script(xscr);
1085
6.34k
  }
1086
6.34k
  xdl_free_env(&xe);
1087
1088
6.34k
  return 0;
1089
6.34k
}