Coverage Report

Created: 2025-12-31 07:01

/src/git/xdiff/xpatience.c
Every instrumented line in this file has an execution count of 0; the file is entirely uncovered.

Source:
/*
 *  LibXDiff by Davide Libenzi ( File Differential Library )
 *  Copyright (C) 2003-2016 Davide Libenzi, Johannes E. Schindelin
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see
 *  <http://www.gnu.org/licenses/>.
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include "xinclude.h"

/*
 * The basic idea of patience diff is to find lines that are unique in
 * both files.  These are intuitively the ones that we want to see as
 * common lines.
 *
 * The maximal ordered sequence of such line pairs (where ordered means
 * that the order in the sequence agrees with the order of the lines in
 * both files) naturally defines an initial set of common lines.
 *
 * Now, the algorithm tries to extend the set of common lines by growing
 * the line ranges where the files have identical lines.
 *
 * Between those common lines, the patience diff algorithm is applied
 * recursively, until no unique line pairs can be found; these line ranges
 * are handled by the well-known Myers algorithm.
 */
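
As an illustration of the idea described above (not part of xpatience.c; the sample data and helper are made up), the standalone snippet below prints the lines that occur exactly once in each of two small "files" -- exactly the pairs patience diff uses as candidate common lines.

#include <stdio.h>
#include <string.h>

/* Count how often "s" occurs among the n lines. */
static int occurrences(const char **lines, int n, const char *s)
{
  int i, count = 0;
  for (i = 0; i < n; i++)
    if (!strcmp(lines[i], s))
      count++;
  return count;
}

int main(void)
{
  /* Hypothetical files, one line per array element. */
  const char *file1[] = { "int i;", "i = 1;", "}", "print(i);", "}" };
  const char *file2[] = { "int i;", "i = 2;", "}", "print(i);", "}" };
  int n1 = 5, n2 = 5, i;

  for (i = 0; i < n1; i++)
    if (occurrences(file1, n1, file1[i]) == 1 &&
        occurrences(file2, n2, file1[i]) == 1)
      printf("unique in both: %s\n", file1[i]);
  return 0;
}

Compiled on its own, this prints "int i;" and "print(i);": the changed line "i = 1;" has no counterpart in file2, and the repeated "}" lines are rejected because they are not unique.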

#define NON_UNIQUE ULONG_MAX

/*
 * This is a hash mapping from line hash to line numbers in the first and
 * second file.
 */
struct hashmap {
  int nr, alloc;
  struct entry {
    size_t minimal_perfect_hash;
    /*
     * 0 = unused entry, 1 = first line, 2 = second, etc.
     * line2 is NON_UNIQUE if the line is not unique
     * in either the first or the second file.
     */
    unsigned long line1, line2;
    /*
     * "next" & "previous" are used for the longest common
     * sequence;
     * initially, "next" reflects only the order in file1.
     */
    struct entry *next, *previous;

    /*
     * If 1, this entry can serve as an anchor. See
     * Documentation/diff-options.adoc for more information.
     */
    unsigned anchor : 1;
  } *entries, *first, *last;
  /* were common records found? */
  unsigned long has_matches;
  xdfenv_t *env;
  xpparam_t const *xpp;
};

static int is_anchor(xpparam_t const *xpp, const char *line)
{
  size_t i;
  for (i = 0; i < xpp->anchors_nr; i++) {
    if (!strncmp(line, xpp->anchors[i], strlen(xpp->anchors[i])))
      return 1;
  }
  return 0;
}

/* The argument "pass" is 1 for the first file, 2 for the second. */
static void insert_record(xpparam_t const *xpp, int line, struct hashmap *map,
        int pass)
{
  xrecord_t *records = pass == 1 ?
    map->env->xdf1.recs : map->env->xdf2.recs;
  xrecord_t *record = &records[line - 1];
  /*
   * After xdl_prepare_env() (or more precisely, due to
   * xdl_classify_record()), the "minimal_perfect_hash" member of the
   * records (AKA lines) is no longer the raw hash, but a linearized
   * version of it: the first distinct line gets the value 0, the next
   * new line gets 1, and so on.
   *
   * So we multiply it by 2 in the hope that the hashing was
   * "unique enough".
   */
  int index = (int)((record->minimal_perfect_hash << 1) % map->alloc);

  while (map->entries[index].line1) {
    if (map->entries[index].minimal_perfect_hash != record->minimal_perfect_hash) {
      if (++index >= map->alloc)
        index = 0;
      continue;
    }
    if (pass == 2)
      map->has_matches = 1;
    if (pass == 1 || map->entries[index].line2)
      map->entries[index].line2 = NON_UNIQUE;
    else
      map->entries[index].line2 = line;
    return;
  }
  if (pass == 2)
    return;
  map->entries[index].line1 = line;
  map->entries[index].minimal_perfect_hash = record->minimal_perfect_hash;
  map->entries[index].anchor = is_anchor(xpp, (const char *)map->env->xdf1.recs[line - 1].ptr);
  if (!map->first)
    map->first = map->entries + index;
  if (map->last) {
    map->last->next = map->entries + index;
    map->entries[index].previous = map->last;
  }
  map->last = map->entries + index;
  map->nr++;
}
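
The indexing scheme above can be seen in isolation in the following standalone sketch (illustrative only; the table size and key values are made up). Because xdl_classify_record() has already mapped identical lines to small consecutive integers, doubling the key modulo a table of twice the first file's line count spreads entries over the even slots, and linear probing resolves whatever collisions remain.

#include <stdio.h>

#define TABLE_SIZE 8              /* twice the line count of the first "file" */
#define EMPTY ((size_t)-1)

static size_t table_key[TABLE_SIZE];

/* Insert "key" with the same doubling-plus-linear-probing scheme
 * that insert_record() uses, returning the slot it ends up in. */
static int insert(size_t key)
{
  size_t index = (key << 1) % TABLE_SIZE;

  while (table_key[index] != EMPTY) {
    if (table_key[index] == key)
      return (int)index;          /* already present */
    if (++index >= TABLE_SIZE)
      index = 0;                  /* wrap around */
  }
  table_key[index] = key;
  return (int)index;
}

int main(void)
{
  size_t keys[] = { 0, 1, 2, 5 };  /* made-up minimal_perfect_hash values */
  size_t i;

  for (i = 0; i < TABLE_SIZE; i++)
    table_key[i] = EMPTY;
  for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
    printf("key %zu -> slot %d\n", keys[i], insert(keys[i]));
  return 0;
}

In this run key 5 hashes to the slot already taken by key 1 (both map to slot 2) and is probed forward into slot 3; in insert_record() the same collision path also does the bookkeeping for line1, line2 and NON_UNIQUE.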

/*
 * This function has to be called for each recursion into the inter-hunk
 * parts, as previously non-unique lines can become unique when being
 * restricted to a smaller part of the files.
 *
 * It is assumed that env has been prepared using xdl_prepare().
 */
static int fill_hashmap(xpparam_t const *xpp, xdfenv_t *env,
    struct hashmap *result,
    int line1, int count1, int line2, int count2)
{
  result->xpp = xpp;
  result->env = env;

  /* We know exactly how large we want the hash map */
  result->alloc = count1 * 2;
  if (!XDL_CALLOC_ARRAY(result->entries, result->alloc))
    return -1;

  /* First, fill with entries from the first file */
  while (count1--)
    insert_record(xpp, line1++, result, 1);

  /* Then search for matches in the second file */
  while (count2--)
    insert_record(xpp, line2++, result, 2);

  return 0;
}

/*
 * Find the longest sequence with a smaller last element (meaning a smaller
 * line2, as we construct the sequence with entries ordered by line1).
 */
static int binary_search(struct entry **sequence, int longest,
    struct entry *entry)
{
  int left = -1, right = longest;

  while (left + 1 < right) {
    int middle = left + (right - left) / 2;
    /* by construction, no two entries can be equal */
    if (sequence[middle]->line2 > entry->line2)
      right = middle;
    else
      left = middle;
  }
  /* return the index in "sequence", _not_ the sequence length */
  return left;
}

/*
 * The idea is to start with the list of common unique lines sorted by
 * the order in file1.  For each of these pairs, the longest (partial)
 * sequence whose last element's line2 is smaller is determined.
 *
 * For efficiency, the sequences are kept in a list containing exactly one
 * item per sequence length: the sequence with the smallest last
 * element (in terms of line2).
 */
static int find_longest_common_sequence(struct hashmap *map, struct entry **res)
{
  struct entry **sequence;
  int longest = 0, i;
  struct entry *entry;

  /*
   * If not -1, this entry in sequence must never be overridden.
   * Therefore, overriding entries before this has no effect, so
   * do not do that either.
   */
  int anchor_i = -1;

  if (!XDL_ALLOC_ARRAY(sequence, map->nr))
    return -1;

  for (entry = map->first; entry; entry = entry->next) {
    if (!entry->line2 || entry->line2 == NON_UNIQUE)
      continue;
    if (longest == 0 || entry->line2 > sequence[longest - 1]->line2)
      i = longest - 1;
    else
      i = binary_search(sequence, longest, entry);
    entry->previous = i < 0 ? NULL : sequence[i];
    ++i;
    if (i <= anchor_i)
      continue;
    sequence[i] = entry;
    if (entry->anchor) {
      anchor_i = i;
      longest = anchor_i + 1;
    } else if (i == longest) {
      longest++;
    }
  }

  /* No common unique lines were found */
  if (!longest) {
    *res = NULL;
    xdl_free(sequence);
    return 0;
  }

  /* Iterate starting at the last element, adjusting the "next" members */
  entry = sequence[longest - 1];
  entry->next = NULL;
  while (entry->previous) {
    entry->previous->next = entry;
    entry = entry->previous;
  }
  *res = entry;
  xdl_free(sequence);
  return 0;
}
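
To see the technique described in the comment before find_longest_common_sequence() in isolation (a sketch over made-up integers, not part of the library): keeping, for each length, only the candidate sequence with the smallest last element lets each new entry be placed with a binary search, which yields the longest increasing subsequence in O(n log n).

#include <stdio.h>

/* Length of the longest strictly increasing subsequence of "values".
 * tails[k] holds the smallest possible last element of an increasing
 * subsequence of length k + 1 -- the same invariant that "sequence[]"
 * maintains for line2 values above. */
static int lis_length(const int *values, int n)
{
  int tails[64];  /* big enough for this illustration (n <= 64) */
  int longest = 0, i;

  for (i = 0; i < n; i++) {
    int left = -1, right = longest;

    /* find the longest sequence with a smaller last element */
    while (left + 1 < right) {
      int middle = left + (right - left) / 2;
      if (tails[middle] >= values[i])
        right = middle;
      else
        left = middle;
    }
    tails[left + 1] = values[i];
    if (left + 1 == longest)
      longest++;
  }
  return longest;
}

int main(void)
{
  int line2[] = { 4, 1, 5, 2, 3, 6 };  /* made-up line2 values */

  /* prints 4, for the subsequence 1, 2, 3, 6 */
  printf("longest increasing subsequence: %d\n", lis_length(line2, 6));
  return 0;
}

The real function additionally records a "previous" pointer for each placed entry so the winning sequence itself, not just its length, can be walked back and re-linked through "next", and the anchor_i handling keeps entries whose anchor bit was set in insert_record() from being displaced from the sequence.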

static int match(struct hashmap *map, int line1, int line2)
{
  xrecord_t *record1 = &map->env->xdf1.recs[line1 - 1];
  xrecord_t *record2 = &map->env->xdf2.recs[line2 - 1];
  return record1->minimal_perfect_hash == record2->minimal_perfect_hash;
}

static int patience_diff(xpparam_t const *xpp, xdfenv_t *env,
    int line1, int count1, int line2, int count2);

static int walk_common_sequence(struct hashmap *map, struct entry *first,
    int line1, int count1, int line2, int count2)
{
  int end1 = line1 + count1, end2 = line2 + count2;
  int next1, next2;

  for (;;) {
    /* Try to grow the line ranges of common lines */
    if (first) {
      next1 = first->line1;
      next2 = first->line2;
      while (next1 > line1 && next2 > line2 &&
          match(map, next1 - 1, next2 - 1)) {
        next1--;
        next2--;
      }
    } else {
      next1 = end1;
      next2 = end2;
    }
    while (line1 < next1 && line2 < next2 &&
        match(map, line1, line2)) {
      line1++;
      line2++;
    }

    /* Recurse */
    if (next1 > line1 || next2 > line2) {
      if (patience_diff(map->xpp, map->env,
          line1, next1 - line1,
          line2, next2 - line2))
        return -1;
    }

    if (!first)
      return 0;

    while (first->next &&
        first->next->line1 == first->line1 + 1 &&
        first->next->line2 == first->line2 + 1)
      first = first->next;

    line1 = first->line1 + 1;
    line2 = first->line2 + 1;

    first = first->next;
  }
}

static int fall_back_to_classic_diff(struct hashmap *map,
    int line1, int count1, int line2, int count2)
{
  xpparam_t xpp;

  memset(&xpp, 0, sizeof(xpp));
  xpp.flags = map->xpp->flags & ~XDF_DIFF_ALGORITHM_MASK;

  return xdl_fall_back_diff(map->env, &xpp,
          line1, count1, line2, count2);
}

/*
 * Recursively find the longest common sequence of unique lines,
 * and if none was found, ask xdl_do_diff() to do the job.
 *
 * This function assumes that env was prepared with xdl_prepare_env().
 */
static int patience_diff(xpparam_t const *xpp, xdfenv_t *env,
    int line1, int count1, int line2, int count2)
{
  struct hashmap map;
  struct entry *first;
  int result = 0;

  /* trivial case: one side is empty */
  if (!count1) {
    while(count2--)
      env->xdf2.changed[line2++ - 1] = true;
    return 0;
  } else if (!count2) {
    while(count1--)
      env->xdf1.changed[line1++ - 1] = true;
    return 0;
  }

  memset(&map, 0, sizeof(map));
  if (fill_hashmap(xpp, env, &map,
      line1, count1, line2, count2))
    return -1;

  /* are there any matching lines at all? */
  if (!map.has_matches) {
    while(count1--)
      env->xdf1.changed[line1++ - 1] = true;
    while(count2--)
      env->xdf2.changed[line2++ - 1] = true;
    xdl_free(map.entries);
    return 0;
  }

  result = find_longest_common_sequence(&map, &first);
  if (result)
    goto out;
  if (first)
    result = walk_common_sequence(&map, first,
      line1, count1, line2, count2);
  else
    result = fall_back_to_classic_diff(&map,
      line1, count1, line2, count2);
 out:
  xdl_free(map.entries);
  return result;
}

int xdl_do_patience_diff(xpparam_t const *xpp, xdfenv_t *env)
{
  return patience_diff(xpp, env, 1, (int)env->xdf1.nrec, 1, (int)env->xdf2.nrec);
}