/* Implementation of the Patience Diff algorithm invented by Bram Cohen:
 * Divide a diff problem into smaller chunks by an LCS of common-unique lines. */
/*
 * Copyright (c) 2020 Neels Hofmeyr <neels@hofmeyr.de>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <assert.h>
#include <inttypes.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#include <arraylist.h>
#include <diff_main.h>

#include "diff_internal.h"
#include "diff_debug.h"

/* Per-atom state for the Patience Diff algorithm */
struct atom_patience {
	bool unique_in_both;
	struct diff_atom *pos_in_other;
	struct diff_atom *prev_stack;
	struct diff_range identical_lines;
};

/* A diff_atom has a backpointer to the root diff_data. That points to the
 * current diff_data, a possibly smaller section of the root. That current
 * diff_data->algo_data is a pointer to an array of struct atom_patience. The
 * atom's index in current diff_data gives the index in the atom_patience array.
 */
#define PATIENCE(ATOM) \
	(((struct atom_patience*)((ATOM)->root->current->algo_data))\
	 [diff_atom_idx((ATOM)->root->current, ATOM)])
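
/* Illustrative sketch (not part of the library; 'atom' here is any
 * hypothetical pointer into the section currently being diffed): with
 * <side>->root->current and <side>->algo_data set up as diff_algo_patience()
 * does below, the per-atom state is read and written like
 *
 *	if (PATIENCE(atom).unique_in_both) {
 *		struct diff_atom *other = PATIENCE(atom).pos_in_other;
 *		...
 *	}
 *
 * i.e. the macro indexes the side's atom_patience array by the atom's
 * position within the currently diffed section.
 */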

int diff_atoms_qsort_compar(const void *_a, const void *_b)
{
	const struct diff_atom *a = *(struct diff_atom**)_a;
	const struct diff_atom *b = *(struct diff_atom**)_b;
	int cmp;
	int rc = 0;

	/* If there's been an error (e.g. I/O error) in a previous compar, we
	 * have no way to abort the qsort but just report the rc and stop
	 * comparing. Make sure to catch errors on either side. If atoms are
	 * from more than one diff_data, make sure the error, if any, spreads
	 * to all of them, so we can cut short all future comparisons. */
	if (a->root->err)
		rc = a->root->err;
	if (b->root->err)
		rc = b->root->err;
	if (rc) {
		a->root->err = rc;
		b->root->err = rc;
		/* just return 'equal' to not swap more positions */
		return 0;
	}

	/* Sort by the simplistic hash */
	if (a->hash < b->hash)
		return -1;
	if (a->hash > b->hash)
		return 1;

	/* If hashes are the same, the lines may still differ. Do a full cmp. */
	rc = diff_atom_cmp(&cmp, a, b);

	if (rc) {
		/* Mark the I/O error so that the caller can find out about it.
		 * For the case atoms are from more than one diff_data, mark in
		 * both. */
		a->root->err = rc;
		if (a->root != b->root)
			b->root->err = rc;
		return 0;
	}

	return cmp;
}

/* Sort an array of struct diff_atom* in-place. */
static int diff_atoms_qsort(struct diff_atom *atoms[],
			    size_t atoms_count)
{
	qsort(atoms, atoms_count, sizeof(struct diff_atom*),
	      diff_atoms_qsort_compar);
	return atoms[0]->root->err;
}

static int
diff_atoms_mark_unique_in_both_by_qsort(struct diff_data *left,
					struct diff_data *right,
					unsigned int *unique_in_both_count_p)
{
	struct diff_atom *a;
	struct diff_atom *b;
	struct diff_atom **all_atoms =
		malloc((left->atoms.len + right->atoms.len)
		       * sizeof(struct diff_atom*));
	unsigned int len = 0;
	unsigned int i;
	unsigned int unique_in_both_count = 0;
	int rc;
	left->err = 0;
	right->err = 0;
	left->root->err = 0;
	right->root->err = 0;
	diff_data_foreach_atom(a, left) {
		all_atoms[len++] = a;
	}
	diff_data_foreach_atom(b, right) {
		all_atoms[len++] = b;
	}

	rc = diff_atoms_qsort(all_atoms, len);
	if (rc)
		goto free_and_exit;

	/* Now we have a sorted array of atom pointers. All similar lines are
	 * adjacent. Walk through the array and mark those that are unique on
	 * each side, but exist once in both sources. */
	for (i = 0; i < len; i++) {
		bool same;
		unsigned int j;
		unsigned int count_first_side = 1;
		unsigned int count_other_side = 0;
		a = all_atoms[i];

		for (j = i+1; j < len; j++) {
			b = all_atoms[j];
			rc = diff_atom_same(&same, a, b);
			if (rc)
				goto free_and_exit;
			if (!same)
				break;
			/* A following atom is the same. See on which side the
			 * repetition counts. */
			if (a->root == b->root)
				count_first_side ++;
			else
				count_other_side ++;
		}

		/* Counted a section of similar atoms, put the results back to
		 * the atoms. */
		if ((count_first_side == 1)
		    && (count_other_side == 1)) {
			b = all_atoms[i+1];
			PATIENCE(a).unique_in_both = true;
			PATIENCE(a).pos_in_other = b;
			PATIENCE(b).unique_in_both = true;
			PATIENCE(b).pos_in_other = a;
			unique_in_both_count++;
		}
	}

	*unique_in_both_count_p = unique_in_both_count;
	rc = 0;
free_and_exit:
	free(all_atoms);
	return rc;
}
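
/* A sketch of what the marking pass above computes, on made-up input (the
 * line letters are purely illustrative):
 *
 *	left:  a b x b      right:  c a b
 *
 * After sorting all atom pointers by hash/content, equal lines end up
 * adjacent, e.g. [a a b b b c x]. Walking that array run by run: 'a' occurs
 * exactly once in 'left' and exactly once in 'right', so both 'a' atoms get
 * unique_in_both = true and pos_in_other pointing at each other; 'b' repeats
 * in 'left', and 'c' and 'x' exist on one side only, so they stay unmarked.
 * unique_in_both_count ends up as 1.
 */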

static int
diff_atoms_swallow_identical_neighbors(struct diff_data *left,
				       struct diff_data *right,
				       unsigned int *unique_in_both_count)
{
	debug("trivially combine identical lines"
	      " around unique_in_both lines\n");

	unsigned int l_idx;
	unsigned int next_l_idx;
	unsigned int l_min = 0;
	unsigned int r_min = 0;
	for (l_idx = 0; l_idx < left->atoms.len; l_idx = next_l_idx) {
		next_l_idx = l_idx + 1;
		struct diff_atom *l = &left->atoms.head[l_idx];

		if (!PATIENCE(l).unique_in_both)
			continue;

		debug("check identical lines around ");
		debug_dump_atom(left, right, l);

		unsigned int r_idx = diff_atom_idx(right, PATIENCE(l).pos_in_other);

		struct diff_range identical_l;
		struct diff_range identical_r;

		/* Swallow upwards.
		 * Each common-unique line swallows identical lines upwards and
		 * downwards.
		 * All common-unique lines that were part of the identical lines
		 * following below were already swallowed in the previous
		 * iteration, so we will never hit another common-unique line
		 * above. */
		for (identical_l.start = l_idx, identical_r.start = r_idx;
		     identical_l.start > l_min && identical_r.start > r_min;
		     identical_l.start--, identical_r.start--) {
			bool same;
			int r = diff_atom_same(&same,
					       &left->atoms.head[identical_l.start - 1],
					       &right->atoms.head[identical_r.start - 1]);
			if (r)
				return r;
			if (!same)
				break;
		}

		/* Swallow downwards */
		for (identical_l.end = l_idx + 1, identical_r.end = r_idx + 1;
		     identical_l.end < left->atoms.len
		     && identical_r.end < right->atoms.len;
		     identical_l.end++, identical_r.end++,
		     next_l_idx++) {
			struct diff_atom *l_end;
			struct diff_atom *r_end;
			bool same;
			int r = diff_atom_same(&same,
					       &left->atoms.head[identical_l.end],
					       &right->atoms.head[identical_r.end]);
			if (r)
				return r;
			if (!same)
				break;
			l_end = &left->atoms.head[identical_l.end];
			r_end = &right->atoms.head[identical_r.end];
			if (!PATIENCE(l_end).unique_in_both)
				continue;
			/* Part of a chunk of identical lines, remove from
			 * listing of unique_in_both lines */
			PATIENCE(l_end).unique_in_both = false;
			PATIENCE(r_end).unique_in_both = false;
			(*unique_in_both_count)--;
		}

		PATIENCE(l).identical_lines = identical_l;
		PATIENCE(PATIENCE(l).pos_in_other).identical_lines =
			identical_r;

		l_min = identical_l.end;
		r_min = identical_r.end;

		if (!diff_range_empty(&PATIENCE(l).identical_lines)) {
			debug("common-unique line at l=%u r=%u swallowed"
			      " identical lines l=%u-%u r=%u-%u\n",
			      l_idx, r_idx,
			      identical_l.start, identical_l.end,
			      identical_r.start, identical_r.end);
		}
		debug("next_l_idx = %u\n", next_l_idx);
	}
	return 0;
}
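
/* A small illustration of the swallowing above, with made-up line letters
 * (not actual test data). Suppose 'q' is common-unique and its neighbors are
 * identical on both sides:
 *
 *	left:  ... x y q z ...      right:  ... y q z w ...
 *
 * Starting from q, the upward loop extends the range over 'y' (identical on
 * both sides) and stops when the lines above it differ; the downward loop
 * extends it over 'z' until 'w' no longer matches. The resulting
 * identical_lines range then spans y..z around q on each side, and any
 * common-unique line that fell inside that range is dropped from the
 * unique_in_both listing.
 */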

/* binary search to find the stack to put this atom "card" on. */
static int
find_target_stack(struct diff_atom *atom,
		  struct diff_atom **patience_stacks,
		  unsigned int patience_stacks_count)
{
	unsigned int lo = 0;
	unsigned int hi = patience_stacks_count;
	while (lo < hi) {
		unsigned int mid = (lo + hi) >> 1;

		if (PATIENCE(patience_stacks[mid]).pos_in_other
		    < PATIENCE(atom).pos_in_other)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}
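
/* How the "card stacks" behave, on a made-up example (the numbers stand for
 * the common-unique lines' positions in 'right', processed in 'left' order):
 *
 *	input order: 4 2 7 1 8
 *
 *	place 4 -> stacks: [4]          (new stack on the right)
 *	place 2 -> stacks: [2]          (replaces 4, the leftmost top >= 2)
 *	place 7 -> stacks: [2] [7]      (new stack, prev_stack -> 2)
 *	place 1 -> stacks: [1] [7]
 *	place 8 -> stacks: [1] [7] [8]  (new stack, prev_stack -> 7)
 *
 * find_target_stack() above is the binary search for the leftmost stack whose
 * top is not smaller than the new "card". The number of stacks equals the
 * length of the longest increasing run (here 2 7 8, length 3), and following
 * prev_stack from the top of the last stack recovers one such run in reverse,
 * which diff_algo_patience() below stores as the LCS array.
 */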

/* Among the lines that appear exactly once on each side, find the longest
 * streak that appears in both files in the same order (with other stuff
 * allowed to interleave). Use patience sort for that, as in the Patience Diff
 * algorithm.
 * See https://bramcohen.livejournal.com/73318.html and, for a much more
 * detailed explanation,
 * https://blog.jcoglan.com/2017/09/19/the-patience-diff-algorithm/ */
int
diff_algo_patience(const struct diff_algo_config *algo_config,
		   struct diff_state *state)
{
	int rc;
	struct diff_data *left = &state->left;
	struct diff_data *right = &state->right;
	struct atom_patience *atom_patience_left =
		calloc(left->atoms.len, sizeof(struct atom_patience));
	struct atom_patience *atom_patience_right =
		calloc(right->atoms.len, sizeof(struct atom_patience));
	unsigned int unique_in_both_count;
	struct diff_atom **lcs = NULL;

	debug("\n** %s\n", __func__);

	left->root->current = left;
	right->root->current = right;
	left->algo_data = atom_patience_left;
	right->algo_data = atom_patience_right;

	/* Find those lines that appear exactly once in 'left' and exactly once
	 * in 'right'. */
	rc = diff_atoms_mark_unique_in_both_by_qsort(left, right,
						     &unique_in_both_count);
	if (rc)
		goto free_and_exit;

	debug("unique_in_both_count %u\n", unique_in_both_count);
	debug("left:\n");
	debug_dump(left);
	debug("right:\n");
	debug_dump(right);

	if (!unique_in_both_count) {
		/* Cannot apply Patience, tell the caller to use fallback_algo
		 * instead. */
		rc = DIFF_RC_USE_DIFF_ALGO_FALLBACK;
		goto free_and_exit;
	}

	rc = diff_atoms_swallow_identical_neighbors(left, right,
						    &unique_in_both_count);
	if (rc)
		goto free_and_exit;
	debug("After swallowing identical neighbors: unique_in_both = %u\n",
	      unique_in_both_count);

	rc = ENOMEM;

	/* The subscope below produces the Longest Common Sequence as an array
	 * of atoms: */
	unsigned int lcs_count = 0;
	struct diff_atom *lcs_tail = NULL;

	{
		/* This subscope marks the lifetime of the atom_pointers
		 * allocation */

		/* One chunk of storage for atom pointers */
		struct diff_atom **atom_pointers;
		atom_pointers = recallocarray(NULL, 0, unique_in_both_count * 2,
					      sizeof(struct diff_atom*));

		/* Half for the list of atoms that still need to be put on
		 * stacks */
		struct diff_atom **uniques = atom_pointers;

		/* Half for the patience sort state's "card stacks" -- we
		 * remember only each stack's topmost "card" */
		struct diff_atom **patience_stacks;
		patience_stacks = atom_pointers + unique_in_both_count;
		unsigned int patience_stacks_count = 0;

		/* Take all common, unique items from 'left' ... */

		struct diff_atom *atom;
		struct diff_atom **uniques_end = uniques;
		diff_data_foreach_atom(atom, left) {
			if (!PATIENCE(atom).unique_in_both)
				continue;
			*uniques_end = atom;
			uniques_end++;
		}

		/* ...and sort them to the order found in 'right'.
		 * The idea is to find the leftmost stack that has a higher line
		 * number and add it to the stack's top.
		 * If there is no such stack, open a new one on the right. The
		 * line number is derived from the atom*, which are array items
		 * and hence reflect the relative position in the source file.
		 * So we take the common-uniques from 'left' and sort them
		 * according to PATIENCE(atom).pos_in_other. */
		unsigned int i;
		for (i = 0; i < unique_in_both_count; i++) {
			atom = uniques[i];
			unsigned int target_stack;
			target_stack = find_target_stack(atom, patience_stacks,
							 patience_stacks_count);
			assert(target_stack <= patience_stacks_count);
			patience_stacks[target_stack] = atom;
			if (target_stack == patience_stacks_count)
				patience_stacks_count++;

			/* Record a back reference to the next stack on the
			 * left, which will form the final longest sequence
			 * later. */
			PATIENCE(atom).prev_stack = target_stack ?
				patience_stacks[target_stack - 1] : NULL;
		}

		/* backtrace through prev_stack references to form the final
		 * longest common sequence */
		lcs_tail = patience_stacks[patience_stacks_count - 1];
		lcs_count = patience_stacks_count;

		/* uniques and patience_stacks are no longer needed.
		 * Backpointers are in PATIENCE(atom).prev_stack */
		free(atom_pointers);
	}

	lcs = recallocarray(NULL, 0, lcs_count, sizeof(struct diff_atom*));
	struct diff_atom **lcs_backtrace_pos = &lcs[lcs_count - 1];
	struct diff_atom *atom;
	for (atom = lcs_tail; atom; atom = PATIENCE(atom).prev_stack, lcs_backtrace_pos--) {
		assert(lcs_backtrace_pos >= lcs);
		*lcs_backtrace_pos = atom;
	}

	unsigned int i;
	if (DEBUG) {
		debug("\npatience LCS:\n");
		for (i = 0; i < lcs_count; i++) {
			debug_dump_atom(left, right, lcs[i]);
		}
	}

	/* TODO: For each common-unique line found (now listed in lcs), swallow
	 * lines upwards and downwards that are identical on each side. Requires
	 * a way to represent atoms being glued to adjacent atoms. */

	debug("\ntraverse LCS, possibly recursing:\n");

	/* Now we have pinned positions in both files at which it makes sense to
	 * divide the diff problem into smaller chunks. Go into the next round:
	 * look at each section in turn, trying to again find common-unique
	 * lines in those smaller sections. As soon as no more are found, the
	 * remaining smaller sections are solved by Myers. */
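	/* For illustration only (made-up indexes): if the LCS pins
	 * left[3]==right[5] and left[9]==right[7], the loop below emits, in
	 * order, an unsolved chunk for left[0..2] vs. right[0..4], an equal
	 * chunk for the pinned atoms (possibly widened by their
	 * identical_lines ranges), an unsolved chunk for the lines between
	 * the two pins, another equal chunk, and finally, in the extra
	 * "i == lcs_count" iteration, whatever remains after the last pin.
	 * Sections with lines on only one side become pure minus or plus
	 * chunks right away. */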
	unsigned int left_pos = 0;
	unsigned int right_pos = 0;
	for (i = 0; i <= lcs_count; i++) {
		struct diff_atom *atom;
		struct diff_atom *atom_r;
		unsigned int left_idx;
		unsigned int right_idx;

		if (i < lcs_count) {
			atom = lcs[i];
			atom_r = PATIENCE(atom).pos_in_other;
			debug("lcs[%u] = left[%u] = right[%u]\n", i,
			      diff_atom_idx(left, atom), diff_atom_idx(right, atom_r));
			left_idx = PATIENCE(atom).identical_lines.start;
			right_idx = PATIENCE(atom_r).identical_lines.start;
			debug(" identical lines l %u-%u r %u-%u\n",
			      PATIENCE(atom).identical_lines.start, PATIENCE(atom).identical_lines.end,
			      PATIENCE(atom_r).identical_lines.start, PATIENCE(atom_r).identical_lines.end);
		} else {
			atom = NULL;
			atom_r = NULL;
			left_idx = left->atoms.len;
			right_idx = right->atoms.len;
		}

		/* 'atom' now marks an atom that matches on both sides according
		 * to patience-diff (a common-unique identical atom in both
		 * files).
		 * Handle the section before and the atom itself; the section
		 * after will be handled by the next loop iteration -- note that
		 * i loops to last element + 1 ("i <= lcs_count"), so that there
		 * will be another final iteration to pick up the last remaining
		 * items after the last LCS atom.
		 * The sections before might also be empty on left and/or right.
		 * left_pos and right_pos mark the indexes of the first atoms
		 * that have not yet been handled in the previous loop
		 * iteration. left_idx and right_idx mark the indexes of the
		 * matching atom on left and right, respectively. */

		debug("iteration %u left_pos %u left_idx %u"
		      " right_pos %u right_idx %u\n",
		      i, left_pos, left_idx, right_pos, right_idx);

		/* Section before the matching atom */
		struct diff_atom *left_atom = &left->atoms.head[left_pos];
		unsigned int left_section_len = left_idx - left_pos;

		struct diff_atom *right_atom = &(right->atoms.head[right_pos]);
		unsigned int right_section_len = right_idx - right_pos;

		if (left_section_len && right_section_len) {
			/* Record an unsolved chunk; the caller will apply
			 * inner_algo() on this chunk. */
			if (!diff_state_add_chunk(state, false,
						  left_atom, left_section_len,
						  right_atom,
						  right_section_len))
				goto free_and_exit;
		} else if (left_section_len && !right_section_len) {
			/* Only left atoms and none on the right; they form a
			 * "minus" chunk, then. */
			if (!diff_state_add_chunk(state, true,
						  left_atom, left_section_len,
						  right_atom, 0))
				goto free_and_exit;
		} else if (!left_section_len && right_section_len) {
			/* No left atoms, only atoms on the right; they form a
			 * "plus" chunk, then. */
			if (!diff_state_add_chunk(state, true,
						  left_atom, 0,
						  right_atom, right_section_len))
				goto free_and_exit;
		}
		/* else: left_section_len == 0 and right_section_len == 0, i.e.
		 * nothing here. */

		/* The atom found to match on both sides forms a chunk of equals
		 * on each side. In the very last iteration of this loop, there
		 * is no matching atom; we were just cleaning out the remaining
		 * lines. */
		if (atom) {
			void *ok;
			ok = diff_state_add_chunk(state, true,
				left->atoms.head
				+ PATIENCE(atom).identical_lines.start,
				diff_range_len(&PATIENCE(atom).identical_lines),
				right->atoms.head
				+ PATIENCE(atom_r).identical_lines.start,
				diff_range_len(&PATIENCE(atom_r).identical_lines));
			if (!ok)
				goto free_and_exit;
			left_pos = PATIENCE(atom).identical_lines.end;
			right_pos = PATIENCE(atom_r).identical_lines.end;
		} else {
			left_pos = left_idx + 1;
			right_pos = right_idx + 1;
		}
		debug("end of iteration %u left_pos %u left_idx %u"
		      " right_pos %u right_idx %u\n",
		      i, left_pos, left_idx, right_pos, right_idx);
	}
	debug("** END %s\n", __func__);

	rc = DIFF_RC_OK;

free_and_exit:
	left->root->current = NULL;
	right->root->current = NULL;
	free(atom_patience_left);
	free(atom_patience_right);
	if (lcs)
		free(lcs);
	return rc;
}