// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"fmt"
	"sort"
	"strings"
)

// A pair is a pair of values tracked for both the x and y side of a diff.
// It is typically a pair of line indexes.
type pair struct{ x, y int }

// Diff returns an anchored diff of the two texts old and new
// in the “unified diff” format. If old and new are identical,
// Diff returns a nil slice (no output).
//
// Unix diff implementations typically look for a diff with
// the smallest number of lines inserted and removed,
// which can in the worst case take time quadratic in the
// number of lines in the texts. As a result, many implementations
// either can be made to run for a long time or cut off the search
// after a predetermined amount of work.
//
// In contrast, this implementation looks for a diff with the
// smallest number of “unique” lines inserted and removed,
// where unique means a line that appears just once in both old and new.
// We call this an “anchored diff” because the unique lines anchor
// the chosen matching regions. An anchored diff is usually clearer
// than a standard diff, because the algorithm does not try to
// reuse unrelated blank lines or closing braces.
// The algorithm also guarantees to run in O(n log n) time
// instead of the standard O(n²) time.
//
// Some systems call this approach a “patience diff,” named for
// the “patience sorting” algorithm, itself named for a solitaire card game.
// We avoid that name for two reasons. First, the name has been used
// for a few different variants of the algorithm, so it is imprecise.
// Second, the name is frequently interpreted as meaning that you have
// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm,
// when in fact the algorithm is faster than the standard one.
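//
// As a brief illustration (a hypothetical call; file names and texts are
// invented for this example), diffing "old\n" against "new\n" as
//
//	Diff("a.txt", []byte("old\n"), "b.txt", []byte("new\n"))
//
// produces output of the form
//
//	diff a.txt b.txt
//	--- a.txt
//	+++ b.txt
//	@@ -1,1 +1,1 @@
//	-old
//	+new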
func Diff(oldName string, old []byte, newName string, new []byte) []byte {
	if bytes.Equal(old, new) {
		return nil
	}
	x := lines(old)
	y := lines(new)

	// Print diff header.
	var out bytes.Buffer
	fmt.Fprintf(&out, "diff %s %s\n", oldName, newName)
	fmt.Fprintf(&out, "--- %s\n", oldName)
	fmt.Fprintf(&out, "+++ %s\n", newName)

	// Loop over matches to consider,
	// expanding each match to include surrounding lines,
	// and then printing diff chunks.
	// To avoid setup/teardown cases outside the loop,
	// tgs returns a leading {0,0} and trailing {len(x), len(y)} pair
	// in the sequence of matches.
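	// For example, if x and y share no unique common lines at all, tgs
	// returns only the two sentinels, and this loop falls back to emitting
	// the entire difference as a single chunk (after trimming any common
	// prefix and suffix).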
	var (
		done  pair     // printed up to x[:done.x] and y[:done.y]
		chunk pair     // start lines of current chunk
		count pair     // number of lines from each side in current chunk
		ctext []string // lines for current chunk
	)
	for _, m := range tgs(x, y) {
		if m.x < done.x {
			// Already handled scanning forward from earlier match.
			continue
		}

		// Expand matching lines as far as possible,
		// establishing that x[start.x:end.x] == y[start.y:end.y].
		// Note that on the first (or last) iteration we may (or definitely do)
		// have an empty match: start.x==end.x and start.y==end.y.
		start := m
		for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] {
			start.x--
			start.y--
		}
		end := m
		for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] {
			end.x++
			end.y++
		}

		// Emit the mismatched lines before start into this chunk.
		// (No effect on first sentinel iteration, when start = {0,0}.)
		for _, s := range x[done.x:start.x] {
			ctext = append(ctext, "-"+s)
			count.x++
		}
		for _, s := range y[done.y:start.y] {
			ctext = append(ctext, "+"+s)
			count.y++
		}

		// If we're not at EOF and have too few common lines,
		// the chunk includes all the common lines and continues.
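		// (Equivalently: two edits separated by fewer than 2*C matching
		// lines are kept together in a single chunk.)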
		const C = 3 // number of context lines
		if (end.x < len(x) || end.y < len(y)) &&
			(end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) {
			for _, s := range x[start.x:end.x] {
				ctext = append(ctext, " "+s)
				count.x++
				count.y++
			}
			done = end
			continue
		}

		// End chunk with common lines for context.
		if len(ctext) > 0 {
			n := end.x - start.x
			if n > C {
				n = C
			}
			for _, s := range x[start.x : start.x+n] {
				ctext = append(ctext, " "+s)
				count.x++
				count.y++
			}
			done = pair{start.x + n, start.y + n}

			// Format and emit chunk.
			// Convert line numbers to 1-indexed.
			// Special case: empty file shows up as 0,0 not 1,0.
			if count.x > 0 {
				chunk.x++
			}
			if count.y > 0 {
				chunk.y++
			}
			fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y)
			for _, s := range ctext {
				out.WriteString(s)
			}
			count.x = 0
			count.y = 0
			ctext = ctext[:0]
		}

		// If we reached EOF, we're done.
		if end.x >= len(x) && end.y >= len(y) {
			break
		}

		// Otherwise start a new chunk.
		chunk = pair{end.x - C, end.y - C}
		for _, s := range x[chunk.x:end.x] {
			ctext = append(ctext, " "+s)
			count.x++
			count.y++
		}
		done = end
	}

	return out.Bytes()
}

// lines returns the lines in the file x, including newlines.
// If the file does not end in a newline, one is supplied
// along with a warning about the missing newline.
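//
// For example (illustrative only):
//
//	lines([]byte("a\nb"))
//
// returns
//
//	["a\n", "b\n\\ No newline at end of file\n"]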
func lines(x []byte) []string {
	l := strings.SplitAfter(string(x), "\n")
	if l[len(l)-1] == "" {
		l = l[:len(l)-1]
	} else {
		// Treat last line as having a message about the missing newline attached,
		// using the same text as BSD/GNU diff (including the leading backslash).
		l[len(l)-1] += "\n\\ No newline at end of file\n"
	}
	return l
}

// tgs returns the pairs of indexes of the longest common subsequence
// of unique lines in x and y, where a unique line is one that appears
// once in x and once in y.
//
// The longest common subsequence algorithm is as described in
// Thomas G. Szymanski, “A Special Case of the Maximal Common
// Subsequence Problem,” Princeton TR #170 (January 1975),
// available at https://research.swtch.com/tgs170.pdf.
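//
// As a worked illustration (invented for this example): with
// x = ["a", "b", "c"] and y = ["b", "c", "d"], the lines unique to both
// sides are "b" and "c", so tgs returns
//
//	[{0, 0}, {1, 0}, {2, 1}, {3, 3}]
//
// where {1, 0} and {2, 1} match "b" and "c" and the outer pairs are the
// sentinels described in Diff.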
func tgs(x, y []string) []pair {
	// Count the number of times each string appears in x and y.
	// We only care about 0, 1, many, counted as 0, -1, -2
	// for the x side and 0, -4, -8 for the y side.
	// Using negative numbers now lets us distinguish positive line numbers later.
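	// For example, a line appearing exactly once in x and exactly once
	// in y ends up with m[s] == -1 + -4 == -5, the "unique on both sides"
	// signature tested below.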
	m := make(map[string]int)
	for _, s := range x {
		if c := m[s]; c > -2 {
			m[s] = c - 1
		}
	}
	for _, s := range y {
		if c := m[s]; c > -8 {
			m[s] = c - 4
		}
	}

	// Now unique strings can be identified by m[s] = -1+-4.
	//
	// Gather the indexes of those strings in x and y, building:
	//	xi[i] = increasing indexes of unique strings in x.
	//	yi[i] = increasing indexes of unique strings in y.
	//	inv[i] = index j such that x[xi[i]] = y[yi[j]].
	var xi, yi, inv []int
	for i, s := range y {
		if m[s] == -1+-4 {
			m[s] = len(yi)
			yi = append(yi, i)
		}
	}
	for i, s := range x {
		if j, ok := m[s]; ok && j >= 0 {
			xi = append(xi, i)
			inv = append(inv, j)
		}
	}

	// Apply Algorithm A from Szymanski's paper.
	// In those terms, A = J = inv and B = [0, n).
	// We add sentinel pairs {0,0}, and {len(x),len(y)}
	// to the returned sequence, to help the processing loop.
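	// Continuing the doc comment's example, J = inv = [0, 1]: the loop
	// below yields L = [1, 2], so the longest increasing subsequence has
	// length k = 2 and both unique matches are kept.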
	J := inv
	n := len(xi)
	T := make([]int, n)
	L := make([]int, n)
	for i := range T {
		T[i] = n + 1
	}
	for i := 0; i < n; i++ {
		k := sort.Search(n, func(k int) bool {
			return T[k] >= J[i]
		})
		T[k] = J[i]
		L[i] = k + 1
	}
	k := 0
	for _, v := range L {
		if k < v {
			k = v
		}
	}
	seq := make([]pair, 2+k)
	seq[1+k] = pair{len(x), len(y)} // sentinel at end
	lastj := n
	for i := n - 1; i >= 0; i-- {
		if L[i] == k && J[i] < lastj {
			seq[k] = pair{xi[i], yi[J[i]]}
			k--
		}
	}
	seq[0] = pair{0, 0} // sentinel at start
	return seq
}