screenshots.ts
// Copyright 2023 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/* eslint-disable no-console */
// no-console disabled here as this is a test runner and expects to output to the console
import {assert} from 'chai';
import * as childProcess from 'child_process';
import * as fs from 'fs';
import * as path from 'path';
import type * as puppeteer from 'puppeteer-core';
import {platform} from '../conductor/mocha-interface-helpers.js';
import {SOURCE_ROOT} from '../conductor/paths.js';
import {ScreenshotError} from '../conductor/screenshot-error.js';
import {TestConfig} from '../conductor/test_config.js';
import {
getBrowserAndPages,
timeout,
waitFor,
} from '../shared/helper.js';
/**
* The golden screenshots folder is always taken from the source directory (NOT
* out/Target/...) because we commit these files to git. Therefore we use the
* flags from the test runner config to locate the source directory and read our
* goldens from there.
*/
const testRunnerCWD = SOURCE_ROOT;
const GOLDENS_FOLDER = path.join(testRunnerCWD, 'test', 'interactions', 'goldens', platform);
/**
* It's assumed that the image_diff binaries are in CWD/third_party/image_diff/{platform}/image_diff
*/
const exeSuffix = platform.startsWith('win') ? '.exe' : '';
const IMAGE_DIFF_BINARY = path.join(testRunnerCWD, 'third_party', 'image_diff', platform, 'image_diff' + exeSuffix);
if (!fs.existsSync(IMAGE_DIFF_BINARY)) {
throw new Error(`path to image_diff (${IMAGE_DIFF_BINARY}) did not exist.`);
}
/**
* The generated screenshot path is relative, as we put the generated
* screenshots into the out/TARGET/... directory.
*
* If we find it exists ahead of a test run, we remove it, so that when we start
* a test run the folder is empty. This ensures that no generated files left
* over from previous runs interfere.
*/
const generatedScreenshotFolderParts = ['..', '.generated', platform];
const generatedScreenshotFolder = path.join(__dirname, ...generatedScreenshotFolderParts);
if (fs.existsSync(generatedScreenshotFolder)) {
fs.rmSync(generatedScreenshotFolder, {recursive: true});
}
fs.mkdirSync(generatedScreenshotFolder, {recursive: true});
const defaultScreenshotOpts: puppeteer.ScreenshotOptions = {
type: 'png',
encoding: 'binary',
captureBeyondViewport: false,
};
const DEFAULT_RETRIES_COUNT = 5;
const DEFAULT_MS_BETWEEN_RETRIES = 150;
// The percentage difference between the golden and the newly generated screenshot
// that is acceptable and will not fail the test.
const DEFAULT_SCREENSHOT_THRESHOLD_PERCENT = 4;
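// Taken together, the defaults above mean a screenshot may differ from its golden by up to 4%
// and still pass, and a failing comparison is attempted up to 5 times in total, with a 150ms
// pause between attempts.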
export const assertElementScreenshotUnchanged = async (
element: puppeteer.ElementHandle|null, fileName: string,
maximumDiffThreshold = DEFAULT_SCREENSHOT_THRESHOLD_PERCENT,
options: Partial<puppeteer.ScreenshotOptions> = {}) => {
if (!element) {
assert.fail(`Given element for test ${fileName} was not found.`);
}
// Only assert screenshots on Linux. We don't observe platform-specific differences often enough
// to justify the cost of asserting on all three platforms for every screenshot.
if (platform !== 'linux') {
return;
}
return assertScreenshotUnchangedWithRetries(element, fileName, maximumDiffThreshold, DEFAULT_RETRIES_COUNT, options);
};
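// Illustrative usage of assertElementScreenshotUnchanged (above) in a test. This is a sketch
// only: the 'devtools-accordion' selector and the 'accordion/basic.png' golden are hypothetical
// names, not real goldens in this repository.
//
//   const accordion = await waitFor('devtools-accordion');
//   await assertElementScreenshotUnchanged(accordion, 'accordion/basic.png');
//
// A golden name containing `/` is stored in a sub-folder of the goldens directory, and the
// optional third argument overrides the default diff threshold for components known to render
// slightly differently between runs.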
const assertScreenshotUnchangedWithRetries = async (
elementOrPage: puppeteer.ElementHandle|puppeteer.Page, fileName: string, maximumDiffThreshold: number,
maximumRetries: number, options: Partial<puppeteer.ScreenshotOptions> = {}) => {
const {frontend} = getBrowserAndPages();
try {
await frontend.evaluate(() => window.dispatchEvent(new Event('hidecomponentdocsui')));
/**
* You can call the helper with a path for the golden - e.g.
* accordion/basic.png. So we split on `/` and then join on path.sep to
* ensure we calculate the right path regardless of platform.
*/
const fileNameForPlatform = fileName.split('/').join(path.sep);
const goldenScreenshotPath = path.join(GOLDENS_FOLDER, fileNameForPlatform);
const generatedScreenshotPath = path.join(generatedScreenshotFolder, fileNameForPlatform);
// You can run the tests with ITERATIONS=2 to run each test twice. In that
// case we would expect the generated screenshots to already exist, so if
// we are running more than 1 iteration, we do not error.
const testIterations = TestConfig.repetitions;
if (fs.existsSync(generatedScreenshotPath) && testIterations < 2) {
// If this happened something went wrong during the clean-up at the start of the test run, so let's bail.
throw new Error(`${generatedScreenshotPath} already exists.`);
}
/**
* Ensure that the directories for the golden/generated file exist. We need
* this because if the user calls this function with `accordion/basic.png`,
* we need to make sure that the `accordion` folder exists.
*/
fs.mkdirSync(path.dirname(generatedScreenshotPath), {recursive: true});
fs.mkdirSync(path.dirname(goldenScreenshotPath), {recursive: true});
await assertScreenshotUnchanged({
elementOrPage,
generatedScreenshotPath,
goldenScreenshotPath,
screenshotOptions: options,
fileName,
maximumDiffThreshold,
maximumRetries,
});
} finally {
await frontend.evaluate(() => window.dispatchEvent(new Event('showcomponentdocsui')));
}
};
interface ScreenshotAssertionOptions {
goldenScreenshotPath: string;
generatedScreenshotPath: string;
screenshotOptions: Partial<puppeteer.ScreenshotOptions>;
elementOrPage: puppeteer.ElementHandle|puppeteer.Page;
fileName: string;
maximumDiffThreshold: number;
maximumRetries: number;
retryCount?: number;
}
const assertScreenshotUnchanged = async (options: ScreenshotAssertionOptions) => {
const {
elementOrPage,
generatedScreenshotPath,
goldenScreenshotPath,
fileName,
maximumDiffThreshold,
maximumRetries,
retryCount = 1,
} = options;
const screenshotOptions = {...defaultScreenshotOpts, ...options.screenshotOptions, path: generatedScreenshotPath};
await (elementOrPage as puppeteer.Page).screenshot(screenshotOptions);
/**
* The user can do UPDATE_GOLDEN=accordion/basic.png npm run screenshotstest
* to update the golden image. This is useful if work has caused the
* screenshot to change and therefore the test goldens need to be updated.
*/
const shouldUpdate =
TestConfig.onDiff.update && (TestConfig.onDiff.update === true || TestConfig.onDiff.update.includes(fileName));
const throwAfterGoldensUpdate = TestConfig.onDiff.throw;
let onBotAndImageNotFound = false;
// In the event that a golden does not exist, assume the generated screenshot is the new golden.
if (!fs.existsSync(goldenScreenshotPath)) {
// LUCI_CONTEXT is an environment variable present on the bots.
if (process.env.LUCI_CONTEXT !== undefined && !shouldUpdate) {
// If the image is missing, there's no point retrying the test N more times.
onBotAndImageNotFound = true;
throw ScreenshotError.fromMessage(
`Failing test: running in an environment with LUCI_CONTEXT but no golden screenshot was found.
Here's the image that this test generated, as base64:
data:image/png;base64,${fs.readFileSync(generatedScreenshotPath, {
encoding: 'base64',
})}
`,
generatedScreenshotPath);
}
console.log('Golden does not exist, using generated screenshot.');
setGeneratedFileAsGolden(goldenScreenshotPath, generatedScreenshotPath);
if (throwAfterGoldensUpdate) {
throw new Error('Golden does not exist, using generated screenshot.');
}
}
try {
await compare(goldenScreenshotPath, generatedScreenshotPath, maximumDiffThreshold);
} catch (compareError) {
if (!onBotAndImageNotFound) {
console.log(`=> Test failed. Retrying (retry ${retryCount} of ${maximumRetries} maximum).`);
}
if (retryCount === maximumRetries || onBotAndImageNotFound) {
if (shouldUpdate) {
console.log(`=> ${fileName} was out of date and failed; updating`);
setGeneratedFileAsGolden(goldenScreenshotPath, generatedScreenshotPath);
if (throwAfterGoldensUpdate) {
throw compareError;
}
return;
}
// If we don't want to update, throw the assertion error so we fail the test.
throw compareError;
}
// Wait a little bit before trying again
await new Promise(resolve => setTimeout(resolve, DEFAULT_MS_BETWEEN_RETRIES));
await assertScreenshotUnchanged({
elementOrPage,
generatedScreenshotPath,
goldenScreenshotPath,
fileName,
maximumDiffThreshold,
maximumRetries,
retryCount: retryCount + 1,
screenshotOptions: options.screenshotOptions,
});
}
};
interface ImageDiff {
rawMisMatchPercentage: number;
diffPath: string;
}
async function imageDiff(golden: string, generated: string) {
return new Promise<ImageDiff>(async (resolve, reject) => {
try {
const imageDiff: ImageDiff = {rawMisMatchPercentage: 0, diffPath: ''};
const diffText = await execImageDiffCommand(`${IMAGE_DIFF_BINARY} --histogram ${golden} ${generated}`);
// Parse out the number from the cmd output, i.e. diff: 48.9% failed => 48.9
imageDiff.rawMisMatchPercentage = Number(diffText.replace(/^diff:\s/, '').replace(/%.*/, ''));
if (Number.isNaN(imageDiff.rawMisMatchPercentage)) {
reject('Unable to compare images');
}
// Only create a diff image if the images are different.
if (imageDiff.rawMisMatchPercentage > 0) {
imageDiff.diffPath = path.join(path.dirname(generated), `${path.basename(generated, '.png')}-diff.png`);
await execImageDiffCommand(`${IMAGE_DIFF_BINARY} --diff ${golden} ${generated} ${imageDiff.diffPath}`);
}
resolve(imageDiff);
} catch (e) {
reject(new Error(`Error when running image_diff: ${e.stack}`));
}
});
}
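// For reference, imageDiff() above shells out to image_diff twice; the paths below are
// hypothetical examples:
//
//   image_diff --histogram <golden>.png <generated>.png    -> prints "diff: 4.25% failed"
//   image_diff --diff <golden>.png <generated>.png <generated>-diff.png
//
// The mismatch percentage is parsed from the --histogram output, and the --diff invocation only
// runs when that percentage is non-zero, writing a visual diff next to the generated screenshot.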
async function execImageDiffCommand(cmd: string) {
return new Promise<string>((resolve, reject) => {
let commandOutput = '';
try {
commandOutput = childProcess.execSync(cmd, {encoding: 'utf8'});
resolve(commandOutput);
} catch (e) {
// image_diff will exit with a status code of 1 if the diff is too big, so
// this needs to be caught, but the outcome is the same - we want to send
// back the string for processing.
if (e.stdout && e.stdout.indexOf('diff') === -1) {
reject(new Error(`Comparing diff failed. stdout: "${e.stdout}"`));
return;
}
resolve(e.stdout);
}
});
}
async function compare(golden: string, generated: string, maximumDiffThreshold: number) {
const isOnBot = process.env.LUCI_CONTEXT !== undefined;
if (!isOnBot && process.env.SKIP_SCREENSHOT_COMPARISONS_FOR_FAST_COVERAGE) {
// When checking test coverage locally the tests get sped up significantly
// if we do not do the actual image comparison. Obviously this makes the
// tests all pass, but it is useful to quickly get coverage stats.
// Therefore you can pass this flag to skip all screenshot comparisons. We
// make sure this is only possible if not on a CQ bot and 99.9% of the time
// this should not be used!
return;
}
const {rawMisMatchPercentage, diffPath} = await imageDiff(golden, generated);
const base64TestGeneratedImageLog = `Here's the image the test generated, as base64:
data:image/png;base64,${fs.readFileSync(generated, {
encoding: 'base64',
})}`;
const base64DiffImageLog = `And here's the diff image as base64:\n
data:image/png;base64,${
diffPath ? fs.readFileSync(diffPath, {
encoding: 'base64',
}) :
''}`;
let debugInfo = '';
if (isOnBot) {
debugInfo = `${base64TestGeneratedImageLog}\n${base64DiffImageLog}\n`;
} else {
debugInfo = `Run the tests again with --on-diff=update to update all tests that fail.
Only do this if you expected this screenshot to have changed!
Diff image generated at:
=> ${path.relative(testRunnerCWD, diffPath)}\n`;
}
try {
assert.isAtMost(
rawMisMatchPercentage, maximumDiffThreshold,
`There is a ${rawMisMatchPercentage}% difference between the golden and generated image.
${debugInfo}`);
if (rawMisMatchPercentage > 0) {
console.log(`test passed with difference of ${rawMisMatchPercentage}%`);
}
} catch (assertionError) {
throw ScreenshotError.fromError(assertionError, golden, generated, diffPath);
}
}
function setGeneratedFileAsGolden(golden: string, generated: string) {
console.log(`Setting generated file to golden:
${path.relative(testRunnerCWD, generated)}
=> ${path.relative(testRunnerCWD, golden)}
`);
try {
fs.copyFileSync(generated, golden);
} catch (e) {
assert.fail(`Error setting golden, ${e}`);
}
}
export async function waitForDialogAnimationEnd(root?: puppeteer.ElementHandle) {
const ANIMATION_TIMEOUT = 2000;
const dialog = await waitFor('dialog[open]', root);
const animationPromise = dialog.evaluate((dialog: Element) => {
return new Promise<void>(resolve => {
dialog.addEventListener('animationend', () => resolve(), {once: true});
});
});
await Promise.race([animationPromise, timeout(ANIMATION_TIMEOUT)]);
}
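// Illustrative usage (a sketch only: the 'devtools-dialog' selector and the 'dialog/open.png'
// golden are hypothetical names):
//
//   const dialog = await waitFor('devtools-dialog');
//   await waitForDialogAnimationEnd();
//   await assertElementScreenshotUnchanged(dialog, 'dialog/open.png');
//
// Waiting for the open animation to finish (or for the 2000ms timeout) stops the screenshot from
// capturing a mid-animation frame, which would otherwise be a source of flaky diffs.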