forked from huningxin/onnxruntime
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest-runner-cli-args.ts
466 lines (396 loc) · 17.7 KB
/
test-runner-cli-args.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
import minimist from 'minimist';
import npmlog from 'npmlog';
import {Env, InferenceSession} from 'onnxruntime-common';
import {Logger} from '../lib/onnxjs/instrument';
import {Test} from '../test/test-types';
/* eslint-disable max-len */
// Usage text printed verbatim when the CLI is invoked with -h/--help
// (see parseTestRunnerCliArgs). Keep this in sync with the flags actually
// parsed below.
const HELP_MESSAGE = `
test-runner-cli
Run ONNX Runtime Web tests, models, benchmarks in different environments.
Usage:
test-runner-cli <mode> ... [options]
Modes:
suite0 Run all unittests, all operator tests and node model tests that described in suite test list
suite1 Run all operator tests and node model tests that described in suite test list
model Run a single model test
unittest Run all unittests
op Run a single operator test
Options:
*** General Options ***
-h, --help Print this message.
-d, --debug Specify to run test runner in debug mode.
Debug mode outputs verbose log for test runner, sets up environment debug flag, and keeps karma not to exit after tests completed.
-b=<...>, --backend=<...> Specify one or more backend(s) to run the test upon.
Backends can be one or more of the following, splitted by comma:
webgl
webgpu
wasm
xnnpack
webnn
-e=<...>, --env=<...> Specify the environment to run the test. Should be one of the following:
chrome (default)
edge (Windows only)
firefox
electron
safari (MacOS only)
node
bs (for BrowserStack tests)
-p, --profile Enable profiler.
Profiler will generate extra logs which include the information of events time consumption
-P[=<...>], --perf[=<...>] Generate performance number. Cannot be used with flag --debug.
This flag can be used with a number as value, specifying the total count of test cases to run. The test cases may be used multiple times. Default value is 10.
-c, --file-cache Enable file cache.
*** Session Options ***
-u=<...>, --optimized-model-file-path=<...> Specify whether to dump the optimized model.
-o=<...>, --graph-optimization-level=<...> Specify graph optimization level.
Default is 'all'. Valid values are 'disabled', 'basic', 'extended', 'all'.
*** Logging Options ***
--log-verbose=<...> Set log level to verbose
--log-info=<...> Set log level to info
--log-warning=<...> Set log level to warning
--log-error=<...> Set log level to error
The 4 flags above specify the logging configuration. Each flag allows to specify one or more category(s), splitted by comma. If use the flags without value, the log level will be applied to all category.
*** Backend Options ***
--wasm-number-threads Set the WebAssembly number of threads
--wasm-init-timeout Set the timeout for WebAssembly backend initialization, in milliseconds
--wasm-enable-simd Set whether to enable SIMD
--wasm-enable-proxy Set whether to enable proxy worker
--webgl-context-id Set the WebGL context ID (webgl/webgl2)
--webgl-matmul-max-batch-size Set the WebGL matmulMaxBatchSize
--webgl-texture-cache-mode Set the WebGL texture cache mode (initializerOnly/full)
--webgl-texture-pack-mode Set the WebGL texture pack mode (true/false)
--webgpu-profiling-mode Set the WebGPU profiling mode (off/default)
*** Browser Options ***
--no-sandbox This flag will be passed to Chrome.
Sometimes Chrome need this flag to work together with Karma.
Examples:
Run all suite0 tests:
> test-runner-cli suite0
Run single model test (test_relu) on WebAssembly backend
> test-runner-cli model test_relu --backend=wasm
Debug unittest
> test-runner-cli unittest --debug
Debug operator matmul, highlight verbose log from BaseGlContext and WebGLBackend
> test-runner-cli op matmul --backend=webgl --debug --log-verbose=BaseGlContext,WebGLBackend
Profile an ONNX model on WebGL backend
> test-runner-cli model <model_folder> --profile --backend=webgl
Run perf testing of an ONNX model on WebGL backend
> test-runner-cli model <model_folder> -b=webgl -P
`;
/* eslint-enable max-len */
/**
 * Type-only declarations for the string-literal unions used by the
 * TestRunnerCliArgs interface below. `declare namespace` emits no runtime
 * code; it only groups these aliases under the same name as the interface.
 */
export declare namespace TestRunnerCliArgs {
// Test mode, selected by the first positional CLI argument (default: 'suite0').
type Mode = 'suite0'|'suite1'|'model'|'unittest'|'op';
// Backend names accepted by '-b'/'--backend'; which subset is valid depends on the env.
type Backend = 'cpu'|'webgl'|'webgpu'|'wasm'|'onnxruntime'|'xnnpack'|'webnn';
// Target environment accepted by '-e'/'--env'.
type Environment = 'chrome'|'edge'|'firefox'|'electron'|'safari'|'node'|'bs';
// Bundle flavor consumed by Karma/Webpack (see the bundleMode field's docs).
type BundleMode = 'prod'|'dev'|'perf';
}
/**
 * The fully parsed and validated command line arguments for the test runner
 * CLI, produced by parseTestRunnerCliArgs().
 */
export interface TestRunnerCliArgs {
// Whether debug mode ('-d'/'--debug') is enabled; also raises npmlog to 'verbose' during parsing.
debug: boolean;
// The test mode from the first positional argument (defaults to 'suite0').
mode: TestRunnerCliArgs.Mode;
/**
* The parameter used when in mode 'model' or 'op', specifying the search string for the model or op test
*/
param?: string;
// Backend(s) to run the tests on ('-b'/'--backend').
// NOTE(review): typed as a single-element tuple, but parseTestRunnerCliArgs can
// produce multiple entries — presumably this should be TestRunnerCliArgs.Backend[]; verify.
backends: [TestRunnerCliArgs.Backend];
// Target environment ('-e'/'--env'); defaults to 'chrome'.
env: TestRunnerCliArgs.Environment;
/**
* Bundle Mode
*
* this field affects the behavior of Karma and Webpack.
*
* For Karma, if flag '--bundle-mode' is not set, the default behavior is 'dev'
* For Webpack, if flag '--bundle-mode' is not set, the default behavior is 'prod'
*
* For running tests, the default mode is 'dev'. If flag '--perf' is set, the mode will be set to 'perf'.
*
* Mode | Output File | Main | Source Map | Webpack Config
* ------ | --------------------- | -------------------- | ------------------ | --------------
* prod | /dist/ort.min.js | /lib/index.ts | source-map | production
* node | /dist/ort-web.node.js | /lib/index.ts | source-map | production
* dev | /test/ort.dev.js | /test/test-main.ts | inline-source-map | development
* perf | /test/ort.perf.js | /test/test-main.ts | (none) | production
*/
bundleMode: TestRunnerCliArgs.BundleMode;
// Per-category logging configuration assembled from the --log-* flags (plus profiler/perf categories).
logConfig: Test.Config['log'];
/**
* Whether to enable InferenceSession's profiler
*/
profile: boolean;
/**
* Whether to enable file cache
*/
fileCache: boolean;
/**
* Specify the times that test cases to run
*/
times?: number;
/**
* whether to dump the optimized model
*/
optimizedModelFilePath?: string;
/**
* Specify graph optimization level
*/
graphOptimizationLevel: 'disabled'|'basic'|'extended'|'all';
// Execution-provider option objects forwarded to session creation.
cpuOptions?: InferenceSession.CpuExecutionProviderOption;
cudaOptions?: InferenceSession.CudaExecutionProviderOption;
cudaFlags?: Record<string, unknown>;
wasmOptions?: InferenceSession.WebAssemblyExecutionProviderOption;
webglOptions?: InferenceSession.WebGLExecutionProviderOption;
// Global onnxruntime environment flags (wasm/webgl/webgpu) parsed from the command line.
globalEnvFlags?: Env;
// Pass --no-sandbox to Chrome (sometimes needed for Karma; see help text).
noSandbox?: boolean;
}
function parseBooleanArg(arg: unknown, defaultValue: boolean): boolean;
function parseBooleanArg(arg: unknown): boolean|undefined;
/**
 * Interprets a raw command line value as a boolean.
 *
 * - undefined -> the given default (or undefined when no default is supplied)
 * - boolean   -> returned as-is
 * - number    -> false only for 0
 * - string    -> 'true'/'false' (case-insensitive)
 *
 * @throws TypeError for any other value.
 */
function parseBooleanArg(arg: unknown, defaultValue?: boolean): boolean|undefined {
  switch (typeof arg) {
    case 'undefined':
      return defaultValue;
    case 'boolean':
      return arg;
    case 'number':
      return arg !== 0;
    case 'string': {
      const lowered = arg.toLowerCase();
      if (lowered === 'true') {
        return true;
      }
      if (lowered === 'false') {
        return false;
      }
      break;  // any other string is invalid
    }
    default:
      break;
  }
  throw new TypeError(`invalid boolean arg: ${arg}`);
}
/**
 * Normalizes the value of a --log-* flag.
 *
 * - a string is split on ',' into a category list
 * - an array (flag repeated on the command line) is flattened, each entry
 *   split on ','
 * - anything else is coerced to a boolean: true means "apply to all
 *   categories", false means "flag not set"
 *
 * Note: the previous single-use generic parameter <T> added nothing over
 * `unknown`, and array entries are now coerced with String() because minimist
 * may parse numeric-looking values into numbers.
 */
function parseLogLevel(arg: unknown): string[]|boolean {
  if (typeof arg === 'string') {
    return arg.split(',');
  }
  if (Array.isArray(arg)) {
    const categories: string[] = [];
    for (const entry of arg) {
      categories.push(...String(entry).split(','));
    }
    return categories;
  }
  return Boolean(arg);
}
/**
 * Builds the per-category logging configuration from the four --log-* flags.
 *
 * A flag given without categories requests a global ('*') minimal severity;
 * only the most severe such flag takes effect. A flag given with categories
 * adds one entry per category. Entries are emitted in severity order:
 * error, warning, info, verbose.
 */
function parseLogConfig(args: minimist.ParsedArgs) {
  const config: Array<{category: string; config: Logger.Config}> = [];
  const levels: Array<[Logger.Severity, string[]|boolean]> = [
    ['error', parseLogLevel(args['log-error'])],
    ['warning', parseLogLevel(args['log-warning'])],
    ['info', parseLogLevel(args['log-info'])],
    ['verbose', parseLogLevel(args['log-verbose'])],
  ];
  // Global severity: the first (most severe) bare flag wins.
  for (const [severity, value] of levels) {
    if (value === true) {
      config.push({category: '*', config: {minimalSeverity: severity}});
      break;
    }
  }
  // Category-scoped severities, in severity order.
  for (const [severity, value] of levels) {
    if (Array.isArray(value)) {
      config.push(...value.map(category => ({category, config: {minimalSeverity: severity}})));
    }
  }
  return config;
}
// Builds the session-level execution-provider option for the CPU backend.
// No CPU session options are parsed from the command line today.
function parseCpuOptions(_args: minimist.ParsedArgs): InferenceSession.CpuExecutionProviderOption {
  const option: InferenceSession.CpuExecutionProviderOption = {name: 'cpu'};
  return option;
}
// The CPU backend currently exposes no configurable environment flags;
// return an empty flag bag so parseGlobalEnvFlags can spread it safely.
function parseCpuFlags(_args: minimist.ParsedArgs): Record<string, unknown> {
  const flags: Record<string, unknown> = {};
  return flags;
}
// Builds the session-level execution-provider option for the WebAssembly
// backend. No wasm session options are parsed from the command line today.
function parseWasmOptions(_args: minimist.ParsedArgs): InferenceSession.WebAssemblyExecutionProviderOption {
  const option: InferenceSession.WebAssemblyExecutionProviderOption = {name: 'wasm'};
  return option;
}
/**
 * Parses WebAssembly backend environment flags from the command line.
 *
 * Recognized flags:
 * - "--wasm-number-threads": number
 * - "--wasm-init-timeout": number (milliseconds)
 * - "--wasm-enable-simd": boolean
 * - "--wasm-enable-proxy": boolean
 *
 * @throws Error when a flag is present but has an invalid type.
 */
function parseWasmFlags(args: minimist.ParsedArgs): Env.WebAssemblyFlags {
  const numThreads = args['wasm-number-threads'];
  if (typeof numThreads !== 'undefined' && typeof numThreads !== 'number') {
    throw new Error('Flag "wasm-number-threads" must be a number value');
  }
  const initTimeout = args['wasm-init-timeout'];
  if (typeof initTimeout !== 'undefined' && typeof initTimeout !== 'number') {
    throw new Error('Flag "wasm-init-timeout" must be a number value');
  }
  // minimist yields a boolean for a bare flag but the strings 'true'/'false'
  // for "--flag true" style usage; accept both. This validation used to be
  // copy-pasted per flag — it is now shared.
  const parseOptionalBooleanFlag = (flag: string): boolean|undefined => {
    const value = args[flag];
    if (value === 'true') {
      return true;
    }
    if (value === 'false') {
      return false;
    }
    if (typeof value !== 'undefined' && typeof value !== 'boolean') {
      throw new Error(`Flag "${flag}" must be a boolean value`);
    }
    return value;
  };
  const simd = parseOptionalBooleanFlag('wasm-enable-simd');
  const proxy = parseOptionalBooleanFlag('wasm-enable-proxy');
  return {numThreads, initTimeout, simd, proxy};
}
// Builds the session-level execution-provider option for the WebGL backend.
// No WebGL session options are parsed from the command line today.
function parseWebglOptions(_args: minimist.ParsedArgs): InferenceSession.WebGLExecutionProviderOption {
  const option: InferenceSession.WebGLExecutionProviderOption = {name: 'webgl'};
  return option;
}
/**
 * Parses WebGL backend environment flags from the command line.
 *
 * Recognized flags:
 * - "--webgl-context-id": 'webgl' | 'webgl2'
 * - "--webgl-matmul-max-batch-size": number
 * - "--webgl-texture-cache-mode": 'initializerOnly' | 'full'
 * - "--webgl-texture-pack-mode": boolean
 * - "--webgl-async": boolean
 *
 * @throws Error when a flag is present but has an invalid value.
 */
function parseWebglFlags(args: minimist.ParsedArgs): Env.WebGLFlags {
  const contextId = args['webgl-context-id'];
  if (contextId !== undefined && contextId !== 'webgl' && contextId !== 'webgl2') {
    throw new Error('Flag "webgl-context-id" is invalid');
  }
  const matmulMaxBatchSize = args['webgl-matmul-max-batch-size'];
  if (matmulMaxBatchSize !== undefined && typeof matmulMaxBatchSize !== 'number') {
    throw new Error('Flag "webgl-matmul-max-batch-size" must be a number value');
  }
  const textureCacheMode = args['webgl-texture-cache-mode'];
  if (textureCacheMode !== undefined && textureCacheMode !== 'initializerOnly' && textureCacheMode !== 'full') {
    throw new Error('Flag "webgl-texture-cache-mode" is invalid');
  }
  const pack = args['webgl-texture-pack-mode'];
  if (pack !== undefined && typeof pack !== 'boolean') {
    throw new Error('Flag "webgl-texture-pack-mode" is invalid');
  }
  const async = args['webgl-async'];
  if (async !== undefined && typeof async !== 'boolean') {
    throw new Error('Flag "webgl-async" is invalid');
  }
  // BUG FIX: 'async' was validated above but previously dropped from the
  // returned flags, so "--webgl-async" silently had no effect.
  return {contextId, matmulMaxBatchSize, textureCacheMode, pack, async};
}
/**
 * Parses WebGPU backend environment flags from the command line.
 * Only "--webgpu-profiling-mode" ('off' | 'default') is recognized.
 *
 * @throws Error when the flag is present but not 'off' or 'default'.
 */
function parseWebgpuFlags(args: minimist.ParsedArgs): Env.WebGpuFlags {
  const profilingMode = args['webgpu-profiling-mode'];
  const isValid = profilingMode === undefined || profilingMode === 'off' || profilingMode === 'default';
  if (!isValid) {
    throw new Error('Flag "webgpu-profiling-mode" is invalid');
  }
  return {profilingMode};
}
// Assembles the global onnxruntime Env flags object by delegating to the
// per-backend flag parsers. CPU flags (currently empty) are spread at the
// top level rather than nested.
function parseGlobalEnvFlags(args: minimist.ParsedArgs): Env {
  return {
    webgl: parseWebglFlags(args),
    wasm: parseWasmFlags(args),
    webgpu: parseWebgpuFlags(args),
    ...parseCpuFlags(args),
  };
}
/**
 * Parses the raw command line arguments (argv minus the node/script entries)
 * into a validated TestRunnerCliArgs object.
 *
 * Side effects: prints the help message and exits the process on -h/--help;
 * may raise the npmlog level and emits npmlog verbose traces.
 *
 * @throws Error when a flag value or flag combination is invalid.
 */
export function parseTestRunnerCliArgs(cmdlineArgs: string[]): TestRunnerCliArgs {
const args = minimist(cmdlineArgs);
// Option: -h, --help (print usage and exit immediately)
if (args.help || args.h) {
console.log(HELP_MESSAGE);
process.exit();
}
// Option: -d, --debug
const debug = parseBooleanArg(args.debug || args.d, false);
if (debug) {
npmlog.level = 'verbose';
}
npmlog.verbose('TestRunnerCli.Init', 'Parsing commandline arguments...');
// Mode is the first positional argument; default to running suite0.
const mode = args._.length === 0 ? 'suite0' : args._[0];
// Option: -e=<...>, --env=<...>
const envArg = args.env || args.e;
const env = (typeof envArg !== 'string') ? 'chrome' : envArg;
if (['chrome', 'edge', 'firefox', 'electron', 'safari', 'node', 'bs'].indexOf(env) === -1) {
throw new Error(`not supported env ${env}`);
}
// Option: -b=<...>, --backend=<...>
const browserBackends = ['webgl', 'webgpu', 'wasm', 'xnnpack', 'webnn'];
// TODO: remove this when Chrome support WebGPU or WebNN.
// we need this for now because Chrome does not support webgpu and webnn yet,
// and ChromeCanary is not in CI.
const defaultBrowserBackends = ['webgl', /* 'webgpu', */ 'wasm', 'xnnpack' /*, 'webnn'*/];
const nodejsBackends = ['cpu', 'wasm'];
const backendArgs = args.backend || args.b;
// No explicit backend: use the env-appropriate default set. Otherwise the
// value is a comma-separated list of backend names.
const backend = (typeof backendArgs !== 'string') ? (env === 'node' ? nodejsBackends : defaultBrowserBackends) :
backendArgs.split(',');
// Reject any backend the chosen environment cannot run.
for (const b of backend) {
if ((env !== 'node' && browserBackends.indexOf(b) === -1) || (env === 'node' && nodejsBackends.indexOf(b) === -1)) {
throw new Error(`backend ${b} is not supported in env ${env}`);
}
}
// WebNN requires the proxy worker; the user must opt in explicitly.
if (backend.includes('webnn') && args['wasm-enable-proxy'] !== 'true') {
throw new Error(
'backend webnn is restricted in the dedicated worker, set "--wasm-enable-proxy true" to enable proxy worker');
}
const globalEnvFlags = parseGlobalEnvFlags(args);
// Options:
// --log-verbose=<...>
// --log-info=<...>
// --log-warning=<...>
// --log-error=<...>
const logConfig = parseLogConfig(args);
// The first log entry (the global one, when present) also drives the
// onnxruntime env log level.
globalEnvFlags.logLevel = logConfig[0]?.config.minimalSeverity;
// Option: -p, --profile
const profile = (args.profile || args.p) ? true : false;
if (profile) {
// Profiler output is emitted through verbose logging on these categories.
logConfig.push({category: 'Profiler.session', config: {minimalSeverity: 'verbose'}});
logConfig.push({category: 'Profiler.node', config: {minimalSeverity: 'verbose'}});
logConfig.push({category: 'Profiler.op', config: {minimalSeverity: 'verbose'}});
logConfig.push({category: 'Profiler.backend', config: {minimalSeverity: 'verbose'}});
globalEnvFlags.logLevel = 'verbose';
}
// Option: -P[=<...>], --perf[=<...>]
const perfArg = (args.perf || args.P);
const perf = perfArg ? true : false;
// A numeric value for -P/--perf overrides the default of 10 runs.
const times = (typeof perfArg === 'number') ? perfArg : 10;
if (debug && perf) {
throw new Error('Flag "perf" cannot be used together with flag "debug".');
}
if (perf && (mode !== 'model')) {
throw new Error('Flag "perf" can only be used in mode "model".');
}
if (perf) {
logConfig.push({category: 'TestRunner.Perf', config: {minimalSeverity: 'verbose'}});
}
// Option: -u, --optimized-model-file-path
const optimizedModelFilePath = args['optimized-model-file-path'] || args.u || undefined;
if (typeof optimizedModelFilePath !== 'undefined' && typeof optimizedModelFilePath !== 'string') {
throw new Error('Flag "optimized-model-file-path" need to be either empty or a valid file path.');
}
// Option: -o, --graph-optimization-level
const graphOptimizationLevel = args['graph-optimization-level'] || args.o || 'all';
if (typeof graphOptimizationLevel !== 'string' ||
['disabled', 'basic', 'extended', 'all'].indexOf(graphOptimizationLevel) === -1) {
throw new Error(`graph optimization level is invalid: ${graphOptimizationLevel}`);
}
// Option: -c, --file-cache
const fileCache = parseBooleanArg(args['file-cache'] || args.c, false);
// Per-execution-provider session options (no CLI flags feed these today).
const cpuOptions = parseCpuOptions(args);
const wasmOptions = parseWasmOptions(args);
const webglOptions = parseWebglOptions(args);
// Option: --no-sandbox
const noSandbox = !!args['no-sandbox'];
npmlog.verbose('TestRunnerCli.Init', ` Mode: ${mode}`);
npmlog.verbose('TestRunnerCli.Init', ` Env: ${env}`);
npmlog.verbose('TestRunnerCli.Init', ` Debug: ${debug}`);
npmlog.verbose('TestRunnerCli.Init', ` Backend: ${backend}`);
npmlog.verbose('TestRunnerCli.Init', 'Parsing commandline arguments... DONE');
return {
debug,
mode: mode as TestRunnerCliArgs['mode'],
// Second positional argument: the model/op search string (modes 'model'/'op').
param: args._.length > 1 ? args._[1] : undefined,
backends: backend as TestRunnerCliArgs['backends'],
bundleMode: perf ? 'perf' : 'dev',
env: env as TestRunnerCliArgs['env'],
logConfig,
profile,
// 'times' is only meaningful for perf runs.
times: perf ? times : undefined,
optimizedModelFilePath,
graphOptimizationLevel: graphOptimizationLevel as TestRunnerCliArgs['graphOptimizationLevel'],
fileCache,
cpuOptions,
webglOptions,
wasmOptions,
globalEnvFlags,
noSandbox
};
}