Commit 81caeb8
Bug 1927976 [wpt PR 48875] - webnn: Do not allow partial MLTensor writes, a=testonly
Automatic update from web-platform-tests
webnn: Do not allow partial MLTensor writes

The buffer to be written may be sliced before being passed to the
writeTensor() method

Bug: 328105506
Cq-Include-Trybots: luci.chromium.try:win11-blink-rel
Change-Id: Ia1fb607606653a1c9f781a202a6953e49d54af76
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/5968203
Reviewed-by: Phillis Tang <phillis@chromium.org>
Commit-Queue: Austin Sullivan <asully@chromium.org>
Reviewed-by: ningxin hu <ningxin.hu@intel.com>
Cr-Commit-Position: refs/heads/main@{#1375665}
--
wpt-commits: 45cfb0255ffad929a63cd4f0098c2e1073982e2b
wpt-pr: 48875

UltraBlame original commit: ee308061b8326bc67474eee011c92d9ff63ac2c0
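To illustrate the change described above (this sketch is not part of the commit): a caller that previously passed offset and size arguments to writeTensor() can instead slice the source view before writing. A minimal JavaScript sketch, assuming an existing MLContext named mlContext and a writable 4-byte MLTensor named mlTensor:

    const data = new Uint8Array([0x00, 0x11, 0x22, 0x33, 0x44, 0x55]);

    // Previously a call along these lines passed an offset and size:
    //   mlContext.writeTensor(mlTensor, data, /*srcOffset=*/2, /*srcSize=*/4);
    // After this change the caller slices the view first, and the slice must
    // match the tensor's byte length exactly.
    mlContext.writeTensor(mlTensor, data.subarray(2, 6));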
1 parent 62f261e commit 81caeb8

File tree: 1 file changed, +10 -123 lines

testing/web-platform/tests/webnn/conformance_tests/tensor.https.any.js

Lines changed: 10 additions & 123 deletions
@@ -173,45 +173,18 @@ const testWriteTensor = (testName) => {
     let mlTensor = await mlContext.createTensor(tensorDescriptor);
 
     const tensorByteLength = sizeOfDescriptor(tensorDescriptor);
-    let arrayBuffer = new ArrayBuffer(tensorByteLength);
 
 
     assert_throws_js(
         TypeError,
         () => mlContext.writeTensor(
-            mlTensor, new Uint8Array(arrayBuffer), 0,
-            tensorByteLength + 1));
-    assert_throws_js(
-        TypeError,
-        () => mlContext.writeTensor(
-            mlTensor, new Uint8Array(arrayBuffer), 3,
-            tensorByteLength));
-
-
-    assert_throws_js(
-        TypeError,
-        () => mlContext.writeTensor(
-            mlTensor, new Uint8Array(arrayBuffer),
-            tensorByteLength + 1));
-
+            mlTensor, new ArrayBuffer(tensorByteLength + 1)));
 
     assert_throws_js(
         TypeError,
         () => mlContext.writeTensor(
-            mlTensor, new Uint8Array(arrayBuffer),
-            tensorByteLength + 1, undefined));
-
-    assert_throws_js(
-        TypeError,
-        () => mlContext.writeTensor(
-            mlTensor, new Uint8Array(arrayBuffer), undefined,
-            tensorByteLength + 1));
-
-    assert_throws_js(
-        TypeError,
-        () => mlContext.writeTensor(
-            mlTensor, Uint8Array.from([0xEE, 0xEE, 0xEE, 0xEE, 0xEE])));
-  }, `${testName} / error`);
+            mlTensor, new ArrayBuffer(tensorByteLength - 1)));
+  }, `${testName} / write with buffer of wrong size`);
 
   promise_test(async () => {
     const tensorDescriptor = {
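As an aside on the reworked test above (illustrative only, not from the diff): with the offset and size parameters gone, writeTensor() rejects any buffer whose byteLength differs from the tensor's byte length, so a call site can guard for the mismatch up front. A hedged sketch, where writeExact and its parameters are hypothetical names:

    // Hypothetical helper around mlContext.writeTensor().
    function writeExact(mlContext, mlTensor, tensorByteLength, view) {
      if (view.byteLength !== tensorByteLength) {
        throw new TypeError(
            `expected exactly ${tensorByteLength} bytes, got ${view.byteLength}`);
      }
      mlContext.writeTensor(mlTensor, view);
    }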
@@ -252,26 +225,15 @@ const testWriteTensor = (testName) => {
   promise_test(async () => {
     let mlTensor = await mlContext.createTensor({
       dataType: 'int32',
-      shape: [1],
+      shape: [],
      readable: true,
      writable: true,
    });
 
-
-    const inputData = Uint8Array.from([0xAA, 0xAA, 0xAA, 0xAA]);
+    const inputData = Int32Array.from([0xAAAABBBB]);
     mlContext.writeTensor(mlTensor, inputData);
-
-
-    mlContext.writeTensor(mlTensor, Uint8Array.from([0xBB]), 0, 0);
-
     await assert_tensor_data_equals(mlContext, mlTensor, inputData);
-
-
-    mlContext.writeTensor(
-        mlTensor, Uint32Array.from([0xBBBBBBBB]), 1);
-
-    await assert_tensor_data_equals(mlContext, mlTensor, inputData);
-  }, `${testName} / zero_write`);
+  }, `${testName} / scalar`);
 
   promise_test(async () => {
     const tensorDescriptor = {
@@ -285,7 +247,6 @@ const testWriteTensor = (testName) => {
     const tensorByteLength = sizeOfDescriptor(tensorDescriptor);
     let inputBuffer = new ArrayBuffer(tensorByteLength);
 
-
     const int32View = new Int32Array(inputBuffer);
     int32View.fill(0xBBBBBBBB);
 
@@ -295,7 +256,8 @@ const testWriteTensor = (testName) => {
     const detachedBuffer = inputBuffer.transfer();
     assert_true(inputBuffer.detached, 'array buffer should be detached.');
 
-    mlContext.writeTensor(mlTensor, inputBuffer);
+    assert_throws_js(
+        TypeError, () => mlContext.writeTensor(mlTensor, inputBuffer));
 
     await assert_tensor_data_equals(
         mlContext, mlTensor, new Int32Array(detachedBuffer));
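The hunk above also changes the detached-buffer case: writing a detached ArrayBuffer, which the old test performed without expecting an error, now throws a TypeError. An illustrative sketch (not from the commit), assuming mlContext and a writable 4-byte mlTensor:

    const inputBuffer = new ArrayBuffer(4);
    new Int32Array(inputBuffer).fill(0xBBBBBBBB);

    // transfer() detaches inputBuffer and moves its contents to a new buffer.
    const detachedCopy = inputBuffer.transfer();

    try {
      mlContext.writeTensor(mlTensor, inputBuffer);   // detached, throws TypeError
    } catch (e) {
      mlContext.writeTensor(mlTensor, detachedCopy);  // the transferred buffer still works
    }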
@@ -373,89 +335,14 @@ const testReadTensor = (testName) => {
       writable: true,
     });
 
-
     mlContext.writeTensor(mlTensor, Uint8Array.from([0xAA, 0xAA, 0xAA, 0xAA]));
 
+
     mlContext.writeTensor(mlTensor, Uint32Array.from([0xBBBBBBBB]));
     await assert_tensor_data_equals(
         mlContext, mlTensor, Uint32Array.from([0xBBBBBBBB]));
     ;
-  }, `${testName} / full_size`);
-
-  promise_test(async () => {
-    let mlTensor = await mlContext.createTensor({
-      dataType: 'int32',
-      shape: [1],
-      readable: true,
-      writable: true,
-    });
-
-
-    mlContext.writeTensor(mlTensor, Uint8Array.from([0xAA, 0xAA, 0xAA, 0xAA]));
-
-
-    mlContext.writeTensor(
-        mlTensor, Uint8Array.from([0xCC, 0xCC, 0xBB, 0xBB]),
-        2);
-    await assert_tensor_data_equals(
-        mlContext, mlTensor, Uint8Array.from([0xBB, 0xBB, 0xAA, 0xAA]));
-  }, `${testName} / src_offset_only`);
-
-  promise_test(async () => {
-    let mlTensor = await mlContext.createTensor({
-      dataType: 'int32',
-      shape: [1],
-      readable: true,
-      writable: true,
-    });
-
-
-    mlContext.writeTensor(mlTensor, Uint8Array.from([0xAA, 0xAA, 0xAA, 0xAA]));
-
-
-    mlContext.writeTensor(
-        mlTensor, Uint8Array.from([0xDD, 0xDD, 0xCC, 0xDD]),
-        2, 1);
-    await assert_tensor_data_equals(
-        mlContext, mlTensor, Uint8Array.from([0xCC, 0xAA, 0xAA, 0xAA]));
-  }, `${testName} / src_offset_and_size`);
-
-  promise_test(async () => {
-    let mlTensor = await mlContext.createTensor({
-      dataType: 'int32',
-      shape: [1],
-      readable: true,
-      writable: true,
-    });
-
-
-    mlContext.writeTensor(mlTensor, Uint8Array.from([0xAA, 0xAA, 0xAA, 0xAA]));
-
-
-    mlContext.writeTensor(
-        mlTensor, Uint8Array.from([0xEE, 0xEE, 0xEE, 0xEE, 0xEE]),
-        1);
-    await assert_tensor_data_equals(
-        mlContext, mlTensor, Uint8Array.from([0xEE, 0xEE, 0xEE, 0xEE]));
-  }, `${testName} / larger_src_data`);
-
-  promise_test(async () => {
-    let mlTensor = await mlContext.createTensor({
-      dataType: 'int32',
-      shape: [1],
-      readable: true,
-      writable: true,
-    });
-
-    const inputData = [0xAA, 0xAA, 0xAA, 0xAA];
-
-
-    mlContext.writeTensor(
-        mlTensor, Uint8Array.from(inputData), undefined,
-        inputData.length);
-    await assert_tensor_data_equals(
-        mlContext, mlTensor, Uint8Array.from(inputData));
-  }, `${testName} / no_src_offset`);
+  }, `${testName} / overwrite`);
 
   promise_test(async t => {
     const tensorDescriptor = {
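Since the deleted tests above exercised partial writes at an offset, code that depended on that behavior now needs a full-size write. One possible read-modify-write pattern, sketched under the assumption that mlContext.readTensor(tensor) resolves to an ArrayBuffer holding the tensor's current contents (patchTensorBytes is a hypothetical helper name):

    // writeTensor() only accepts full-size writes, so patch a copy and rewrite it.
    async function patchTensorBytes(mlContext, mlTensor, byteOffset, newBytes) {
      const current = new Uint8Array(await mlContext.readTensor(mlTensor));
      current.set(newBytes, byteOffset);         // patch the copy in place
      mlContext.writeTensor(mlTensor, current);  // write the whole tensor back
    }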
