From cabfc945b8522e1cba3cd2244fb5c730e6989602 Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Wed, 18 Dec 2024 19:08:59 -0800 Subject: [PATCH 01/39] Wave 3 skeleton --- index.bs | 1407 ++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 1249 insertions(+), 158 deletions(-) diff --git a/index.bs b/index.bs index 113fcd4f..3b3d3926 100644 --- a/index.bs +++ b/index.bs @@ -871,7 +871,7 @@ dictionary MLComputeResult { interface MLContext { Promise compute( MLGraph graph, MLNamedArrayBufferViews inputs, MLNamedArrayBufferViews outputs); - + MLOpSupportLimits opSupportLimits(); }; @@ -2565,6 +2565,9 @@ partial interface MLGraphBuilder { MLOperand equal(MLOperand a, MLOperand b, optional MLOperatorOptions options = {}); + MLOperand notEqual(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); MLOperand greater(MLOperand a, MLOperand b, optional MLOperatorOptions options = {}); @@ -2578,6 +2581,15 @@ partial interface MLGraphBuilder { MLOperand b, optional MLOperatorOptions options = {}); MLOperand logicalNot(MLOperand a, optional MLOperatorOptions options = {}); + MLOperand logicalAnd(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); + MLOperand logicalOr(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); + MLOperand logicalXor(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); }; dictionary MLLogicalNotSupportLimits { @@ -2587,11 +2599,15 @@ dictionary MLLogicalNotSupportLimits { partial dictionary MLOpSupportLimits { MLBinarySupportLimits equal; + MLBinarySupportLimits notEqual; MLBinarySupportLimits greater; MLBinarySupportLimits greaterOrEqual; MLBinarySupportLimits lesser; MLBinarySupportLimits lesserOrEqual; MLLogicalNotSupportLimits logicalNot; + MLLogicalNotSupportLimits logicalAnd; + MLLogicalNotSupportLimits logicalOr; + MLLogicalNotSupportLimits logicalXor; }; @@ -2616,6 +2632,8 @@ partial dictionary MLOpSupportLimits {
: equal :: Support limits for operator {{MLGraphBuilder/equal()}}. + : notEqual + :: Support limits for operator {{MLGraphBuilder/notEqual()}}. : greater :: Support limits for operator {{MLGraphBuilder/greater()}}. : greaterOrEqual @@ -2626,16 +2644,26 @@ partial dictionary MLOpSupportLimits { :: Support limits for operator {{MLGraphBuilder/lesserOrEqual()}}. : logicalNot :: Support limits for operator {{MLGraphBuilder/logicalNot()}}. + : logicalAnd + :: Support limits for operator {{MLGraphBuilder/logicalAnd()}}. + : logicalOr + :: Support limits for operator {{MLGraphBuilder/logicalOr()}}. + : logicalXor + :: Support limits for operator {{MLGraphBuilder/logicalXor()}}.
**Operation types:** - *equal*: Compare if the values of the two input tensors are equal, element-wise. + - *notEqual*: Compare if the values of the two input tensors are not equal, element-wise. - *greater*: Compare if the values of the first input tensor is greater, element-wise. - *greaterOrEqual*: Compare if the values of the first input tensor is greater or equal, element-wise. - *lesser*: Compare if the values of the first input tensor is lesser, element-wise. - *lesserOrEqual*: Compare if the values of the first input tensor is lesser or equal, element-wise. - *logicalNot*: Invert the values of the input tensor to values 0 or 1, element-wise. Specifically, when the input value is non-zero, invert it to 0. Conversely, for a zero input value, invert it to 1. + - *logicalAnd*: Compute the logical *and* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalOr*: Compute the logical *or* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalXor*: Compute the logical *exclusive-or* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1.
@@ -2646,7 +2674,7 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les To create element-wise logical operation given [=string=] |op|, {{MLOperand}} |a|, an optional {{MLOperand}} |b|, and {{MLOperatorOptions}} |options|, run the following steps: - 1. [=Assert=]: |op| is one of "equal", "greater", "greaterOrEqual", "lesser", "lesserOrEqual", "logicalNot". + 1. [=Assert=]: |op| is one of "equal", "notEqual", "greater", "greaterOrEqual", "lesser", "lesserOrEqual", "logicalNot", "logicalAnd", "logicalOr", "logicalXor". 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If |op| is "logicalNot": 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |a| returns false, then [=exception/throw=] a {{TypeError}}. @@ -2677,6 +2705,12 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les 1. Return |output|.
+ The notEqual(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "notEqual", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. + +
The greater(|a|, |b|, |options|) method steps are: 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "greater", |a|, |b|, and |options|. @@ -2711,6 +2745,27 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. 1. Return |output|.
+ +
+ The logicalAnd(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "logicalAnd", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
+ +
+ The logicalOr(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "logicalOr", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
+ +
+ The logicalXor(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "logicalXor", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
### Element-wise unary operations ### {#api-mlgraphbuilder-unary} @@ -2919,6 +2974,196 @@ partial dictionary MLOpSupportLimits { + + +### dequantizeLinear ### {#api-mlgraphbuilder-dequantizelinear} +!!!Select the values from the trueValue or the falseValue tensor depending on the corresponding values of the condition tensor, where non-zero is true and zero is false. The condition tensor is often the output of one of the element-wise logical operations. + +The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors. + + + +
+ **Arguments:** + - condition: an {{MLOperand}}. The condition tensor. + - trueValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to true. + - falseValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to false. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from either the trueValue or the falseValue tensor. +
+ +{{MLWhereSupportLimits}} has the following members: +
+ : condition + :: {{MLSupportLimits}} for condition operand. + : trueValue + :: {{MLSupportLimits}} for trueValue operand. + : falseValue + :: {{MLSupportLimits}} for falseValue operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/where()}}: +
+ : where + :: Support limits for operator {{MLGraphBuilder/where()}}. +
+ + +
+ + The where(|condition|, |trueValue|, |falseValue|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |condition|, |trueValue|, and |falseValue| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |condition|'s [=MLOperand/dataType=] is not equal to {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. + 1. If |trueValue|'s [=MLOperand/dataType=] is not equal to |falseValue|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. Let |outputShape| be the result of [=bidirectionally broadcasting=] |trueValue|'s [=MLOperand/shape=] and |falseValue|'s [=MLOperand/shape=]. + 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. + 1. Set |outputShape| to the result of [=bidirectionally broadcasting=] |condition|'s [=MLOperand/shape=] and |outputShape|. + 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. + 1. Let |descriptor| be the result of [=creating an MLOperandDescriptor=] given |trueValue|'s [=MLOperand/dataType=] and |outputShape|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |descriptor|. + 1. Let |operator| be an [=operator=] for the "where" operation, given |condition|, |trueValue|, |falseValue|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |condition|, |trueValue| and |falseValue|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function where(builder, condition, trueValue, falseValue) {
+      const c = builder.clamp(condition, {'minValue': 0, 'maxValue': 1});
+      builder.add(
+        builder.mul(trueValue, builder.cast(c, trueValue.dataType)),
+        builder.mul(
+          falseValue, builder.cast(builder.logicalNot(c), falseValue.dataType)));
+    }
+    
+
+
+ + +### quantizeLinear ### {#api-mlgraphbuilder-quantizelinear} +!!!Select the values from the trueValue or the falseValue tensor depending on the corresponding values of the condition tensor, where non-zero is true and zero is false. The condition tensor is often the output of one of the element-wise logical operations. + +The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors. + + + +
+ **Arguments:** + - condition: an {{MLOperand}}. The condition tensor. + - trueValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to true. + - falseValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to false. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from either the trueValue or the falseValue tensor. +
+ +{{MLWhereSupportLimits}} has the following members: +
+ : condition + :: {{MLSupportLimits}} for condition operand. + : trueValue + :: {{MLSupportLimits}} for trueValue operand. + : falseValue + :: {{MLSupportLimits}} for falseValue operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/where()}}: +
+ : where + :: Support limits for operator {{MLGraphBuilder/where()}}. +
+ + +
+ + The where(|condition|, |trueValue|, |falseValue|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |condition|, |trueValue|, and |falseValue| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |condition|'s [=MLOperand/dataType=] is not equal to {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. + 1. If |trueValue|'s [=MLOperand/dataType=] is not equal to |falseValue|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. Let |outputShape| be the result of [=bidirectionally broadcasting=] |trueValue|'s [=MLOperand/shape=] and |falseValue|'s [=MLOperand/shape=]. + 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. + 1. Set |outputShape| to the result of [=bidirectionally broadcasting=] |condition|'s [=MLOperand/shape=] and |outputShape|. + 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. + 1. Let |descriptor| be the result of [=creating an MLOperandDescriptor=] given |trueValue|'s [=MLOperand/dataType=] and |outputShape|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |descriptor|. + 1. Let |operator| be an [=operator=] for the "where" operation, given |condition|, |trueValue|, |falseValue|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |condition|, |trueValue| and |falseValue|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function where(builder, condition, trueValue, falseValue) {
+      const c = builder.clamp(condition, {'minValue': 0, 'maxValue': 1});
+      builder.add(
+        builder.mul(trueValue, builder.cast(c, trueValue.dataType)),
+        builder.mul(
+          falseValue, builder.cast(builder.logicalNot(c), falseValue.dataType)));
+    }
+    
+
+
+ + ### elu ### {#api-mlgraphbuilder-elu} Calculate the exponential linear unit function (ELU) on the input tensor element-wise. The calculation follows the expression `max(0, x) + alpha * (exp(min(0, x)) - 1)`. @@ -3039,6 +3284,7 @@ partial dictionary MLOpSupportLimits { 1. Return |output|. + ### gather ### {#api-mlgraphbuilder-gather} Gather values of the input tensor along an axis according to the indices. -
+{{MLGatherOptions}} has the following members: +
+ : axis + :: + The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. +
+ +
**Arguments:** - - input: an {{MLOperand}}. The input tensor. - - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. - **Returns:** - - an {{MLOperand}}. The output tensor of the same shape as *input*. + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gelu()}}: -
- : gelu - :: Support limits for operator {{MLGraphBuilder/gelu()}}. +{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. + : output + :: {{MLSupportLimits}} for output operand.
-
+{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +
+ : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}. +
+ +
+ The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+ +
- The gelu(|input|, |options|) method steps are: + The gather(|input|, |indices|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. Let |dimCount| be zero. + 1. Let |rankOutput| be zero. + 1. Let |shapeOutput| be an empty list. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is equal to |axis| then [=iteration/break=]. + 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeIndices|: + 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. + 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. 
Increment |dimCount| by one. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. 1. *Make graph connections:* - 1. Let |output| be the result of [=copying an MLOperand=] given |input|. - 1. Let |operator| be an [=operator=] for the "gelu" operation given |options|. + 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. + 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|.
-
-
- - The behavior of this operation can be [EMULATED] - -
-    function gelu(builder, input) {
-      return builder.mul(
-        builder.mul(input, builder.constant(input.dataType, 0.5)),
-        builder.add(
-          builder.constant(input.dataType, 1),
-          builder.erf(builder.div(
-            input, builder.sqrt(builder.constant(input.dataType, 2))))));
-    }
-    
-
-
+
+
+ + Examples of how gather works in different slicing schemes. + +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
 
-### gemm ### {#api-mlgraphbuilder-gemm}
-Calculate the [general matrix multiplication of the Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3). The calculation follows the expression `alpha * A * B + beta * C`, where `A` is a 2-D tensor with shape *[M, K]* or *[K, M]*, `B` is a 2-D tensor with shape *[K, N]* or *[N, K]*, and `C` is [=unidirectionally broadcastable=] to the shape *[M, N]*. `A` and `B` may optionally be transposed prior to the calculation.
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
+
+    // axis = 0 (default)
+    // indices of shape [2]:
+    //   [3,1]
+    // output of shape [2,3]:
+    //   [[30, 31, 32],
+    //    [10, 11, 12]]
+    const output1 = builder.gather(input, indices1);
+
+    // axis = 1
+    // indices of shape [3]:
+    //   [2,1,1]
+    // output of shape [4,3]:
+    //   [[ 2,  1,  1],
+    //    [12, 11, 11],
+    //    [22, 21, 21],
+    //    [32, 31, 31]]
+    const output2 = builder.gather(input, indices2, {axis: 1});
+
+    // axis = 1
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 2]]
+    // output of shape [4,2,2]:
+    //   [[[ 0,  1], [ 1,  2]],
+    //    [[10, 11], [11, 12]],
+    //    [[20, 21], [21, 22]],
+    //    [[30, 31], [31, 32]]]
+    const output3 = builder.gather(input, indices3, {axis: 1});
+  
+
+
+### gatherNd ### {#api-mlgraphbuilder-gathernd} +!!!Gather values of the input tensor along an axis according to the indices. -{{MLGemmOptions}} has the following members: -
- : c - :: - The third input tensor. It is either a scalar, or of the shape that is [=unidirectionally broadcastable=] to the shape *[M, N]*. When it is not specified, the computation is done as if *c* is a scalar 0.0. - - : alpha - :: - A multiplier for the first input. - - : beta - :: - A multiplier for the third input {{MLGemmOptions/c}}. - - : aTranspose - :: - Indicates if the first input should be transposed prior to calculating the output. - - : bTranspose +{{MLGatherOptions}} has the following members: +
+ : axis :: - Indicates if the second input should be transposed prior to calculating the output. + The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor.
-
+
**Arguments:** - - a: an {{MLOperand}}. The first input 2-D tensor with shape *[M, K]* if *aTranspose* is false, or *[K, M]* if *aTranspose* is true. - - b: an {{MLOperand}}. The second input 2-D tensor with shape *[K, N]* if *bTranspose* is false, or *[N, K]* if *bTranspose* is true. - - options: an optional {{MLGemmOptions}}. The optional parameters of the operation. + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output 2-D tensor of shape *[M, N]* that contains the calculated product of all the inputs. + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
-{{MLGemmSupportLimits}} has the following members: -
- : a - :: {{MLSupportLimits}} for a operand. - : b - :: {{MLSupportLimits}} for b operand. - : c - :: {{MLSupportLimits}} for c operand. +{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. : output :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gemm()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}:
- : gemm - :: Support limits for operator {{MLGraphBuilder/gemm()}}. + : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}.
+
+ The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+
- The gemm(|a|, |b|, |options|) method steps are: + The gather(|input|, |indices|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |a| and |b| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. - 1. If |b|'s [=MLOperand/dataType=] is not equal to |a|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. - 1. If |a|'s [=MLOperand/rank=] is not 2 or |b|'s [=MLOperand/rank=] is not 2, then [=exception/throw=] a {{TypeError}}. - 1. Set |options|.{{MLGemmOptions/alpha}} to the result of [=casting=] |options|.{{MLGemmOptions/alpha}} to |a|'s [=MLOperand/dataType=]. - 1. Set |options|.{{MLGemmOptions/beta}} to the result of [=casting=] |options|.{{MLGemmOptions/beta}} to |a|'s [=MLOperand/dataType=]. - 1. Let |shapeA| be a [=list/clone=] of |a|'s [=MLOperand/shape=]. - 1. Let |shapeB| be a [=list/clone=] of |b|'s [=MLOperand/shape=]. - 1. If |options|.{{MLGemmOptions/aTranspose}} is true, then reverse the order of the items in |shapeA|. - 1. If |options|.{{MLGemmOptions/bTranspose}} is true, then reverse the order of the items in |shapeB|. - 1. If |shapeA|[1] is not equal to |shapeB|[0], then [=exception/throw=] a {{TypeError}}. - 1. If |options|.{{MLGemmOptions/c}} [=map/exists=]: - 1. If it is not [=unidirectionally broadcastable=] to the shape « |shapeA|[0], |shapeB|[1] », then [=exception/throw=] a {{TypeError}}. - 1. If its [=MLOperand/dataType=] is not equal to |a|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. - 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |a|'s [=MLOperand/dataType=] and « |shapeA|[0], |shapeB|[1] ». + 1. 
If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. Let |dimCount| be zero. + 1. Let |rankOutput| be zero. + 1. Let |shapeOutput| be an empty list. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is equal to |axis| then [=iteration/break=]. + 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeIndices|: + 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. + 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. Increment |dimCount| by one. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. 1. *Make graph connections:* - 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. - 1. Let |operator| be an [=operator=] for the "gemm" operation, given |options|. + 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. + 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. 1. 
Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/inputs=] to |a| and |b|. - 1. If |options|.{{MLGemmOptions/c}} [=map/exists=], then add it to |operator|'s [=operator/inputs=]. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|.
-
+
- The behavior of this operation can be [EMULATED] + Examples of how gather works in different slicing schemes.
-    function gemm(builder, a, b, options) {
-      if (options.aTranspose)
-        a = builder.transpose(a);
-
-      if (options.bTranspose)
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
+
+    // axis = 0 (default)
+    // indices of shape [2]:
+    //   [3,1]
+    // output of shape [2,3]:
+    //   [[30, 31, 32],
+    //    [10, 11, 12]]
+    const output1 = builder.gather(input, indices1);
+
+    // axis = 1
+    // indices of shape [3]:
+    //   [2,1,1]
+    // output of shape [4,3]:
+    //   [[ 2,  1,  1],
+    //    [12, 11, 11],
+    //    [22, 21, 21],
+    //    [32, 31, 31]]
+    const output2 = builder.gather(input, indices2, {axis: 1});
+
+    // axis = 1
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 2]]
+    // output of shape [4,2,2]:
+    //   [[[ 0,  1], [ 1,  2]],
+    //    [[10, 11], [11, 12]],
+    //    [[20, 21], [21, 22]],
+    //    [[30, 31], [31, 32]]]
+    const output3 = builder.gather(input, indices3, {axis: 1});
+  
+
+
+ +### gelu ### {#api-mlgraphbuilder-gelu-method} +Compute the gaussian error linear unit function (GELU) of the input tensor. The calculation follows the expression `0.5 * x * (1 + erf(x / sqrt(2)))`. + + + +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** + - an {{MLOperand}}. The output tensor of the same shape as *input*. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gelu()}}: +
+ : gelu + :: Support limits for operator {{MLGraphBuilder/gelu()}}. +
+ +
+ + The gelu(|input|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "gelu" operation given |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function gelu(builder, input) {
+      return builder.mul(
+        builder.mul(input, builder.constant(input.dataType, 0.5)),
+        builder.add(
+          builder.constant(input.dataType, 1),
+          builder.erf(builder.div(
+            input, builder.sqrt(builder.constant(input.dataType, 2))))));
+    }
+    
+
+
+ +### gemm ### {#api-mlgraphbuilder-gemm} +Calculate the [general matrix multiplication of the Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3). The calculation follows the expression `alpha * A * B + beta * C`, where `A` is a 2-D tensor with shape *[M, K]* or *[K, M]*, `B` is a 2-D tensor with shape *[K, N]* or *[N, K]*, and `C` is [=unidirectionally broadcastable=] to the shape *[M, N]*. `A` and `B` may optionally be transposed prior to the calculation. + + + +{{MLGemmOptions}} has the following members: +
+ : c + :: + The third input tensor. It is either a scalar, or of the shape that is [=unidirectionally broadcastable=] to the shape *[M, N]*. When it is not specified, the computation is done as if *c* is a scalar 0.0. + + : alpha + :: + A multiplier for the first input. + + : beta + :: + A multiplier for the third input {{MLGemmOptions/c}}. + + : aTranspose + :: + Indicates if the first input should be transposed prior to calculating the output. + + : bTranspose + :: + Indicates if the second input should be transposed prior to calculating the output. +
+ +
+ **Arguments:** + - a: an {{MLOperand}}. The first input 2-D tensor with shape *[M, K]* if *aTranspose* is false, or *[K, M]* if *aTranspose* is true. + - b: an {{MLOperand}}. The second input 2-D tensor with shape *[K, N]* if *bTranspose* is false, or *[N, K]* if *bTranspose* is true. + - options: an optional {{MLGemmOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output 2-D tensor of shape *[M, N]* that contains the calculated product of all the inputs. +
+ +{{MLGemmSupportLimits}} has the following members: +
+ : a + :: {{MLSupportLimits}} for a operand. + : b + :: {{MLSupportLimits}} for b operand. + : c + :: {{MLSupportLimits}} for c operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gemm()}}: +
+ : gemm + :: Support limits for operator {{MLGraphBuilder/gemm()}}. +
+ +
+ + The gemm(|a|, |b|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |a| and |b| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. + 1. If |b|'s [=MLOperand/dataType=] is not equal to |a|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. If |a|'s [=MLOperand/rank=] is not 2 or |b|'s [=MLOperand/rank=] is not 2, then [=exception/throw=] a {{TypeError}}. + 1. Set |options|.{{MLGemmOptions/alpha}} to the result of [=casting=] |options|.{{MLGemmOptions/alpha}} to |a|'s [=MLOperand/dataType=]. + 1. Set |options|.{{MLGemmOptions/beta}} to the result of [=casting=] |options|.{{MLGemmOptions/beta}} to |a|'s [=MLOperand/dataType=]. + 1. Let |shapeA| be a [=list/clone=] of |a|'s [=MLOperand/shape=]. + 1. Let |shapeB| be a [=list/clone=] of |b|'s [=MLOperand/shape=]. + 1. If |options|.{{MLGemmOptions/aTranspose}} is true, then reverse the order of the items in |shapeA|. + 1. If |options|.{{MLGemmOptions/bTranspose}} is true, then reverse the order of the items in |shapeB|. + 1. If |shapeA|[1] is not equal to |shapeB|[0], then [=exception/throw=] a {{TypeError}}. + 1. If |options|.{{MLGemmOptions/c}} [=map/exists=]: + 1. If it is not [=unidirectionally broadcastable=] to the shape « |shapeA|[0], |shapeB|[1] », then [=exception/throw=] a {{TypeError}}. + 1. If its [=MLOperand/dataType=] is not equal to |a|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |a|'s [=MLOperand/dataType=] and « |shapeA|[0], |shapeB|[1] ». + 1. *Make graph connections:* + 1. 
Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. + 1. Let |operator| be an [=operator=] for the "gemm" operation, given |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |a| and |b|. + 1. If |options|.{{MLGemmOptions/c}} [=map/exists=], then add it to |operator|'s [=operator/inputs=]. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function gemm(builder, a, b, options) {
+      if (options.aTranspose)
+        a = builder.transpose(a);
+
+      if (options.bTranspose)
         b = builder.transpose(b);
 
       let ab = builder.matmul(
@@ -5982,74 +6538,447 @@ partial dictionary MLOpSupportLimits {
     1. Return |output|.
 
-### reshape ### {#api-mlgraphbuilder-reshape-method} -Alter the shape of a tensor to a new shape. Reshape does not copy or change the content of the tensor. It just changes the tensor's logical shape for the subsequent operations. +### reshape ### {#api-mlgraphbuilder-reshape-method} +Alter the shape of a tensor to a new shape. Reshape does not copy or change the content of the tensor. It just changes the tensor's logical shape for the subsequent operations. + +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - newShape: [=sequence=]<{{unsigned long}}>. The shape of the output tensor. + The number of elements implied by *newShape* must be the same as the + number of elements in the input tensor. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output tensor. The values of the output + tensor are the same as values of the input tensor. The shape of the output + tensor is specified by the *newShape* argument. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/reshape()}}: +
+ : reshape + :: Support limits for operator {{MLGraphBuilder/reshape()}}. +
+ +
+ + The reshape(|input|, |newShape|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. Let |outputShape| be an empty array of {{unsigned long}}. + 1. If |newShape|'s [=list/size=] is 0, set |outputShape| to an empty [=/list=] for a scalar. + 1. If any [=list/item=] in |newShape| is not a [=valid dimension=], then [=exception/throw=] a {{TypeError}}. + 1. Let |inputElementCount| be the product of all elements in |input|'s [=MLOperand/shape=]. Empty dimensions yield an |inputElementCount| of 1. + 1. If product of all values in |newShape| is not equal to |inputElementCount|, then [=exception/throw=] a {{TypeError}}. + 1. Let |desc| be a copy of |input|.{{MLOperand/[[descriptor]]}}. + 1. Set |desc|.{{MLOperandDescriptor/shape}} to |newShape|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. + 1. Let |operator| be an [=operator=] for the "reshape" operation, given |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ + +### scatterElements ### {#api-mlgraphbuilder-scatterelements} +!!!Scatter values of the input tensor along an axis according to the indices. + + +{{MLGatherOptions}} has the following members: +
+ : axis + :: + The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. +
+ +{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +
+ : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}. +
+ +
+ The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+ +
+ + The gather(|input|, |indices|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. Let |dimCount| be zero. + 1. Let |rankOutput| be zero. + 1. Let |shapeOutput| be an empty list. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is equal to |axis| then [=iteration/break=]. + 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeIndices|: + 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. + 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. Increment |dimCount| by one. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. + 1. 
Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + Examples of how gather works in different slicing schemes. + +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
+
+    // axis = 0 (default)
+    // indices of shape [2]:
+    //   [3,1]
+    // output of shape [2,3]:
+    //   [[30, 31, 32],
+    //    [10, 11, 12]]
+    const output1 = builder.gather(input, indices1);
+
+    // axis = 1
+    // indices of shape [3]:
+    //   [2,1,1]
+    // output of shape [4,3]:
+    //   [[ 2,  1,  1],
+    //    [12, 11, 11],
+    //    [22, 21, 21],
+    //    [32, 31, 31]]
+    const output2 = builder.gather(input, indices2, {axis: 1});
+
+    // axis = 1
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 2]]
+    // output of shape [4,2,2]:
+    //   [[[ 0,  1], [ 1,  2]],
+    //    [[10, 11], [11, 12]],
+    //    [[20, 21], [21, 22]],
+    //    [[30, 31], [31, 32]]]
+    const output3 = builder.gather(input, indices3, {axis: 1});
+  
+
+
+ + +### scatterNd ### {#api-mlgraphbuilder-scatternd} +!!!Scatter values of the input tensor along an axis according to the indices. + + +{{MLGatherOptions}} has the following members: +
+ : axis + :: + The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. +
+ +{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +
+ : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}. +
+ +
+ The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+ +
+ + The gather(|input|, |indices|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. Let |dimCount| be zero. + 1. Let |rankOutput| be zero. + 1. Let |shapeOutput| be an empty list. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is equal to |axis| then [=iteration/break=]. + 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeIndices|: + 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. + 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. Increment |dimCount| by one. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. + 1. 
Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + Examples of how gather works in different slicing schemes. + +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
+
+    // axis = 0 (default)
+    // indices of shape [2]:
+    //   [3,1]
+    // output of shape [2,3]:
+    //   [[30, 31, 32],
+    //    [10, 11, 12]]
+    const output1 = builder.gather(input, indices1);
+
+    // axis = 1
+    // indices of shape [3]:
+    //   [2,1,1]
+    // output of shape [4,3]:
+    //   [[ 2,  1,  1],
+    //    [12, 11, 11],
+    //    [22, 21, 21],
+    //    [32, 31, 31]]
+    const output2 = builder.gather(input, indices2, {axis: 1});
+
+    // axis = 1
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 2]]
+    // output of shape [4,2,2]:
+    //   [[[ 0,  1], [ 1,  2]],
+    //    [[10, 11], [11, 12]],
+    //    [[20, 21], [21, 22]],
+    //    [[30, 31], [31, 32]]]
+    const output3 = builder.gather(input, indices3, {axis: 1});
+  
+
+
+ + +### sigmoid ### {#api-mlgraphbuilder-sigmoid-method} +Compute the sigmoid function of the input tensor. The calculation follows the expression `1 / (exp(-x) + 1)`. -
+ +
**Arguments:** - input: an {{MLOperand}}. The input tensor. - - newShape: [=sequence=]<{{unsigned long}}>. The shape of the output tensor. - The number of elements implied by *newShape* must be the same as the - number of elements in the input tensor. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output tensor. The values of the output - tensor are the same as values of the input tensor. The shape of the output - tensor is specified by the *newShape* argument. + **Returns:** + - an {{MLOperand}}. The output tensor of the same shape as *input*.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/reshape()}}: +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/sigmoid()}}:
- : reshape - :: Support limits for operator {{MLGraphBuilder/reshape()}}. + : sigmoid + :: Support limits for operator {{MLGraphBuilder/sigmoid()}}.
- The reshape(|input|, |newShape|, |options|) method steps are: + The sigmoid(|input|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - 1. Let |outputShape| be an empty array of {{unsigned long}}. - 1. If |newShape|'s [=list/size=] is 0, set |outputShape| to an empty [=/list=] for a scalar. - 1. If any [=list/item=] in |newShape| is not a [=valid dimension=], then [=exception/throw=] a {{TypeError}}. - 1. Let |inputElementCount| be the product of all elements in |input|'s [=MLOperand/shape=]. Empty dimensions yield an |inputElementCount| of 1. - 1. If product of all values in |newShape| is not equal to |inputElementCount|, then [=exception/throw=] a {{TypeError}}. - 1. Let |desc| be a copy of |input|.{{MLOperand/[[descriptor]]}}. - 1. Set |desc|.{{MLOperandDescriptor/shape}} to |newShape|. + 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. 1. *Make graph connections:* - 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. - 1. Let |operator| be an [=operator=] for the "reshape" operation, given |options|. + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "sigmoid" operation, given |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/input=] to |input|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|.
+
+
+ + The behavior of this operation can be [EMULATED] + +
+    function sigmoid(builder, input) {
+      return builder.div(
+        builder.constant(input.dataType, 1),
+        builder.add(
+          builder.exp(builder.neg(input)), builder.constant(input.dataType, 1)));
+    }
+    
+
+
+ + +### sign ### {#api-mlgraphbuilder-sign} +!!!Compute the softplus function of the input tensor. The calculation follows the expression `ln(1 + exp(x))`. -### sigmoid ### {#api-mlgraphbuilder-sigmoid-method} -Compute the sigmoid function of the input tensor. The calculation follows the expression `1 / (exp(-x) + 1)`. -
+
**Arguments:** - input: an {{MLOperand}}. The input tensor. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. @@ -6058,22 +6987,22 @@ partial dictionary MLOpSupportLimits { - an {{MLOperand}}. The output tensor of the same shape as *input*.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/sigmoid()}}: +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/softplus()}}:
- : sigmoid - :: Support limits for operator {{MLGraphBuilder/sigmoid()}}. + : softplus + :: Support limits for operator {{MLGraphBuilder/softplus()}}.
- The sigmoid(|input|, |options|) method steps are: + The softplus(|input|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. 1. *Make graph connections:* 1. Let |output| be the result of [=copying an MLOperand=] given |input|. - 1. Let |operator| be an [=operator=] for the "sigmoid" operation, given |options|. + 1. Let |operator| be an [=operator=] for the "softplus" operation and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/input=] to |input|. 1. Set |operator|'s [=operator/output=] to |output|. @@ -6086,16 +7015,15 @@ partial dictionary MLOpSupportLimits { The behavior of this operation can be [EMULATED]
-    function sigmoid(builder, input) {
-      return builder.div(
-        builder.constant(input.dataType, 1),
-        builder.add(
-          builder.exp(builder.neg(input)), builder.constant(input.dataType, 1)));
+    function softplus(builder, input) {
+      return builder.log(
+        builder.add(builder.exp(input), builder.constant(input.dataType, 1)));
     }
     
+ ### slice ### {#api-mlgraphbuilder-slice} Produce a slice of the input tensor. + +{{MLGatherOptions}} has the following members: +
+ : axis + :: + The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. +
+ +{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +
+ : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}. +
+ +
+ The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+ +
+ + The gather(|input|, |indices|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. Let |dimCount| be zero. + 1. Let |rankOutput| be zero. + 1. Let |shapeOutput| be an empty list. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is equal to |axis| then [=iteration/break=]. + 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeIndices|: + 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. + 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. Increment |dimCount| by one. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. + 1. 
Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + Examples of how gather works in different slicing schemes. + +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
+
+    // axis = 0 (default)
+    // indices of shape [2]:
+    //   [3,1]
+    // output of shape [2,3]:
+    //   [[30, 31, 32],
+    //    [10, 11, 12]]
+    const output1 = builder.gather(input, indices1);
+
+    // axis = 1
+    // indices of shape [3]:
+    //   [2,1,1]
+    // output of shape [4,3]:
+    //   [[ 2,  1,  1],
+    //    [12, 11, 11],
+    //    [22, 21, 21],
+    //    [32, 31, 31]]
+    const output2 = builder.gather(input, indices2, {axis: 1});
+
+    // axis = 1
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 2]]
+    // output of shape [4,2,2]:
+    //   [[[ 0,  1], [ 1,  2]],
+    //    [[10, 11], [11, 12]],
+    //    [[20, 21], [21, 22]],
+    //    [[30, 31], [31, 32]]]
+    const output3 = builder.gather(input, indices3, {axis: 1});
+  
+
+
+

### transpose ### {#api-mlgraphbuilder-transpose}
Permute the dimensions of the input tensor according to the *permutation* argument.



+
+{{MLCumulativeSumOptions}} has the following members:
+
+ : exclusive
+ ::
+ Whether to include or exclude the current value in the output, meaning inclusive prefix-sum addition (see https://en.wikipedia.org/wiki/Prefix_sum) or exclusive prefix-sum addition. Given input [1,2,3,4], inclusive addition would yield an output of [1,3,6,10] whereas exclusive would yield [0,1,3,6]. The default is inclusive.
+
+ : reversed
+ ::
+ Whether to reverse the summation direction along the active axis to instead start from the high coordinate to low coordinate. Given input *[1,2,3,4]*, inclusive forward addition would yield an output of *[1,3,6,10]* whereas backward summation would yield *[10,9,7,4]*. The default is forward summation.
+
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - options: an {{MLCumulativeSumOptions}}. Specifies the optional parameters of the operation. + + **Returns:** + - an {{MLOperand}}. The output tensor of the same shape as *input*. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/cumulativeSum()}}: +
+ : cumulativeSum + :: Support limits for operator {{MLGraphBuilder/cumulativeSum()}}. +
+ +
+ + The cumulativeSum(|input|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "softplus" operation and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function softplus(builder, input) {
+      return builder.log(
+        builder.add(builder.exp(input), builder.constant(input.dataType, 1)));
+    }
+    
+
+
+ + ### Element-wise binary operations ### {#api-mlgraphbuilder-binary} Compute the element-wise binary addition, subtraction, multiplication, division, power, maximum and minimum of the two input tensors. @@ -3443,24 +3520,14 @@ partial dictionary MLOpSupportLimits { ### gatherElements ### {#api-mlgraphbuilder-gatherelements} !!!Gather values of the input tensor along an axis according to the indices. @@ -3598,57 +3665,30 @@ partial dictionary MLOpSupportLimits { ### gatherNd ### {#api-mlgraphbuilder-gathernd} !!!Gather values of the input tensor along an axis according to the indices. -{{MLGatherOptions}} has the following members: -
- : axis - :: - The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. -
-
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
-{{MLGatherSupportLimits}} has the following members: -
- : input - :: {{MLSupportLimits}} for input operand. - : indices - :: {{MLSupportLimits}} for indices operand. - : output - :: {{MLSupportLimits}} for output operand. -
- -{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherNd()}}:
- : gather - :: Support limits for operator {{MLGraphBuilder/gather()}}. + : gatherNd + :: Support limits for operator {{MLGraphBuilder/gatherNd()}}.
From 9d0d2319eccf49ed1c994c8facb653f8625fc579 Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Wed, 15 Jan 2025 21:09:08 -0800 Subject: [PATCH 03/39] Update more operators --- index.bs | 641 +++++++++++++++++++++++++++---------------------------- 1 file changed, 316 insertions(+), 325 deletions(-) diff --git a/index.bs b/index.bs index 7b0c2312..b7e8a253 100644 --- a/index.bs +++ b/index.bs @@ -2919,7 +2919,9 @@ partial dictionary MLOpSupportLimits { 1. Return |output|.
-### cumulativeSum ### {#api-mlgraphbuilder-sign} + +### cumulativeSum ### {#api-mlgraphbuilder-cumulativesum} +!!! Compute the accumulated sum of a series of values along the given axis, either including or excluding the current value. -{{MLConvTranspose2dOptions}} has the following members: + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/cumulativeSum()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=][=/any rank|N=]
*output*[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
+ +{{MLCumulativeSumOptions}} has the following members:
: exclusive :: - Whether to include or exclude the current value in the output, meaning inclusive presum addition (see https://en.wikipedia.org/wiki/Prefix_sum) or exclusive post-sum addition. Given input [1,2,3,4], inclusive addition would yield an output of [1,3,6,10] whereas exclusive would yield [0,1,3,6]. The default is inclusive. + Whether to include or exclude the current value in the output, meaning inclusive presum addition (see https://en.wikipedia.org/wiki/Prefix_sum) or exclusive post-sum addition. Given input *[1,2,3,4]*, inclusive addition would yield an output of *[1,3,6,10]* whereas exclusive would yield *[0,1,3,6]*. The default is inclusive. : reversed :: @@ -2953,6 +2977,7 @@ partial dictionary MLOpSupportLimits {
**Arguments:** - input: an {{MLOperand}}. The input tensor. + - axis: an {{unsigned long}} scalar. The dimension the reduction will be performed on. - options: an {{MLCumulativeSumOptions}}. Specifies the optional parameters of the operation. **Returns:** @@ -2969,32 +2994,19 @@ partial dictionary MLOpSupportLimits { The cumulativeSum(|input|, |options|) method steps are: - 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-cumulativesum)), then [=exception/throw=] a {{TypeError}}. + 1. If |axis| is greater than or equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. 1. *Make graph connections:* 1. Let |output| be the result of [=copying an MLOperand=] given |input|. - 1. Let |operator| be an [=operator=] for the "softplus" operation and |options|. + 1. Let |operator| be an [=operator=] for the "cumulativeSum" operation and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/input=] to |input|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|. -
-
- - The behavior of this operation can be [EMULATED] - -
-    function softplus(builder, input) {
-      return builder.log(
-        builder.add(builder.exp(input), builder.constant(input.dataType, 1)));
-    }
-    
-
-
- ### Element-wise binary operations ### {#api-mlgraphbuilder-binary} Compute the element-wise binary addition, subtraction, multiplication, division, power, maximum and minimum of the two input tensors. @@ -3416,6 +3428,7 @@ partial interface MLGraphBuilder { MLOperand neg(MLOperand input, optional MLOperatorOptions options = {}); MLOperand reciprocal(MLOperand input, optional MLOperatorOptions options = {}); MLOperand sin(MLOperand input, optional MLOperatorOptions options = {}); + MLOperand sign(MLOperand input, optional MLOperatorOptions options = {}); MLOperand sqrt(MLOperand input, optional MLOperatorOptions options = {}); MLOperand tan(MLOperand input, optional MLOperatorOptions options = {}); }; @@ -3432,12 +3445,13 @@ partial dictionary MLOpSupportLimits { MLSingleInputSupportLimits neg; MLSingleInputSupportLimits reciprocal; MLSingleInputSupportLimits sin; + MLSingleInputSupportLimits sign; MLSingleInputSupportLimits sqrt; MLSingleInputSupportLimits tan; }; -
+
**Arguments:** - input: an {{MLOperand}}. The input tensor. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. @@ -3447,7 +3461,7 @@ partial dictionary MLOpSupportLimits { tensor is the same as the shape of input tensor.
- +
@@ -3494,6 +3508,8 @@ partial dictionary MLOpSupportLimits { : sin :: Support limits for operator {{MLGraphBuilder/sin()}}. : sqrt + :: Support limits for operator {{MLGraphBuilder/sign()}}. + : sqrt :: Support limits for operator {{MLGraphBuilder/sqrt()}}. : tan :: Support limits for operator {{MLGraphBuilder/tan()}}. @@ -3512,6 +3528,7 @@ partial dictionary MLOpSupportLimits { - *neg*: Compute the numerical negative value of the input tensor, element-wise. - *reciprocal*: Compute the reciprocal of the input tensor, element-wise. - *sin*: Compute the sine of the input tensor, element-wise. + - *sign*: Compute the sign (-1, 0, 1) of the input tensor, element-wise, returning 1 if > 0, -1 if < 0, and 0 otherwise. - *sqrt*: Compute the square root of the input tensor, element-wise. - *tan*: Compute the tangent of the input tensor, element-wise. @@ -3520,7 +3537,7 @@ partial dictionary MLOpSupportLimits { To create element-wise unary operation given [=string=] |op|, {{MLOperand}} |input|, optional [=/list=] |allowedDataTypes|, and |options|, run the following steps: - 1. [=Assert=]: |op| is one of "abs", "ceil", "cos", "erf", "exp", "floor", "identity", "log", "neg", "reciprocal", "sin", "sqrt", "tan". + 1. [=Assert=]: |op| is one of "abs", "ceil", "cos", "erf", "exp", "floor", "identity", "log", "neg", "reciprocal", "sin", "sign", "sqrt", "tan". 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |allowedDataTypes| is given and it does not [=list/contain=] |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. @@ -3614,6 +3631,13 @@ partial dictionary MLOpSupportLimits { 1. Return |output|. +
+ The sign(|input|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-unary-op | create element-wise unary operation=] given "sign", |input|, signed types « {{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}, {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"int8"}} », and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
+
The sqrt(|input|, |options|) method steps are: 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-unary-op | create element-wise unary operation=] given "sqrt", |input|, « {{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}} », and |options|. @@ -3629,65 +3653,88 @@ partial dictionary MLOpSupportLimits {
+
+
+ + The behavior of the {{MLGraphBuilder/sign()}} operation can be [EMULATED] + +
+    function sign(builder, input, options) {
+      let zero = builder.constant(input.dataType, 0);
+      let positiveOne = builder.constant(input.dataType, 1);
+      let negativeOne = builder.constant(input.dataType, -1);
+
+      return builder.where(
+        builder.greater(input, zero),
+        positiveOne,
+        builder.where(
+          builder.lesser(input, zero),
+          negativeOne,
+          zero));
+    }
+    
+
+
### dequantizeLinear ### {#api-mlgraphbuilder-dequantizelinear} -!!!Select the values from the trueValue or the falseValue tensor depending on the corresponding values of the condition tensor, where non-zero is true and zero is false. The condition tensor is often the output of one of the element-wise logical operations. +!!! +Dequantizes an integer tensor to floating point space using the scale and zero-point bias. The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors. -
+
**Arguments:** - - condition: an {{MLOperand}}. The condition tensor. - - trueValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to true. - - falseValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to false. + - input: an {{MLOperand}}. The condition tensor. + - scale: an {{MLOperand}}. ---- + - zeroPoint: an {{MLOperand}}. ---- - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from either the trueValue or the falseValue tensor.
-{{MLWhereSupportLimits}} has the following members: -
- : condition - :: {{MLSupportLimits}} for condition operand. - : trueValue - :: {{MLSupportLimits}} for trueValue operand. - : falseValue - :: {{MLSupportLimits}} for falseValue operand. +{{MLQuantizationSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : scale + :: {{MLSupportLimits}} for scale operand. + : zeroPoint + :: {{MLSupportLimits}} for zeroPoint operand. : output :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/where()}}: +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/dequantizeLinear()}}:
- : where - :: Support limits for operator {{MLGraphBuilder/where()}}. + : dequantizeLinear + :: Support limits for operator {{MLGraphBuilder/dequantizeLinear()}}.
- The where(|condition|, |trueValue|, |falseValue|, |options|) method steps are: + The where(|input|, |scale|, |zeroPoint|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |condition|, |trueValue|, and |falseValue| returns false, then [=exception/throw=] a {{TypeError}}. @@ -3713,6 +3760,7 @@ partial dictionary MLOpSupportLimits { The behavior of this operation can be [EMULATED]
+    ----
     function where(builder, condition, trueValue, falseValue) {
       const c = builder.clamp(condition, {'minValue': 0, 'maxValue': 1});
       builder.add(
@@ -3726,62 +3774,63 @@ partial dictionary MLOpSupportLimits {
 
 
 ### quantizeLinear ### {#api-mlgraphbuilder-quantizelinear}
-!!!Select the values from the trueValue or the falseValue tensor depending on the corresponding values of the condition tensor, where non-zero is true and zero is false. The condition tensor is often the output of one of the element-wise logical operations.
+!!!
+Quantizes a floating point tensor to integer point space using the scale and zero-point bias.
 
 The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors.
 
 
 
-
+
**Arguments:** - - condition: an {{MLOperand}}. The condition tensor. - - trueValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to true. - - falseValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to false. + - input: an {{MLOperand}}. The condition tensor. + - scale: an {{MLOperand}}. !!! + - zeroPoint: an {{MLOperand}}. !!! - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from either the trueValue or the falseValue tensor. + **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from ------.
-{{MLWhereSupportLimits}} has the following members: -
- : condition - :: {{MLSupportLimits}} for condition operand. - : trueValue - :: {{MLSupportLimits}} for trueValue operand. - : falseValue - :: {{MLSupportLimits}} for falseValue operand. +{{MLQuantizationSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : scale + :: {{MLSupportLimits}} for scale operand. + : zeroPoint + :: {{MLSupportLimits}} for zeroPoint operand. : output :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/where()}}: +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/quantizeLinear()}}:
- : where - :: Support limits for operator {{MLGraphBuilder/where()}}. + : quantizeLinear + :: Support limits for operator {{MLGraphBuilder/quantizeLinear()}}.
- The where(|condition|, |trueValue|, |falseValue|, |options|) method steps are: + The quantizeLinear(|input|, |scale|, |zeroPoint|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |condition|, |trueValue|, and |falseValue| returns false, then [=exception/throw=] a {{TypeError}}. @@ -3807,6 +3856,7 @@ partial dictionary MLOpSupportLimits { The behavior of this operation can be [EMULATED]
+    ------
     function where(builder, condition, trueValue, falseValue) {
       const c = builder.clamp(condition, {'minValue': 0, 'maxValue': 1});
       builder.add(
@@ -4164,7 +4214,9 @@ partial dictionary MLOpSupportLimits {
 
### gatherElements ### {#api-mlgraphbuilder-gatherelements} -!!!Gather values of the input tensor along an axis according to the indices. +!!! +Gather values of the input tensor along an axis according to the indices. + + +{{MLLayerNormalizationOptions}} has the following members: +
+ : axes + :: + The indices to the input dimensions to reverse. When this member is not present, it is treated as if all dimensions are reversed. If explicitly passed as empty, no dimensions are reversed. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** + - an {{MLOperand}}. The output tensor of the same shape as *input*. +
+ +
Constraints for element-wise unary options
+ + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/reverse()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=][=/any rank|N=]
*output*[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/reverse()}}: +
+ : reverse + :: Support limits for operator {{MLGraphBuilder/reverse()}}. +
+ +
+ + The reverse(|input|, |options|) method steps are: + + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-reverse)), then [=exception/throw=] a {{TypeError}}. + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "reverse" operation and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
### scatterElements ### {#api-mlgraphbuilder-scatterelements} -!!!Scatter values of the input tensor along an axis according to the indices. +!!! +Scatter values of the input tensor along an axis according to the indices. + -{{MLGatherOptions}} has the following members: -
+{{MLScatterOptions}} has the following members: +
: axis :: - The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. + The axis along which the scattered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor.
-
+
**Arguments:** - - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + - input: an {{MLOperand}}. The input N-D tensor from which the values are scattered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to scatter. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - updates: an {{MLOperand}}. !!!! + - options: an optional {{MLScatterOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
-{{MLGatherSupportLimits}} has the following members: -
+{{MLScatterSupportLimits}} has the following members: +
: input :: {{MLSupportLimits}} for input operand. : indices @@ -7903,26 +8038,26 @@ partial dictionary MLOpSupportLimits { :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterElements()}}:
- : gather - :: Support limits for operator {{MLGraphBuilder/gather()}}. + : scatterElements + :: Support limits for operator {{MLGraphBuilder/scatterElements()}}.
- The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterElements(input, indices, options)/indices}} parameter to {{MLGraphBuilder/scatterElements()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The gather(|input|, |indices|, |options|) method steps are: + The scatterElements(|input|, |indices|, |updates|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. - 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. Let |axis| be |options|.{{MLScatterOptions/axis}}. 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. 1. Let |dimCount| be zero. 1. Let |rankOutput| be zero. @@ -7945,7 +8080,7 @@ partial dictionary MLOpSupportLimits { 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. - 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Let |operator| be an [=operator=] for the "scatterElements" operation, given |input|, |indices|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. 1. Set |operator|'s [=operator/output=] to |output|. @@ -7955,7 +8090,7 @@ partial dictionary MLOpSupportLimits {
- Examples of how gather works in different slicing schemes. + Examples of how scatterElements works in different slicing schemes.
     // input of shape [4,3]:
@@ -7982,7 +8117,7 @@ partial dictionary MLOpSupportLimits {
     // output of shape [2,3]:
     //   [[30, 31, 32],
     //    [10, 11, 12]]
-    const output1 = builder.gather(input, indices1);
+    const output1 = builder.scatterElements(input, indices1, updates);
 
     // axis = 1
     // indices of shape [3]:
@@ -7992,7 +8127,7 @@ partial dictionary MLOpSupportLimits {
     //    [12, 11, 11],
     //    [22, 21, 21],
     //    [32, 31, 31]]
-    const output2 = builder.gather(input, indices2, {axis: 1});
+    const output2 = builder.scatterElements(input, indices2, updates, {axis: 1});
 
     // axis = 1
     // indices of shape [2,2]:
@@ -8003,54 +8138,57 @@ partial dictionary MLOpSupportLimits {
     //    [[10, 11], [11, 12]],
     //    [[20, 21], [21, 22]],
     //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
+    const output3 = builder.scatterElements(input, indices3, updates, {axis: 1});
   
### scatterNd ### {#api-mlgraphbuilder-scatternd} -!!!Scatter values of the input tensor along an axis according to the indices. +!!! +Scatter values of the input tensor along an axis according to the indices. + -{{MLGatherOptions}} has the following members: -
+{{MLScatterOptions}} has the following members: +
: axis :: - The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. + The axis along which the scattered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor.
-
+
**Arguments:** - - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + - input: an {{MLOperand}}. The input N-D tensor from which the values are scattered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to scatter. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLScatterOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
-{{MLGatherSupportLimits}} has the following members: -
+{{MLScatterSupportLimits}} has the following members: +
: input :: {{MLSupportLimits}} for input operand. : indices @@ -8059,26 +8197,26 @@ partial dictionary MLOpSupportLimits { :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterNd()}}:
- : gather - :: Support limits for operator {{MLGraphBuilder/gather()}}. + : scatterNd + :: Support limits for operator {{MLGraphBuilder/scatterNd()}}.
- The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterNd(input, indices, options)/indices}} parameter to {{MLGraphBuilder/scatterNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The gather(|input|, |indices|, |options|) method steps are: + The scatterNd(|input|, |indices|, |updates|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. - 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. Let |axis| be |options|.{{MLScatterOptions/axis}}. 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. 1. Let |dimCount| be zero. 1. Let |rankOutput| be zero. @@ -8101,7 +8239,7 @@ partial dictionary MLOpSupportLimits { 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. - 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Let |operator| be an [=operator=] for the "scatterNd" operation, given |input|, |indices|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. 1. Set |operator|'s [=operator/output=] to |output|. @@ -8111,7 +8249,7 @@ partial dictionary MLOpSupportLimits {
- Examples of how gather works in different slicing schemes. + Examples of how scatterNd works in different slicing schemes.
     // input of shape [4,3]:
@@ -8138,7 +8276,7 @@ partial dictionary MLOpSupportLimits {
     // output of shape [2,3]:
     //   [[30, 31, 32],
     //    [10, 11, 12]]
-    const output1 = builder.gather(input, indices1);
+    const output1 = builder.scatterNd(input, indices1, updates);
 
     // axis = 1
     // indices of shape [3]:
@@ -8148,7 +8286,7 @@ partial dictionary MLOpSupportLimits {
     //    [12, 11, 11],
     //    [22, 21, 21],
     //    [32, 31, 31]]
-    const output2 = builder.gather(input, indices2, {axis: 1});
+    const output2 = builder.scatterNd(input, indices2, updates, {axis: 1});
 
     // axis = 1
     // indices of shape [2,2]:
@@ -8159,7 +8297,7 @@ partial dictionary MLOpSupportLimits {
     //    [[10, 11], [11, 12]],
     //    [[20, 21], [21, 22]],
     //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
+    const output3 = builder.scatterNd(input, indices3, updates, {axis: 1});
   
@@ -8245,86 +8383,42 @@ partial dictionary MLOpSupportLimits {
- -### sign ### {#api-mlgraphbuilder-sign} -!!!Compute the softplus function of the input tensor. The calculation follows the expression `ln(1 + exp(x))`. - - - -
- **Arguments:** - - input: an {{MLOperand}}. The input tensor. - - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. - - **Returns:** - - an {{MLOperand}}. The output tensor of the same shape as *input*. -
- -{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/softplus()}}: -
- : softplus - :: Support limits for operator {{MLGraphBuilder/softplus()}}. -
- -
- - The softplus(|input|, |options|) method steps are: - - 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. - 1. *Make graph connections:* - 1. Let |output| be the result of [=copying an MLOperand=] given |input|. - 1. Let |operator| be an [=operator=] for the "softplus" operation and |options|. - 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/input=] to |input|. - 1. Set |operator|'s [=operator/output=] to |output|. - 1. Return |output|. -
- -
-
- - The behavior of this operation can be [EMULATED] - -
-    function softplus(builder, input) {
-      return builder.log(
-        builder.add(builder.exp(input), builder.constant(input.dataType, 1)));
-    }
-    
-
-
- - ### slice ### {#api-mlgraphbuilder-slice} Produce a slice of the input tensor. + +{{MLSliceOptions}} has the following members: +
+ : strides + :: + The stride to step over each input along each axis. + The length of the strides array must equal the [=MLOperand/rank=] of the input tensor. + The default is an array of length [=MLOperand/rank=] consisting of all 1's. + e.g. [1,1,1] for a 3-D tensor. + Strides must be greater than zero. +
+
**Arguments:** - input: an {{MLOperand}}. The input tensor. - starts: [=sequence=]<{{unsigned long}}>. The starting index to slice of each input dimension, of length N where N is the [=MLOperand/rank=] of the input tensor. For each dimension *d* of *input*, *starts[d]* indicates the starting index to slice in that dimension. The starting index must be in the range [0, input size - 1] in that dimension. - sizes: [=sequence=]<{{unsigned long}}>. The number of elements to slice of each input dimension, of length N where N is the [=MLOperand/rank=] of the input tensor. For each dimension *d* of *input*, *sizes[d]* indicates the number of elements to slice in that dimension. The size must not be 0 and must satisfy the constraint `starting index + size <= input size` in that dimension. - - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + - options: an {{MLSliceOptions}}. Specifies the optional parameters of the operation. **Returns:** an {{MLOperand}}. The output tensor of the same rank as the input tensor with tensor values stripped to the specified starting and ending indices in each dimension.
@@ -8371,6 +8465,7 @@ partial dictionary MLOpSupportLimits { 1. If |starts|[|index|] is greater than or equal to |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}. 1. If |starts|[|index|] + |sizes|[|index|] is greater than |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}. + 1. !!!!----- 1. *Make graph connections:* 1. Let |output| be the result of [=copying an MLOperand=] given |input|. 1. Let |operator| be an [=operator=] for the "slice" operation, given |starts|, |sizes|, and |options|. @@ -8847,160 +8942,56 @@ partial dictionary MLOpSupportLimits {
### tile ### {#api-mlgraphbuilder-tile} -!!!Gather values of the input tensor along an axis according to the indices. - -{{MLGatherOptions}} has the following members: -
- : axis - :: - The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. -
- -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + - input: an {{MLOperand}}. The input N-D tensor. + - repetitions: A count per each dimension of how many times to repeat that dimension. The repetitions count must match the input rank, using 1's for any axis that should retain the same size. + - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. + **Returns:** an {{MLOperand}}. The repeated N-D tensor of the same [=MLOperand/rank=] as *input*, where each output dimension size is the corresponding *input* dimension size multiplied by that dimension's *repetitions* count.
-{{MLGatherSupportLimits}} has the following members: -
- : input - :: {{MLSupportLimits}} for input operand. - : indices - :: {{MLSupportLimits}} for indices operand. - : output - :: {{MLSupportLimits}} for output operand. -
- -{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/tile()}}:
- : gather - :: Support limits for operator {{MLGraphBuilder/gather()}}. + : tile + :: Support limits for operator {{MLGraphBuilder/tile()}}.
-
- The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. -
-
- The gather(|input|, |indices|, |options|) method steps are: + The tile(|input|, |repetitions|, |options|) method steps are: - 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. - 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. - 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. - 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. - 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. - 1. Let |dimCount| be zero. - 1. Let |rankOutput| be zero. - 1. Let |shapeOutput| be an empty list. - 1. [=list/For each=] |size| of |shapeInput|: - 1. If |dimCount| is equal to |axis| then [=iteration/break=]. - 1. Set |shapeOutput|[|dimCount|] to |size|. - 1. Increment |dimCount| by one. - 1. Set |rankOutput| to |dimCount|. - 1. Let |dimCount| be zero. - 1. [=list/For each=] |size| of |shapeIndices|: - 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. - 1. Increment |dimCount| by one. - 1. Set |rankOutput| to |rankOutput| + |dimCount|. - 1. Let |dimCount| be zero. - 1. [=list/For each=] |size| of |shapeInput|: - 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. - 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. - 1. Increment |dimCount| by one. - 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. 
If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + !!!1. If |options|.{{MLTransposeOptions/permutation}} does not [=map/exist=], let |options|.{{MLTransposeOptions/permutation}} be the reversed sequence of all indices for |input|'s [=MLOperand/shape=]. + 1. Otherwise if |options|.{{MLTransposeOptions/permutation}} [=map/exists=]: + 1. If its [=list/size=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. If its values are not in [=the range=] 0 to |input|'s [=MLOperand/rank=] exclusive, then [=exception/throw=] a {{TypeError}}. + 1. If it contains duplicate values, then [=exception/throw=] a {{TypeError}}. 1. *Make graph connections:* - 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. - 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "transpose" operation, given |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. + 1. Set |operator|'s [=operator/input=] to |input|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|.
-
-
- - Examples of how gather works in different slicing schemes. - -
-    // input of shape [4,3]:
-    //   [[ 0,  1,  2],
-    //    [10, 11, 12],
-    //    [20, 21, 22],
-    //    [30, 31, 32]]
-    const input = builder.constant(
-      {shape: [4, 3]},
-      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
-
-    const indices1 = builder.constant(
-      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
-
-    const indices2 = builder.constant(
-      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
-
-    const indices3 = builder.constant(
-      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
-
-    // axis = 0 (default)
-    // indices of shape [2]:
-    //   [3,1]
-    // output of shape [2,3]:
-    //   [[30, 31, 32],
-    //    [10, 11, 12]]
-    const output1 = builder.gather(input, indices1);
-
-    // axis = 1
-    // indices of shape [3]:
-    //   [2,1,1]
-    // output of shape [4,3]:
-    //   [[ 2,  1,  1],
-    //    [12, 11, 11],
-    //    [22, 21, 21],
-    //    [32, 31, 31]]
-    const output2 = builder.gather(input, indices2, {axis: 1});
-
-    // axis = 1
-    // indices of shape [2,2]:
-    //   [[0, 1],
-    //    [1, 2]]
-    // output of shape [4,2,2]:
-    //   [[[ 0,  1], [ 1,  2]],
-    //    [[10, 11], [11, 12]],
-    //    [[20, 21], [21, 22]],
-    //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
-  
-
-
- ### transpose ### {#api-mlgraphbuilder-transpose} Permute the dimensions of the input tensor according to the *permutation* argument. - +
@@ -3307,12 +3306,12 @@ partial dictionary MLOpSupportLimits { - *lesserOrEqual*: Compare if the values of the first input tensor is lesser or equal, element-wise. - *logicalNot*: Invert the values of the input tensor to values 0 or 1, element-wise. Specifically, when the input value is non-zero, invert it to 0. Conversely, for a zero input value, invert it to 1. - *logicalAnd*: Compute the logical *and* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. - - *logicalOr*: Compute the logical *and* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. - - *logicalXor*: Compute the logical *and* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalOr*: Compute the logical *or* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalXor*: Compute the logical *xor* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1.
-Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/lesserOrEqual()}} can each be implemented in terms of operations {{MLGraphBuilder/logicalNot()}}, {{MLGraphBuilder/lesser()}}, and {{MLGraphBuilder/greater()}} in other words `builder.greaterOrEqual(a, b)` is `builder.logicalNot(builder.lesser(a, b))`, they are specifically defined to handle NaN cases and for performance reason to avoid double comparisons. +Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/lesserOrEqual()}} can each be implemented in terms of operations {{MLGraphBuilder/logicalNot()}}, {{MLGraphBuilder/lesser()}}, and {{MLGraphBuilder/greater()}} (in other words `builder.greaterOrEqual(a, b)` is `builder.logicalNot(builder.lesser(a, b))`), they are specifically defined to handle NaN cases and for performance reason to avoid double comparisons.
@@ -3678,10 +3677,9 @@ partial dictionary MLOpSupportLimits { ### dequantizeLinear ### {#api-mlgraphbuilder-dequantizelinear} -!!! -Dequantizes an integer tensor to floating point space using the scale and zero-point bias. +Dequantizes an integer tensor to floating point space using the scale and zero-point bias, where `output = (input - zeroPoint) * scale`. -The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors. +The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors, and each dimension must be blockwise compatible with the output (e.g. given an input shape [12], scales of the following shapes are blockwise compatible {[1], [3], [4], [6], [12]} as they are all multiples of the input dimensions, but a shape of [5] would not be). -
+
**Arguments:** - input: an {{MLOperand}}. The condition tensor. - - scale: an {{MLOperand}}. !!! - - zeroPoint: an {{MLOperand}}. !!! + - scale: an {{MLOperand}}. The scale tensor to multiply each input value by after adjusting by the zero point. + - zeroPoint: an {{MLOperand}}. The zero point tensor to subtract from each input value. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from ------. + **Returns:** an {{MLOperand}}. The output tensor that contains the values computed from the dequantization.
{{MLQuantizationSupportLimits}} has the following members: @@ -3830,24 +3806,11 @@ partial dictionary MLOpSupportLimits {
- The quantizeLinear(|input|, |scale|, |zeroPoint|, |options|) method steps are: + The quantizeLinear(|input|, |scale|, |zeroPoint|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |condition|, |trueValue|, and |falseValue| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |condition|'s [=MLOperand/dataType=] is not equal to {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. - 1. If |trueValue|'s [=MLOperand/dataType=] is not equal to |falseValue|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. - 1. Let |outputShape| be the result of [=bidirectionally broadcasting=] |trueValue|'s [=MLOperand/shape=] and |falseValue|'s [=MLOperand/shape=]. - 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. - 1. Set |outputShape| to the result of [=bidirectionally broadcasting=] |condition|'s [=MLOperand/shape=] and |outputShape]. - 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. - 1. Let |descriptor| be the result of [=creating an MLOperandDescriptor=] given |trueValue|'s [=MLOperand/dataType=] and |outputShape|. - 1. *Make graph connections:* - 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |descriptor|. - 1. Let |operator| be an [=operator=] for the "where" operation, given |condition|, |trueValue|, |falseValue|, and |options|. - 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/inputs=] to |condition|, |trueValue| and |falseValue|. - 1. Set |operator|'s [=operator/output=] to |output|. - 1. Return |output|. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |scale|, and |zeroPoint| returns false, then [=exception/throw=] a {{TypeError}}. + TODO: Add validation for scale and zero point shape.
@@ -3856,14 +3819,7 @@ partial dictionary MLOpSupportLimits { The behavior of this operation can be [EMULATED]
-    ------
-    function where(builder, condition, trueValue, falseValue) {
-      const c = builder.clamp(condition, {'minValue': 0, 'maxValue': 1});
-      builder.add(
-        builder.mul(trueValue, builder.cast(c, trueValue.dataType)),
-        builder.mul(
-          falseValue, builder.cast(builder.logicalNot(c), falseValue.dataType)));
-    }
+    TODO:
     
@@ -4214,7 +4170,6 @@ partial dictionary MLOpSupportLimits { ### gatherElements ### {#api-mlgraphbuilder-gatherelements} -!!! Gather values of the input tensor along an axis according to the indices. {{MLLayerNormalizationOptions}} has the following members: -
+
: axes :: The indices to the input dimensions to reverse. When this member is not present, it is treated as if all dimensions are reversed. If explicitly passed as empty, no dimensions are reversed. @@ -7985,7 +7874,6 @@ partial dictionary MLOpSupportLimits { ### scatterElements ### {#api-mlgraphbuilder-scatterelements} -!!! Scatter values of the input tensor along an axis according to the indices. -{{MLGatherOptions}} has the following members: -
- : axis - :: - The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. -
- -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input*.
{{MLGatherSupportLimits}} has the following members: @@ -4210,14 +4264,40 @@ partial dictionary MLOpSupportLimits { :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +
Constraints for {{MLGraphBuilder/cumulativeSum()}}
+ + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/gatherElements()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=]> 1
{{indices}}{{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}}*input*'s [=MLOperand/rank=]
*output*[=/same type as|same as=] {{input}}*input*'s [=MLOperand/rank=]
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherElements()}}:
- : gather - :: Support limits for operator {{MLGraphBuilder/gather()}}. + : gatherElements + :: Support limits for operator {{MLGraphBuilder/gatherElements()}}.
- The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/gatherElements(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherElements()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
@@ -4230,60 +4310,15 @@ partial dictionary MLOpSupportLimits {
- Examples of how gather works in different slicing schemes. + Examples of how gatherElements works in different slicing schemes.
-    // input of shape [4,3]:
-    //   [[ 0,  1,  2],
-    //    [10, 11, 12],
-    //    [20, 21, 22],
-    //    [30, 31, 32]]
-    const input = builder.constant(
-      {shape: [4, 3]},
-      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
-
-    const indices1 = builder.constant(
-      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
-
-    const indices2 = builder.constant(
-      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
-
-    const indices3 = builder.constant(
-      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
-
-    // axis = 0 (default)
-    // indices of shape [2]:
-    //   [3,1]
-    // output of shape [2,3]:
-    //   [[30, 31, 32],
-    //    [10, 11, 12]]
-    const output1 = builder.gather(input, indices1);
-
-    // axis = 1
-    // indices of shape [3]:
-    //   [2,1,1]
-    // output of shape [4,3]:
-    //   [[ 2,  1,  1],
-    //    [12, 11, 11],
-    //    [22, 21, 21],
-    //    [32, 31, 31]]
-    const output2 = builder.gather(input, indices2, {axis: 1});
-
-    // axis = 1
-    // indices of shape [2,2]:
-    //   [[0, 1],
-    //    [1, 2]]
-    // output of shape [4,2,2]:
-    //   [[[ 0,  1], [ 1,  2]],
-    //    [[10, 11], [11, 12]],
-    //    [[20, 21], [21, 22]],
-    //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
+    TODO:
   
-### gatherNd ### {#api-mlgraphbuilder-gathernd} +### gatherND ### {#api-mlgraphbuilder-gathernd} Gather values of the input tensor along an axis according to the indices. -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the *input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1.
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherNd()}}: + + + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/gatherND()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=]> 1
{{indices}}{{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}}> 1
*output*[=/same type as|same as=] {{input}}*input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherND()}}:
- : gatherNd - :: Support limits for operator {{MLGraphBuilder/gatherNd()}}. + : gatherND + :: Support limits for operator {{MLGraphBuilder/gatherND()}}.
- The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/gatherND(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The gather(|input|, |indices|, |options|) method steps are: + The gatherND(|input|, |indices|, |options|) method steps are: TODO:
@@ -4327,55 +4388,10 @@ partial dictionary MLOpSupportLimits {
- Examples of how gather works in different slicing schemes. + Examples of how gatherND works in different slicing schemes.
-    // input of shape [4,3]:
-    //   [[ 0,  1,  2],
-    //    [10, 11, 12],
-    //    [20, 21, 22],
-    //    [30, 31, 32]]
-    const input = builder.constant(
-      {shape: [4, 3]},
-      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
-
-    const indices1 = builder.constant(
-      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
-
-    const indices2 = builder.constant(
-      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
-
-    const indices3 = builder.constant(
-      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
-
-    // axis = 0 (default)
-    // indices of shape [2]:
-    //   [3,1]
-    // output of shape [2,3]:
-    //   [[30, 31, 32],
-    //    [10, 11, 12]]
-    const output1 = builder.gather(input, indices1);
-
-    // axis = 1
-    // indices of shape [3]:
-    //   [2,1,1]
-    // output of shape [4,3]:
-    //   [[ 2,  1,  1],
-    //    [12, 11, 11],
-    //    [22, 21, 21],
-    //    [32, 31, 31]]
-    const output2 = builder.gather(input, indices2, {axis: 1});
-
-    // axis = 1
-    // indices of shape [2,2]:
-    //   [[0, 1],
-    //    [1, 2]]
-    // output of shape [4,2,2]:
-    //   [[[ 0,  1], [ 1,  2]],
-    //    [[10, 11], [11, 12]],
-    //    [[20, 21], [21, 22]],
-    //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
+    TODO:
   
@@ -7814,7 +7830,7 @@ partial dictionary MLOpSupportLimits { }; -{{MLLayerNormalizationOptions}} has the following members: +{{MLReverseOptions}} has the following members:
: axes :: @@ -7874,7 +7890,7 @@ partial dictionary MLOpSupportLimits {
### scatterElements ### {#api-mlgraphbuilder-scatterelements} -Scatter values of the input tensor along an axis according to the indices. +Scatter values from the updates tensor along an axis according to the indices in place of the input tensor. -{{MLScatterOptions}} has the following members: -
- : axis - :: - The axis along which the scattered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. -
- -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are scattered. - - indices: an {{MLOperand}}. The indices N-D tensor of the input values to scatter. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - indices: an {{MLOperand}}. TODO: Elaborate on indices coordinate order. The indices N-D tensor of the input values to scatter. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - updates: an {{MLOperand}}. New values to replace atop the input. - options: an optional {{MLScatterOptions}}. The optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. + **Returns:** an {{MLOperand}}. The output N-D tensor of the same [=MLOperand/shape=] as *input*, with the values at the indexed positions replaced by the corresponding values from *updates*.
{{MLScatterSupportLimits}} has the following members: @@ -8003,23 +8041,56 @@ partial dictionary MLOpSupportLimits { :: {{MLSupportLimits}} for input operand. : indices :: {{MLSupportLimits}} for indices operand. + : updates + :: {{MLSupportLimits}} for updates operand. : output :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterNd()}}: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/scatterND()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=]> 1
{{indices}}{{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}}> 1
{{updates}}[=/same type as|same as=] {{input}}*input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1
*output*[=/same type as|same as=] {{input}}> 1
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterND()}}:
- : scatterNd - :: Support limits for operator {{MLGraphBuilder/scatterNd()}}. + : scatterND + :: Support limits for operator {{MLGraphBuilder/scatterND()}}.
- The {{MLGraphBuilder/scatterNd(input, indices, options)/indices}} parameter to {{MLGraphBuilder/scatterNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterND(input, indices, updates, options)/indices}} parameter to {{MLGraphBuilder/scatterND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The scatterNd(|input|, |indices|, |updates|, |options|) method steps are: + The scatterND(|input|, |indices|, |updates|, |options|) method steps are: TODO:
@@ -8027,7 +8098,7 @@ partial dictionary MLOpSupportLimits {
- Examples of how scatterNd works in different slicing schemes. + Examples of how scatterND works in different slicing schemes.
     TODO:
@@ -8191,6 +8262,8 @@ partial dictionary MLOpSupportLimits {
     1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}.
     1. If any of |sizes|'s [=list/items=] are 0, then [=exception/throw=] a {{TypeError}}.
     1. If |starts|'s [=list/size=] and |sizes|'s [=list/size=] are not both equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}.
+    1. If |options|.{{MLSliceOptions/strides}} [=map/exists=]:
+        1. If |options|.{{MLSliceOptions/strides}}'s [=list/size=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}.
     1. [=list/For each=] |index| in [=the range=] 0 to |input|'s [=MLOperand/rank=], exclusive:
         1. If |sizes|[|index|] is 0, then [=exception/throw=] a {{TypeError}}.
 
@@ -8198,7 +8271,8 @@ partial dictionary MLOpSupportLimits {
 
         1. If |starts|[|index|] is greater than or equal to |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}.
         1. If |starts|[|index|] + |sizes|[|index|] is greater than |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}.
-        1. TODO: Validate steps.
+        1. If |options|.{{MLSliceOptions/strides}} [=map/exists=]:
+            1. If |options|.{{MLSliceOptions/strides}}[|index|] is less than 1, then [=exception/throw=] a {{TypeError}}.
     1. *Make graph connections:*
         1. Let |output| be the result of [=copying an MLOperand=] given |input|.
         1. Let |operator| be an [=operator=] for the "slice" operation, given |starts|, |sizes|, and |options|.
@@ -8675,7 +8749,7 @@ partial dictionary MLOpSupportLimits {
 
### tile ### {#api-mlgraphbuilder-tile} -Repeat a tensor the number of times along each dimension. +Repeat a tensor the given number of times along each dimension. -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor. - - repetitions: A count per each dimension of how many times to repeat that dimension. The repetitions count must match the input rank, using 1's for any axis that should retain the same size. + - repetitions: A count per dimension of how many times to repeat that dimension. The |repetitions| [=list/size=] must match the |input|'s [=MLOperand/rank=], using 1's for any axis that should retain the same size. - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The reversed N-D tensor.
+ + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/tile()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=][=/any rank|N=]
*output*[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
+ {{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/tile()}}:
: tile @@ -8710,7 +8805,18 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - TODO: + 1. If |repetitions|'s [=list/size=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. If |repetitions|'s values contain 0's, then [=exception/throw=] a {{TypeError}}. + + Issue(391): If 0-size dimensions are allowed, revise these steps. + + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "tile" operation, given |repetitions| and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. ### transpose ### {#api-mlgraphbuilder-transpose} From 4da4febdded3f8c22ef72fcc85e67a1013c4a1df Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Thu, 16 Jan 2025 19:24:18 -0800 Subject: [PATCH 06/39] Rank typo for Q and DQ --- index.bs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/index.bs b/index.bs index a9707d4f..b3ec9d12 100644 --- a/index.bs +++ b/index.bs @@ -3659,9 +3659,9 @@ partial dictionary MLOpSupportLimits {
     function sign(builder, input, options) {
-      let zero = builder.constant(input.dataType, 0);
-      let positiveOne = builder.constant(input.dataType, 1);
-      let negativeOne = builder.constant(input.dataType, -1);
+      const zero = builder.constant(input.dataType, 0);
+      const positiveOne = builder.constant(input.dataType, 1);
+      const negativeOne = builder.constant(input.dataType, -1);
 
       return builder.where(
         builder.greater(input, zero),
@@ -3738,7 +3738,7 @@ partial dictionary MLOpSupportLimits {
   
     *output*
     [=/same type as|same as=] {{scale}}
-    4
+    [=/same rank as|same as=] {{input}}
   
 
 
@@ -3843,7 +3843,7 @@ partial dictionary MLOpSupportLimits {
   
     *output*
     [=/same type as|same as=] {{zeroPoint}}
-    4
+    [=/same rank as|same as=] {{input}}
   
 
 

From 83efdf307c04f6d4632cfa653b80755522290bfb Mon Sep 17 00:00:00 2001
From: Dwayne Robinson 
Date: Thu, 13 Feb 2025 06:58:49 -0800
Subject: [PATCH 07/39] CR feedback

---
 index.bs | 82 +++++++++++++++++++++++++++++++-------------------------
 1 file changed, 46 insertions(+), 36 deletions(-)

diff --git a/index.bs b/index.bs
index dc98d3e8..873e8513 100644
--- a/index.bs
+++ b/index.bs
@@ -2974,11 +2974,11 @@ partial dictionary MLOpSupportLimits {
 
: exclusive :: - Whether to include or exclude the current value in the output, meaning inclusive presum addition (see https://en.wikipedia.org/wiki/Prefix_sum) or exclusive post-sum addition. Given input *[1,2,3,4]*, inclusive addition would yield an output of *[1,3,6,10]* whereas exclusive would yield *[0,1,3,6]*. The default is inclusive. + Whether to include or exclude the current value in the output, meaning inclusive prefix sum or exclusive prefix sum [[Prefix-sum]]. Given input *[1,2,3,4]*, inclusive addition would yield an output of *[1,3,6,10]* whereas exclusive would yield *[0,1,3,6]*. The default is inclusive. : reversed :: - Whether to reverse the summation direction along the active axis to instead start from the high coordinate to low coordinate. Given input *[1,2,3,4]*, inclusive forward addition would yield an output of *[1,3,6,10]* whereas backward summation would yield *[10,9,7,4]*. The default is exclusive. + Whether to reverse the summation direction along the active axis to instead start from the high coordinate to low coordinate. Given input *[1,2,3,4]*, inclusive forward addition would yield an output of *[1,3,6,10]* whereas backward summation would yield *[10,9,7,4]*. The default is forward.
@@ -3236,7 +3236,7 @@ partial dictionary MLOpSupportLimits { }; -
+
**Arguments:** - a: an {{MLOperand}}. The first input tensor. - b: an {{MLOperand}}. The second input tensor when specified. @@ -3245,7 +3245,7 @@ partial dictionary MLOpSupportLimits { **Returns:** an {{MLOperand}}. The output tensor that contains the result of element-wise comparison of the two input tensors.
- +
@@ -3313,9 +3313,9 @@ partial dictionary MLOpSupportLimits { - *lesser*: Compare if the values of the first input tensor is lesser, element-wise. - *lesserOrEqual*: Compare if the values of the first input tensor is lesser or equal, element-wise. - *logicalNot*: Invert the values of the input tensor to values 0 or 1, element-wise. Specifically, when the input value is non-zero, invert it to 0. Conversely, for a zero input value, invert it to 1. - - *logicalAnd*: Compute the logical *and* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. - - *logicalOr*: Compute the logical *or* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. - - *logicalXor*: Compute the logical *xor* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalAnd*: Compute the logical *and* of the two input tensors, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalOr*: Compute the logical *or* of the two input tensors, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalXor*: Compute the logical *xor* of the two input tensors, element-wise, treating any non-zero value as true and returning elements of 0 or 1.
@@ -3328,7 +3328,7 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les 1. [=Assert=]: |op| is one of "equal", "notEqual", "greater", "greaterOrEqual", "lesser", "lesserOrEqual", "logicalNot", "logicalAnd", "logicalOr", "logicalXor". 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If |op| is "logicalNot": + 1. If |op| is one of "logicalNot", "logicalAnd", "logicalOr", "logicalXor": 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |a| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. 1. Let |outputShape| be a [=list/clone=] of |a|'s [=MLOperand/shape=]. @@ -3640,7 +3640,7 @@ partial dictionary MLOpSupportLimits {
The sign(|input|, |options|) method steps are: - 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-unary-op | create element-wise unary operation=] given "sign", |input|, signed types « {{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}, {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"int8"}} », and |options|. + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-unary-op | create element-wise unary operation=] given "sign", |input|, « {{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}, {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"int8"}} », and |options|. 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. 1. Return |output|.
@@ -3685,7 +3685,7 @@ partial dictionary MLOpSupportLimits { ### dequantizeLinear ### {#api-mlgraphbuilder-dequantizelinear} -Dequantizes an integer tensor to floating point space using the scale and zero-point bias, where `output = (input - zeroPoint) * scale`. +Dequantizes an integer tensor to a floating point tensor using the scale and zero-point bias, where `output = (input - zeroPoint) * scale`. TODO: Elaborate on blockwise broadcasting - The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors, and each dimension must be blockwise compatible with the output (e.g. given an input shape *[12]*, scales of the following shapes are blockwise compatible {*[1]*, *[3]*, *[4]*, *[6]*, *[12]*} as they are all multiples of the input dimensions, but a shape of *[5]* would not be). @@ -3712,8 +3712,8 @@ partial dictionary MLOpSupportLimits {
**Arguments:** - input: an {{MLOperand}}. The input tensor. - - scale: an {{MLOperand}}. The scale tensor to multiply each input value by after adjusting by the zero point. - - zeroPoint: an {{MLOperand}}. The zero point tensor to subtract from each input value. + - scale: an {{MLOperand}}. The scale tensor to multiply each input value by after adjusting by the zero point. It has the same [=MLOperand/rank=] as the input, and its [=MLOperand/shape=] must evenly divide into the input [=MLOperand/shape=]. + - zeroPoint: an {{MLOperand}}. The zero point tensor to subtract from each input value. It has the same [=MLOperand/shape=] as the scale. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. **Returns:** an {{MLOperand}}. The output tensor that contains the dequantized values. @@ -3730,18 +3730,18 @@ partial dictionary MLOpSupportLimits {
- - + + - + - + @@ -3774,7 +3774,11 @@ partial dictionary MLOpSupportLimits { 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |scale|, and |zeroPoint| returns false, then [=exception/throw=] a {{TypeError}}. - TODO: Add validation for scale and zero point shape. + 1. If |scale|'s [=MLOperand/rank=] or |zeroPoint|'s [=MLOperand/rank=] mismatches |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/shape=] mismatches |zeroPoint|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. + 1. [=list/For each=] |axis| in [=the range=] 0 to |input|'s [=MLOperand/rank=], exclusive: + 1. If |scale|'s [=MLOperand/shape=][|axis|] is not exactly divisible into |input|'s [=MLOperand/shape=][|axis|], then [=exception/throw=] a {{TypeError}}. + 1. If |zeroPoint|'s [=MLOperand/shape=][|axis|] is not exactly divisible into |input|'s [=MLOperand/shape=][|axis|], then [=exception/throw=] a {{TypeError}}.
@@ -3790,7 +3794,7 @@ partial dictionary MLOpSupportLimits { ### quantizeLinear ### {#api-mlgraphbuilder-quantizelinear} -Quantizes a floating point tensor to integer point space using the scale and zero-point bias, where `output = clamp(roundToNearestEvens(input / scale) + zeroPoint, 0, 255)`. +Quantizes a floating point tensor to integer tensor using the scale and zero-point bias, where `output = clamp(roundToNearestEvens(input / scale) + zeroPoint, 0, 255)`. TODO: Elaborate on blockwise broadcasting - The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors, and each dimension must be blockwise compatible with the output (e.g. given an input shape *[12]*, scales of the following shapes are blockwise compatible {*[1]*, *[3]*, *[4]*, *[6]*, *[12]*} as they are all multiples of the input dimensions, but a shape of *[5]* would not be). @@ -3817,8 +3821,8 @@ partial dictionary MLOpSupportLimits {
**Arguments:** - input: an {{MLOperand}}. The condition tensor. - - scale: an {{MLOperand}}. The scale tensor to multiply each input value by after adjusting by the zero point. - - zeroPoint: an {{MLOperand}}. The zero point tensor to subtract from each input value. + - scale: an {{MLOperand}}. The scale tensor to divide each input value by after adjusting by the zero point. It has the same [=MLOperand/rank=] as the input, and its [=MLOperand/shape=] must evenly divide into the input [=MLOperand/shape=]. + - zeroPoint: an {{MLOperand}}. The zero point tensor to add to each rescaled input value. It has the same [=MLOperand/shape=] as the scale. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. **Returns:** an {{MLOperand}}. The output tensor that contains the quantized values. @@ -3836,17 +3840,17 @@ partial dictionary MLOpSupportLimits {
- + - + - - + + @@ -4284,7 +4288,7 @@ partial dictionary MLOpSupportLimits { - + @@ -4327,7 +4331,7 @@ partial dictionary MLOpSupportLimits { ### gatherND ### {#api-mlgraphbuilder-gathernd} -Gather values of the input tensor along an axis according to the indices. +Gather slices of the input tensor according to the indices. -
Constraints for element-wise logical options
{{input}}{{MLOperandDataType/"uint4"}}, {{MLOperandDataType/"uint8"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int32"}}N{{MLOperandDataType/"uint8"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int32"}}[=/any rank|N=]
{{scale}} {{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}0 to {{input}}'s [=MLOperand/rank=][=/same rank as|same as=] {{input}}
{{zeroPoint}} [=/same type as|same as=] {{input}}0 to {{input}}'s [=MLOperand/rank=][=/same rank as|same as=] {{input}}
*output*
{{input}} {{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}N[=/any rank|N=]
{{scale}} [=/same type as|same as=] {{input}}0 to {{input}}'s [=MLOperand/rank=][=/same rank as|same as=] {{input}}
{{zeroPoint}}{{MLOperandDataType/"uint4"}}, {{MLOperandDataType/"uint8"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int32"}}0 to {{input}}'s [=MLOperand/rank=]{{MLOperandDataType/"uint8"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int32"}}[=/same rank as|same as=] {{input}}
*output*
{{input}} [=/any data type|any=]> 11 to [=/any rank|N=]
{{indices}}
+
@@ -3357,6 +3358,7 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les 1. Return |output|. +
The notEqual(|a|, |b|, |options|) method steps are: 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "notEqual", |a|, |b|, and |options|. 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. @@ -3514,7 +3516,7 @@ partial dictionary MLOpSupportLimits { :: Support limits for operator {{MLGraphBuilder/reciprocal()}}. : sin :: Support limits for operator {{MLGraphBuilder/sin()}}. - : sqrt + : sign :: Support limits for operator {{MLGraphBuilder/sign()}}. : sqrt :: Support limits for operator {{MLGraphBuilder/sqrt()}}. @@ -3803,7 +3805,7 @@ partial dictionary MLOpSupportLimits { function blockwiseBroadcast(builder, input, targetShape) { // This expands each axis by repeating the block the number of times per that axis, given the - // original input shape and target shape. However, backend implementations may have much more + // original input shape and target shape. However, backend implementations might have much more // efficient upsampling operators that can accept multiple dimensions to upsample all // dimensions at once by integer multiples (like tile) using nearest neighbor resampling: // output = resample(scale, {sizes: input.shape}) @@ -3909,22 +3911,10 @@ partial dictionary MLOpSupportLimits {
- +
Constraints for {{MLGraphBuilder/cumulativeSum()}}
*output* [=/same type as|same as=] {{zeroPoint}}[=/same rank as|same as=] {{input}}[=/same rank as|same as=] {{input}}
-{{MLQuantizationSupportLimits}} has the following members: -
- : input - :: {{MLSupportLimits}} for input operand. - : scale - :: {{MLSupportLimits}} for scale operand. - : zeroPoint - :: {{MLSupportLimits}} for zeroPoint operand. - : output - :: {{MLSupportLimits}} for output operand. -
- {{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/quantizeLinear()}}:
: quantizeLinear @@ -8178,18 +8168,6 @@ partial dictionary MLOpSupportLimits { **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1.
-{{MLScatterSupportLimits}} has the following members: -
- : input - :: {{MLSupportLimits}} for input operand. - : indices - :: {{MLSupportLimits}} for indices operand. - : updates - :: {{MLSupportLimits}} for updates operand. - : output - :: {{MLSupportLimits}} for output operand. -
- @@ -8915,7 +8893,7 @@ partial dictionary MLOpSupportLimits { **Returns:** an {{MLOperand}}. The reversed N-D tensor. -
Constraints for {{MLGraphBuilder/scatterND()}}
+
From 62024053cfa32500d2ecec5230d7178fa2fa84af Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Fri, 14 Feb 2025 01:35:05 -0800 Subject: [PATCH 11/39] More bikeshed errors --- index.bs | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/index.bs b/index.bs index d9b3afd2..823ba595 100644 --- a/index.bs +++ b/index.bs @@ -3841,13 +3841,6 @@ partial interface MLGraphBuilder { optional MLOperatorOptions options = {}); }; -dictionary MLQuantizationSupportLimits { - MLSupportLimits input; - MLSupportLimits scale; - MLSupportLimits zeroPoint; - MLSupportLimits output; -}; - partial dictionary MLOpSupportLimits { MLQuantizationSupportLimits quantizeLinear; }; @@ -4309,16 +4302,6 @@ partial dictionary MLOpSupportLimits { **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input*. -{{MLGatherSupportLimits}} has the following members: -
- : input - :: {{MLSupportLimits}} for input operand. - : indices - :: {{MLSupportLimits}} for indices operand. - : output - :: {{MLSupportLimits}} for output operand. -
-
Constraints for {{MLGraphBuilder/tile()}}
@@ -8187,10 +8170,6 @@ partial dictionary MLOpSupportLimits { Scatter slices of values from the update tensor atop the input tensor according to the indices. -
+
**Arguments:** - a: an {{MLOperand}}. The first input tensor. - b: an {{MLOperand}}. The second input tensor when specified. @@ -3225,7 +3225,7 @@ partial dictionary MLOpSupportLimits { **Returns:** an {{MLOperand}}. The output tensor that contains the result of element-wise comparison of the two input tensors.
-
Constraints for {{MLGraphBuilder/gatherElements()}}
+
@@ -3308,20 +3308,21 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les 1. [=Assert=]: |op| is one of "equal", "notEqual", "greater", "greaterOrEqual", "lesser", "lesserOrEqual", "logicalNot", "logicalAnd", "logicalOr", "logicalXor". 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If |op| is one of "logicalNot", "logicalAnd", "logicalOr", "logicalXor": - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |a| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. - 1. Let |outputShape| be a [=list/clone=] of |a|'s [=MLOperand/shape=]. - 1. Otherwise: - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |a| and |b| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |a| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |b| is passed: + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |b| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |a|'s [=MLOperand/dataType=] is not equal to |b|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. 1. Let |outputShape| be the result of [=bidirectionally broadcasting=] |a|'s [=MLOperand/shape=] and |b|'s [=MLOperand/shape=]. If that returns failure, then [=exception/throw=] a {{TypeError}}. + 1. Otherwise: + 1. Let |outputShape| be a [=list/clone=] of |a|'s [=MLOperand/shape=]. + 1. If |op| is one of "logicalNot", "logicalAnd", "logicalOr", "logicalXor": + 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. 1. Let |descriptor| be the result of [=creating an MLOperandDescriptor=] given {{MLOperandDataType/"uint8"}} and |outputShape|. 1. *Make graph connections:* 1. 
Let |output| be the result of [=creating an MLOperand=] given [=this=] and |descriptor|. - 1. Let |operator| be an [=operator=] for the |op| operation, given |a| and (if |op| is not "logicalNot") |b|, and |options|. + 1. Let |operator| be an [=operator=] for the |op| operation, given |a| and (if |b| is passed) |b|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/inputs=] to |a| and (if |op| is anything other than "logicalNot") |b|. + 1. Set |operator|'s [=operator/inputs=] to |a| and (if |b| is passed) |b|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|. @@ -3753,11 +3754,14 @@ partial dictionary MLOpSupportLimits { 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |scale|, and |zeroPoint| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |scale|'s [=MLOperand/rank=] or |zeroPoint|'s [=MLOperand/rank=] mismatches |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. - 1. If |scale|'s [=MLOperand/shape=] mismatches |zeroPoint|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/rank=] or |zeroPoint|'s [=MLOperand/rank=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/shape=] is not equal to |zeroPoint|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. 1. If [=blockwise broadcasting=] |scale|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. 1. If [=blockwise broadcasting=] |zeroPoints|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. - 1. 
Let |outputDescriptor| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/shape=]. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |zeroPoint|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. Let |outputDescriptor| be the result of [=creating an MLOperandDescriptor=] given |scale|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/shape=]. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |outputDescriptor|. 1. Let |operator| be an [=operator=] for the "dequantizeLinear" operation, given |input|, |scale|, |zeroPoint|, and |options|. @@ -3777,12 +3781,12 @@ partial dictionary MLOpSupportLimits { // output = (input - zeroPoint) * scale const floatInput = builder.cast(input, scale.dataType); const floatZeroPoint = builder.cast(zeroPoint, scale.dataType); - const upsampledScale = blockwiseBroadcast(builder, scale, input.shape); - const upsampledZeroPoint = blockwiseBroadcast(builder, floatZeroPoint, input.shape); + const upsampledScale = blockwiseExpand(builder, scale, input.shape); + const upsampledZeroPoint = blockwiseExpand(builder, floatZeroPoint, input.shape); return builder.mul(builder.sub(floatInput, upsampledZeroPoint), upsampledScale); } - function blockwiseBroadcast(builder, input, targetShape) { + function blockwiseExpand(builder, input, targetShape) { // This expands each axis by repeating the block the number of times per that axis, given the // original input shape and target shape. 
However, backend implementations might have much more // efficient upsampling operators that can accept multiple dimensions to upsample all @@ -3821,8 +3825,8 @@ partial dictionary MLOpSupportLimits { // e.g. inputShape = [4] with axis = 0 yields shape [1,4,1]. function getFlattenedShapeAroundAxis(inputShape, axis) { axis = Math.max(Math.min(axis, input.shape.length - 1), 0); - const countBefore = axis.slice(0, axis).reduce((a, b) => a * b); - const countAfter = axis.slice(axis + 1, input.shape.length).reduce((a, b) => a * b); + const countBefore = inputShape.slice(0, axis).reduce((a, b) => a * b, 1); + const countAfter = inputShape.slice(axis + 1, inputShape.length).reduce((a, b) => a * b, 1); return [countBefore, inputShape[axis], countAfter]; } @@ -3831,7 +3835,7 @@ partial dictionary MLOpSupportLimits { ### quantizeLinear ### {#api-mlgraphbuilder-quantizelinear} -Quantizes a floating point tensor to integer tensor using the scale and zero-point bias, where `output = clamp(roundToNearestEvens(input / scale) + zeroPoint, 0, 255)`. The *scale* and *zeroPoint* tensors can be smaller than the *input* tensor as they are [=blockwise broadcast=]. +Quantizes a floating point tensor to an integer tensor using the scale and zero-point bias, where `output = clamp(roundEven(input / scale) + zeroPoint, 0, 255)`. The *scale* and *zeroPoint* tensors can be smaller than the *input* tensor as they are [=blockwise broadcast=]. -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the *input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1.
-
Constraints for element-wise logical options
- +
Constraints for {{MLGraphBuilder/gatherND()}}
+ @@ -4406,19 +4479,19 @@ partial dictionary MLOpSupportLimits {
Constraints for {{MLGraphBuilder/gatherNd()}}
operand
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherND()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherNd()}}:
- : gatherND - :: Support limits for operator {{MLGraphBuilder/gatherND()}}. + : gatherNd + :: Support limits for operator {{MLGraphBuilder/gatherNd()}}.
- The {{MLGraphBuilder/gatherND(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/gatherNd(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The gatherND(|input|, |indices|, |options|) method steps are: + The gatherNd(|input|, |indices|, |options|) method steps are: TODO:
@@ -4426,7 +4499,7 @@ partial dictionary MLOpSupportLimits {
- Examples of how gatherND works in different slicing schemes. + Examples of how gatherNd works in different slicing schemes.
     TODO:
@@ -8086,8 +8159,8 @@ partial dictionary MLOpSupportLimits {
 
 
**Arguments:** - - input: an {{MLOperand}}. The input N-D tensor from which the values are scattered. - - indices: an {{MLOperand}}. The indices N-D tensor of the input values to scatter over. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - input: an {{MLOperand}}. The input N-D tensor from to initialize the output with. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to scatter over. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - updates: an {{MLOperand}}. New values to replace atop the input. - options: an optional {{MLScatterOptions}}. The optional parameters of the operation. @@ -8151,7 +8224,103 @@ partial dictionary MLOpSupportLimits { The scatterElements(|input|, |indices|, |updates|, |options|) method steps are: - TODO: +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {dataType: 'float32', shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
+
+    // input of shape [4,2,2]:
+    //   [[[  0,   1],
+    //     [ 10,  11]],
+    //    [[100, 101],
+    //     [110, 111]],
+    //    [[200, 201],
+    //     [210, 211]],
+    //    [[300, 301],
+    //     [310, 311]],]
+    const input = builder.constant(
+      {dataType: 'float32', shape: [4, 2, 2]},
+      new Float32Array([0, 1, 10, 11, 100, 101, 110, 111, 200, 201, 210, 211, 300, 301, 310, 311]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2, 3]},
+      new Uint32Array([3, 1, 1, 2, 0, 3]));
+
+    const updates1 = builder.constant(
+      {dataType: 'float32', shape: [2, 3]},
+      new Float32Array([-1, -2, -3, -4, -5, -6]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [4, 1]},
+      new Uint32Array([2, 1, 0, 2]));
+
+    const updates2 =
+      builder.constant({dataType: 'float32', shape: [4, 1]},
+      new Float32Array([-1, -2, -3, -4]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [1, 2, 2]},
+      new Uint32Array([0, 2, 1, 3]));
+
+    const updates3 =
+      builder.constant({dataType: 'float32', shape: [1, 2, 2]},
+      new Float32Array([-1, -2, -3, -4]));
+
+    // axis = 0 (default)
+    // indices of shape [2,3]:
+    //   [[3, 1, 1],
+    //    [2, 0, 3]]
+    // updates of shape [2,3]:
+    //   [[-1, -2, -3],
+    //    [-4, -5, -6]]
+    // output of shape [4,3]:
+    //   [[ 0, -5,  2],
+    //    [10, -2, -3],
+    //    [-4, 21, 22],
+    //    [-1, 31, -6]]
+    const output1 = builder.scatterElements(input, indices1, updates1);
+
+    // axis = 1
+    // indices of shape [4,1]:
+    //   [[2],
+    //    [1],
+    //    [0],
+    //    [2]],
+    // updates of shape [4,1]:
+    //   [[-1],
+    //    [-2],
+    //    [-3],
+    //    [-4]],
+    // output of shape [4,3]:
+    //   [[ 0,  1, -1],
+    //    [10, -2, 12],
+    //    [-3, 21, 22],
+    //    [30, 31, -4]]
+    const output2 = builder.scatterElements(input, indices2, updates2, {axis: 1});
+
+    // axis = 0 (default), on a 3-D input
+    // indices of shape [1,2,2]:
+    //   [[[0, 2],
+    //     [1, 3]]],
+    // updates of shape [1,2,2]:
+    //   [[[-1, -2],
+    //     [-3, -4]]],
+    // output of shape [4,2,2]:
+    //   [[[ -1,   1],
+    //     [ 10,  11]],
+    //    [[100, 101],
+    //     [ -3, 111]],
+    //    [[200,  -2],
+    //     [210, 211]],
+    //    [[300, 301],
+    //     [310,  -4]],]
+    const output3 = builder.scatterElements(input, indices3, updates3, {axis: 0});
+  
@@ -8166,34 +8335,34 @@ partial dictionary MLOpSupportLimits {
-### scatterND ### {#api-mlgraphbuilder-scatternd} +### scatterNd ### {#api-mlgraphbuilder-scatternd} Scatter slices of values from the update tensor atop the input tensor according to the indices. -
+
**Arguments:** - - input: an {{MLOperand}}. The input N-D tensor from which the values are scattered. - - indices: an {{MLOperand}}. TODO: Elaborate on indices coordinate order. The indices N-D tensor of the input values to scatter. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - input: an {{MLOperand}}. The input N-D tensor from to initialize the output with. + - indices: an {{MLOperand}}. TODO: Elaborate on indices coordinate order. The indices N-D tensor of the input values to scatter. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - updates: an {{MLOperand}}. New values to replace atop the input. - options: an optional {{MLScatterOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1.
- - +
Constraints for {{MLGraphBuilder/scatterND()}}
+ @@ -8223,19 +8392,19 @@ partial dictionary MLOpSupportLimits {
Constraints for {{MLGraphBuilder/scatterNd()}}
operand
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterND()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterNd()}}:
- : scatterND - :: Support limits for operator {{MLGraphBuilder/scatterND()}}. + : scatterNd + :: Support limits for operator {{MLGraphBuilder/scatterNd()}}.
- The {{MLGraphBuilder/scatterND(input, indices, options)/indices}} parameter to {{MLGraphBuilder/scatterND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterNd(input, indices, options)/indices}} parameter to {{MLGraphBuilder/scatterNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The scatterND(|input|, |indices|, |updates|, |options|) method steps are: + The scatterNd(|input|, |indices|, |updates|, |options|) method steps are: TODO:
@@ -8243,7 +8412,7 @@ partial dictionary MLOpSupportLimits {
- Examples of how scatterND works in different slicing schemes. + Examples of how scatterNd works in different slicing schemes.
     TODO:
@@ -9384,14 +9553,12 @@ To blockwise broadcast the shapes |s
 
 1. Let |sizeFrom| be |shapeFrom|'s [=list/size=].
 1. Let |sizeTo| be |shapeTo|'s [=list/size=].
-1. If |sizeFrom| != |sizeTo|, then return failure.
-1. Let |outputShape| be a new [=/list=].
+1. If |sizeFrom| is not equal to |sizeTo|, then return failure.
 1. [=list/For each=] |index| in [=the range=] 0 to |sizeTo|, exclusive:
     1. Let |dimFrom| be |shapeFrom|[|index|].
     1. Let |dimTo| be |shapeTo|[|index|].
-    1. If |dimFrom| is not an exactly divisible into |dimTo|, then return failure.
-    1. [=list/Append=] |dimTo| to |outputShape|.
-1. Return |outputShape|.
+    1. If |dimFrom| is not exactly divisible into |dimTo|, then return failure.
+1. Return |shapeTo|.
 
 
@@ -9599,8 +9766,8 @@ Operations present in other neural network inference APIs can often be emulated function flatten(builder, input, axis) { if (axis > input.shape.length) return input; - const before = axis.slice(0, axis).reduce((a, b) => a * b); - const after = axis.slice(axis, input.shape.length).reduce((a, b) => a * b); + const before = axis.slice(0, axis).reduce((a, b) => a * b, 1); + const after = axis.slice(axis, input.shape.length).reduce((a, b) => a * b, 1); return builder.reshape(input, [before, after]); }
From 9d11f09c805abfd14707c027ddb44243952b257d Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Fri, 14 Feb 2025 22:06:55 -0800 Subject: [PATCH 13/39] Fix more bikeshed errors --- index.bs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/index.bs b/index.bs index 156092d8..76a357fa 100644 --- a/index.bs +++ b/index.bs @@ -8399,7 +8399,7 @@ partial dictionary MLOpSupportLimits {
- The {{MLGraphBuilder/scatterNd(input, indices, options)/indices}} parameter to {{MLGraphBuilder/scatterNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterNd(input, indices, updates, options)/indices}} parameter to {{MLGraphBuilder/scatterNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
@@ -9086,7 +9086,7 @@ partial dictionary MLOpSupportLimits { **Returns:** an {{MLOperand}}. The reversed N-D tensor.
- +
From 93de1fb4550bcee1632435a49883e881952ba0e9 Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Tue, 18 Feb 2025 02:11:15 -0800 Subject: [PATCH 14/39] Add gather/scatterElements algorithm steps. Add gather/scatterNd examples. Fix bool bikeshed issue in cumulativeSum. --- index.bs | 417 +++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 313 insertions(+), 104 deletions(-) diff --git a/index.bs b/index.bs index 76a357fa..efe7d22d 100644 --- a/index.bs +++ b/index.bs @@ -2913,8 +2913,8 @@ Compute the accumulated sum of a series of values along the given axis, either i -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - indices: an {{MLOperand}}. The indices array contains entire coordinates into the input tensor, with the rightmost dimension holding the number of dimensions per coordinate. So an indices tensor of shape [10,1] holds 10 single-axis indices, and a shape of [4,3] holds 4 indices of 3D coordinates. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and each must be in the range -N (inclusive) to N (exclusive) where N is the size of the corresponding input dimension, and a negative index means indexing from the end of the corresponding dimension. @@ -4475,8 +4475,8 @@ partial dictionary MLOpSupportLimits { **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the *input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1.
-
Constraints for {{MLGraphBuilder/tile()}}
- +
Constraints for {{MLGraphBuilder/gatherNd()}}
+ @@ -4501,19 +4501,19 @@ partial dictionary MLOpSupportLimits {
Constraints for {{MLGraphBuilder/gatherND()}}
operand
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherNd()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherND()}}:
- : gatherNd - :: Support limits for operator {{MLGraphBuilder/gatherNd()}}. + : gatherND + :: Support limits for operator {{MLGraphBuilder/gatherND()}}.
- The {{MLGraphBuilder/gatherNd(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/gatherND(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The gatherNd(|input|, |indices|, |options|) method steps are: + The gatherND(|input|, |indices|, |options|) method steps are: 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. @@ -4532,7 +4532,7 @@ partial dictionary MLOpSupportLimits { 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |outputDesc|. - 1. Let |operator| be an [=operator=] for the "gatherNd" operation, given |input|, |indices|, and |options|. + 1. Let |operator| be an [=operator=] for the "gatherND" operation, given |input|, |indices|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. 1. Set |operator|'s [=operator/output=] to |output|. @@ -4542,7 +4542,7 @@ partial dictionary MLOpSupportLimits {
- Examples of how gatherNd works in different slicing schemes. + Examples of how gatherND works in different slicing schemes.
     // input of shape [2,2]:
@@ -4563,7 +4563,7 @@ partial dictionary MLOpSupportLimits {
       {dataType: 'uint32', shape: [3, 2]},
       new Uint32Array([0, 0, 1, 1, 1, 0]));
 
-    const output1 = builder.gatherNd(input1, indices1);
+    const output1 = builder.gatherND(input1, indices1);
 
     // input of shape [2,2]:
     //   [[0, 1],
@@ -4579,7 +4579,7 @@ partial dictionary MLOpSupportLimits {
       {dataType: 'uint32', shape: [2, 1]},
       new Uint32Array([1, 0]));
 
-    const output2 = builder.gatherNd(input1, indices2);
+    const output2 = builder.gatherND(input1, indices2);
 
     // input of shape [2,2,2]:
     //   [[[0, 1],
@@ -4601,7 +4601,7 @@ partial dictionary MLOpSupportLimits {
       {dataType: 'uint32', shape: [2, 2]},
       new Uint32Array([0, 1, 1, 0]));
 
-    const output3 = builder.gatherNd(input2, indices3);
+    const output3 = builder.gatherND(input2, indices3);
 
     // input of shape [2,2,2]:
     //   [[[0, 1],
@@ -4624,7 +4624,7 @@ partial dictionary MLOpSupportLimits {
       {dataType: 'uint32', shape: [3, 1]},
       new Uint32Array([1, 0, 1]));
 
-    const output4 = builder.gatherNd(input2, indices4);
+    const output4 = builder.gatherND(input2, indices4);
 
     // input of shape [2,2,2]:
     //   [[[0, 1],
@@ -4644,7 +4644,7 @@ partial dictionary MLOpSupportLimits {
       {dataType: 'uint32', shape: [5, 3]},
       new Uint32Array([0,0,1, 0,1,0, 1,0,0, 1,1,0, 1,1,1]));
 
-    const output5 = builder.gatherNd(input2, indices5);
+    const output5 = builder.gatherND(input2, indices5);
   
@@ -8463,23 +8463,23 @@ partial dictionary MLOpSupportLimits {
-### scatterNd ### {#api-mlgraphbuilder-scatternd} +### scatterND ### {#api-mlgraphbuilder-scatternd} Scatter slices of values from the update tensor atop a copy of the input tensor according to the indices. -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from to initialize the output with. - indices: an {{MLOperand}}. The indices array contains entire coordinates into the output tensor, with the rightmost dimension holding the number of dimensions per coordinate. So an indices tensor of shape [10,1] holds 10 single-axis indices, and a shape of [4,3] holds 4 indices of 3D coordinates. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and each must be in the range -N (inclusive) to N (exclusive) where N is the size of the corresponding output dimension, and a negative index means indexing from the end of the corresponding dimension. @@ -8489,8 +8489,8 @@ partial dictionary MLOpSupportLimits { **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1.
- - +
Constraints for {{MLGraphBuilder/scatterNd()}}
+ @@ -8520,19 +8520,19 @@ partial dictionary MLOpSupportLimits {
Constraints for {{MLGraphBuilder/scatterND()}}
operand
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterNd()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterND()}}:
- : scatterNd - :: Support limits for operator {{MLGraphBuilder/scatterNd()}}. + : scatterND + :: Support limits for operator {{MLGraphBuilder/scatterND()}}.
- The {{MLGraphBuilder/scatterNd(input, indices, updates, options)/indices}} parameter to {{MLGraphBuilder/scatterNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterND(input, indices, updates, options)/indices}} parameter to {{MLGraphBuilder/scatterND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The scatterNd(|input|, |indices|, |updates|, |options|) method steps are: + The scatterND(|input|, |indices|, |updates|, |options|) method steps are: 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |indices|, and |updates| returns false, then [=exception/throw=] a {{TypeError}}. @@ -8554,7 +8554,7 @@ partial dictionary MLOpSupportLimits { 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |outputDesc|. - 1. Let |operator| be an [=operator=] for the "scatterNd" operation, given |input|, |indices|, |updates|, and |options|. + 1. Let |operator| be an [=operator=] for the "scatterND" operation, given |input|, |indices|, |updates|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/inputs=] to |input|, |indices|, and |updates|. 1. Set |operator|'s [=operator/output=] to |output|. @@ -8564,7 +8564,7 @@ partial dictionary MLOpSupportLimits {
- Examples of how scatterNd works in different slicing schemes. + Examples of how scatterND works in different slicing schemes.
     // input of shape [8]:
@@ -8591,7 +8591,7 @@ partial dictionary MLOpSupportLimits {
       {dataType: 'uint32', shape: [4]},
       new Uint32Array([-1, -2, -3, -4]));
 
-    const output1 = builder.scatterNd(input1, indices1, updates1);
+    const output1 = builder.scatterND(input1, indices1, updates1);
 
     // input of shape [2,2]:
     //   [[0, 1],
@@ -8617,7 +8617,7 @@ partial dictionary MLOpSupportLimits {
       {dataType: 'uint32', shape: [2]},
       new Uint32Array([-1, -2]));
 
-    const output2 = builder.scatterNd(input2, indices2, updates2);
+    const output2 = builder.scatterND(input2, indices2, updates2);
 
     // input of shape [3,2]:
     //   [[0, 1],
@@ -8646,7 +8646,7 @@ partial dictionary MLOpSupportLimits {
       {dataType: 'uint32', shape: [2, 2]},
       new Uint32Array([-1, -2, -3, 4]));
 
-    const output3 = builder.scatterNd(input3, indices3, updates3);
+    const output3 = builder.scatterND(input3, indices3, updates3);
 
     // input of shape [2,2,2]:
     //   [[[0, 1],
@@ -8677,7 +8677,7 @@ partial dictionary MLOpSupportLimits {
       {dataType: 'uint32', shape: [2, 2]},
       new Uint32Array([-1, -2, -3, 4]));
 
-    const output4 = builder.scatterNd(input4, indices4, updates4);
+    const output4 = builder.scatterND(input4, indices4, updates4);
   
From 5027d85d51398fa4608219f058e199c4a2f4007b Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Thu, 27 Feb 2025 01:25:22 -0800 Subject: [PATCH 22/39] Update slice algorithm steps for strides --- index.bs | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/index.bs b/index.bs index ed249220..892b70e1 100644 --- a/index.bs +++ b/index.bs @@ -8838,19 +8838,27 @@ partial dictionary MLOpSupportLimits { 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. 1. If any of |sizes|'s [=list/items=] are 0, then [=exception/throw=] a {{TypeError}}. 1. If |starts|'s [=list/size=] and |sizes|'s [=list/size=] are not both equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. Let |strides| be a new [=/list=]. 1. If |options|.{{MLSliceOptions/strides}} [=map/exists=]: - 1. If |options|.{{MLSliceOptions/strides}}'s [=list/size=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. - 1. [=list/For each=] |index| in [=the range=] 0 to |input|'s [=MLOperand/rank=], exclusive: + 1. Set |strides| to |options|.{{MLSliceOptions/strides}}. + 1. If |strides|'s [=list/size=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. Let |inputShape| be |input|'s [=MLOperand/shape=] and |inputRank| be |input|'s [=MLOperand/rank=]. + 1. Let |outputShape| be a new [=/list=]. + 1. [=list/For each=] |index| in [=the range=] 0 to |inputRank|, exclusive: 1. If |sizes|[|index|] is 0, then [=exception/throw=] a {{TypeError}}. Issue(391): If 0-size dimensions are allowed, revise these steps. - 1. If |starts|[|index|] is greater than or equal to |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}. - 1. If |starts|[|index|] + |sizes|[|index|] is greater than |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}. - 1. 
If |options|.{{MLSliceOptions/strides}} [=map/exists=]: - 1. If |options|.{{MLSliceOptions/strides}}[|index|] is less than 1, then [=exception/throw=] a {{TypeError}}. + 1. If |starts|[|index|] is greater than |inputShape|[|index|], then [=exception/throw=] a {{TypeError}}. + 1. If |starts|[|index|] + |sizes|[|index|] is greater than |inputShape|[|index|], then [=exception/throw=] a {{TypeError}}. + 1. Let |stride| be |strides|[|index|] if it is not empty, or 1 otherwise: + 1. If |stride| is less than 1, then [=exception/throw=] a {{TypeError}}. + 1. Let |outputSizeRoundingExcess| be 1 if |inputShape|[|index|] % |stride| != 0, or 0 otherwise. + 1. Let |outputSize| be |inputShape|[|index|] / |stride| + |outputSizeRoundingExcess| + 1. [=list/Append=] |outputSize| to |outputShape|. + 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |outputShape|. 1. *Make graph connections:* - 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |output| be the result of [=creating an MLOperand=] given |outputDesc|. 1. Let |operator| be an [=operator=] for the "slice" operation, given |starts|, |sizes|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/input=] to |input|. From c6792dfef62cd627bd99902899de6b16a71ad8aa Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Thu, 27 Feb 2025 01:34:45 -0800 Subject: [PATCH 23/39] Fix slice algorithm slice size --- index.bs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/index.bs b/index.bs index 892b70e1..28208c4b 100644 --- a/index.bs +++ b/index.bs @@ -8845,16 +8845,18 @@ partial dictionary MLOpSupportLimits { 1. Let |inputShape| be |input|'s [=MLOperand/shape=] and |inputRank| be |input|'s [=MLOperand/rank=]. 1. Let |outputShape| be a new [=/list=]. 1. [=list/For each=] |index| in [=the range=] 0 to |inputRank|, exclusive: - 1. 
If |sizes|[|index|] is 0, then [=exception/throw=] a {{TypeError}}.
+        1. Let |inputSize| be |inputShape|[|index|].
+        1. Let |inputSliceSize| be |sizes|[|index|].
+        1. Let |stride| be |strides|[|index|] if |strides| is not empty, or 1 otherwise:
+        1. If |inputSliceSize| is 0, then [=exception/throw=] a {{TypeError}}.
 
         Issue(391): If 0-size dimensions are allowed, revise these steps.
 
-        1. If |starts|[|index|] is greater than |inputShape|[|index|], then [=exception/throw=] a {{TypeError}}.
-        1. If |starts|[|index|] + |sizes|[|index|] is greater than |inputShape|[|index|], then [=exception/throw=] a {{TypeError}}.
-        1. Let |stride| be |strides|[|index|] if it is not empty, or 1 otherwise:
         1. If |stride| is less than 1, then [=exception/throw=] a {{TypeError}}.
-        1. Let |outputSizeRoundingExcess| be 1 if |inputShape|[|index|] % |stride| != 0, or 0 otherwise.
-        1. Let |outputSize| be |inputShape|[|index|] / |stride| + |outputSizeRoundingExcess|
+        1. If |starts|[|index|] is greater than |inputSize|, then [=exception/throw=] a {{TypeError}}.
+        1. If |starts|[|index|] + |inputSliceSize| is greater than |inputSize|, then [=exception/throw=] a {{TypeError}}.
+        1. Let |outputSizeRoundingExcess| be 1 if |inputSliceSize| % |stride| != 0, or 0 otherwise.
+        1. Let |outputSize| be |inputSliceSize| / |stride| + |outputSizeRoundingExcess|.
     1. [=list/Append=] |outputSize| to |outputShape|.
     1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |outputShape|.
     1. *Make graph connections:*

From 095027ca7bc10da9e7409ad71e820abe9ba89b34 Mon Sep 17 00:00:00 2001
From: Dwayne Robinson
Date: Thu, 27 Feb 2025 01:45:20 -0800
Subject: [PATCH 24/39] Update blockwise broadcasting to return true/false

---
 index.bs | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/index.bs b/index.bs
index 28208c4b..50fb788f 100644
--- a/index.bs
+++ b/index.bs
@@ -3902,8 +3902,8 @@ partial dictionary MLOpSupportLimits {
     1. 
If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |scale|, and |zeroPoint| returns false, then [=exception/throw=] a {{TypeError}}.
     1. If |scale|'s [=MLOperand/rank=] or |zeroPoint|'s [=MLOperand/rank=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}.
     1. If |scale|'s [=MLOperand/shape=] is not equal to |zeroPoint|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}.
-    1. If [=blockwise broadcasting=] |scale|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}.
-    1. If [=blockwise broadcasting=] |zeroPoints|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}.
+    1. If [=blockwise broadcasting=] |scale|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns false, then [=exception/throw=] a {{TypeError}}.
+    1. If [=blockwise broadcasting=] |zeroPoint|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns false, then [=exception/throw=] a {{TypeError}}.
     1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}.
     1. If |scale|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}.
     1. If |zeroPoints|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}.
@@ -9764,7 +9764,7 @@ The shapes of the input tensors must be compatible. A tensor is [=unidirectional
 
 Two tensors are [=bidirectionally broadcastable=] if they can be mutually "stretched" (repeated) across their various dimensions, starting from the last dimension. 
For example, a *[5,1]* tensor can be bidirectionally broadcast with a *[1,6]* tensor by repeating the first tensor 6 times in the last dimension and the second tensor 5 times in preceding dimension. The result of the operation will be a *[5,6]* tensor. Bidirectional broadcasting is convenient for element-wise operations.
 
-A tensor is [=blockwise broadcastable=] if the all dimensions can be upsampled by integer multiples to the target tensor's shape. For example, a *[4,5]* tensor can be blockwise broadcast up to a *[16,10]* tensor as it is an exact multiple (16 % 4 = 0, 10 % 5 = 0) by repeating every element 4 times in the first dimension and every element 2 times in the last dimension (e.g. values *[1,2,3,4,5]* in a single slice would be repeated to *[1,1,2,2,3,3,4,4,5,5]*). However, a *[4,5]* tensor would be incompatible with a *[9,3]* tensor since both dimensions have a nonzero remainder (9 % 4 = 1, 3 % 5 = 3). Blockwise broadcasting is useful for sharing common values in larger blocks to save memory. Both tensors are expected to have the same rank, and the output shape is simply the target tensor's shape which the smaller one is being upsampled to.
+A tensor is [=blockwise broadcastable=] if all the dimensions can be upsampled by integer multiples to the target tensor's shape. For example, a *[4,5]* tensor can be blockwise broadcast up to a *[16,10]* tensor as it is an exact multiple (16 % 4 = 0, 10 % 5 = 0) by repeating every element 4 times in the first dimension and every element 2 times in the last dimension (e.g. values *[1,2,3,4,5]* in the last dimensions would be repeated to *[1,1,2,2,3,3,4,4,5,5]*). However, a *[4,5]* tensor would be incompatible with a *[9,3]* tensor since both dimensions have a nonzero remainder (9 % 4 = 1, 3 % 5 = 3). Blockwise broadcasting is useful for sharing common values in larger blocks to save memory. 
Both tensors are expected to have the same rank, and the output shape is simply the target tensor's shape, to which the smaller tensor is upsampled.
 
 Some operations allow broadcasting with special semantics. For example, {{MLGraphBuilder/matmul()}} treats the last two dimensions of the input tensors as the rows and columns of the matrices, and the number of columns in the first matrix must be equal to the number of rows in the second matrix. The matrix multiplication is bidirectionally broadcast across any additional dimensions, treating the input tensors as stacks of matrices to multiply.
 
@@ -9820,18 +9820,18 @@ To bidirectionally broadcast the sha
-To blockwise broadcast the shapes |shapeFrom| and |shapeTo|, perform the following steps. |shapeFrom| and |shapeTo| are [=/lists=] of positive integers, representing the dimensions of tensors, and the steps return a new [=/list=] of positive integers, or failure. +To blockwise broadcast the shapes |shapeFrom| and |shapeTo|, perform the following steps. |shapeFrom| and |shapeTo| are [=/lists=] of positive integers, representing the dimensions of tensors, and the steps return true or false. -1. If |shapeFrom|'s [=list/size=] is not equal to |shapeTo|'s [=list/size=], then return failure. +1. If |shapeFrom|'s [=list/size=] is not equal to |shapeTo|'s [=list/size=], then return false. 1. [=list/For each=] |index| in [=the range=] 0 to |shapeTo|'s [=list/size=], exclusive: - 1. If |shapeFrom|[|index|] is not exactly divisible into |shapeTo|[|index|], then return failure. -1. Return |shapeTo|. + 1. If |shapeFrom|[|index|] is not exactly divisible into |shapeTo|[|index|], then return false. +1. Return true.

-|shapeFrom| is blockwise broadcastable to |shapeTo| if [=blockwise broadcasting=] |shapeFrom| and |shapeTo| does not result in failure. +|shapeFrom| is blockwise broadcastable to |shapeTo| if [=blockwise broadcasting=] |shapeFrom| and |shapeTo| returns true.

## Casting ## {#algorithms-casting} From c556eb6ed3ebb5eab39d941afb91096a727ea4a7 Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Thu, 27 Feb 2025 11:09:24 -0800 Subject: [PATCH 25/39] Fix bikeshed naming for rankInput vs inputRank etc --- index.bs | 78 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/index.bs b/index.bs index 50fb788f..efb31be2 100644 --- a/index.bs +++ b/index.bs @@ -4196,29 +4196,29 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |indices|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. - 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |input|'s [=MLOperand/rank=]. - 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |inputShape| be |input|'s [=MLOperand/shape=] and |inputRank| be |input|'s [=MLOperand/rank=]. + 1. Let |indicesShape| be |indices|'s [=MLOperand/shape=]. 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. - 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. If |axis| is greater than or equal to |inputRank|, then [=exception/throw=] a {{TypeError}}. 1. Let |dimCount| be zero. - 1. Let |rankOutput| be zero. - 1. Let |shapeOutput| be an empty list. - 1. [=list/For each=] |size| of |shapeInput|: + 1. Let |outputRank| be zero. + 1. Let |outputShape| be an empty list. + 1. [=list/For each=] |size| of |inputShape|: 1. If |dimCount| is equal to |axis| then [=iteration/break=]. - 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Set |outputShape|[|dimCount|] to |size|. 1. Increment |dimCount| by one. - 1. 
Set |rankOutput| to |dimCount|. + 1. Set |outputRank| to |dimCount|. 1. Let |dimCount| be zero. - 1. [=list/For each=] |size| of |shapeIndices|: - 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. [=list/For each=] |size| of |indicesShape|: + 1. Set |outputShape|[|outputRank| + |dimCount|] to |size|. 1. Increment |dimCount| by one. - 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Set |outputRank| to |outputRank| + |dimCount|. 1. Let |dimCount| be zero. - 1. [=list/For each=] |size| of |shapeInput|: + 1. [=list/For each=] |size| of |inputShape|: 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. - 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. Set |outputShape|[|outputRank| + |dimCount| - |axis| - 1] to |size|. 1. Increment |dimCount| by one. - 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |outputShape|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. @@ -4356,9 +4356,9 @@ partial dictionary MLOpSupportLimits { 1. If |indices|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. 1. If |axis| is greater than or equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. - 1. Let |expectedShapeIndices| be a copy of |input|'s [=MLOperand/shape=]. - 1. Set |expectedShapeIndices|[|axis|] to 1. - 1. If |indices|'s [=MLOperand/shape=] is not equal to |expectedShapeIndices| then [=exception/throw=] a {{TypeError}}. + 1. 
Let |indicesShapeExpected| be a copy of |input|'s [=MLOperand/shape=]. + 1. Set |indicesShapeExpected|[|axis|] to 1. + 1. If |indices|'s [=MLOperand/shape=] is not equal to |indicesShapeExpected| then [=exception/throw=] a {{TypeError}}. 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/dataType=]. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. @@ -4518,18 +4518,18 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |input|'s or |indices|'s [=MLOperand/dataType=]'s are not one of their [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. - 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |input|'s [=MLOperand/rank=]. - 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=] and |rankIndices| be |indices|'s [=MLOperand/rank=]. - 1. If |rankInput| or |rankIndices| is less than 1, then [=exception/throw=] a {{TypeError}}. - 1. Let |indexableSize| be |rankIndices| - 1. - 1. Let |coordinateSize| be |shapeIndices|[|indexableSize|]. + 1. Let |inputShape| be |input|'s [=MLOperand/shape=] and |inputRank| be |input|'s [=MLOperand/rank=]. + 1. Let |indicesShape| be |indices|'s [=MLOperand/shape=] and |indicesRank| be |indices|'s [=MLOperand/rank=]. + 1. If |inputRank| or |indicesRank| is less than 1, then [=exception/throw=] a {{TypeError}}. + 1. Let |indexableSize| be |indicesRank| - 1. + 1. Let |coordinateSize| be |indicesShape|[|indexableSize|]. 1. If |coordinateSize| is greater than |inputRank|, then [=exception/throw=] a {{TypeError}}. - 1. Let |shapeOutput| be an empty list. + 1. 
Let |outputShape| be an empty list. 1. [=list/For each=] |index| in [=the range=] 0 to |indexableSize|, exclusive: - 1. [=list/Append=] |shapeIndices|[|index|] to |shapeOutput|. - 1. [=list/For each=] |index| in [=the range=] |coordinateSize| to |rankInput|, exclusive: - 1. [=list/Append=] |shapeInput|[|index|] to |shapeOutput|. - 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. [=list/Append=] |indicesShape|[|index|] to |outputShape|. + 1. [=list/For each=] |index| in [=the range=] |coordinateSize| to |inputRank|, exclusive: + 1. [=list/Append=] |inputShape|[|index|] to |outputShape|. + 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |outputShape|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |outputDesc|. 1. Let |operator| be an [=operator=] for the "gatherND" operation, given |input|, |indices|, and |options|. @@ -8337,9 +8337,9 @@ partial dictionary MLOpSupportLimits { 1. If |updates|'s [=MLOperand/dataType=] is not equal to |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. 1. If |axis| is greater than or equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. - 1. Let |expectedShapeIndices| be a copy of |input|'s [=MLOperand/shape=]. - 1. Set |expectedShapeIndices|[|axis|] to 1. - 1. If |indices|'s [=MLOperand/shape=] is not equal to |expectedShapeIndices|, then [=exception/throw=] a {{TypeError}}. + 1. Let |indicesShapeExpected| be a copy of |input|'s [=MLOperand/shape=]. + 1. Set |indicesShapeExpected|[|axis|] to 1. + 1. If |indices|'s [=MLOperand/shape=] is not equal to |indicesShapeExpected|, then [=exception/throw=] a {{TypeError}}. 1. 
If |updates|'s [=MLOperand/shape=] is not equal to |indices|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/dataType=]. 1. *Make graph connections:* @@ -8538,20 +8538,20 @@ partial dictionary MLOpSupportLimits { 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |indices|, and |updates| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |input|'s or |indices|'s [=MLOperand/dataType=]'s are not one of their [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. 1. If |udpates|'s [=MLOperand/dataType=] is not equal to |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. - 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |input|'s [=MLOperand/rank=]. - 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=] and |rankIndices| be |indices|'s [=MLOperand/rank=]. - 1. If |rankInput| or |rankIndices| is less than 1, then [=exception/throw=] a {{TypeError}}. - 1. Let |indexableSize| be |rankIndices| - 1. - 1. Let |coordinateSize| be |shapeIndices|[|indexableSize|]. + 1. Let |inputShape| be |input|'s [=MLOperand/shape=] and |inputRank| be |input|'s [=MLOperand/rank=]. + 1. Let |indicesShape| be |indices|'s [=MLOperand/shape=] and |indicesRank| be |indices|'s [=MLOperand/rank=]. + 1. If |inputRank| or |indicesRank| is less than 1, then [=exception/throw=] a {{TypeError}}. + 1. Let |indexableSize| be |indicesRank| - 1. + 1. Let |coordinateSize| be |indicesShape|[|indexableSize|]. 1. If |coordinateSize| is greater than |inputRank|, then [=exception/throw=] a {{TypeError}}. 1. Let |shapeUpdates| be an empty list. 1. [=list/For each=] |index| in [=the range=] 0 to |indexableSize|, exclusive: - 1. [=list/Append=] |shapeIndices|[|index|] to |shapeUpdates|. - 1. 
[=list/For each=] |index| in [=the range=] |coordinateSize| to |rankInput|, exclusive: - 1. [=list/Append=] |shapeInput|[|index|] to |shapeUpdates|. + 1. [=list/Append=] |indicesShape|[|index|] to |shapeUpdates|. + 1. [=list/For each=] |index| in [=the range=] |coordinateSize| to |inputRank|, exclusive: + 1. [=list/Append=] |inputShape|[|index|] to |shapeUpdates|. 1. Let |updates|'s [=MLOperand/shape=] is not equal to |shapeUpdates|, then [=exception/throw=] a {{TypeError}}. 1. Let |outputShape| be a copy of |input|'s [=MLOperand/shape=]. - 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |outputShape|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |outputDesc|. 1. Let |operator| be an [=operator=] for the "scatterND" operation, given |input|, |indices|, |updates|, and |options|. From 99be3914cb63b2590bac080a31535c281ec9cd04 Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Thu, 27 Feb 2025 11:11:15 -0800 Subject: [PATCH 26/39] Apply Joshua's helpful suggestions. Co-authored-by: Joshua Bell --- index.bs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/index.bs b/index.bs index 50fb788f..d582379c 100644 --- a/index.bs +++ b/index.bs @@ -4344,7 +4344,7 @@ partial dictionary MLOpSupportLimits {
- The {{MLGraphBuilder/gatherElements(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherElements()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/gatherElements(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherElements()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the specified clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
@@ -4508,7 +4508,7 @@ partial dictionary MLOpSupportLimits {
- The {{MLGraphBuilder/gatherND(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/gatherND(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the specified clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
@@ -8324,7 +8324,7 @@ partial dictionary MLOpSupportLimits {
- The {{MLGraphBuilder/scatterElements(input, indices, updates, options)/indices}} parameter to {{MLGraphBuilder/scatterElements()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterElements(input, indices, updates, options)/indices}} parameter to {{MLGraphBuilder/scatterElements()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the specified clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
@@ -8527,7 +8527,7 @@ partial dictionary MLOpSupportLimits {
- The {{MLGraphBuilder/scatterND(input, indices, updates, options)/indices}} parameter to {{MLGraphBuilder/scatterND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterND(input, indices, updates, options)/indices}} parameter to {{MLGraphBuilder/scatterND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the specified clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
@@ -8537,7 +8537,7 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |indices|, and |updates| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |input|'s or |indices|'s [=MLOperand/dataType=]'s are not one of their [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. - 1. If |udpates|'s [=MLOperand/dataType=] is not equal to |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. If |updates|'s [=MLOperand/dataType=] is not equal to |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |input|'s [=MLOperand/rank=]. 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=] and |rankIndices| be |indices|'s [=MLOperand/rank=]. 1. If |rankInput| or |rankIndices| is less than 1, then [=exception/throw=] a {{TypeError}}. @@ -8551,7 +8551,7 @@ partial dictionary MLOpSupportLimits { 1. [=list/Append=] |shapeInput|[|index|] to |shapeUpdates|. 1. Let |updates|'s [=MLOperand/shape=] is not equal to |shapeUpdates|, then [=exception/throw=] a {{TypeError}}. 1. Let |outputShape| be a copy of |input|'s [=MLOperand/shape=]. - 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |outputShape|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |outputDesc|. 1. Let |operator| be an [=operator=] for the "scatterND" operation, given |input|, |indices|, |updates|, and |options|. 
From d0764ac11cc0e632da7c2e9340fb54358fdd0bd7 Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Thu, 27 Feb 2025 11:15:15 -0800 Subject: [PATCH 27/39] Refix udpates after merge conflict --- index.bs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/index.bs b/index.bs index 5e48197f..1e592699 100644 --- a/index.bs +++ b/index.bs @@ -8548,7 +8548,7 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |indices|, and |updates| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |input|'s or |indices|'s [=MLOperand/dataType=]'s are not one of their [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. - 1. If |udpates|'s [=MLOperand/dataType=] is not equal to |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. If |updates|'s [=MLOperand/dataType=] is not equal to |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. 1. Let |inputShape| be |input|'s [=MLOperand/shape=] and |inputRank| be |input|'s [=MLOperand/rank=]. 1. Let |indicesShape| be |indices|'s [=MLOperand/shape=] and |indicesRank| be |indices|'s [=MLOperand/rank=]. 1. If |inputRank| or |indicesRank| is less than 1, then [=exception/throw=] a {{TypeError}}. From 035133ca167d414f945c914bee0fd397c9250b6d Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Thu, 27 Feb 2025 11:21:21 -0800 Subject: [PATCH 28/39] More of Joshua's suggestions Co-authored-by: Joshua Bell --- index.bs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/index.bs b/index.bs index 1e592699..66a2ce4c 100644 --- a/index.bs +++ b/index.bs @@ -3760,10 +3760,10 @@ partial dictionary MLOpSupportLimits { 1. 
If |scale|'s [=MLOperand/rank=] or |zeroPoint|'s [=MLOperand/rank=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. 1. If |scale|'s [=MLOperand/shape=] is not equal to |zeroPoint|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. 1. If [=blockwise broadcasting=] |scale|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. - 1. If [=blockwise broadcasting=] |zeroPoints|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. + 1. If [=blockwise broadcasting=] |zeroPoint|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. 1. If |scale|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. - 1. If |zeroPoints|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |zeroPoint|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. 1. Let |outputDescriptor| be the result of [=creating an MLOperandDescriptor=] given |scale|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/shape=]. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |outputDescriptor|. @@ -3910,10 +3910,10 @@ partial dictionary MLOpSupportLimits { 1. 
If |scale|'s [=MLOperand/rank=] or |zeroPoint|'s [=MLOperand/rank=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. 1. If |scale|'s [=MLOperand/shape=] is not equal to |zeroPoint|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. 1. If [=blockwise broadcasting=] |scale|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns false, then [=exception/throw=] a {{TypeError}}. - 1. If [=blockwise broadcasting=] |zeroPoints|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns false, then [=exception/throw=] a {{TypeError}}. + 1. If [=blockwise broadcasting=] |zeroPoint|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns false, then [=exception/throw=] a {{TypeError}}. 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. 1. If |scale|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. - 1. If |zeroPoints|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |zeroPoint|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. 1. Let |outputDescriptor| be the result of [=creating an MLOperandDescriptor=] given |zeroPoint|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/shape=]. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |outputDescriptor|. @@ -8867,7 +8867,7 @@ partial dictionary MLOpSupportLimits { 1. If |starts|[|index|] is greater than |inputSize|, then [=exception/throw=] a {{TypeError}}. 1. 
If |starts|[|index|] + |inputSliceSize| is greater than |inputSize|, then [=exception/throw=] a {{TypeError}}. 1. Let |outputSizeRoundingExcess| be 1 if |inputSliceSize| % |stride| != 0, or 0 otherwise. - 1. Let |outputSize| be |inputSliceSize| / |stride| + |outputSizeRoundingExcess| + 1. Let |outputSize| be |inputSliceSize| / |stride| + |outputSizeRoundingExcess|. 1. [=list/Append=] |outputSize| to |outputShape|. 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |outputShape|. 1. *Make graph connections:* From 66ee789c11d29f30bb79a193a2bb9537f86a1d7d Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Thu, 27 Feb 2025 11:23:20 -0800 Subject: [PATCH 29/39] Add reverse inputRank --- index.bs | 1 + 1 file changed, 1 insertion(+) diff --git a/index.bs b/index.bs index 1e592699..42927dd2 100644 --- a/index.bs +++ b/index.bs @@ -8230,6 +8230,7 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-reverse)), then [=exception/throw=] a {{TypeError}}. + 1. Let |inputRank| be |input|'s [=MLOperand/rank=]. 1. If |axes| is not given, let |axes| be [=the range=] 0 to |inputRank|, exclusive. 1. Otherwise, if |axes| contains duplicate values, or if any of its elements is not in [=the range=] 0 to |inputRank|, exclusive, then return failure. 1. 
*Make graph connections:* From 4c861bd74ec657b5f9732b02a525af834e14d19e Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Thu, 27 Feb 2025 11:27:19 -0800 Subject: [PATCH 30/39] =?UTF-8?q?More=20of=20Joshua's=20suggestions=20?= =?UTF-8?q?=F0=9F=91=8D=F0=9F=99=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Joshua Bell --- index.bs | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/index.bs b/index.bs index 66a2ce4c..877259aa 100644 --- a/index.bs +++ b/index.bs @@ -2914,10 +2914,9 @@ partial dictionary MLOpSupportLimits { Compute the accumulated sum of a series of values along the given axis, either including or excluding the current value. @@ -3711,7 +3711,7 @@ partial dictionary MLOpSupportLimits { {{input}} - {{MLOperandDataType/"uint8"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int32"}} + {{MLOperandDataType/"uint8"}}, {{MLOperandDataType/"int8"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int32"}} [=/any rank|N=] @@ -3757,11 +3757,11 @@ partial dictionary MLOpSupportLimits { 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |scale|, and |zeroPoint| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |scale|'s [=MLOperand/rank=] or |zeroPoint|'s [=MLOperand/rank=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. 1. If |scale|'s [=MLOperand/shape=] is not [=list/equal=] to |zeroPoint|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. - 1. If [=blockwise broadcasting=] |scale|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. - 1. If [=blockwise broadcasting=] |zeroPoint|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. - 1. 
If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. - 1. If |scale|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. - 1. If |zeroPoint|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If [=blockwise broadcasting=] |scale|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns false, then [=exception/throw=] a {{TypeError}}. + 1. If [=blockwise broadcasting=] |zeroPoint|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |zeroPoint|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. 1. Let |outputDescriptor| be the result of [=creating an MLOperandDescriptor=] given |scale|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/shape=]. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |outputDescriptor|. 
@@ -3787,38 +3787,40 @@ partial dictionary MLOpSupportLimits { return builder.mul(builder.sub(floatInput, upsampledZeroPoint), upsampledScale); } - function blockwiseExpand(builder, input, targetShape) { - // This expands each axis by repeating the block the number of times per that axis, given the - // original input shape and target shape. However, backend implementations might have much more - // efficient upsampling operators that can accept multiple dimensions to upsample all - // dimensions at once by integer multiples (like tile) using nearest neighbor resampling: + function blockwiseExpand(builder, input, outputShape) { + // Given the original input and a desired output shape, this expands each axis + // by repeating the block the number of times per that axis. Though, backend + // implementations might have much more efficient upsampling operators that + // can accept multiple dimensions to upsample all dimensions at once by + // integer multiples (like tile) using nearest neighbor resampling: // output = resample(scale, {sizes: input.shape}) - let expandedInput = input; + let output = input; for (let axis = 0; axis < input.shape.length; ++axis) { - const inputShape = expandedInput.shape; - const oldDimensionLength = inputShape[axis]; - const newDimensionLength = targetShape[axis]; + const oldShape = output.shape; + const oldDimensionLength = oldShape[axis]; + const newDimensionLength = outputShape[axis]; if (newDimensionLength != oldDimensionLength) { - // Since tile/expand can only accept repetitions of entire dimension slices (not repeating - // individual elements along an axis), temporarily reshape the tensor to enable them to broadcast - // the elements up to the full block size, utilizing an inserted dimension of size 1. 
+ // Since tile/expand can only accept repetitions of entire dimension + // slices (not repeating individual elements along an axis), temporarily + // reshape the tensor to enable them to broadcast the elements up to the + // full block size, utilizing an inserted dimension of size 1. const elementRepeatCount = newDimensionLength / oldDimensionLength; - const flattenedShape = getFlattenedShapeAroundAxis(inputShape, axis); + const flattenedShape = getFlattenedShapeAroundAxis(oldShape, axis); const unexpandedShape = [flattenedShape[0], flattenedShape[1], 1, flattenedShape[2]]; const expandedShape = [flattenedShape[0], flattenedShape[1], elementRepeatCount, flattenedShape[2]]; - const reshapedInput = builder.reshape(expandedInput, unexpandedShape); - expandedInput = builder.expand(reshapedInput, expandedShape); - } + const reshapedInput = builder.reshape(output, unexpandedShape); + output = builder.expand(reshapedInput, expandedShape); - let newInputShape = [...inputShape]; - newInputShape[axis] = newDimensionLength; - expandedInput = builder.reshape(expandedInput, newInputShape); + let newShape = [...oldShape]; + newShape[axis] = newDimensionLength; + output = builder.reshape(output, newShape); + } } - return expandedInput; + return output; } // Compute the flattened shape before and after the given axis, yielding a 3-element list. 
@@ -3882,7 +3884,7 @@ partial dictionary MLOpSupportLimits { {{zeroPoint}} - {{MLOperandDataType/"uint8"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int32"}} + {{MLOperandDataType/"uint8"}}, {{MLOperandDataType/"int8"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int32"}} [=/same rank as|same as=] {{input}} @@ -4245,7 +4247,7 @@ partial dictionary MLOpSupportLimits { // [20, 21, 22], // [30, 31, 32]] const input = builder.constant( - {shape: [4, 3]}, + {dataType: 'float32', shape: [4, 3]}, new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32])); // axis = 0 (default) @@ -4333,12 +4335,12 @@ partial dictionary MLOpSupportLimits { {{indices}} {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}} - *input*'s [=MLOperand/rank=] + [=/same rank as|same as=] {{input}} *output* [=/same type as|same as=] {{input}} - *input*'s [=MLOperand/rank=] + [=/same rank as|same as=] {{input}} @@ -4358,7 +4360,7 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |indices|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-gatherelements)), then [=exception/throw=] a {{TypeError}}. 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. 1. If |axis| is greater than or equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. 1. Let |indicesShapeExpected| be a copy of |input|'s [=MLOperand/shape=]. 
@@ -4394,12 +4396,12 @@ partial dictionary MLOpSupportLimits { // [20, 1, 32]] const input1 = builder.constant( - {shape: [4, 3]}, + {dataType: 'float32', shape: [4, 3]}, new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32])); const indices1 = builder.constant( {dataType: 'uint32', shape: [2, 3]}, - new Uint32Array([3, 1, 2, 2, 0, 1])); + new Uint32Array([3, 1, 1, 2, 0, 3])); const output1 = builder.gatherElements(input1, indices1); @@ -4443,15 +4445,15 @@ partial dictionary MLOpSupportLimits { // [[[ 0, 201], // [110, 311]]] - const input2 = builder.constant( - {shape: [4, 2, 2]}, + const input3 = builder.constant( + {dataType: 'float32', shape: [4, 2, 2]}, new Float32Array([0, 1, 10, 11, 100, 101, 110, 111, 200, 201, 210, 211, 300, 301, 310, 311])); const indices3 = builder.constant( {dataType: 'uint32', shape: [1, 2, 2]}, - new Uint32Array([0, 1, 1, 2])); + new Uint32Array([0, 2, 1, 3])); - const output3 = builder.gatherElements(input2, indices3, {axis: 2}); + const output3 = builder.gatherElements(input3, indices3, {axis: 0});
@@ -4522,7 +4524,7 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |input|'s or |indices|'s [=MLOperand/dataType=]'s are not one of their [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=]'s is not one of the [=/allowed data types=] (according to [this table](#constraints-gathernd)), then [=exception/throw=] a {{TypeError}}. 1. Let |inputShape| be |input|'s [=MLOperand/shape=] and |inputRank| be |input|'s [=MLOperand/rank=]. 1. Let |indicesShape| be |indices|'s [=MLOperand/shape=] and |indicesRank| be |indices|'s [=MLOperand/rank=]. 1. If |inputRank| or |indicesRank| is less than 1, then [=exception/throw=] a {{TypeError}}. @@ -4561,7 +4563,7 @@ partial dictionary MLOpSupportLimits { // [0, 3, 2] const input1 = builder.constant( - {shape: [2, 2]}, + {dataType: 'float32', shape: [2, 2]}, new Float32Array([0, 1, 2, 3])); const indices1 = builder.constant( @@ -4599,7 +4601,7 @@ partial dictionary MLOpSupportLimits { // [4, 5]] <= row [4, 5] from input coordinates [1, 0, *] const input2 = builder.constant( - {shape: [2, 2, 2]}, + {dataType: 'float32', shape: [2, 2, 2]}, new Float32Array([0, 1, 2, 3, 4, 5, 6, 7])); const indices3 = builder.constant( @@ -4647,7 +4649,7 @@ partial dictionary MLOpSupportLimits { const indices5 = builder.constant( {dataType: 'uint32', shape: [5, 3]}, - new Uint32Array([0,0,1, 0,1,0, 1,0,0, 1,1,0, 1,1,1])); + new Uint32Array([0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1])); const output5 = builder.gatherND(input2, indices5); @@ -8278,7 +8280,7 @@ partial dictionary MLOpSupportLimits { **Arguments:** - input: an {{MLOperand}}. 
The input N-D tensor from to initialize the output with. - indices: an {{MLOperand}}. The indices N-D tensor of the input values to scatter over. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - - updates: an {{MLOperand}}. New values to replace atop the input. + - updates: an {{MLOperand}}. New values to replace atop the input, with the same shape as the indices. - options: an optional {{MLScatterOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to *input*'s [=MLOperand/rank=]. @@ -8301,17 +8303,17 @@ partial dictionary MLOpSupportLimits { {{indices}} {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}} - {{input}}'s [=MLOperand/rank=] + [=/same rank as|same as=] {{input}} {{updates}} [=/same type as|same as=] {{input}} - {{input}}'s [=MLOperand/rank=] and {{indices}}'s [=MLOperand/shape=] + [=/same rank as|same as=] {{input}} *output* [=/same type as|same as=] {{input}} - {{input}}'s [=MLOperand/rank=] + [=/same rank as|same as=] {{input}} @@ -8343,7 +8345,7 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| and |updates| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |indices|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. + 1. 
If |indices|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-scatterelements)), then [=exception/throw=] a {{TypeError}}. 1. If |updates|'s [=MLOperand/dataType=] is not equal to |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. 1. If |axis| is greater than or equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. @@ -8386,7 +8388,7 @@ partial dictionary MLOpSupportLimits { // [-1, 31, -6]] const input1 = builder.constant( - {shape: [4, 3]}, + {dataType: 'float32', shape: [4, 3]}, new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32])); const indices1 = builder.constant( @@ -8446,7 +8448,7 @@ partial dictionary MLOpSupportLimits { // updates of shape [1,2,2]: // [[[-1, -2], // [-3, -4]]], - // axis = 2 + // axis = 0 // output of shape [4,2,2]: // [[[ -1, 1], // [ 10, 11]], @@ -8456,8 +8458,9 @@ partial dictionary MLOpSupportLimits { // [210, 211]], // [[300, 301], // [310, -4]],] - const input2 = builder.constant( - {shape: [4, 2, 2]}, + + const input3 = builder.constant( + {dataType: 'float32', shape: [4, 2, 2]}, new Float32Array([0, 1, 10, 11, 100, 101, 110, 111, 200, 201, 210, 211, 300, 301, 310, 311])); const indices3 = builder.constant( @@ -8468,7 +8471,7 @@ partial dictionary MLOpSupportLimits { builder.constant({dataType: 'float32', shape: [1, 2, 2]}, new Uint32Array([-1, -2, -3, -4])); - const output3 = builder.scatterElements(input2, indices3, updates3, {axis: 2}); + const output3 = builder.scatterElements(input3, indices3, updates3, {axis: 0}); @@ -8546,7 +8549,7 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |indices|, and |updates| returns false, then [=exception/throw=] a {{TypeError}}. - 1. 
If |input|'s or |indices|'s [=MLOperand/dataType=]'s are not one of their [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=]'s is not one of the [=/allowed data types=] (according to [this table](#constraints-scatternd)), then [=exception/throw=] a {{TypeError}}. 1. If |updates|'s [=MLOperand/dataType=] is not equal to |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. 1. Let |inputShape| be |input|'s [=MLOperand/shape=] and |inputRank| be |input|'s [=MLOperand/rank=]. 1. Let |indicesShape| be |indices|'s [=MLOperand/shape=] and |indicesRank| be |indices|'s [=MLOperand/rank=]. @@ -8554,12 +8557,12 @@ partial dictionary MLOpSupportLimits { 1. Let |indexableSize| be |indicesRank| - 1. 1. Let |coordinateSize| be |indicesShape|[|indexableSize|]. 1. If |coordinateSize| is greater than |inputRank|, then [=exception/throw=] a {{TypeError}}. - 1. Let |updatesShape| be an empty list. + 1. Let |expectedUpdatesShape| be an empty list. 1. [=list/For each=] |index| in [=the range=] 0 to |indexableSize|, exclusive: - 1. [=list/Append=] |indicesShape|[|index|] to |updatesShape|. + 1. [=list/Append=] |indicesShape|[|index|] to |expectedUpdatesShape|. 1. [=list/For each=] |index| in [=the range=] |coordinateSize| to |inputRank|, exclusive: - 1. [=list/Append=] |inputShape|[|index|] to |updatesShape|. - 1. Let |updates|'s [=MLOperand/shape=] is not [=list/equal=] to |updatesShape|, then [=exception/throw=] a {{TypeError}}. + 1. [=list/Append=] |inputShape|[|index|] to |expectedUpdatesShape|. + 1. If |updates|'s [=MLOperand/shape=] is not [=list/equal=] to |expectedUpdatesShape|, then [=exception/throw=] a {{TypeError}}. 1. Let |outputShape| be a copy of |input|'s [=MLOperand/shape=]. 1. Let |outputDesc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |outputShape|. 1. 
*Make graph connections:* @@ -8590,7 +8593,7 @@ partial dictionary MLOpSupportLimits { // [0, -3, 2, -2, -1, 5, 6, -4] const input1 = builder.constant( - {shape: [8]}, + {dataType: 'float32', shape: [8]}, new Float32Array([0, 1, 2, 3, 4, 5, 6, 7])); const indices1 = builder.constant( @@ -8616,7 +8619,7 @@ partial dictionary MLOpSupportLimits { // [ 2, -2]] <= -2 written to output coordinate [1, 1] const input2 = builder.constant( - {shape: [2, 2]}, + {dataType: 'float32', shape: [2, 2]}, new Float32Array([0, 1, 2, 3])); const indices2 = builder.constant( @@ -8645,7 +8648,7 @@ partial dictionary MLOpSupportLimits { // [-1, -2]] <= [-1, -2] written to output coordinates [2, *] const input3 = builder.constant( - {shape: [3, 2]}, + {dataType: 'float32', shape: [3, 2]}, new Float32Array([0, 1, 2, 3, 4, 5])); const indices3 = builder.constant( @@ -8676,7 +8679,7 @@ partial dictionary MLOpSupportLimits { // [ 6, 7]]] const input4 = builder.constant( - {shape: [2, 2, 2]}, + {dataType: 'float32', shape: [2, 2, 2]}, new Float32Array([0, 1, 2, 3, 4, 5, 6, 7])); const indices4 = builder.constant( @@ -8798,7 +8801,7 @@ partial dictionary MLOpSupportLimits { :: The stride to step over each input along each axis. The length of the strides array must equal the [=MLOperand/rank=] of the input tensor. - The the default is an array of length [=MLOperand/rank=] consisting of all 1's. + The default is an array of length [=MLOperand/rank=] consisting of all 1's. e.g. [1,1,1] for a 3-D tensor. Strides must be greater than zero. 
@@ -9587,7 +9590,8 @@ partial dictionary MLOpSupportLimits { // [9, 4, 8], // [2, 6, 3]] const input = builder.constant( - {shape: [3, 3]}, new Float32Array([7, 1, 2, 9, 4, 8, 2, 6, 3])); + {dataType: 'float32', shape: [3, 3]}, + new Float32Array([7, 1, 2, 9, 4, 8, 2, 6, 3])); // upper triangular matrix: // [[7, 1, 2], From 80eb2799a935b03f71d55c96a2f9c82f420ccd1b Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Sun, 9 Mar 2025 22:31:16 -0700 Subject: [PATCH 38/39] Remaining feedback from Ningxin --- index.bs | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/index.bs b/index.bs index 939462c2..c7fff183 100644 --- a/index.bs +++ b/index.bs @@ -3310,14 +3310,14 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les 1. [=Assert=]: |op| is one of "equal", "notEqual", "greater", "greaterOrEqual", "lesser", "lesserOrEqual", "logicalNot", "logicalAnd", "logicalOr", "logicalXor". 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |a| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |op| is one of "logicalNot", "logicalAnd", "logicalOr", "logicalXor": + 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. 1. If |b| is passed: 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |b| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |a|'s [=MLOperand/dataType=] is not equal to |b|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. 1. Let |outputShape| be the result of [=bidirectionally broadcasting=] |a|'s [=MLOperand/shape=] and |b|'s [=MLOperand/shape=]. If that returns failure, then [=exception/throw=] a {{TypeError}}. 1. Otherwise: 1. Let |outputShape| be a [=list/clone=] of |a|'s [=MLOperand/shape=]. - 1. 
If |op| is one of "logicalNot", "logicalAnd", "logicalOr", "logicalXor": - 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. 1. Let |descriptor| be the result of [=creating an MLOperandDescriptor=] given {{MLOperandDataType/"uint8"}} and |outputShape|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |descriptor|. @@ -3755,13 +3755,13 @@ partial dictionary MLOpSupportLimits { 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |scale|, and |zeroPoint| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |zeroPoint|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. 1. If |scale|'s [=MLOperand/rank=] or |zeroPoint|'s [=MLOperand/rank=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. 1. If |scale|'s [=MLOperand/shape=] is not [=list/equal=] to |zeroPoint|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. 1. If [=blockwise broadcasting=] |scale|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns false, then [=exception/throw=] a {{TypeError}}. 1. If [=blockwise broadcasting=] |zeroPoint|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns false, then [=exception/throw=] a {{TypeError}}. - 1. 
If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. - 1. If |scale|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. - 1. If |zeroPoint|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-dequantizelinear)), then [=exception/throw=] a {{TypeError}}. 1. Let |outputDescriptor| be the result of [=creating an MLOperandDescriptor=] given |scale|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/shape=]. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |outputDescriptor|. @@ -3838,7 +3838,7 @@ partial dictionary MLOpSupportLimits { ### quantizeLinear ### {#api-mlgraphbuilder-quantizelinear} -Quantizes a floating point tensor to integer tensor using the scale and zero-point bias, where `output = clamp(roundEven(input / scale) + zeroPoint, 0, 255)`. The *scale* and *zeroPoint* tensors can be smaller than the *input* tensor as they are [=blockwise broadcast=]. +Quantizes a floating point tensor to integer tensor using the scale and zero-point bias (e.g. `output = clamp(roundEven(input / scale) + zeroPoint, 0, 255)` for "uint8"). The *scale* and *zeroPoint* tensors can be smaller than the *input* tensor as they are [=blockwise broadcast=].