From dc94953db2b884fdc8d0208cf01a5e4231b3c332 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Fri, 26 Mar 2021 17:54:15 -0400 Subject: [PATCH 01/60] Moved high level tf.nn ops to framework. Moved tf.raw.nn Ops to tf.nn. Changed generation to generate SoftmaxCrossEntropyWithLogits and SparseSoftmaxCrossEntropyWithLogits to core NNOps (tf.nn). --- ...pi_def_SoftmaxCrossEntropyWithLogits.pbtxt | 2 +- ..._SparseSoftmaxCrossEntropyWithLogits.pbtxt | 2 +- .../annotations/org/tensorflow/op/NnOps.java | 175 +++--------------- .../org/tensorflow/op/NnRawOps.java | 84 --------- .../SoftmaxCrossEntropyWithLogits.java | 8 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 8 +- .../op/nn/SigmoidCrossEntropyWithLogits.java | 14 +- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 44 +++-- .../SparseSoftmaxCrossEntropyWithLogits.java | 47 +++-- 9 files changed, 107 insertions(+), 277 deletions(-) delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/{raw => }/SoftmaxCrossEntropyWithLogits.java (94%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/{raw => }/SparseSoftmaxCrossEntropyWithLogits.java (94%) rename {tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow => tensorflow-framework/src/main/java/org/tensorflow/framework}/op/nn/SigmoidCrossEntropyWithLogits.java (91%) rename {tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow => tensorflow-framework/src/main/java/org/tensorflow/framework}/op/nn/SoftmaxCrossEntropyWithLogits.java (87%) rename {tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow => tensorflow-framework/src/main/java/org/tensorflow/framework}/op/nn/SparseSoftmaxCrossEntropyWithLogits.java (83%) diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt index 5dba2164cd6..e064562c0f2 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "SoftmaxCrossEntropyWithLogits" endpoint { - name: "nn.raw.SoftmaxCrossEntropyWithLogits" + name: "nn.SoftmaxCrossEntropyWithLogits" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt index cf80ff77565..7627d5f6074 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "SparseSoftmaxCrossEntropyWithLogits" endpoint { - name: "nn.raw.SparseSoftmaxCrossEntropyWithLogits" + name: "nn.SparseSoftmaxCrossEntropyWithLogits" } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 4f724578d14..0269d387859 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -83,7 +83,6 @@ import 
org.tensorflow.op.nn.Relu; import org.tensorflow.op.nn.Relu6; import org.tensorflow.op.nn.Selu; -import org.tensorflow.op.nn.SigmoidCrossEntropyWithLogits; import org.tensorflow.op.nn.Softmax; import org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits; import org.tensorflow.op.nn.Softsign; @@ -103,8 +102,6 @@ * @see {@link Ops} */ public final class NnOps { - public final NnRawOps raw; - private final Scope scope; private final Ops ops; @@ -112,7 +109,6 @@ public final class NnOps { NnOps(Ops ops) { this.scope = ops.scope(); this.ops = ops; - raw = new NnRawOps(ops); } /** @@ -1795,56 +1791,6 @@ public Selu selu(Operand features) { return Selu.create(scope, features); } - /** - * Computes sigmoid cross entropy given logits. - * - *

Measures the probability error in discrete classification tasks in which each class is - * independent and not mutually exclusive. For instance, one could perform multilabel - * classification where a picture can contain both an elephant and a dog at the same time. - * - *

For brevity, let x = logits, z = labels. The logistic loss in - * pseudo-code is - * - *

-   *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
-   *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
-   *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
-   *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
-   *   = (1 - z) * x + log(1 + exp(-x))
-   *   = x - x * z + log(1 + exp(-x))
-   *  
- * - *

For x < 0, to avoid overflow in exp(-x), we reformulate the above - * - *

-   *  x - x * z + log(1 + exp(-x))
-   *   = log(exp(x)) - x * z + log(1 + exp(-x))
-   *   = - x * z + log(1 + exp(x))
-   *  
- * - *

Hence, to ensure stability and avoid overflow, the implementation uses this equivalent - * formulation - * - *

-   *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
-   *  
- * - *

logits and labels must have the same type and shape. - * - *

- * - * @param scope The TensorFlow scope - * @param labels the labels - * @param logits the logits of type float32 or float64 - * @param the type of labels and logits - * @return the component-wise logistic losses. - * @throws IllegalArgumentException if logits' and labels' do not have the same shape - */ - public Operand sigmoidCrossEntropyWithLogits(Operand labels, - Operand logits) { - return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); - } - /** * Computes softmax activations. *

@@ -1861,54 +1807,20 @@ public Softmax softmax(Operand logits) { } /** - * Computes softmax cross entropy between logits and labels. - * - *

Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - *

NOTE: - * - *

While the classes are mutually exclusive, their probabilities need not be. All that is - * required is that each row of labels is a valid probability distribution. If they - * are not, the computation of the gradient will be incorrect. - * - *

If using exclusive labels (wherein one and only one class is true at a time), - * see {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} - * - *

Usage: - * - *

-   *    Operand<TFloat32> logits =
-   *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
-   *    Operand<TFloat32> labels =
-   *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
-   *    Operand<TFloat32> output =
-   *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
-   *    // output Shape = [2]
-   *    // dataType = FLOAT (1)
-   *    // values { 0.169846, 0.824745 }
-   *  
- * - *

Backpropagation will happen into both logits and labels. To - * disallow backpropagation into labels, pass label tensors through - * tf.stopGradient before feeding it to this function. + * Computes softmax cross entropy cost and gradients to backpropagate. + *

+ * Inputs are the logits, not probabilities. * - * @param scope current scope - * @param labels Each vector along the class dimension should hold a valid probability - * distribution e.g. for the case in which labels are of shape [batch_size, num_classes] - * , each row of labels[i] must be a valid probability distribution. - * @param logits Per-label activations, typically a linear output. These activation energies are - * interpreted as unnormalized log probabilities. - * @param axis The class dimension. -1 is the last dimension. - * @param the number type of the operands - * @return the softmax cross entropy loss. Its type is the same as logits and its - * shape is the same as labels except that it does not have the last dimension of - * labels. + * @param data type for {@code loss()} output + * @param features batch_size x num_classes matrix + * @param labels batch_size x num_classes matrix + * The caller must ensure that each batch of labels represents a valid + * probability distribution. + * @return a new instance of SoftmaxCrossEntropyWithLogits */ - public Operand softmaxCrossEntropyWithLogits( - Operand labels, Operand logits, int axis) { - return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); + public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyWithLogits( + Operand features, Operand labels) { + return SoftmaxCrossEntropyWithLogits.create(scope, features, labels); } /** @@ -2100,51 +2012,24 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo } /** - * Computes sparse softmax cross entropy between logits and labels. - * - *

Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - *

NOTE: - * - *

For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the labels vector must provide a single specific - * index for the true class for each row of logits (each minibatch entry). For soft - * softmax classification with a probability distribution for each entry, {@link - * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. - * - *

WARNING: - * - *

This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of softmax, - * as it will produce incorrect results. - * - *

A common use case is to have logits of shape [batchSize, numClasses] and have - * labels of shape [batchSize], but higher dimensions are supported, in which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, TFloat32 - * , or TFloat64, and labels must have the dtype of TInt32 - * or TInt64. - * - * @param scope current scope - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log - * probabilities. - * @return A Tensor of the same shape as labels and of the same type as - * logits with the softmax cross entropy loss. - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank - * of the labels is not equal to the rank of the logits minus one. - */ - public Operand sparseSoftmaxCrossEntropyWithLogits( - Operand labels, Operand logits) { - return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits(scope, labels, logits); + * Computes softmax cross entropy cost and gradients to backpropagate. + *

+ * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * a matrix of label probabilities, but rather a single label per row + * of features. This label is considered to have probability 1.0 for the + * given row. + *

+ * Inputs are the logits, not probabilities. + * + * @param data type for {@code loss()} output + * @param features batch_size x num_classes matrix + * @param labels batch_size vector with values in [0, num_classes). + * This is the label for the given minibatch entry. + * @return a new instance of SparseSoftmaxCrossEntropyWithLogits + */ + public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( + Operand features, Operand labels) { + return SparseSoftmaxCrossEntropyWithLogits.create(scope, features, labels); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java deleted file mode 100644 index 13c6baa651a..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2020 The TensorFlow Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// ============================================================================== -// -// This class has been generated, DO NOT EDIT! -// -package org.tensorflow.op; - -import org.tensorflow.Operand; -import org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits; -import org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits; -import org.tensorflow.types.family.TNumber; - -/** - * An API for building {@code nn.raw} operations as {@link Op Op}s - * - * @see {@link Ops} - */ -public final class NnRawOps { - private final Scope scope; - - private final Ops ops; - - NnRawOps(Ops ops) { - this.scope = ops.scope(); - this.ops = ops; - } - - /** - * Computes softmax cross entropy cost and gradients to backpropagate. - *

- * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss()} output - * @param features batch_size x num_classes matrix - * @param labels batch_size x num_classes matrix - * The caller must ensure that each batch of labels represents a valid - * probability distribution. - * @return a new instance of SoftmaxCrossEntropyWithLogits - */ - public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyWithLogits( - Operand features, Operand labels) { - return SoftmaxCrossEntropyWithLogits.create(scope, features, labels); - } - - /** - * Computes softmax cross entropy cost and gradients to backpropagate. - *

- * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept - * a matrix of label probabilities, but rather a single label per row - * of features. This label is considered to have probability 1.0 for the - * given row. - *

- * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss()} output - * @param features batch_size x num_classes matrix - * @param labels batch_size vector with values in [0, num_classes). - * This is the label for the given minibatch entry. - * @return a new instance of SparseSoftmaxCrossEntropyWithLogits - */ - public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( - Operand features, Operand labels) { - return SparseSoftmaxCrossEntropyWithLogits.create(scope, features, labels); - } - - /** - * Get the parent {@link Ops} object. - */ - public final Ops ops() { - return ops; - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java similarity index 94% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java index 8032a4c2512..5d3ab3c1100 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.nn.raw; +package org.tensorflow.op.nn; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -34,7 +34,7 @@ * * @param data type for {@code loss()} output */ -@Operator(group = "nn.raw") +@Operator(group = "nn") public final class SoftmaxCrossEntropyWithLogits extends RawOp { /** @@ -53,7 +53,7 @@ public static SoftmaxCrossEntropyWithLogits create(Scope opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); - return new SoftmaxCrossEntropyWithLogits(opBuilder.build()); + return new SoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } /** @@ -80,6 +80,6 @@ private SoftmaxCrossEntropyWithLogits(Operation operation) { super(operation); int outputIdx = 0; loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx++); + backprop = operation.output(outputIdx); } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java similarity index 94% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 67650760b1c..794beab4ded 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
-package org.tensorflow.op.nn.raw; +package org.tensorflow.op.nn; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -39,7 +39,7 @@ * * @param data type for {@code loss()} output */ -@Operator(group = "nn.raw") +@Operator(group = "nn") public final class SparseSoftmaxCrossEntropyWithLogits extends RawOp { /** @@ -57,7 +57,7 @@ public static SparseSoftmaxCrossEntropyWithLogits create( opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); - return new SparseSoftmaxCrossEntropyWithLogits(opBuilder.build()); + return new SparseSoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } /** @@ -84,6 +84,6 @@ private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { super(operation); int outputIdx = 0; loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx++); + backprop = operation.output(outputIdx); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java similarity index 91% rename from tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SigmoidCrossEntropyWithLogits.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index 92c413f7e52..b55385839d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -1,4 +1,4 @@ -package org.tensorflow.op.nn; +package org.tensorflow.framework.op.nn; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; @@ -8,11 +8,17 @@ import org.tensorflow.op.core.Select; import org.tensorflow.op.core.ZerosLike; import org.tensorflow.op.dtypes.Cast; -import org.tensorflow.op.math.*; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.Exp; +import org.tensorflow.op.math.GreaterEqual; +import org.tensorflow.op.math.Log1p; +import org.tensorflow.op.math.Mul; +import org.tensorflow.op.math.Neg; +import org.tensorflow.op.math.Sub; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -@Operator(group = "nn") +//@Operator(group = "nn") public class SigmoidCrossEntropyWithLogits { /** @@ -60,7 +66,7 @@ public class SigmoidCrossEntropyWithLogits { * @return the component-wise logistic losses. 
* @throws IllegalArgumentException if logits' and labels' do not have the same shape */ - @Endpoint(name = "sigmoidCrossEntropyWithLogits") + //@Endpoint(name = "sigmoidCrossEntropyWithLogits") public static Operand sigmoidCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits) { if (!isCompatible(labels.shape(), logits.shape())) { diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java similarity index 87% rename from tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java index ddeacbea4d4..0f5b8197f1e 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -1,11 +1,15 @@ -package org.tensorflow.op.nn; +package org.tensorflow.framework.op.nn; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; -import org.tensorflow.op.core.*; +import org.tensorflow.op.core.Concat; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Range; +import org.tensorflow.op.core.Rank; +import org.tensorflow.op.core.Reshape; +import org.tensorflow.op.core.Slice; import org.tensorflow.op.dtypes.Cast; import org.tensorflow.op.linalg.Transpose; import org.tensorflow.op.math.Sub; @@ -14,12 +18,11 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; -import org.tensorflow.types.family.TType; import java.util.Arrays; import java.util.List; -@Operator(group = "nn") +// @Operator(group = "nn") public class SoftmaxCrossEntropyWithLogits { /** @@ -68,6 +71,7 @@ public class SoftmaxCrossEntropyWithLogits { * shape is the same as labels except that it does not have the last dimension of * labels. */ + @SuppressWarnings("unchecked") @Endpoint(name = "softmaxCrossEntropyWithLogits") public static Operand softmaxCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits, int axis) { @@ -78,7 +82,9 @@ public static Operand softmaxCrossEntr } if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { - Operand result = softmaxCrossEntropyWithLogits(scope, + Operand result = + softmaxCrossEntropyWithLogits( + scope, Cast.create(scope, labels, TFloat32.class), Cast.create(scope, logits, TFloat32.class), axis); @@ -86,10 +92,8 @@ public static Operand softmaxCrossEntr } if (logits.asOutput().type() != labels.asOutput().type()) { - return softmaxCrossEntropyWithLogits(scope, - Cast.create(scope, labels, logits.asOutput().type()), - logits, - axis); + return softmaxCrossEntropyWithLogits( + scope, Cast.create(scope, labels, logits.asOutput().type()), logits, axis); } Operand inputRank = Cast.create(scope, Rank.create(scope, logits), TInt64.class); @@ -101,13 +105,20 @@ public static Operand softmaxCrossEntr labels = moveDimToEnd(scope, labels, axis, inputRank); } + Operand tLabels; + if (labels.type() != logits.type()) { + tLabels = Cast.create(scope, labels, logits.type()); + } else { + // Unchecked warning checked in if statement. 
+ tLabels = (Operand) labels; + } + Shape inputShape = logits.shape(); logits = flattenOuterDims(scope, logits); - labels = flattenOuterDims(scope, labels); + tLabels = flattenOuterDims(scope, tLabels); - org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits smax = - org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits.create( - scope, logits, (Operand)labels); + org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits smax = + org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits.create(scope, logits, tLabels); /* cannot use generic on cost, because cost may be recast later. */ Operand cost = smax.loss(); Operand outputShape = @@ -119,6 +130,9 @@ public static Operand softmaxCrossEntr cost = Reshape.create(scope, cost, outputShape); if (scope.env().isGraph() && !shape.hasUnknownDimension()) { long[] array = shape.asArray(); + if (array == null) { + array = new long[0]; + } long[] newArray = new long[array.length - 1]; if (axis < 0) { axis = shape.numDimensions() + axis; @@ -153,7 +167,7 @@ private static Operand flattenOuterDims(Scope scope, Oper boolean productValid = true; for (int i = ndims - 2; i >= 0; i--) { long d = shape.size(i); - if (d == org.tensorflow.ndarray.Shape.UNKNOWN_SIZE) { + if (d == Shape.UNKNOWN_SIZE) { productValid = false; break; } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java similarity index 83% rename from tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 54b32bb5c63..64faa7c5d70 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -1,11 +1,10 @@ -package org.tensorflow.op.nn; +package org.tensorflow.framework.op.nn; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; import org.tensorflow.op.core.AssertThat; import org.tensorflow.op.core.Constant; import org.tensorflow.op.core.Reshape; @@ -22,7 +21,7 @@ import java.util.Collections; import java.util.List; -@Operator(group = "nn") +// @Operator(group = "nn") public class SparseSoftmaxCrossEntropyWithLogits { /** @@ -63,19 +62,24 @@ public class SparseSoftmaxCrossEntropyWithLogits { * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, * or TFloat64. These activation energies are interpreted as unnormalized log * probabilities. + * @param the data type for the labels + * @param the data tyoe for the loss and logits. * @return A Tensor of the same shape as labels and of the same type as * logits with the softmax cross entropy loss. * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank * of the labels is not equal to the rank of the logits minus one. 
*/ + @SuppressWarnings("unchecked") @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits") - public static Operand sparseSoftmaxCrossEntropyWithLogits( - Scope scope, Operand labels, Operand logits) { + public static + Operand sparseSoftmaxCrossEntropyWithLogits( + Scope scope, Operand labels, Operand logits) { scope = scope.withSubScope("SparseSoftmaxCrossEntropyWithLogits"); - /** cannot use generics on preciseLogits as it may be recast later */ - Operand preciseLogits = logits; + Operand preciseLogits; if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { preciseLogits = Cast.create(scope, logits, TFloat32.class); + } else { + preciseLogits = logits; } Shape labelsStaticShape = labels.shape(); org.tensorflow.op.core.Shape labelsShape = @@ -108,14 +112,16 @@ public static Operand sparseSoftmaxCrossE } // Check if no reshapes are required. if (logitsShape.numDimensions() == 2) { - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits smax = - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits.create( + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( scope, preciseLogits, labels); - Operand loss = smax.loss(); - if (logits.asOutput().type() == TFloat16.class) { - loss = Cast.create(scope, loss, TFloat16.class); + Operand cost = smax.loss(); + if (cost.type() != logits.type()) { + return Cast.create(scope, cost, logits.type()); + } else { + // Unchecked cast already checked with previous if + return (Operand) cost; } - return loss; } List shapeChecks = new ArrayList<>(); @@ -145,14 +151,17 @@ public static Operand sparseSoftmaxCrossE preciseLogits = Reshape.create(scope, preciseLogits, Constant.arrayOf(scope, -1L, numClassses)); labels = Reshape.create(scope, labels, Constant.scalarOf(scope, -1)); scope.withControlDependencies(shapeChecks); - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits smax = - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits.create( + // call raw op + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( scope, preciseLogits, labels); - Operand cost = smax.loss(); + Operand cost = smax.loss(); cost = Reshape.create(scope, cost, labelsShape); - if (logits.asOutput().type() == TFloat16.class) { - cost = Cast.create(scope, cost, TFloat16.class); + if (cost.type() != logits.type()) { + return Cast.create(scope, cost, logits.type()); + } else { + // Unchecked cast already checked with previous if + return (Operand) cost; } - return cost; } } From 1878b609d82996c3376b28c5d1e7338dfc6e80f1 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Fri, 26 Mar 2021 18:02:55 -0400 Subject: [PATCH 02/60] Added FrameworkOps analogous to Ops. Added NnOps and SetOps as groups. Fixed MetricsHelper and Losses to use the bew FrameworkOps. Moved SetsOps to framework.op. 
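
As a minimal usage sketch of the new entry point (not part of the diff below; the Graph setup and the demo class name are hypothetical, and the example values are borrowed from the NnOps Javadoc and SetOpsTest in this change):

    import org.tensorflow.Graph;
    import org.tensorflow.Operand;
    import org.tensorflow.framework.op.FrameworkOps;
    import org.tensorflow.op.Ops;
    import org.tensorflow.types.TFloat32;
    import org.tensorflow.types.TInt32;

    public final class FrameworkOpsExample {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          FrameworkOps fops = FrameworkOps.create(tf);

          // framework-level softmax cross entropy, previously tf.nn.softmaxCrossEntropyWithLogits
          Operand<TFloat32> labels =
              tf.constant(new float[][] {{1.0f, 0.0f, 0.0f}, {0.0f, 0.8f, 0.2f}});
          Operand<TFloat32> logits =
              tf.constant(new float[][] {{4.0f, 2.0f, 1.0f}, {0.0f, 5.0f, 1.0f}});
          Operand<TFloat32> loss =
              fops.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);

          // set difference, previously the static metrics.impl.SetsOps.difference(tf, a, b)
          Operand<TInt32> a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}});
          Operand<TInt32> b = tf.constant(new int[][] {{1, 9}, {1, 5}});
          Operand<TInt32> aMinusB = fops.sets.difference(a, b);
        }
      }
    }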
--- .../tensorflow/framework/losses/Losses.java | 17 +- .../framework/metrics/impl/MetricsHelper.java | 4 +- .../tensorflow/framework/op/FrameworkOps.java | 136 ++++++++++++ .../org/tensorflow/framework/op/NnOps.java | 197 ++++++++++++++++++ .../{metrics/impl => op}/SetsOps.java | 64 +++--- .../SparseSoftmaxCrossEntropyWithLogits.java | 3 +- .../{SetsOpsTest.java => SetOpsTest.java} | 18 +- 7 files changed, 398 insertions(+), 41 deletions(-) create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java rename tensorflow-framework/src/main/java/org/tensorflow/framework/{metrics/impl => op}/SetsOps.java (75%) rename tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/{SetsOpsTest.java => SetOpsTest.java} (86%) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index 9aa94cf7fcf..aa5fa4ada6d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -19,6 +19,7 @@ import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.op.core.ReduceAll; import org.tensorflow.op.core.ReduceMax; import org.tensorflow.op.core.ReduceSum; @@ -181,7 +182,8 @@ public static Operand binaryCrossentropy( */ private static Operand binaryCrossentropyHelper( Ops tf, Operand target, Operand output, boolean fromLogits) { - if (fromLogits) return tf.nn.sigmoidCrossEntropyWithLogits(target, output); + FrameworkOps fop = FrameworkOps.create(tf); + if (fromLogits) { return fop.nn.sigmoidCrossEntropyWithLogits(target, output);} /* TODO - skip this logic for now. 
It requires walking back the inputs which is not yet possible if (!(output instanceof Variable) && (!tf.scope().env().isEager())) { @@ -191,7 +193,7 @@ private static Operand binaryCrossentropyHelper( // TODO if (output.op().numInputess() != 1) // TODO throw new IllegalArgumentException("output can only have 1 output"); // TODO output = output.op().inout(0); - // TODO return tf.nn.sigmoidCrossEntropyWithLogits(target, output); + // TODO return fop.nn.sigmoidCrossEntropyWithLogits(target, output); // TODO} } */ @@ -235,6 +237,7 @@ public static Operand categoricalCrossentropy( boolean fromLogits, float labelSmoothing, int axis) { + FrameworkOps fop = FrameworkOps.create(tf); Class predictionType = predictions.type(); Operand tLabels = cast(tf, labels, predictionType); LossTuple ops = LossesHelper.squeezeOrExpandDimensions(tf, tLabels, predictions, null); @@ -245,7 +248,7 @@ public static Operand categoricalCrossentropy( tLabels = smoothCategoricalLabels(tf, tLabels, labelSmoothing); } if (fromLogits) { - return tf.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, axis); + return fop.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, axis); } /* TODO if (!(predictions instanceof Variable) && (!tf.scope().env().isEager())) { @@ -255,7 +258,7 @@ public static Operand categoricalCrossentropy( if (predictions.op().numOutputs() != 1) throw new IllegalArgumentException("output can only have 1 output"); predictions = predictions.op().output(0); - return tf.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, -1); + return fop.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, -1); } } */ @@ -516,6 +519,7 @@ public static Operand sparseCategoricalCrossentropy( boolean fromLogits, int axis) { Class predictionType = predictions.type(); + FrameworkOps fop = FrameworkOps.create(tf); Operand epsilonConst = cast(tf, tf.constant(EPSILON), predictionType); Operand one = cast(tf, tf.constant(1), predictionType); Operand oneMinusEpsilonConst = tf.math.sub(one, epsilonConst); @@ -568,9 +572,8 @@ public static Operand sparseCategoricalCrossentropy( tf.constant( new long[] {-1L, predictionsShape.size(predictionsShape.numDimensions() - 1)})); } - - @SuppressWarnings("unchecked") - Operand loss = tf.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); + + Operand loss = fop.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); if (updateShape && predictionsRank >= 3) { Shape newShape = predictionsShape.take(predictionsShape.numDimensions() - 1); loss = tf.reshape(loss, tf.constant(newShape)); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java index 8a352322f52..a82e1760d1f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java @@ -16,6 +16,7 @@ import org.tensorflow.Operand; import org.tensorflow.framework.metrics.exceptions.NotBroadcastableException; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; @@ -174,12 +175,13 @@ private static Operand canBroadcastNonscalarShapes( private static Operand canBroadcastDims( Ops tf, Operand weightsShape, Operand valuesShape) { tf = tf.withSubScope("canBroadcastDims"); + FrameworkOps fops = FrameworkOps.create(tf); Operand valuesShape2d = 
tf.expandDims(valuesShape, tf.constant(-1)); Operand validDims = tf.concat(Arrays.asList(valuesShape2d, tf.onesLike(valuesShape2d)), tf.constant(1)); Operand weightsShape2D = tf.expandDims(weightsShape, tf.constant(-1)); - Operand diffResult = SetsOps.difference(tf, weightsShape2D, validDims); + Operand diffResult = fops.sets.difference(weightsShape2D, validDims); Operand numInvalidDims = tf.size(diffResult); return tf.math.equal(tf.constant(0), numInvalidDims); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java new file mode 100644 index 00000000000..cecbecfed15 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -0,0 +1,136 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.DeviceSpec; +import org.tensorflow.EagerSession; +import org.tensorflow.ExecutionEnvironment; +import org.tensorflow.op.Op; +import org.tensorflow.op.Ops; +import org.tensorflow.op.Scope; + +/** + * An API for building framework operations as {@link Op Op}s + * + *

These are higher level ops that may invoke core ops. Higher level Ops may perform the + * operation solely in the TensorFlow framework or do preprocessing of the Operands before invoking + * a core level Op. + */ +public class FrameworkOps { + public final Ops coreOps; + private final Scope scope; + + public final NnOps nn; + public final SetsOps sets; + + /** + * Creates a FrameworkOps instance with the provided scope + * + * @param scope the scope + */ + private FrameworkOps(Scope scope) { + this.coreOps = Ops.create(scope.env()); + this.scope = scope; + nn = new NnOps(this); + sets = new SetsOps(this); + } + + /** + * Creates a FrameworkOps instance based on the provided Core Ops + * + * @param coreOps The TensorFlow Core Ops + */ + private FrameworkOps(Ops coreOps) { + this.coreOps = coreOps; + this.scope = coreOps.scope(); + nn = new NnOps(this); + sets = new SetsOps(this); + } + + + /** Returns the current {@link Scope scope} of this API */ + public final Scope scope() { + return scope; + } + + /** + * Gets the core Ops + * + * @return coreOps + */ + public final Ops coreOps() { + return coreOps; + } + + /** + * Returns an API that builds operations with the provided name prefix. + * + *

{@link Scope#withSubScope(String)} + */ + public FrameworkOps withSubScope(String childScopeName) { + return new FrameworkOps(scope.withSubScope(childScopeName)); + } + + /** + * Returns an API that uses the provided name for an op. + * + *

{@link Scope#withName(String)} + */ + public FrameworkOps withName(String opName) { + return new FrameworkOps(scope.withName(opName)); + } + + /** + * Returns an API that places the created operations on the device(s) matching the provided spec. + * + *

{@link Scope#withDevice(DeviceSpec)} + */ + public FrameworkOps withDevice(DeviceSpec deviceSpec) { + return new FrameworkOps(scope.withDevice(deviceSpec)); + } + + /** + * Returns an API that adds operations to the graph with the provided control dependencies. + * + *

{@link Scope#withControlDependencies(Iterable)} + */ + public FrameworkOps withControlDependencies(Iterable controls) { + return new FrameworkOps(scope.withControlDependencies(controls)); + } + + /** Creates an API for building operations in the provided execution environment */ + public static FrameworkOps create(ExecutionEnvironment env) { + return new FrameworkOps(new Scope(env)); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + *

Invoking this method is equivalent to {@code + * FrameworkOps.create(EagerSession.getDefault())}. + */ + public static FrameworkOps create() { + return new FrameworkOps(new Scope(EagerSession.getDefault())); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + * @param coreOps the TensorFlow core Ops + */ + public static FrameworkOps create(Ops coreOps) { + return new FrameworkOps(coreOps); + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java new file mode 100644 index 00000000000..4054f3ddbb5 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java @@ -0,0 +1,197 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.Operand; +import org.tensorflow.framework.op.nn.SigmoidCrossEntropyWithLogits; +import org.tensorflow.framework.op.nn.SoftmaxCrossEntropyWithLogits; +import org.tensorflow.framework.op.nn.SparseSoftmaxCrossEntropyWithLogits; +import org.tensorflow.op.Op; +import org.tensorflow.op.Scope; +import org.tensorflow.types.family.TNumber; + +/** + * An API for building {@code nn} operations as {@link Op Op}s + * + *

These are higher level ops that may invoke core ops. Higher level Ops may perform the + * operation solely in the TensorFlow framework or do preprocessing of the Operands before invoking + * a core level Op. + * + *

{@link FrameworkOps} + */ +public class NnOps { + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * @param frameworkOps the TensorFLow framework Ops + */ + NnOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + + /** + * Computes sigmoid cross entropy given logits. + * + *

Measures the probability error in discrete classification tasks in which each class is + * independent and not mutually exclusive. For instance, one could perform multilabel + * classification where a picture can contain both an elephant and a dog at the same time. + * + *

For brevity, let x = logits, z = labels. The logistic loss in + * pseudo-code is + * + *

+     *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
+     *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
+     *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
+     *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
+     *   = (1 - z) * x + log(1 + exp(-x))
+     *   = x - x * z + log(1 + exp(-x))
+     *  
+ * + *

For x < 0, to avoid overflow in exp(-x), we reformulate the above + * + *

+     *  x - x * z + log(1 + exp(-x))
+     *   = log(exp(x)) - x * z + log(1 + exp(-x))
+     *   = - x * z + log(1 + exp(x))
+     *  
+ * + *

Hence, to ensure stability and avoid overflow, the implementation uses this equivalent + * formulation + * + *

+     *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
+     *  
+ * + *

logits and labels must have the same type and shape. + * + *

+ * + * @param labels the labels + * @param logits the logits of type float32 or float64 + * @param the type of labels and logits + * @return the component-wise logistic losses. + * @throws IllegalArgumentException if logits' and labels' do not have the same shape + */ + public Operand sigmoidCrossEntropyWithLogits(Operand labels, + Operand logits) { + return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); + } + + /** + * Computes softmax cross entropy between logits and labels. + * + *

Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

NOTE: + * + *

While the classes are mutually exclusive, their probabilities need not be. All that is + * required is that each row of labels is a valid probability distribution. If they + * are not, the computation of the gradient will be incorrect. + * + *

If using exclusive labels (wherein one and only one class is true at a time), + * see {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} + * + *

Usage: + * + *

+     *    Operand<TFloat32> logits =
+     *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
+     *    Operand<TFloat32> labels =
+     *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
+     *    Operand<TFloat32> output =
+     *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
+     *    // output Shape = [2]
+     *    // dataType = FLOAT (1)
+     *    // values { 0.169846, 0.824745 }
+     *  
+ * + *

Backpropagation will happen into both logits and labels. To + * disallow backpropagation into labels, pass label tensors through + * tf.stopGradient before feeding it to this function. + * + * @param labels Each vector along the class dimension should hold a valid probability + * distribution e.g. for the case in which labels are of shape [batch_size, num_classes] + * , each row of labels[i] must be a valid probability distribution. + * @param logits Per-label activations, typically a linear output. These activation energies are + * interpreted as unnormalized log probabilities. + * @param axis The class dimension. -1 is the last dimension. + * @param the number type of the operands + * @return the softmax cross entropy loss. Its type is the same as logits and its + * shape is the same as labels except that it does not have the last dimension of + * labels. + */ + public Operand softmaxCrossEntropyWithLogits( + Operand labels, Operand logits, int axis) { + return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); + } + + /** + * Computes sparse softmax cross entropy between logits and labels. + * + *

Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

NOTE: + * + *

For this operation, the probability of a given label is considered exclusive. That is, soft + * classes are not allowed, and the labels vector must provide a single specific + * index for the true class for each row of logits (each minibatch entry). For soft + * softmax classification with a probability distribution for each entry, see {@link + * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. + * + *

WARNING: + * + *

This op expects unscaled logits, since it performs a softmax on logits + * internally for efficiency. Do not call this op with the output of softmax, + * as it will produce incorrect results. + * + *

A common use case is to have logits of shape [batchSize, numClasses] and have + * labels of shape [batchSize], but higher dimensions are supported, in which case + * the dim-th dimension is assumed to be of size numClasses. + * logits must have the dataType of TFloat16, TFloat32 + * , or TFloat64, and labels must have the dtype of TInt32 + * or TInt64. + * + * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r + * is rank of labels and result) and the dataType is TInt32 + * or TInt64. Each entry in labels must be an index in [0, + * numClasses). Other values will raise an exception when this op is run on CPU, and + * return NaN for corresponding loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., + * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, + * or TFloat64. These activation energies are interpreted as unnormalized log + * probabilities. + * @param The data type for the labels + * @param The data type for the logits and loss + * @return the loss + * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank + * of the labels is not equal to the rank of the logits minus one. + */ + + public Operand sparseSoftmaxCrossEntropyWithLogits( + Operand labels, Operand logits) { + return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits(scope, labels, logits); + } + + +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java similarity index 75% rename from tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java index 467dea19b57..d7833cdbb06 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java @@ -12,26 +12,40 @@ See the License for the specific language governing permissions and limitations under the License. =======================================================================*/ -package org.tensorflow.framework.metrics.impl; +package org.tensorflow.framework.op; import org.tensorflow.Operand; -import org.tensorflow.op.Ops; +import org.tensorflow.op.Scope; import org.tensorflow.op.SparseOps; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.dtypes.Cast; import org.tensorflow.op.sparse.DenseToDenseSetOperation; +import org.tensorflow.op.sparse.SparseToDense; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** Implementation of set operations */ public class SetsOps { + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + SetsOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + /** * Computes set difference of elements in last dimension of a and b with * aMinusB set to true. * *

All but the last dimension of a and b must match * - * @param tf the TensorFlow Ops * @param a The first operand representing set a * @param b The other operand representing set b * @param the data type for the sets @@ -39,8 +53,8 @@ public class SetsOps { * last dimension the * same. Elements along the last dimension contain the results of the set * operation. */ - public static Operand difference(Ops tf, Operand a, Operand b) { - return difference(tf, a, b, true); + public Operand difference(Operand a, Operand b) { + return difference(a, b, true); } /** @@ -48,7 +62,6 @@ public static Operand difference(Ops tf, Operand a, Op * *

All but the last dimension of a and b must match * - * @param tf the TensorFlow Ops * @param a The first operand representing set a * @param b The other operand representing set b * @param aMinusB whether to subtract b from a, vs vice versa. @@ -57,15 +70,13 @@ public static Operand difference(Ops tf, Operand a, Op * last dimension the * same. Elements along the last dimension contain the results of the set * operation. */ - public static Operand difference( - Ops tf, Operand a, Operand b, boolean aMinusB) { - return setOperation(tf, a, b, aMinusB ? Operation.A_MINUS_B : Operation.B_MINUS_A); + public Operand difference(Operand a, Operand b, boolean aMinusB) { + return setOperation(a, b, aMinusB ? Operation.A_MINUS_B : Operation.B_MINUS_A); } /** * Computes set union of elements in last dimension of a and b. * - * @param tf the TensorFlow Ops * @param a The first operand representing set a * @param b The other operand representing set b * @param the data type for the sets @@ -73,14 +84,13 @@ public static Operand difference( * last dimension the * same. Elements along the last dimension contain the results of the set * operation. */ - public static Operand union(Ops tf, Operand a, Operand b) { - return setOperation(tf, a, b, Operation.UNION); + public Operand union(Operand a, Operand b) { + return setOperation(a, b, Operation.UNION); } /** * Computes set intersection of elements in last dimension of a and b. * - * @param tf the TensorFlow Ops * @param a The first operand representing set a * @param b The other operand representing set b * @param the data type for the sets @@ -88,14 +98,13 @@ public static Operand union(Ops tf, Operand a, Operand * last dimension the * same. Elements along the last dimension contain the results of the set * operation. */ - public static Operand intersection(Ops tf, Operand a, Operand b) { - return setOperation(tf, a, b, Operation.INTERSECTION); + public Operand intersection(Operand a, Operand b) { + return setOperation(a, b, Operation.INTERSECTION); } /** * Compute set operation of elements in last dimension of a and b. * - * @param tf the TensorFlow Ops * @param a The first set operation operand * @param b The other et operation operand * @param setOperation The set operation to perform, {@link Operation}. @@ -104,18 +113,23 @@ public static Operand intersection(Ops tf, Operand a, * last dimension the same. Elements along the last dimension contain the results of the set * operation. 
*/ - public static Operand setOperation( - Ops tf, Operand a, Operand b, Operation setOperation) { + public Operand setOperation( + Operand a, Operand b, Operation setOperation) { DenseToDenseSetOperation setOperationResult = - tf.sparse.denseToDenseSetOperation( - a, b, setOperation.getSetOperation(), DenseToDenseSetOperation.validateIndices(true)); - - return tf.sparse.sparseToDense( + DenseToDenseSetOperation.create( + scope, + a, + b, + setOperation.getSetOperation(), + DenseToDenseSetOperation.validateIndices(true)); + + return SparseToDense.create( + scope, setOperationResult.resultIndices(), setOperationResult.resultShape(), setOperationResult.resultValues(), - cast(tf, tf.constant(0), a.type())); + Cast.create(scope, Constant.scalarOf(scope, 0), a.type())); } /** diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 64faa7c5d70..75766cf9bfb 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -64,8 +64,7 @@ public class SparseSoftmaxCrossEntropyWithLogits { * probabilities. * @param the data type for the labels * @param the data tyoe for the loss and logits. - * @return A Tensor of the same shape as labels and of the same type as - * logits with the softmax cross entropy loss. + * @return the loss * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank * of the labels is not equal to the rank of the logits minus one. */ diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetsOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java similarity index 86% rename from tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetsOpsTest.java rename to tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java index eceff2797f8..e10f016bd94 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetsOpsTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java @@ -2,6 +2,8 @@ import org.junit.jupiter.api.Test; import org.tensorflow.Operand; +import org.tensorflow.framework.op.FrameworkOps; +import org.tensorflow.framework.op.SetsOps; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; @@ -15,7 +17,7 @@ import static org.tensorflow.framework.utils.CastHelper.cast; -class SetsOpsTest { +class SetOpsTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -28,6 +30,7 @@ public void testSetIntersectionMultirow2() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}}); Operand b = tf.constant(new int[][] {{1, 9}, {1, 5}}); int[][] expected = new int[][] {{1, 9}, {0, 0}}; @@ -35,7 +38,7 @@ public void testSetIntersectionMultirow2() { for (Class type : types) { Operand aa = cast(tf, a, type); Operand bb = cast(tf, b, type); - Operand intersection = SetsOps.intersection(tf, aa, bb); + Operand intersection 
= fops.sets.intersection(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); } @@ -49,6 +52,7 @@ public void testSetIntersectionDuplicates2d() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{1, 1, 3}}); Operand b = tf.constant(new int[][] {{1, 1}}); int[][] expected = {{1}}; @@ -56,7 +60,7 @@ public void testSetIntersectionDuplicates2d() { for (Class type : types) { Operand aa = cast(tf, a, type); Operand bb = cast(tf, b, type); - Operand intersection = SetsOps.intersection(tf, aa, bb); + Operand intersection = fops.sets.intersection(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); @@ -72,6 +76,7 @@ public void testDenseSetDifferenceMultirow2d() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{1, 5, 9}, {4, 5, 3}}); Operand b = tf.constant(new int[][] {{1, 2, 6}, {1, 2, 2}}); @@ -81,14 +86,14 @@ public void testDenseSetDifferenceMultirow2d() { int[][] expected = {{5, 9, 0}, {3, 4, 5}}; // a- b Shape expectedShape = Shape.of(2, 3); - Operand intersection = SetsOps.difference(tf, aa, bb); + Operand intersection = fops.sets.difference(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); // b - a expected = new int[][] {{2, 6}, {1, 2}}; expectedShape = Shape.of(2, 2); - intersection = SetsOps.difference(tf, aa, bb, false); + intersection = fops.sets.difference(aa, bb, false); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); @@ -103,6 +108,7 @@ public void testDenseUnionMultirow2d() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}}); Operand b = tf.constant(new int[][] {{1, 9}, {1, 2}}); int[][] expected = new int[][] {{5, 0}, {3, 4}}; @@ -111,7 +117,7 @@ public void testDenseUnionMultirow2d() { Operand bb = cast(tf, b, type); Shape expectedShape = Shape.of(2, 2); // a- b - Operand intersection = SetsOps.difference(tf, aa, bb); + Operand intersection = fops.sets.difference(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); } From 9225a48b7119f0fdc163deee9fe15607708a18ca Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 15:21:11 -0400 Subject: [PATCH 03/60] Added FrameworkOps analogous to Ops. Added NnOps and SetOps as groups. Fixed MetricsHelper and Losses to use the bew FrameworkOps. Moved SetsOps to framework.op. 
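A minimal sketch of the new call pattern exercised by the migrated tests above (an editorial sketch assuming an eager session and the tensorflow-framework artifact on the classpath; the class name SetOpsExample is illustrative only):

import org.tensorflow.Operand;
import org.tensorflow.framework.op.FrameworkOps;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TInt32;

public class SetOpsExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();                        // default eager session
    FrameworkOps fops = FrameworkOps.create(tf);  // framework ops share the core scope

    Operand<TInt32> a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}});
    Operand<TInt32> b = tf.constant(new int[][] {{1, 9}, {1, 5}});

    // Row-wise set intersection of the two dense operands.
    Operand<TInt32> intersection = fops.sets.intersection(a, b);
    // Row-wise set difference, a minus b.
    Operand<TInt32> difference = fops.sets.difference(a, b);

    System.out.println("intersection shape: " + intersection.shape());
    System.out.println("difference shape: " + difference.shape());
  }
}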
--- .../src/main/java/org/tensorflow/framework/losses/Losses.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index aa5fa4ada6d..33c8d50409d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -572,7 +572,7 @@ public static Operand sparseCategoricalCrossentropy( tf.constant( new long[] {-1L, predictionsShape.size(predictionsShape.numDimensions() - 1)})); } - + Operand loss = fop.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); if (updateShape && predictionsRank >= 3) { Shape newShape = predictionsShape.take(predictionsShape.numDimensions() - 1); From caab79bf3c58344bdf675087a25dac399e837462 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 15:36:41 -0400 Subject: [PATCH 04/60] Move l2Normalize to MathOps --- .../tensorflow/framework/losses/Losses.java | 23 ++----- .../tensorflow/framework/op/FrameworkOps.java | 3 + .../org/tensorflow/framework/op/MathOps.java | 67 +++++++++++++++++++ 3 files changed, 74 insertions(+), 19 deletions(-) create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index 33c8d50409d..398588cee67 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -337,13 +337,14 @@ public static Operand categoricalHinge( */ public static Operand cosineSimilarity( Ops tf, Operand labels, Operand predictions, int[] axis) { + FrameworkOps fops = FrameworkOps.create(tf); Operand tLabels = cast(tf, labels, predictions.type()); LossTuple lossTuple = LossesHelper.squeezeOrExpandDimensions(tf, tLabels, predictions, null); predictions = lossTuple.getTarget(); tLabels = lossTuple.getLabels(); - tLabels = l2Normalize(tf, tLabels, axis); - predictions = l2Normalize(tf, predictions, axis); + tLabels = fops.math.l2Normalize(tLabels, axis); + predictions = fops.math.l2Normalize(predictions, axis); Operand mathMul = tf.math.mul(tLabels, predictions); return tf.reduceSum(mathMul, tf.constant(axis), ReduceSum.keepDims(Boolean.FALSE)); } @@ -651,23 +652,7 @@ private static Operand smoothCategoricalLabels( return tf.math.add(tf.math.mul(labels, oneMinusSmoothing), tf.math.div(smoothing, numClasses)); } - // TODO this was tf.math.l2_normalize in TF Python - /** - * Normalizes along dimension axis using an L2 norm. - * - * @param tf The TensorFlow Ops - * @param x the input - * @param axis Dimension along which to normalize. - * @param the data type for the input and the result - * @return the normalized values based on L2 norm - */ - public static Operand l2Normalize(Ops tf, Operand x, int[] axis) { - Operand squareSum = - tf.reduceSum(tf.math.square(x), tf.constant(axis), ReduceSum.keepDims(Boolean.TRUE)); - Operand invNorm = - tf.math.rsqrt(tf.math.maximum(squareSum, cast(tf, tf.constant(1e-12F), x.type()))); - return tf.math.mul(x, invNorm); - } + /** * Converts binary labels into -1/1. 
diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index cecbecfed15..18fb8ada6b7 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -34,6 +34,7 @@ public class FrameworkOps { public final NnOps nn; public final SetsOps sets; + public final MathOps math; /** * Creates a FrameworkOps instance with the provided scope @@ -45,6 +46,7 @@ private FrameworkOps(Scope scope) { this.scope = scope; nn = new NnOps(this); sets = new SetsOps(this); + math = new MathOps(this); } /** @@ -57,6 +59,7 @@ private FrameworkOps(Ops coreOps) { this.scope = coreOps.scope(); nn = new NnOps(this); sets = new SetsOps(this); + math = new MathOps(this); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java new file mode 100644 index 00000000000..57a18fc63c2 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -0,0 +1,67 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.Ops; +import org.tensorflow.op.Scope; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Maximum; +import org.tensorflow.op.math.Mul; +import org.tensorflow.op.math.Rsqrt; +import org.tensorflow.op.math.Square; +import org.tensorflow.types.family.TNumber; + +import static org.tensorflow.framework.utils.CastHelper.cast; + +public class MathOps { + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + MathOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + + /** + * Normalizes along dimension axis using an L2 norm. + * + * @param x the input + * @param axis Dimension along which to normalize. 
+ * @param the data type for the input and the result + * @return the normalized values based on L2 norm + */ + public Operand l2Normalize(Operand x, int[] axis) { + Operand squareSum = + ReduceSum.create(scope, + Square.create(scope, x), + Constant.vectorOf(scope, axis), + ReduceSum.keepDims(Boolean.TRUE)); + Operand invNorm = + Rsqrt.create(scope, + Maximum.create(scope, squareSum, + Cast.create(scope, + Constant.scalarOf(scope, 1e-12F), x.type()))); + return Mul.create(scope, x, invNorm); + } +} From bd072f4c56b05c007e91ffe68a025b6a0cf03f77 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 18:50:26 -0400 Subject: [PATCH 05/60] Reformat code, fix javadocs --- .../tensorflow/framework/op/FrameworkOps.java | 76 +++-- .../org/tensorflow/framework/op/MathOps.java | 68 ++-- .../org/tensorflow/framework/op/NnOps.java | 312 +++++++++--------- .../op/nn/SigmoidCrossEntropyWithLogits.java | 14 +- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 3 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 52 +-- 6 files changed, 271 insertions(+), 254 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index 18fb8ada6b7..c8b234f2c51 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -30,11 +30,10 @@ */ public class FrameworkOps { public final Ops coreOps; - private final Scope scope; - public final NnOps nn; public final SetsOps sets; public final MathOps math; + private final Scope scope; /** * Creates a FrameworkOps instance with the provided scope @@ -62,8 +61,43 @@ private FrameworkOps(Ops coreOps) { math = new MathOps(this); } + /** + * Creates an API for building operations in the provided execution environment + * + * @param env the exection environment + * @return the FrameworkOps + */ + public static FrameworkOps create(ExecutionEnvironment env) { + return new FrameworkOps(new Scope(env)); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + *

Invoking this method is equivalent to {@code + * FrameworkOps.create(EagerSession.getDefault())}. + * + * @return the FrameworkOps + */ + public static FrameworkOps create() { + return new FrameworkOps(new Scope(EagerSession.getDefault())); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + * @param coreOps the TensorFlow core Ops + * @return the FrameworkOps + */ + public static FrameworkOps create(Ops coreOps) { + return new FrameworkOps(coreOps); + } - /** Returns the current {@link Scope scope} of this API */ + /** + * Returns the current {@link Scope scope} of this API + * + * @return the current {@link Scope scope} of this API + */ public final Scope scope() { return scope; } @@ -81,6 +115,9 @@ public final Ops coreOps() { * Returns an API that builds operations with the provided name prefix. * *

{@link Scope#withSubScope(String)} + * + * @param childScopeName the name of the child scope + * @return the FrameworkOps */ public FrameworkOps withSubScope(String childScopeName) { return new FrameworkOps(scope.withSubScope(childScopeName)); } @@ -90,6 +127,9 @@ public FrameworkOps withSubScope(String childScopeName) { * Returns an API that uses the provided name for an op. * *

{@link Scope#withName(String)} + * + * @param opName the name of the scope + * @return the FrameworkOps */ public FrameworkOps withName(String opName) { return new FrameworkOps(scope.withName(opName)); @@ -99,6 +139,9 @@ public FrameworkOps withName(String opName) { * Returns an API that places the created operations on the device(s) matching the provided spec. * *

{@link Scope#withDevice(DeviceSpec)} + * + * @param deviceSpec the device specification for the scope + * @return the FrameworkOps */ public FrameworkOps withDevice(DeviceSpec deviceSpec) { return new FrameworkOps(scope.withDevice(deviceSpec)); @@ -108,32 +151,11 @@ public FrameworkOps withDevice(DeviceSpec deviceSpec) { * Returns an API that adds operations to the graph with the provided control dependencies. * *

{@link Scope#withControlDependencies(Iterable)} + * + * @param controls the operations + * @return the FrameworkOps */ public FrameworkOps withControlDependencies(Iterable controls) { return new FrameworkOps(scope.withControlDependencies(controls)); } - - /** Creates an API for building operations in the provided execution environment */ - public static FrameworkOps create(ExecutionEnvironment env) { - return new FrameworkOps(new Scope(env)); - } - - /** - * Creates an API for building operations in the default eager execution environment - * - *

Invoking this method is equivalent to {@code - * FrameworkOps.create(EagerSession.getDefault())}. - */ - public static FrameworkOps create() { - return new FrameworkOps(new Scope(EagerSession.getDefault())); - } - - /** - * Creates an API for building operations in the default eager execution environment - * - * @param coreOps the TensorFlow core Ops - */ - public static FrameworkOps create(Ops coreOps) { - return new FrameworkOps(coreOps); - } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java index 57a18fc63c2..5208cde98f3 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -15,7 +15,6 @@ package org.tensorflow.framework.op; import org.tensorflow.Operand; -import org.tensorflow.op.Ops; import org.tensorflow.op.Scope; import org.tensorflow.op.core.Constant; import org.tensorflow.op.core.ReduceSum; @@ -26,42 +25,41 @@ import org.tensorflow.op.math.Square; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - public class MathOps { - private final Scope scope; + private final Scope scope; - private final FrameworkOps frameworkOps; + private final FrameworkOps frameworkOps; - /** - * Creates Framework {@code nn} Operations - * - * @param frameworkOps the TensorFLow framework Ops - */ - MathOps(FrameworkOps frameworkOps) { - this.scope = frameworkOps.scope(); - this.frameworkOps = frameworkOps; - } + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + MathOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } - /** - * Normalizes along dimension axis using an L2 norm. - * - * @param x the input - * @param axis Dimension along which to normalize. - * @param the data type for the input and the result - * @return the normalized values based on L2 norm - */ - public Operand l2Normalize(Operand x, int[] axis) { - Operand squareSum = - ReduceSum.create(scope, - Square.create(scope, x), - Constant.vectorOf(scope, axis), - ReduceSum.keepDims(Boolean.TRUE)); - Operand invNorm = - Rsqrt.create(scope, - Maximum.create(scope, squareSum, - Cast.create(scope, - Constant.scalarOf(scope, 1e-12F), x.type()))); - return Mul.create(scope, x, invNorm); - } + /** + * Normalizes along dimension axis using an L2 norm. + * + * @param x the input + * @param axis Dimension along which to normalize. + * @param the data type for the input and the result + * @return the normalized values based on L2 norm + */ + public Operand l2Normalize(Operand x, int[] axis) { + Operand squareSum = + ReduceSum.create( + scope, + Square.create(scope, x), + Constant.vectorOf(scope, axis), + ReduceSum.keepDims(Boolean.TRUE)); + Operand invNorm = + Rsqrt.create( + scope, + Maximum.create( + scope, squareSum, Cast.create(scope, Constant.scalarOf(scope, 1e-12F), x.type()))); + return Mul.create(scope, x, invNorm); + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java index 4054f3ddbb5..0fea3743d95 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java @@ -32,166 +32,164 @@ *

{@link FrameworkOps} */ public class NnOps { - private final Scope scope; + private final Scope scope; - private final FrameworkOps frameworkOps; + private final FrameworkOps frameworkOps; - /** - * Creates Framework {@code nn} Operations - * @param frameworkOps the TensorFLow framework Ops - */ - NnOps(FrameworkOps frameworkOps) { - this.scope = frameworkOps.scope(); - this.frameworkOps = frameworkOps; - } + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + NnOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } - /** - * Computes sigmoid cross entropy given logits. - * - *

Measures the probability error in discrete classification tasks in which each class is - * independent and not mutually exclusive. For instance, one could perform multilabel - * classification where a picture can contain both an elephant and a dog at the same time. - * - *

For brevity, let x = logits, z = labels. The logistic loss in - * pseudo-code is - * - *

-     *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
-     *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
-     *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
-     *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
-     *   = (1 - z) * x + log(1 + exp(-x))
-     *   = x - x * z + log(1 + exp(-x))
-     *  
- * - *

For x < 0, to avoid overflow in exp(-x), we reformulate the above - * - *

-     *  x - x * z + log(1 + exp(-x))
-     *   = log(exp(x)) - x * z + log(1 + exp(-x))
-     *   = - x * z + log(1 + exp(x))
-     *  
- * - *

Hence, to ensure stability and avoid overflow, the implementation uses this equivalent - * formulation - * - *

-     *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
-     *  
- * - *

logits and labels must have the same type and shape. - * - *

- * - * @param labels the labels - * @param logits the logits of type float32 or float64 - * @param the type of labels and logits - * @return the component-wise logistic losses. - * @throws IllegalArgumentException if logits' and labels' do not have the same shape - */ - public Operand sigmoidCrossEntropyWithLogits(Operand labels, - Operand logits) { - return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); - } - - /** - * Computes softmax cross entropy between logits and labels. - * - *

Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - *

NOTE: - * - *

While the classes are mutually exclusive, their probabilities need not be. All that is - * required is that each row of labels is a valid probability distribution. If they - * are not, the computation of the gradient will be incorrect. - * - *

If using exclusive labels (wherein one and only one class is true at a time), - * see {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} - * - *

Usage: - * - *

-     *    Operand<TFloat32> logits =
-     *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
-     *    Operand<TFloat32> labels =
-     *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
-     *    Operand<TFloat32> output =
-     *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
-     *    // output Shape = [2]
-     *    // dataType = FLOAT (1)
-     *    // values { 0.169846, 0.824745 }
-     *  
- * - *

Backpropagation will happen into both logits and labels. To - * disallow backpropagation into labels, pass label tensors through - * tf.stopGradient before feeding it to this function. - * - * @param labels Each vector along the class dimension should hold a valid probability - * distribution e.g. for the case in which labels are of shape [batch_size, num_classes] - * , each row of labels[i] must be a valid probability distribution. - * @param logits Per-label activations, typically a linear output. These activation energies are - * interpreted as unnormalized log probabilities. - * @param axis The class dimension. -1 is the last dimension. - * @param the number type of the operands - * @return the softmax cross entropy loss. Its type is the same as logits and its - * shape is the same as labels except that it does not have the last dimension of - * labels. - */ - public Operand softmaxCrossEntropyWithLogits( - Operand labels, Operand logits, int axis) { - return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); - } - - /** - * Computes sparse softmax cross entropy between logits and labels. - * - *

Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - *

NOTE: - * - *

For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the labels vector must provide a single specific - * index for the true class for each row of logits (each minibatch entry). For soft - * softmax classification with a probability distribution for each entry, {@link - * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. - * - *

WARNING: - * - *

This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of softmax, - * as it will produce incorrect results. - * - *

A common use case is to have logits of shape [batchSize, numClasses] and have - * labels of shape [batchSize], but higher dimensions are supported, in which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, TFloat32 - * , or TFloat64, and labels must have the dtype of TInt32 - * or TInt64. - * - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log - * probabilities. - * @param The data type for the labels - * @param The data type for the logits and loss - * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank - * of the labels is not equal to the rank of the logits minus one. - */ - - public Operand sparseSoftmaxCrossEntropyWithLogits( - Operand labels, Operand logits) { - return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits(scope, labels, logits); - } + /** + * Computes sigmoid cross entropy given {@code logits}. + * + *

Measures the probability error in discrete classification tasks in which each class is + * independent and not mutually exclusive. For instance, one could perform multilabel + * classification where a picture can contain both an elephant and a dog at the same time. + * + *

For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in pseudo-code is + * + *

+   *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
+   *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
+   *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
+   *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
+   *   = (1 - z) * x + log(1 + exp(-x))
+   *   = x - x * z + log(1 + exp(-x))
+   *  
+ * + *

For {@code x < 0}, to avoid overflow in {@code exp(-x)}, we reformulate the above + * + *

+   *  x - x * z + log(1 + exp(-x))
+   *   = log(exp(x)) - x * z + log(1 + exp(-x))
+   *   = - x * z + log(1 + exp(x))
+   *  
+ * + *

Hence, to ensure stability and avoid overflow, the implementation uses this equivalent + * formulation + * + *

+   *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
+   *  
+ * + *

{@code logits} and {@code labels} must have the same type and shape. + * + *
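A quick standalone check of the equivalent formulation above, in plain Java (an editorial sketch; no TensorFlow required, and the sample values are only illustrative):

public class SigmoidCrossEntropyCheck {
  // Naive form: z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
  static double naive(double x, double z) {
    double sig = 1.0 / (1.0 + Math.exp(-x));
    return z * -Math.log(sig) + (1 - z) * -Math.log(1 - sig);
  }

  // Stable form: max(x, 0) - x * z + log(1 + exp(-abs(x)))
  static double stable(double x, double z) {
    return Math.max(x, 0.0) - x * z + Math.log1p(Math.exp(-Math.abs(x)));
  }

  public static void main(String[] args) {
    double z = 0.8;
    for (double x : new double[] {-30.0, -2.5, 0.0, 3.0}) {
      System.out.printf("x=%6.1f  naive=%.6f  stable=%.6f%n", x, naive(x, z), stable(x, z));
    }
  }
}

Both forms agree to machine precision on these inputs; the stable form never evaluates exp on a large positive argument, which is the point of the reformulation.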

+ * + * @param labels the labels + * @param logits the logits of type float32 or float64 + * @param the type of labels and logits + * @return the component-wise logistic losses. + * @throws IllegalArgumentException if logits' and labels' do not have the same shape + */ + public Operand sigmoidCrossEntropyWithLogits( + Operand labels, Operand logits) { + return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); + } + /** + * Computes softmax cross entropy between {@code logits} and {@code labels}. + * + *

Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

NOTE: + * + *

While the classes are mutually exclusive, their probabilities need not be. All that is + * required is that each row of {@code labels} is a valid probability distribution. If they are + * not, the computation of the gradient will be incorrect. + * + *

If using exclusive {@code labels} (wherein one and only one class is true at a time), see + * {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} + * + *

Usage: + * + *

+   *    Operand<TFloat32> logits =
+   *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
+   *    Operand<TFloat32> labels =
+   *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
+   *    Operand<TFloat32> output =
+   *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
+   *    // output Shape = [2]
+   *    // dataType = FLOAT (1)
+   *    // values { 0.169846, 0.824745 }
+   *  
+ * + *

Backpropagation will happen into both {@code logits} and {@code labels}. To disallow + * backpropagation into {@code labels}, pass label tensors through {@code tf.stopGradient} before + * feeding it to this function. + * + * @param labels Each vector along the class dimension should hold a valid probability + * distribution e.g. for the case in which labels are of shape {@code [batch_size, + * num_classes] }, each row of {@code labels[i]} must be a valid probability distribution. + * @param logits Per-label activations, typically a linear output. These activation energies are + * interpreted as unnormalized log probabilities. + * @param axis The class dimension. -1 is the last dimension. + * @param the number type of the operands + * @param the data type for the labels. + * @return the softmax cross entropy loss. Its type is the same as {@code logits} and its shape is + * the same as {@code labels} except that it does not have the last dimension of {@code + * labels}. + * + */ + public Operand softmaxCrossEntropyWithLogits( + Operand labels, Operand logits, int axis) { + return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); + } + /** + * Computes sparse softmax cross entropy between {@code logits} and {@code labels}. + * + *

Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

NOTE: + * + *

For this operation, the probability of a given label is considered exclusive. That is, soft + * classes are not allowed, and the {@code labels} vector must provide a single specific index for + * the true class for each row of {@code logits} (each minibatch entry). For soft softmax + * classification with a probability distribution for each entry, {@link + * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. + * + *

WARNING: + * + *

This op expects unscaled logits, since it performs a {@code softmax} on {@code logits } + * internally for efficiency. Do not call this op with the output of {@code softmax}, as it will + * produce incorrect results. + * + *
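A minimal usage sketch with raw (unscaled) logits (an editorial sketch assuming an eager session and the framework artifact on the classpath; class name and values are illustrative only):

import org.tensorflow.Operand;
import org.tensorflow.framework.op.FrameworkOps;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

public class SparseSoftmaxExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();
    FrameworkOps fops = FrameworkOps.create(tf);

    // logits: [batchSize, numClasses]; labels: [batchSize] integer class indices.
    Operand<TFloat32> logits =
        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}});
    Operand<TInt32> labels = tf.constant(new int[] {0, 1});

    // Per-example loss of shape [batchSize]; the logits are passed in unscaled.
    Operand<TFloat32> loss = fops.nn.sparseSoftmaxCrossEntropyWithLogits(labels, logits);
    System.out.println(loss.shape());
  }
}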

A common use case is to have logits of shape {@code [batchSize, numClasses]} and have labels + * of shape {@code [batchSize]}, but higher dimensions are supported, in which case the {@code + * dim}-th dimension is assumed to be of size {@code numClasses}. {@code logits} must have the + * {@code dataType} of {@code TFloat16}, {@code TFloat32} , or {@code TFloat64}, and {@code + * labels} must have the dtype of {@code TInt32} or {@code TInt64}. + * + * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r } is + * rank of {@code labels} and result) and the dataType is {@code TInt32} or {@code TInt64}. + * Each entry in {@code labels} must be an index in {@code [0, numClasses)}. Other values will + * raise an exception when this op is run on CPU, and return {@code NaN} for corresponding + * loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., + * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, or {@code + * TFloat64}. These activation energies are interpreted as unnormalized log probabilities. + * @param The data type for the labels + * @param The data type for the logits and loss + * @return the loss + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank + * of the labels is not equal to the rank of the logits minus one. + */ + public Operand sparseSoftmaxCrossEntropyWithLogits( + Operand labels, Operand logits) { + return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits( + scope, labels, logits); + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index b55385839d3..fc3f7739363 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -3,8 +3,6 @@ import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; import org.tensorflow.op.core.Select; import org.tensorflow.op.core.ZerosLike; import org.tensorflow.op.dtypes.Cast; @@ -18,17 +16,17 @@ import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -//@Operator(group = "nn") +// @Operator(group = "nn") public class SigmoidCrossEntropyWithLogits { /** - * Computes sigmoid cross entropy given logits. + * Computes sigmoid cross entropy given {@code logits}. * *

Measures the probability error in discrete classification tasks in which each class is * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. * - *

For brevity, let x = logits, z = labels. The logistic loss in + *

For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in * pseudo-code is * *

@@ -40,7 +38,7 @@ public class SigmoidCrossEntropyWithLogits {
    *  = x - x * z + log(1 + exp(-x))
    * 
* - *

For x < 0, to avoid overflow in exp(-x), we reformulate the above + *

For {@code x < 0}, to avoid overflow in {@code exp(-x)}, we reformulate the above * *

    * x - x * z + log(1 + exp(-x))
@@ -55,7 +53,7 @@ public class SigmoidCrossEntropyWithLogits {
    *   max(x, 0) - x * z + log(1 + exp(-abs(x)))
    * 
* - *

logits and labels must have the same type and shape. + *

{@code logits} and {@code labels} must have the same type and shape. * *

* @@ -66,7 +64,7 @@ public class SigmoidCrossEntropyWithLogits { * @return the component-wise logistic losses. * @throws IllegalArgumentException if logits' and labels' do not have the same shape */ - //@Endpoint(name = "sigmoidCrossEntropyWithLogits") + // @Endpoint(name = "sigmoidCrossEntropyWithLogits") public static Operand sigmoidCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits) { if (!isCompatible(labels.shape(), logits.shape())) { diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java index 0f5b8197f1e..7d59941f27a 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -66,7 +66,8 @@ public class SoftmaxCrossEntropyWithLogits { * @param logits Per-label activations, typically a linear output. These activation energies are * interpreted as unnormalized log probabilities. * @param axis The class dimension. -1 is the last dimension. - * @param the number type of the operands + * @param the data type for the logits and return operand + * @param the data type for the labels * @return the softmax cross entropy loss. Its type is the same as logits and its * shape is the same as labels except that it does not have the last dimension of * labels. diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 75766cf9bfb..0b2d29d6092 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -25,7 +25,7 @@ public class SparseSoftmaxCrossEntropyWithLogits { /** - * Computes sparse softmax cross entropy between logits and labels. + * Computes sparse softmax cross entropy between {@code logits} and {@code labels}. * *

Measures the probability error in discrete classification tasks in which the classes are * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is @@ -34,45 +34,45 @@ public class SparseSoftmaxCrossEntropyWithLogits { *

NOTE: * *

For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the labels vector must provide a single specific - * index for the true class for each row of logits (each minibatch entry). For soft + * classes are not allowed, and the {@code labels} vector must provide a single specific + * index for the true class for each row of {@code logits} (each minibatch entry). For soft * softmax classification with a probability distribution for each entry, {@link * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. * *

WARNING: * - *

This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of softmax, + *

This op expects unscaled logits, since it performs a {@code softmax} on {@code logits + * } internally for efficiency. Do not call this op with the output of {@code softmax}, * as it will produce incorrect results. * - *

A common use case is to have logits of shape [batchSize, numClasses] and have - * labels of shape [batchSize], but higher dimensions are supported, in which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, TFloat32 - * , or TFloat64, and labels must have the dtype of TInt32 - * or TInt64. + *

A common use case is to have logits of shape {@code [batchSize, numClasses]} and have + * labels of shape {@code [batchSize]}, but higher dimensions are supported, in which case + * the {@code dim}-th dimension is assumed to be of size {@code numClasses}. {@code + * logits} must have the {@code dataType} of {@code TFloat16}, {@code TFloat32} + * , or {@code TFloat64}, and {@code labels} must have the dtype of {@code TInt32} + * or {@code TInt64}. * * @param scope current scope - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log + * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r + * } is rank of {@code labels} and result) and the dataType is {@code TInt32} + * or {@code TInt64}. Each entry in {@code labels} must be an index in {@code [0, + * numClasses)}. Other values will raise an exception when this op is run on CPU, and + * return {@code NaN} for corresponding loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., + * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, + * or {@code TFloat64}. These activation energies are interpreted as unnormalized log * probabilities. - * @param the data type for the labels - * @param the data tyoe for the loss and logits. + * @param the data type for the labels + * @param the data tyoe for the loss and logits. * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank * of the labels is not equal to the rank of the logits minus one. */ @SuppressWarnings("unchecked") @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits") public static - Operand sparseSoftmaxCrossEntropyWithLogits( - Scope scope, Operand labels, Operand logits) { + Operand sparseSoftmaxCrossEntropyWithLogits( + Scope scope, Operand labels, Operand logits) { scope = scope.withSubScope("SparseSoftmaxCrossEntropyWithLogits"); Operand preciseLogits; if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { @@ -119,7 +119,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( return Cast.create(scope, cost, logits.type()); } else { // Unchecked cast already checked with previous if - return (Operand) cost; + return (Operand) cost; } } @@ -160,7 +160,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( return Cast.create(scope, cost, logits.type()); } else { // Unchecked cast already checked with previous if - return (Operand) cost; + return (Operand) cost; } } } From d29262b5a3169b6ac7f58890661138910ef6ac4f Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Fri, 16 Apr 2021 18:04:30 -0400 Subject: [PATCH 06/60] Add confusionMatrix() method. 
add Unit test --- .../org/tensorflow/framework/op/MathOps.java | 301 +++++++++++++ .../tensorflow/framework/op/MathOpsTest.java | 413 ++++++++++++++++++ 2 files changed, 714 insertions(+) create mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java index 5208cde98f3..36f5b692cab 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -15,16 +15,37 @@ package org.tensorflow.framework.op; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.LossTuple; +import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; +import org.tensorflow.op.core.AssertThat; import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Identity; +import org.tensorflow.op.core.OnesLike; +import org.tensorflow.op.core.Range; +import org.tensorflow.op.core.Rank; +import org.tensorflow.op.core.ReduceAll; +import org.tensorflow.op.core.ReduceMax; import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.core.ScatterNd; +import org.tensorflow.op.core.Squeeze; +import org.tensorflow.op.core.Stack; +import org.tensorflow.op.core.Zeros; import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.GreaterEqual; +import org.tensorflow.op.math.Less; import org.tensorflow.op.math.Maximum; import org.tensorflow.op.math.Mul; import org.tensorflow.op.math.Rsqrt; import org.tensorflow.op.math.Square; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; +import java.util.Arrays; +import java.util.Collections; + public class MathOps { private final Scope scope; @@ -62,4 +83,284 @@ public Operand l2Normalize(Operand x, int[] axis) { scope, squareSum, Cast.create(scope, Constant.scalarOf(scope, 1e-12F), x.type()))); return Mul.create(scope, x, invNorm); } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * labels for a given classification task. Both prediction and labels must be 1-D arrays of the + * same shape in order for this function to work. + * + *

If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in + * either predictions or labels. Class labels are expected to start at 0. For example, if + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. + * + *

If {@code weights} is not null, then each prediction contributes its corresponding weight to the + * total value of the confusion matrix cell. + * + *

For example: + * + *

+   *     fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
+   *         [[0 0 0 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 0 0 0]
+   *          [0 0 0 0 1]]
+   * 
+ * + *
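The example above as a runnable sketch (an editorial sketch assuming an eager session and the framework artifact on the classpath; the class name is illustrative only):

import org.tensorflow.Operand;
import org.tensorflow.framework.op.FrameworkOps;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TInt32;

public class ConfusionMatrixExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();
    FrameworkOps fops = FrameworkOps.create(tf);

    Operand<TInt32> labels = tf.constant(new int[] {1, 2, 4});
    Operand<TInt32> predictions = tf.constant(new int[] {2, 2, 4});

    // Rows are the real labels, columns are the predicted labels; the result is
    // 5x5 because the largest value seen is 4 and class labels start at 0.
    Operand<TInt32> cm = fops.math.confusionMatrix(labels, predictions);
    System.out.println(cm.shape()); // (5, 5) when run eagerly
  }
}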

Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public Operand confusionMatrix(Operand labels, Operand predictions) { + return confusionMatrix(labels, predictions, null, null, labels.type()); + } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * labels for a given classification task. Both prediction and labels must be 1-D arrays of the + * same shape in order for this function to work. + * + *

If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in + * either predictions or labels. Class labels are expected to start at 0. For example, if + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. + * + *

If {@code weights} is not null, then each prediction contributes its corresponding weight to the + * total value of the confusion matrix cell. + * + *

For example: + * + *

+   *     fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
+   *         [[0 0 0 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 0 0 0]
+   *          [0 0 0 0 1]]
+   * 
+ * + *

Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param weights An optional Operand whose shape matches {@code predictions}. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public Operand confusionMatrix( + Operand labels, Operand predictions, Operand weights) { + return confusionMatrix(labels, predictions, weights, null, labels.type()); + } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * labels for a given classification task. Both prediction and labels must be 1-D arrays of the + * same shape in order for this function to work. + * + *

If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in + * either predictions or labels. Class labels are expected to start at 0. For example, if + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. + * + *

If {@code weights} is not null, then each prediction contributes its corresponding weight to the + * total value of the confusion matrix cell. + * + *

For example: + * + *

+   *     fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
+   *         [[0 0 0 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 0 0 0]
+   *          [0 0 0 0 1]]
+   * 
+ * + *

Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param weights An optional Operand whose shape matches {@code predictions}. + * @param numClasses The possible number of labels the classification task can have. If this value + * is null, it will be calculated using both predictions and labels. + * @param type Data type of the confusion matrix. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public Operand confusionMatrix( + Operand labels, + Operand predictions, + Operand weights, + Operand numClasses, + Class type) { + Scope lScope = scope.withSubScope("confusionMatrix"); + LossTuple tuple = removeSqueezableDimensions(labels, predictions, 0); + Operand lLabels = Cast.create(lScope, tuple.getLabels(), TInt64.class); + Operand lPredictions = Cast.create(lScope, tuple.getTarget(), TInt64.class); + + Operand zero = Constant.scalarOf(lScope, 0L); + Operand one = Constant.scalarOf(lScope, 1L); + + AssertThat labelsNonNegative = + AssertThat.create( + lScope, + ReduceAll.create(lScope, GreaterEqual.create(lScope, lLabels, zero), allAxes(lLabels)), + Collections.singletonList( + Constant.scalarOf(lScope, "labels contains negative values"))); + lLabels = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(labelsNonNegative)), lLabels); + + AssertThat predictionsNonNegative = + AssertThat.create( + lScope, + ReduceAll.create( + lScope, GreaterEqual.create(lScope, lPredictions, zero), allAxes(lPredictions)), + Collections.singletonList( + Constant.scalarOf(lScope, "predictions contains negative values"))); + lPredictions = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(predictionsNonNegative)), + lPredictions); + + Operand lNumClasses; + if (numClasses == null) { + lNumClasses = + Add.create( + lScope, + Maximum.create( + lScope, + ReduceMax.create(lScope, lPredictions, zero), + ReduceMax.create(lScope, lLabels, zero)), + one); + } else { + lNumClasses = Cast.create(lScope, numClasses, TInt64.class); + AssertThat labelsLess = + AssertThat.create( + lScope, + Less.create(lScope, lLabels, lNumClasses), + Collections.singletonList(Constant.scalarOf(lScope, "labels out of bounds"))); + lLabels = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(labelsLess)), lLabels); + + AssertThat predictionsLess = + AssertThat.create( + lScope, + Less.create(lScope, lPredictions, lNumClasses), + Collections.singletonList(Constant.scalarOf(lScope, "predictions out of bounds"))); + lPredictions = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(predictionsLess)), + lPredictions); + } + + if (weights != null) { + if (!predictions.shape().isCompatibleWith(weights.shape())) { + throw new IllegalArgumentException( + String.format( + "predictions.shape() [%s], is not compatible with weights.shape() [ %s].", + predictions.shape(), weights.shape())); + } + } + + Operand shape = Stack.create(lScope, 
Arrays.asList(lNumClasses, lNumClasses)); + Operand indices = + Stack.create(lScope, Arrays.asList(lLabels, lPredictions), Stack.axis(1L)); + Operand values = weights == null ? OnesLike.create(lScope, predictions) : weights; + Operand zeroMatrix = Zeros.create(lScope, Cast.create(lScope, shape, TInt32.class), type); + + return ScatterNd.create(lScope, indices, values, shape); + } + + /** + * Squeeze last dim if ranks differ from expected by exactly 1. + * + * @param labels Label values, a Operand whose dimensions match predictions + * . + * @param predictions Predicted values, a Tensor of arbitrary dimensions. + * @param expectedRankDiff Expected result of rank(predictions) - rank(labels). + * @param the data type for the labels, predictions and result + * @return labels and predictions, possibly with last dim squeezed. + */ + public LossTuple removeSqueezableDimensions( + Operand labels, Operand predictions, int expectedRankDiff) { + Scope lScope = scope.withSubScope("removeSqueezableDimensions"); + Shape predictionsShape = predictions.shape(); + int predictionsRank = predictionsShape.numDimensions(); + Shape labelsShape = labels.shape(); + int labelsRank = labelsShape.numDimensions(); + + if (predictionsRank != Shape.UNKNOWN_SIZE || labelsRank != Shape.UNKNOWN_SIZE) { + // Use static rank. + int rankDiff = predictionsRank - labelsRank; + if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.size(-1), 1)) { + predictions = Squeeze.create(lScope, predictions); + } else if (rankDiff == expectedRankDiff - 1 && Shape.isCompatible(labelsShape.size(-1), 1)) { + labels = Squeeze.create(lScope, labels); + } + return new LossTuple<>(labels, predictions); + } + // Use dynamic rank. + + // TODO: hold for lazy select feature, + // Operand rankDiff = tf.math.sub(tf.rank(predictions), tf.rank(labels)); + if (predictionsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(predictionsShape.size(-1), 1)) { + /* + * TODO, if we ever get a select that does lazy evaluation, but for now do the tf.squeeze + * predictions = tf.select( tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), + * tf.squeeze(predictions, Squeeze.axis(Arrays.asList(-1L))), predictions ); * + */ + predictions = + Squeeze.create(lScope, predictions, Squeeze.axis(Collections.singletonList(-1L))); + } + if (labelsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(labelsShape.size(-1), 1)) { + /* + * TODO, if we ever get a select that does lazy evaluation labels = tf.select( + * tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), tf.squeeze(labels, + * Squeeze.axis(Arrays.asList(-1L))), predictions ); * + */ + labels = Squeeze.create(lScope, labels, Squeeze.axis(Collections.singletonList(-1L))); + } + return new LossTuple<>(labels, predictions); + } + + public Operand allAxes(Operand op) { + int rank = op.shape().numDimensions(); + if (rank != Shape.UNKNOWN_SIZE) { + int[] axes = new int[rank]; + for (int i = 0; i < rank; i++) { + axes[i] = i; + } + return Constant.vectorOf(scope, axes); + } else { + return Range.create( + scope, Constant.scalarOf(scope, 0), Rank.create(scope, op), Constant.scalarOf(scope, 1)); + } + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java new file mode 100644 index 00000000000..326e3cdc2d1 --- /dev/null +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java @@ -0,0 +1,413 @@ +package org.tensorflow.framework.op; + +import 
org.junit.jupiter.api.Test; +import org.tensorflow.Operand; +import org.tensorflow.framework.utils.TestSession; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Ops; +import org.tensorflow.types.TFloat64; +import org.tensorflow.types.TInt64; + +class MathOpsTest { + + private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; + + double[][][] array = + new double[][][] { + { + {4.17021990e-01, 7.20324516e-01, 1.14374816e-04}, + {3.02332580e-01, 1.46755889e-01, 9.23385918e-02}, + {1.86260208e-01, 3.45560730e-01, 3.96767467e-01}, + {5.38816750e-01, 4.19194520e-01, 6.85219526e-01}, + {2.04452246e-01, 8.78117442e-01, 2.73875929e-02}, + {6.70467496e-01, 4.17304814e-01, 5.58689833e-01}, + {1.40386939e-01, 1.98101491e-01, 8.00744593e-01} + }, + { + {9.68261600e-01, 3.13424170e-01, 6.92322612e-01}, + {8.76389146e-01, 8.94606650e-01, 8.50442126e-02}, + {3.90547849e-02, 1.69830427e-01, 8.78142476e-01}, + {9.83468369e-02, 4.21107620e-01, 9.57889557e-01}, + {5.33165276e-01, 6.91877127e-01, 3.15515637e-01}, + {6.86500907e-01, 8.34625661e-01, 1.82882771e-02}, + {7.50144303e-01, 9.88861084e-01, 7.48165667e-01} + }, + { + {2.80443996e-01, 7.89279342e-01, 1.03226006e-01}, + {4.47893530e-01, 9.08595502e-01, 2.93614149e-01}, + {2.87775338e-01, 1.30028576e-01, 1.93669572e-02}, + {6.78835511e-01, 2.11628109e-01, 2.65546650e-01}, + {4.91573155e-01, 5.33625446e-02, 5.74117601e-01}, + {1.46728575e-01, 5.89305520e-01, 6.99758351e-01}, + {1.02334432e-01, 4.14055973e-01, 6.94400132e-01} + }, + { + {4.14179265e-01, 4.99534607e-02, 5.35896420e-01}, + {6.63794637e-01, 5.14889121e-01, 9.44594741e-01}, + {5.86555064e-01, 9.03401911e-01, 1.37474701e-01}, + {1.39276341e-01, 8.07391286e-01, 3.97676826e-01}, + {1.65354192e-01, 9.27508593e-01, 3.47765863e-01}, + {7.50812113e-01, 7.25997984e-01, 8.83306086e-01}, + {6.23672187e-01, 7.50942409e-01, 3.48898351e-01} + }, + { + {2.69927889e-01, 8.95886242e-01, 4.28091198e-01}, + {9.64840055e-01, 6.63441479e-01, 6.21695697e-01}, + {1.14745975e-01, 9.49489236e-01, 4.49912131e-01}, + {5.78389585e-01, 4.08136815e-01, 2.37026975e-01}, + {9.03379500e-01, 5.73679507e-01, 2.87032709e-03}, + {6.17144942e-01, 3.26644897e-01, 5.27058125e-01}, + {8.85942101e-01, 3.57269764e-01, 9.08535123e-01} + }, + { + {6.23360097e-01, 1.58212427e-02, 9.29437220e-01}, + {6.90896928e-01, 9.97322857e-01, 1.72340512e-01}, + {1.37135744e-01, 9.32595491e-01, 6.96818173e-01}, + {6.60001710e-02, 7.55463064e-01, 7.53876209e-01}, + {9.23024535e-01, 7.11524785e-01, 1.24270961e-01}, + {1.98801346e-02, 2.62109861e-02, 2.83064879e-02}, + {2.46211067e-01, 8.60027969e-01, 5.38831055e-01} + }, + { + {5.52821994e-01, 8.42030883e-01, 1.24173313e-01}, + {2.79183686e-01, 5.85759282e-01, 9.69595730e-01}, + {5.61030209e-01, 1.86472889e-02, 8.00632656e-01}, + {2.32974276e-01, 8.07105184e-01, 3.87860656e-01}, + {8.63541842e-01, 7.47121632e-01, 5.56240261e-01}, + {1.36455223e-01, 5.99176884e-02, 1.21343456e-01}, + {4.45518792e-02, 1.07494131e-01, 2.25709334e-01} + }, + { + {7.12988973e-01, 5.59717000e-01, 1.25559801e-02}, + {7.19742775e-02, 9.67276335e-01, 5.68100452e-01}, + {2.03293234e-01, 2.52325743e-01, 7.43825853e-01}, + {1.95429474e-01, 5.81358910e-01, 9.70019996e-01}, + {8.46828818e-01, 2.39847764e-01, 4.93769705e-01}, + {6.19955719e-01, 8.28980923e-01, 1.56791389e-01}, + {1.85762029e-02, 7.00221434e-02, 4.86345112e-01} + }, + { + {6.06329441e-01, 5.68851411e-01, 3.17362398e-01}, + {9.88616168e-01, 5.79745233e-01, 3.80141169e-01}, + {5.50948203e-01, 7.45334446e-01, 
6.69232905e-01}, + {2.64919549e-01, 6.63348362e-02, 3.70084196e-01}, + {6.29717529e-01, 2.10174009e-01, 7.52755582e-01}, + {6.65364787e-02, 2.60315090e-01, 8.04754555e-01}, + {1.93434283e-01, 6.39460862e-01, 5.24670303e-01} + }, + { + {9.24807966e-01, 2.63296783e-01, 6.59610927e-02}, + {7.35065937e-01, 7.72178054e-01, 9.07815874e-01}, + {9.31972086e-01, 1.39515726e-02, 2.34362081e-01}, + {6.16778374e-01, 9.49016333e-01, 9.50176120e-01}, + {5.56653202e-01, 9.15606380e-01, 6.41566217e-01}, + {3.90007704e-01, 4.85990673e-01, 6.04310513e-01}, + {5.49547911e-01, 9.26181436e-01, 9.18733418e-01} + }, + { + {3.94875616e-01, 9.63262558e-01, 1.73955664e-01}, + {1.26329526e-01, 1.35079160e-01, 5.05662143e-01}, + {2.15248056e-02, 9.47970212e-01, 8.27115476e-01}, + {1.50189810e-02, 1.76196262e-01, 3.32063586e-01}, + {1.30996838e-01, 8.09490681e-01, 3.44736665e-01}, + {9.40107465e-01, 5.82014203e-01, 8.78831983e-01}, + {8.44734430e-01, 9.05392289e-01, 4.59880263e-01} + }, + { + {5.46346843e-01, 7.98603594e-01, 2.85718858e-01}, + {4.90253508e-01, 5.99110305e-01, 1.55332759e-02}, + {5.93481421e-01, 4.33676362e-01, 8.07360530e-01}, + {3.15244794e-01, 8.92888725e-01, 5.77857196e-01}, + {1.84010208e-01, 7.87929237e-01, 6.12031162e-01}, + {5.39092720e-02, 4.20193672e-01, 6.79068863e-01}, + {9.18601751e-01, 4.02024889e-04, 9.76759136e-01} + }, + { + {3.76580328e-01, 9.73783553e-01, 6.04716122e-01}, + {8.28845799e-01, 5.74711502e-01, 6.28076196e-01}, + {2.85576284e-01, 5.86833358e-01, 7.50021756e-01}, + {8.58313859e-01, 7.55082190e-01, 6.98057234e-01}, + {8.64479423e-01, 3.22681010e-01, 6.70788765e-01}, + {4.50873941e-01, 3.82102758e-01, 4.10811365e-01}, + {4.01479572e-01, 3.17383945e-01, 6.21919394e-01} + }, + { + {4.30247277e-01, 9.73802090e-01, 6.77800894e-01}, + {1.98569894e-01, 4.26701009e-01, 3.43346238e-01}, + {7.97638834e-01, 8.79998267e-01, 9.03841972e-01}, + {6.62719786e-01, 2.70208269e-01, 2.52366692e-01}, + {8.54897916e-01, 5.27714670e-01, 8.02161098e-01}, + {5.72488546e-01, 7.33142555e-01, 5.19011617e-01}, + {7.70883918e-01, 5.68857968e-01, 4.65709865e-01} + }, + { + {3.42688918e-01, 6.82093501e-02, 3.77924174e-01}, + {7.96260759e-02, 9.82817113e-01, 1.81612849e-01}, + {8.11858714e-01, 8.74961674e-01, 6.88413262e-01}, + {5.69494426e-01, 1.60971433e-01, 4.66880023e-01}, + {3.45172048e-01, 2.25039959e-01, 5.92511892e-01}, + {3.12269837e-01, 9.16305542e-01, 9.09635544e-01}, + {2.57118285e-01, 1.10891297e-01, 1.92962736e-01} + }, + { + {4.99584168e-01, 7.28585660e-01, 2.08194435e-01}, + {2.48033553e-01, 8.51671875e-01, 4.15848732e-01}, + {6.16685092e-01, 2.33666137e-01, 1.01967260e-01}, + {5.15857041e-01, 4.77140993e-01, 1.52671650e-01}, + {6.21806204e-01, 5.44010103e-01, 6.54137373e-01}, + {1.44545540e-01, 7.51527846e-01, 2.22049147e-01}, + {5.19351840e-01, 7.85296023e-01, 2.23304275e-02} + }, + { + {3.24362457e-01, 8.72922361e-01, 8.44709635e-01}, + {5.38440585e-01, 8.66608262e-01, 9.49805975e-01}, + {8.26407015e-01, 8.54115427e-01, 9.87434015e-02}, + {6.51304305e-01, 7.03516960e-01, 6.10240817e-01}, + {7.99615264e-01, 3.45712192e-02, 7.70238757e-01}, + {7.31728613e-01, 2.59698391e-01, 2.57069290e-01}, + {6.32303298e-01, 3.45297456e-01, 7.96588659e-01} + }, + { + {4.46146220e-01, 7.82749414e-01, 9.90471780e-01}, + {3.00248325e-01, 1.43005833e-01, 9.01308417e-01}, + {5.41559398e-01, 9.74740386e-01, 6.36604428e-01}, + {9.93912995e-01, 5.46070814e-01, 5.26425958e-01}, + {1.35427907e-01, 3.55705172e-01, 2.62185670e-02}, + {1.60395175e-01, 7.45637178e-01, 3.03996895e-02}, + {3.66543084e-01, 
8.62346232e-01, 6.92677736e-01} + }, + { + {6.90942168e-01, 1.88636795e-01, 4.41904277e-01}, + {5.81577420e-01, 9.89751697e-01, 2.03906223e-01}, + {2.47732908e-01, 2.62173086e-01, 7.50172436e-01}, + {4.56975341e-01, 5.69294393e-02, 5.08516252e-01}, + {2.11960167e-01, 7.98604250e-01, 2.97331393e-01}, + {2.76060123e-02, 5.93432426e-01, 8.43840420e-01}, + {3.81016135e-01, 7.49858320e-01, 5.11141479e-01} + }, + { + {5.40951788e-01, 9.59434330e-01, 8.03960919e-01}, + {3.23230661e-02, 7.09387243e-01, 4.65001494e-01}, + {9.47548926e-01, 2.21432731e-01, 2.67072022e-01}, + {8.14739615e-02, 4.28618819e-01, 1.09018765e-01}, + {6.33786738e-01, 8.02963257e-01, 6.96800470e-01}, + {7.66211390e-01, 3.42454106e-01, 8.45851481e-01}, + {4.28768784e-01, 8.24009895e-01, 6.26496136e-01} + } + }; + + double[][][] expectedArray = { + { + {3.45350616e-02, 5.96526116e-02, 9.47178160e-06}, + {2.50372272e-02, 1.21533722e-02, 7.64688430e-03}, + {1.54248644e-02, 2.86171008e-02, 3.28577124e-02}, + {4.46213149e-02, 3.47149745e-02, 5.67454435e-02}, + {1.69314109e-02, 7.27199987e-02, 2.26806314e-03}, + {5.55237755e-02, 3.45584825e-02, 4.62670736e-02}, + {1.16259372e-02, 1.64054818e-02, 6.63124844e-02} + }, + { + {8.01851526e-02, 2.59557609e-02, 5.73336743e-02}, + {7.25768730e-02, 7.40855262e-02, 7.04281079e-03}, + {3.23426444e-03, 1.40642561e-02, 7.27220699e-02}, + {8.14444851e-03, 3.48734073e-02, 7.93262124e-02}, + {4.41532955e-02, 5.72967827e-02, 2.61289626e-02}, + {5.68515584e-02, 6.91182911e-02, 1.51451665e-03}, + {6.21220917e-02, 8.18910673e-02, 6.19582348e-02} + }, + { + {2.32245550e-02, 6.53630048e-02, 8.54850933e-03}, + {3.70916426e-02, 7.52439946e-02, 2.43152231e-02}, + {2.38316897e-02, 1.07681248e-02, 1.60384597e-03}, + {5.62167615e-02, 1.75256692e-02, 2.19908543e-02}, + {4.07089069e-02, 4.41914052e-03, 4.75447029e-02}, + {1.21511100e-02, 4.88024652e-02, 5.79494536e-02}, + {8.47467501e-03, 3.42894346e-02, 5.75057231e-02} + }, + { + {3.42996456e-02, 4.13682219e-03, 4.43794727e-02}, + {5.49711734e-02, 4.26397808e-02, 7.82252178e-02}, + {4.85746935e-02, 7.48138949e-02, 1.13847647e-02}, + {1.15339644e-02, 6.68629184e-02, 3.29330191e-02}, + {1.36935636e-02, 7.68102556e-02, 2.87997164e-02}, + {6.21773973e-02, 6.01224527e-02, 7.31496885e-02}, + {5.16484901e-02, 6.21881858e-02, 2.88935024e-02} + }, + { + {2.23536789e-02, 7.41914958e-02, 3.54517400e-02}, + {7.99018070e-02, 5.49419262e-02, 5.14848121e-02}, + {9.50251892e-03, 7.86305517e-02, 3.72588076e-02}, + {4.78984788e-02, 3.37992460e-02, 1.96290389e-02}, + {7.48120397e-02, 4.75084223e-02, 2.37701897e-04}, + {5.11079468e-02, 2.70506144e-02, 4.36475389e-02}, + {7.33679906e-02, 2.95867678e-02, 7.52389953e-02} + }, + { + {5.16226478e-02, 1.31021289e-03, 7.69699737e-02}, + {5.72156087e-02, 8.25918168e-02, 1.42721254e-02}, + {1.13566946e-02, 7.72315189e-02, 5.77059686e-02}, + {5.46570681e-03, 6.25625551e-02, 6.24311455e-02}, + {7.64389113e-02, 5.89238741e-02, 1.02913165e-02}, + {1.64634397e-03, 2.17062421e-03, 2.34416011e-03}, + {2.03896053e-02, 7.12219477e-02, 4.46224995e-02} + }, + { + {4.57811356e-02, 6.97315410e-02, 1.02832299e-02}, + {2.31201854e-02, 4.85087894e-02, 8.02956372e-02}, + {4.64608893e-02, 1.54424773e-03, 6.63032085e-02}, + {1.92934200e-02, 6.68392256e-02, 3.21201086e-02}, + {7.15129450e-02, 6.18717745e-02, 4.60642166e-02}, + {1.13003375e-02, 4.96199494e-03, 1.00488793e-02}, + {3.68949817e-03, 8.90196767e-03, 1.86917856e-02} + }, + { + {5.90451285e-02, 4.63521369e-02, 1.03980501e-03}, + {5.96044352e-03, 8.01035613e-02, 4.70464006e-02}, + {1.68354288e-02, 
2.08959840e-02, 6.15988411e-02}, + {1.61842033e-02, 4.81443815e-02, 8.03307742e-02}, + {7.01288804e-02, 1.98626388e-02, 4.08908091e-02}, + {5.13407178e-02, 6.86508343e-02, 1.29844472e-02}, + {1.53836084e-03, 5.79878036e-03, 4.02759537e-02} + }, + { + {5.02122790e-02, 4.71085906e-02, 2.62818988e-02}, + {8.18707868e-02, 4.80107442e-02, 3.14808302e-02}, + {4.56259623e-02, 6.17237724e-02, 5.54215349e-02}, + {2.19389219e-02, 5.49342157e-03, 3.06479763e-02}, + {5.21491282e-02, 1.74052510e-02, 6.23383410e-02}, + {5.51012019e-03, 2.15576105e-02, 6.66445568e-02}, + {1.60189737e-02, 5.29560074e-02, 4.34497967e-02} + }, + { + {7.65866041e-02, 2.18045339e-02, 5.46247046e-03}, + {6.08734004e-02, 6.39467835e-02, 7.51794279e-02}, + {7.71798939e-02, 1.15537888e-03, 1.94083489e-02}, + {5.10775894e-02, 7.85913840e-02, 7.86874294e-02}, + {4.60984148e-02, 7.58245885e-02, 5.31303585e-02}, + {3.22979130e-02, 4.02465984e-02, 5.00450842e-02}, + {4.55099978e-02, 7.67003447e-02, 7.60835484e-02} + }, + { + {3.27010415e-02, 7.97711685e-02, 1.44058811e-02}, + {1.04617933e-02, 1.11863809e-02, 4.18756641e-02}, + {1.78254500e-03, 7.85047561e-02, 6.84963465e-02}, + {1.24377478e-03, 1.45914331e-02, 2.74993554e-02}, + {1.08483098e-02, 6.70367777e-02, 2.85488572e-02}, + {7.78536126e-02, 4.81986478e-02, 7.27791712e-02}, + {6.99554384e-02, 7.49787241e-02, 3.80843058e-02} + }, + { + {4.52449061e-02, 6.61351755e-02, 2.36613862e-02}, + {4.05996218e-02, 4.96144369e-02, 1.28636532e-03}, + {4.91482876e-02, 3.59142683e-02, 6.68603703e-02}, + {2.61065327e-02, 7.39432648e-02, 4.78543900e-02}, + {1.52385337e-02, 6.52511939e-02, 5.06844558e-02}, + {4.46441676e-03, 3.47977169e-02, 5.62360846e-02}, + {7.60726482e-02, 3.32930977e-05, 8.08888674e-02} + }, + { + {3.11859436e-02, 8.06424469e-02, 5.00786714e-02}, + {6.86396435e-02, 4.75938842e-02, 5.20132035e-02}, + {2.36495789e-02, 4.85977381e-02, 6.21119440e-02}, + {7.10799918e-02, 6.25310168e-02, 5.78085780e-02}, + {7.15905875e-02, 2.67223511e-02, 5.55503815e-02}, + {3.73384580e-02, 3.16432752e-02, 3.40207368e-02}, + {3.32479365e-02, 2.62836833e-02, 5.15033379e-02} + }, + { + {3.56302932e-02, 8.06439817e-02, 5.61310798e-02}, + {1.64442733e-02, 3.53366137e-02, 2.84337122e-02}, + {6.60552830e-02, 7.28757605e-02, 7.48503357e-02}, + {5.48821613e-02, 2.23768987e-02, 2.08993759e-02}, + {7.07971081e-02, 4.37019095e-02, 6.64297864e-02}, + {4.74097952e-02, 6.07141182e-02, 4.29811813e-02}, + {6.38396144e-02, 4.71091345e-02, 3.85670736e-02} + }, + { + {2.83792764e-02, 5.64865675e-03, 3.12972330e-02}, + {6.59411587e-03, 8.13905448e-02, 1.50400000e-02}, + {6.72328845e-02, 7.24586621e-02, 5.70099279e-02}, + {4.71618399e-02, 1.33306114e-02, 3.86639796e-02}, + {2.85849143e-02, 1.86363515e-02, 4.90679964e-02}, + {2.58601662e-02, 7.58824944e-02, 7.53301233e-02}, + {2.12928709e-02, 9.18329880e-03, 1.59799233e-02} + }, + { + {4.13723253e-02, 6.03367463e-02, 1.72413141e-02}, + {2.05405317e-02, 7.05299526e-02, 3.44378985e-02}, + {5.10698669e-02, 1.93507168e-02, 8.44426826e-03}, + {4.27199379e-02, 3.95137258e-02, 1.26432776e-02}, + {5.14939614e-02, 4.50513922e-02, 5.41714206e-02}, + {1.19703254e-02, 6.22366704e-02, 1.83886718e-02}, + {4.30093557e-02, 6.50331303e-02, 1.84926135e-03} + }, + { + {2.68615987e-02, 7.22897798e-02, 6.99533820e-02}, + {4.45901640e-02, 7.17668831e-02, 7.86567777e-02}, + {6.84376806e-02, 7.07323104e-02, 8.17728881e-03}, + {5.39368056e-02, 5.82607202e-02, 5.05361930e-02}, + {6.62189573e-02, 2.86296452e-03, 6.37861863e-02}, + {6.05970249e-02, 2.15065386e-02, 2.12888140e-02}, + 
{5.23632653e-02, 2.85952985e-02, 6.59683123e-02} + }, + { + {3.69469412e-02, 6.48222342e-02, 8.20244551e-02}, + {2.48646215e-02, 1.18428171e-02, 7.46405274e-02}, + {4.48484421e-02, 8.07216838e-02, 5.27194552e-02}, + {8.23094398e-02, 4.52220477e-02, 4.35951874e-02}, + {1.12152621e-02, 2.94571985e-02, 2.17125192e-03}, + {1.32828895e-02, 6.17488436e-02, 2.51750532e-03}, + {3.03547252e-02, 7.14139268e-02, 5.73630854e-02} + }, + { + {5.72193563e-02, 1.56216780e-02, 3.65956500e-02}, + {4.81624752e-02, 8.19648281e-02, 1.68861933e-02}, + {2.05156356e-02, 2.17114780e-02, 6.21244237e-02}, + {3.78437378e-02, 4.71452763e-03, 4.21120226e-02}, + {1.75531674e-02, 6.61352351e-02, 2.46230606e-02}, + {2.28615105e-03, 4.91442308e-02, 6.98814020e-02}, + {3.15532871e-02, 6.20984100e-02, 4.23294269e-02} + }, + { + {4.47981246e-02, 7.94541389e-02, 6.65788352e-02}, + {2.67678709e-03, 5.87468557e-02, 3.85084115e-02}, + {7.84698650e-02, 1.83376241e-02, 2.21171752e-02}, + {6.74714567e-03, 3.54954340e-02, 9.02822800e-03}, + {5.24861142e-02, 6.64962158e-02, 5.77045009e-02}, + {6.34526685e-02, 2.83598304e-02, 7.00479448e-02}, + {3.55078541e-02, 6.82391599e-02, 5.18823527e-02} + } + }; + + @Test + public void testL2Normalize() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand input = tf.constant(array); + Operand result = fops.math.l2Normalize(tf.constant(array), new int[]{ 0,1,2}); + session.evaluate(tf.constant(expectedArray), result); + } + } + + @Test + public void testConfusionMatrix() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + long[] labels = new long[] {2, 0, 2, 2, 0, 1}; + long[] predictions = new long[] {0, 0, 2, 2, 0, 2}; + Operand result = + fops.math.confusionMatrix(tf.constant(labels), tf.constant(predictions)); + long[][] expected = + new long[][] { + {2, 0, 0}, + {0, 0, 1}, + {1, 0, 2} + }; + session.evaluate(tf.constant(expected), result); + } + } +} From e0a4a26d3a90100311c45a686ef19e0f1ebdbf19 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Fri, 26 Mar 2021 17:54:15 -0400 Subject: [PATCH 07/60] Moved high level tf.nn ops to framework. Moved tf.raw.nn Ops to tf.nn. Changed generation to generate SoftmaxCrossEntropyWithLogits and SparseSoftmaxCrossEntropyWithLogits to core NNOps (tf.nn). 
--- ...pi_def_SoftmaxCrossEntropyWithLogits.pbtxt | 2 +- ..._SparseSoftmaxCrossEntropyWithLogits.pbtxt | 2 +- .../annotations/org/tensorflow/op/NnOps.java | 175 +++--------------- .../org/tensorflow/op/NnRawOps.java | 83 --------- .../SoftmaxCrossEntropyWithLogits.java | 59 +++--- .../SparseSoftmaxCrossEntropyWithLogits.java | 64 +++---- .../op/nn/SigmoidCrossEntropyWithLogits.java | 14 +- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 44 +++-- .../SparseSoftmaxCrossEntropyWithLogits.java | 47 +++-- 9 files changed, 150 insertions(+), 340 deletions(-) delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/{raw => }/SoftmaxCrossEntropyWithLogits.java (82%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/{raw => }/SparseSoftmaxCrossEntropyWithLogits.java (79%) rename {tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow => tensorflow-framework/src/main/java/org/tensorflow/framework}/op/nn/SigmoidCrossEntropyWithLogits.java (91%) rename {tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow => tensorflow-framework/src/main/java/org/tensorflow/framework}/op/nn/SoftmaxCrossEntropyWithLogits.java (87%) rename {tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow => tensorflow-framework/src/main/java/org/tensorflow/framework}/op/nn/SparseSoftmaxCrossEntropyWithLogits.java (83%) diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt index 5dba2164cd6..e064562c0f2 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "SoftmaxCrossEntropyWithLogits" endpoint { - name: "nn.raw.SoftmaxCrossEntropyWithLogits" + name: "nn.SoftmaxCrossEntropyWithLogits" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt index cf80ff77565..7627d5f6074 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "SparseSoftmaxCrossEntropyWithLogits" endpoint { - name: "nn.raw.SparseSoftmaxCrossEntropyWithLogits" + name: "nn.SparseSoftmaxCrossEntropyWithLogits" } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 8958b4fe2ff..1cf8b910297 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -83,7 +83,6 @@ import org.tensorflow.op.nn.Relu; import org.tensorflow.op.nn.Relu6; import org.tensorflow.op.nn.Selu; -import org.tensorflow.op.nn.SigmoidCrossEntropyWithLogits; import org.tensorflow.op.nn.Softmax; import org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits; import org.tensorflow.op.nn.Softsign; @@ -103,8 +102,6 @@ * @see {@link Ops} */ 
public final class NnOps { - public final NnRawOps raw; - private final Scope scope; private final Ops ops; @@ -112,7 +109,6 @@ public final class NnOps { NnOps(Ops ops) { this.scope = ops.scope(); this.ops = ops; - raw = new NnRawOps(ops); } /** @@ -1797,56 +1793,6 @@ public Selu selu(Operand features) { return Selu.create(scope, features); } - /** - * Computes sigmoid cross entropy given logits. - * - *

Measures the probability error in discrete classification tasks in which each class is - * independent and not mutually exclusive. For instance, one could perform multilabel - * classification where a picture can contain both an elephant and a dog at the same time. - * - *

For brevity, let x = logits, z = labels. The logistic loss in - * pseudo-code is - * - *

-   *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
-   *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
-   *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
-   *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
-   *   = (1 - z) * x + log(1 + exp(-x))
-   *   = x - x * z + log(1 + exp(-x))
-   *  
- * - *

For x < 0, to avoid overflow in exp(-x), we reformulate the above - * - *

-   *  x - x * z + log(1 + exp(-x))
-   *   = log(exp(x)) - x * z + log(1 + exp(-x))
-   *   = - x * z + log(1 + exp(x))
-   *  
- * - *

Hence, to ensure stability and avoid overflow, the implementation uses this equivalent - * formulation - * - *

-   *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
-   *  
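A minimal standalone sketch of this stable formulation, written against the public TF Java Ops API; the class, method, and variable names here are illustrative assumptions, not the library's implementation:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TBool;
import org.tensorflow.types.TFloat32;

final class StableSigmoidCrossEntropySketch {
  // Elementwise: max(x, 0) - x * z + log(1 + exp(-abs(x))), with x = logits and z = labels.
  static Operand<TFloat32> loss(Ops tf, Operand<TFloat32> labels, Operand<TFloat32> logits) {
    Operand<TFloat32> zeros = tf.zerosLike(logits);
    Operand<TBool> nonNegative = tf.math.greaterEqual(logits, zeros);
    Operand<TFloat32> reluLogits = tf.select(nonNegative, logits, zeros);                 // max(x, 0)
    Operand<TFloat32> negAbsLogits = tf.select(nonNegative, tf.math.neg(logits), logits); // -abs(x)
    return tf.math.add(
        tf.math.sub(reluLogits, tf.math.mul(logits, labels)),   // max(x, 0) - x * z
        tf.math.log1p(tf.math.exp(negAbsLogits)));               // log(1 + exp(-abs(x)))
  }
}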
- * - *

logits and labels must have the same type and shape. - * - *

- * - * @param scope The TensorFlow scope - * @param labels the labels - * @param logits the logits of type float32 or float64 - * @param the type of labels and logits - * @return the component-wise logistic losses. - * @throws IllegalArgumentException if logits' and labels' do not have the same shape - */ - public Operand sigmoidCrossEntropyWithLogits(Operand labels, - Operand logits) { - return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); - } - /** * Computes softmax activations. * For each batch {@code i} and class {@code j} we have @@ -1864,54 +1810,20 @@ public Softmax softmax(Operand logits) { } /** - * Computes softmax cross entropy between logits and labels. - * - *

Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - *

NOTE: - * - *

While the classes are mutually exclusive, their probabilities need not be. All that is - * required is that each row of labels is a valid probability distribution. If they - * are not, the computation of the gradient will be incorrect. - * - *

If using exclusive labels (wherein one and only one class is true at a time), - * see {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} - * - *

Usage: - * - *

-   *    Operand<TFloat32> logits =
-   *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
-   *    Operand<TFloat32> labels =
-   *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
-   *    Operand<TFloat32> output =
-   *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
-   *    // output Shape = [2]
-   *    // dataType = FLOAT (1)
-   *    // values { 0.169846, 0.824745 }
-   *  
- * - *
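As a quick check of the values quoted above: for the first row, softmax([4, 2, 1]) is roughly [0.84380, 0.11420, 0.04201], and with labels [1, 0, 0] the loss is -log(0.84380), about 0.1698; for the second row, softmax([0, 5, 1]) is roughly [0.00657, 0.97556, 0.01787], and with labels [0, 0.8, 0.2] the loss is -(0.8 * log(0.97556) + 0.2 * log(0.01787)), about 0.8247, matching { 0.169846, 0.824745 }.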

Backpropagation will happen into both logits and labels. To - * disallow backpropagation into labels, pass label tensors through - * tf.stopGradient before feeding it to this function. + * Computes softmax cross entropy cost and gradients to backpropagate. + *

+ * Inputs are the logits, not probabilities. * - * @param scope current scope - * @param labels Each vector along the class dimension should hold a valid probability - * distribution e.g. for the case in which labels are of shape [batch_size, num_classes] - * , each row of labels[i] must be a valid probability distribution. - * @param logits Per-label activations, typically a linear output. These activation energies are - * interpreted as unnormalized log probabilities. - * @param axis The class dimension. -1 is the last dimension. - * @param the number type of the operands - * @return the softmax cross entropy loss. Its type is the same as logits and its - * shape is the same as labels except that it does not have the last dimension of - * labels. + * @param data type for {@code loss()} output + * @param features batch_size x num_classes matrix + * @param labels batch_size x num_classes matrix + * The caller must ensure that each batch of labels represents a valid + * probability distribution. + * @return a new instance of SoftmaxCrossEntropyWithLogits */ - public Operand softmaxCrossEntropyWithLogits( - Operand labels, Operand logits, int axis) { - return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); + public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyWithLogits( + Operand features, Operand labels) { + return SoftmaxCrossEntropyWithLogits.create(scope, features, labels); } /** @@ -2098,51 +2010,24 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo } /** - * Computes sparse softmax cross entropy between logits and labels. - * - *

Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - *

NOTE: - * - *

For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the labels vector must provide a single specific - * index for the true class for each row of logits (each minibatch entry). For soft - * softmax classification with a probability distribution for each entry, {@link - * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. - * - *
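To make the label format concrete, the sketch below (values and names are assumptions for illustration only) builds the same targets in both the sparse form this op expects and the equivalent one-hot form used by softmaxCrossEntropyWithLogits:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

final class SparseLabelFormatSketch {
  static void example(Ops tf) {
    Operand<TFloat32> logits =
        tf.constant(new float[][] {{4.0f, 2.0f, 1.0f}, {0.0f, 5.0f, 1.0f}});
    // Sparse form: one class index per row of logits.
    Operand<TInt32> sparseLabels = tf.constant(new int[] {0, 1});
    // Dense form: the equivalent one-hot rows {1, 0, 0} and {0, 1, 0}.
    Operand<TFloat32> oneHotLabels =
        tf.oneHot(sparseLabels, tf.constant(3), tf.constant(1.0f), tf.constant(0.0f));
  }
}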

WARNING: - * - *

This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of softmax, - * as it will produce incorrect results. - * - *

A common use case is to have logits of shape [batchSize, numClasses] and have - * labels of shape [batchSize], but higher dimensions are supported, in which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, TFloat32 - * , or TFloat64, and labels must have the dtype of TInt32 - * or TInt64. - * - * @param scope current scope - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log - * probabilities. - * @return A Tensor of the same shape as labels and of the same type as - * logits with the softmax cross entropy loss. - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank - * of the labels is not equal to the rank of the logits minus one. - */ - public Operand sparseSoftmaxCrossEntropyWithLogits( - Operand labels, Operand logits) { - return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits(scope, labels, logits); + * Computes softmax cross entropy cost and gradients to backpropagate. + *

+ * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * a matrix of label probabilities, but rather a single label per row + * of features. This label is considered to have probability 1.0 for the + * given row. + *

+ * Inputs are the logits, not probabilities. + * + * @param data type for {@code loss()} output + * @param features batch_size x num_classes matrix + * @param labels batch_size vector with values in [0, num_classes). + * This is the label for the given minibatch entry. + * @return a new instance of SparseSoftmaxCrossEntropyWithLogits + */ + public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( + Operand features, Operand labels) { + return SparseSoftmaxCrossEntropyWithLogits.create(scope, features, labels); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java deleted file mode 100644 index c287459c460..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2020 The TensorFlow Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// ============================================================================== -// -// This class has been generated, DO NOT EDIT! -// -package org.tensorflow.op; - -import org.tensorflow.Operand; -import org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits; -import org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits; -import org.tensorflow.types.family.TNumber; - -/** - * An API for building {@code nn.raw} operations as {@link Op Op}s - * - * @see {@link Ops} - */ -public final class NnRawOps { - private final Scope scope; - - private final Ops ops; - - NnRawOps(Ops ops) { - this.scope = ops.scope(); - this.ops = ops; - } - - /** - * Computes softmax cross entropy cost and gradients to backpropagate. - * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output - * @param features batch_size x num_classes matrix - * @param labels batch_size x num_classes matrix - * The caller must ensure that each batch of labels represents a valid - * probability distribution. - * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands - * @return a new instance of SoftmaxCrossEntropyWithLogits - */ - public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyWithLogits( - Operand features, Operand labels) { - return SoftmaxCrossEntropyWithLogits.create(scope, features, labels); - } - - /** - * Computes softmax cross entropy cost and gradients to backpropagate. - * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept - * a matrix of label probabilities, but rather a single label per row - * of features. This label is considered to have probability 1.0 for the - * given row. - *

Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output - * @param features batch_size x num_classes matrix - * @param labels batch_size vector with values in [0, num_classes). - * This is the label for the given minibatch entry. - * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands - * @return a new instance of SparseSoftmaxCrossEntropyWithLogits - */ - public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( - Operand features, Operand labels) { - return SparseSoftmaxCrossEntropyWithLogits.create(scope, features, labels); - } - - /** - * Get the parent {@link Ops} object. - */ - public final Ops ops() { - return ops; - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java similarity index 82% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java index 331933979c7..5d3ab3c1100 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.nn.raw; +package org.tensorflow.op.nn; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -29,68 +29,57 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. + *

* Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output + * + * @param data type for {@code loss()} output */ -@Operator( - group = "nn.raw" -) +@Operator(group = "nn") public final class SoftmaxCrossEntropyWithLogits extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; - - private Output loss; - - private Output backprop; - - private SoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx++); - } - + /** * Factory method to create a class wrapping a new SoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. - * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits */ - @Endpoint( - describeByClass = true - ) - public static SoftmaxCrossEntropyWithLogits create(Scope scope, - Operand features, Operand labels) { + @Endpoint(describeByClass = true) + public static SoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { OperationBuilder opBuilder = scope.env().opBuilder("SoftmaxCrossEntropyWithLogits", scope.makeOpName("SoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** - * Gets loss. * Per example loss (batch_size vector). - * @return loss. */ public Output loss() { return loss; } - + /** - * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). - * @return backprop. */ public Output backprop() { return backprop; } + + /** The name of this op, as known by TensorFlow core engine */ + public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; + + private Output loss; + private Output backprop; + + private SoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java similarity index 79% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 8c48cd0db4d..794beab4ded 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.nn.raw; +package org.tensorflow.op.nn; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -29,71 +29,61 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. 
- * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept + *

+ * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - *

Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output + *

+ * Inputs are the logits, not probabilities. + * + * @param data type for {@code loss()} output */ -@Operator( - group = "nn.raw" -) +@Operator(group = "nn") public final class SparseSoftmaxCrossEntropyWithLogits extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; - - private Output loss; - - private Output backprop; - - private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx++); - } - + /** * Factory method to create a class wrapping a new SparseSoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. - * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ - @Endpoint( - describeByClass = true - ) - public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, - Operand features, Operand labels) { + @Endpoint(describeByClass = true) + public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { OperationBuilder opBuilder = scope.env().opBuilder("SparseSoftmaxCrossEntropyWithLogits", scope.makeOpName("SparseSoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SparseSoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** - * Gets loss. * Per example loss (batch_size vector). - * @return loss. */ public Output loss() { return loss; } - + /** - * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). - * @return backprop. 
*/ public Output backprop() { return backprop; } + + /** The name of this op, as known by TensorFlow core engine */ + public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; + + private Output loss; + private Output backprop; + + private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java similarity index 91% rename from tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SigmoidCrossEntropyWithLogits.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index 92c413f7e52..b55385839d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -1,4 +1,4 @@ -package org.tensorflow.op.nn; +package org.tensorflow.framework.op.nn; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; @@ -8,11 +8,17 @@ import org.tensorflow.op.core.Select; import org.tensorflow.op.core.ZerosLike; import org.tensorflow.op.dtypes.Cast; -import org.tensorflow.op.math.*; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.Exp; +import org.tensorflow.op.math.GreaterEqual; +import org.tensorflow.op.math.Log1p; +import org.tensorflow.op.math.Mul; +import org.tensorflow.op.math.Neg; +import org.tensorflow.op.math.Sub; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -@Operator(group = "nn") +//@Operator(group = "nn") public class SigmoidCrossEntropyWithLogits { /** @@ -60,7 +66,7 @@ public class SigmoidCrossEntropyWithLogits { * @return the component-wise logistic losses. 
* @throws IllegalArgumentException if logits' and labels' do not have the same shape */ - @Endpoint(name = "sigmoidCrossEntropyWithLogits") + //@Endpoint(name = "sigmoidCrossEntropyWithLogits") public static Operand sigmoidCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits) { if (!isCompatible(labels.shape(), logits.shape())) { diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java similarity index 87% rename from tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java index ddeacbea4d4..0f5b8197f1e 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -1,11 +1,15 @@ -package org.tensorflow.op.nn; +package org.tensorflow.framework.op.nn; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; -import org.tensorflow.op.core.*; +import org.tensorflow.op.core.Concat; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Range; +import org.tensorflow.op.core.Rank; +import org.tensorflow.op.core.Reshape; +import org.tensorflow.op.core.Slice; import org.tensorflow.op.dtypes.Cast; import org.tensorflow.op.linalg.Transpose; import org.tensorflow.op.math.Sub; @@ -14,12 +18,11 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; -import org.tensorflow.types.family.TType; import java.util.Arrays; import java.util.List; -@Operator(group = "nn") +// @Operator(group = "nn") public class SoftmaxCrossEntropyWithLogits { /** @@ -68,6 +71,7 @@ public class SoftmaxCrossEntropyWithLogits { * shape is the same as labels except that it does not have the last dimension of * labels. */ + @SuppressWarnings("unchecked") @Endpoint(name = "softmaxCrossEntropyWithLogits") public static Operand softmaxCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits, int axis) { @@ -78,7 +82,9 @@ public static Operand softmaxCrossEntr } if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { - Operand result = softmaxCrossEntropyWithLogits(scope, + Operand result = + softmaxCrossEntropyWithLogits( + scope, Cast.create(scope, labels, TFloat32.class), Cast.create(scope, logits, TFloat32.class), axis); @@ -86,10 +92,8 @@ public static Operand softmaxCrossEntr } if (logits.asOutput().type() != labels.asOutput().type()) { - return softmaxCrossEntropyWithLogits(scope, - Cast.create(scope, labels, logits.asOutput().type()), - logits, - axis); + return softmaxCrossEntropyWithLogits( + scope, Cast.create(scope, labels, logits.asOutput().type()), logits, axis); } Operand inputRank = Cast.create(scope, Rank.create(scope, logits), TInt64.class); @@ -101,13 +105,20 @@ public static Operand softmaxCrossEntr labels = moveDimToEnd(scope, labels, axis, inputRank); } + Operand tLabels; + if (labels.type() != logits.type()) { + tLabels = Cast.create(scope, labels, logits.type()); + } else { + // Unchecked warning checked in if statement. 
+ tLabels = (Operand) labels; + } + Shape inputShape = logits.shape(); logits = flattenOuterDims(scope, logits); - labels = flattenOuterDims(scope, labels); + tLabels = flattenOuterDims(scope, tLabels); - org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits smax = - org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits.create( - scope, logits, (Operand)labels); + org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits smax = + org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits.create(scope, logits, tLabels); /* cannot use generic on cost, because cost may be recast later. */ Operand cost = smax.loss(); Operand outputShape = @@ -119,6 +130,9 @@ public static Operand softmaxCrossEntr cost = Reshape.create(scope, cost, outputShape); if (scope.env().isGraph() && !shape.hasUnknownDimension()) { long[] array = shape.asArray(); + if (array == null) { + array = new long[0]; + } long[] newArray = new long[array.length - 1]; if (axis < 0) { axis = shape.numDimensions() + axis; @@ -153,7 +167,7 @@ private static Operand flattenOuterDims(Scope scope, Oper boolean productValid = true; for (int i = ndims - 2; i >= 0; i--) { long d = shape.size(i); - if (d == org.tensorflow.ndarray.Shape.UNKNOWN_SIZE) { + if (d == Shape.UNKNOWN_SIZE) { productValid = false; break; } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java similarity index 83% rename from tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 54b32bb5c63..64faa7c5d70 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -1,11 +1,10 @@ -package org.tensorflow.op.nn; +package org.tensorflow.framework.op.nn; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; import org.tensorflow.op.core.AssertThat; import org.tensorflow.op.core.Constant; import org.tensorflow.op.core.Reshape; @@ -22,7 +21,7 @@ import java.util.Collections; import java.util.List; -@Operator(group = "nn") +// @Operator(group = "nn") public class SparseSoftmaxCrossEntropyWithLogits { /** @@ -63,19 +62,24 @@ public class SparseSoftmaxCrossEntropyWithLogits { * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, * or TFloat64. These activation energies are interpreted as unnormalized log * probabilities. + * @param the data type for the labels + * @param the data tyoe for the loss and logits. * @return A Tensor of the same shape as labels and of the same type as * logits with the softmax cross entropy loss. * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank * of the labels is not equal to the rank of the logits minus one. 
*/ + @SuppressWarnings("unchecked") @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits") - public static Operand sparseSoftmaxCrossEntropyWithLogits( - Scope scope, Operand labels, Operand logits) { + public static + Operand sparseSoftmaxCrossEntropyWithLogits( + Scope scope, Operand labels, Operand logits) { scope = scope.withSubScope("SparseSoftmaxCrossEntropyWithLogits"); - /** cannot use generics on preciseLogits as it may be recast later */ - Operand preciseLogits = logits; + Operand preciseLogits; if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { preciseLogits = Cast.create(scope, logits, TFloat32.class); + } else { + preciseLogits = logits; } Shape labelsStaticShape = labels.shape(); org.tensorflow.op.core.Shape labelsShape = @@ -108,14 +112,16 @@ public static Operand sparseSoftmaxCrossE } // Check if no reshapes are required. if (logitsShape.numDimensions() == 2) { - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits smax = - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits.create( + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( scope, preciseLogits, labels); - Operand loss = smax.loss(); - if (logits.asOutput().type() == TFloat16.class) { - loss = Cast.create(scope, loss, TFloat16.class); + Operand cost = smax.loss(); + if (cost.type() != logits.type()) { + return Cast.create(scope, cost, logits.type()); + } else { + // Unchecked cast already checked with previous if + return (Operand) cost; } - return loss; } List shapeChecks = new ArrayList<>(); @@ -145,14 +151,17 @@ public static Operand sparseSoftmaxCrossE preciseLogits = Reshape.create(scope, preciseLogits, Constant.arrayOf(scope, -1L, numClassses)); labels = Reshape.create(scope, labels, Constant.scalarOf(scope, -1)); scope.withControlDependencies(shapeChecks); - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits smax = - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits.create( + // call raw op + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( scope, preciseLogits, labels); - Operand cost = smax.loss(); + Operand cost = smax.loss(); cost = Reshape.create(scope, cost, labelsShape); - if (logits.asOutput().type() == TFloat16.class) { - cost = Cast.create(scope, cost, TFloat16.class); + if (cost.type() != logits.type()) { + return Cast.create(scope, cost, logits.type()); + } else { + // Unchecked cast already checked with previous if + return (Operand) cost; } - return cost; } } From 28db4df34beab9f73557145b2858aac8feb36fc0 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Fri, 26 Mar 2021 18:02:55 -0400 Subject: [PATCH 08/60] Added FrameworkOps analogous to Ops. Added NnOps and SetOps as groups. Fixed MetricsHelper and Losses to use the bew FrameworkOps. Moved SetsOps to framework.op. 
--- .../tensorflow/framework/losses/Losses.java | 17 +- .../framework/metrics/impl/MetricsHelper.java | 685 ++---------------- .../framework/metrics/impl/SetsOps.java | 147 ---- .../tensorflow/framework/op/FrameworkOps.java | 136 ++++ .../org/tensorflow/framework/op/NnOps.java | 197 +++++ .../org/tensorflow/framework/op/SetsOps.java | 161 ++++ .../SparseSoftmaxCrossEntropyWithLogits.java | 3 +- .../{SetsOpsTest.java => SetOpsTest.java} | 18 +- 8 files changed, 559 insertions(+), 805 deletions(-) delete mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java rename tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/{SetsOpsTest.java => SetOpsTest.java} (86%) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index 9aa94cf7fcf..aa5fa4ada6d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -19,6 +19,7 @@ import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.op.core.ReduceAll; import org.tensorflow.op.core.ReduceMax; import org.tensorflow.op.core.ReduceSum; @@ -181,7 +182,8 @@ public static Operand binaryCrossentropy( */ private static Operand binaryCrossentropyHelper( Ops tf, Operand target, Operand output, boolean fromLogits) { - if (fromLogits) return tf.nn.sigmoidCrossEntropyWithLogits(target, output); + FrameworkOps fop = FrameworkOps.create(tf); + if (fromLogits) { return fop.nn.sigmoidCrossEntropyWithLogits(target, output);} /* TODO - skip this logic for now. 
It requires walking back the inputs which is not yet possible if (!(output instanceof Variable) && (!tf.scope().env().isEager())) { @@ -191,7 +193,7 @@ private static Operand binaryCrossentropyHelper( // TODO if (output.op().numInputess() != 1) // TODO throw new IllegalArgumentException("output can only have 1 output"); // TODO output = output.op().inout(0); - // TODO return tf.nn.sigmoidCrossEntropyWithLogits(target, output); + // TODO return fop.nn.sigmoidCrossEntropyWithLogits(target, output); // TODO} } */ @@ -235,6 +237,7 @@ public static Operand categoricalCrossentropy( boolean fromLogits, float labelSmoothing, int axis) { + FrameworkOps fop = FrameworkOps.create(tf); Class predictionType = predictions.type(); Operand tLabels = cast(tf, labels, predictionType); LossTuple ops = LossesHelper.squeezeOrExpandDimensions(tf, tLabels, predictions, null); @@ -245,7 +248,7 @@ public static Operand categoricalCrossentropy( tLabels = smoothCategoricalLabels(tf, tLabels, labelSmoothing); } if (fromLogits) { - return tf.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, axis); + return fop.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, axis); } /* TODO if (!(predictions instanceof Variable) && (!tf.scope().env().isEager())) { @@ -255,7 +258,7 @@ public static Operand categoricalCrossentropy( if (predictions.op().numOutputs() != 1) throw new IllegalArgumentException("output can only have 1 output"); predictions = predictions.op().output(0); - return tf.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, -1); + return fop.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, -1); } } */ @@ -516,6 +519,7 @@ public static Operand sparseCategoricalCrossentropy( boolean fromLogits, int axis) { Class predictionType = predictions.type(); + FrameworkOps fop = FrameworkOps.create(tf); Operand epsilonConst = cast(tf, tf.constant(EPSILON), predictionType); Operand one = cast(tf, tf.constant(1), predictionType); Operand oneMinusEpsilonConst = tf.math.sub(one, epsilonConst); @@ -568,9 +572,8 @@ public static Operand sparseCategoricalCrossentropy( tf.constant( new long[] {-1L, predictionsShape.size(predictionsShape.numDimensions() - 1)})); } - - @SuppressWarnings("unchecked") - Operand loss = tf.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); + + Operand loss = fop.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); if (updateShape && predictionsRank >= 3) { Shape newShape = predictionsShape.take(predictionsShape.numDimensions() - 1); loss = tf.reshape(loss, tf.constant(newShape)); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java index 40336233d21..a82e1760d1f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java @@ -15,36 +15,21 @@ package org.tensorflow.framework.metrics.impl; import org.tensorflow.Operand; -import org.tensorflow.framework.losses.impl.LossTuple; -import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.framework.metrics.exceptions.NotBroadcastableException; -import org.tensorflow.framework.utils.SparseTensor; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.core.Assign; -import org.tensorflow.op.core.OneHot; -import 
org.tensorflow.op.core.Rank; -import org.tensorflow.op.core.Squeeze; -import org.tensorflow.op.core.Stack; -import org.tensorflow.op.core.Variable; import org.tensorflow.op.math.Mean; -import org.tensorflow.op.nn.TopK; import org.tensorflow.types.TBool; -import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; -import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TIntegral; import org.tensorflow.types.family.TNumber; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; import static org.tensorflow.framework.utils.CastHelper.cast; @@ -59,8 +44,8 @@ public class MetricsHelper { "weights can not be broadcast to values."; /** - * Asserts that the {@code sampleWeights} can be broadcast to the same shape as {@code values - * } + * Asserts that the sampleWeights can be broadcast to the same shape as values + * * *

In losses and metrics, limited weight broadcasting is supported. Weights must be either * scalar, or the same rank as the target values, with each dimension either 1, or the same as the @@ -69,11 +54,11 @@ public class MetricsHelper { * @param tf the TensorFlow Ops * @param sampleWeights the sample weights. * @param values the values to which weights are applied. - * @return {@code Operation} with control dependencies to ensure {@code sampleWeight} - * can be broadcast to {@code values} + * @return Operation with control dependencies to ensure sampleWeight + * can be broadcast to values * @param the type of Operand - * @throws NotBroadcastableException If static checks determine {@code sampleWeights} has an - * incorrect shape that prohibit broadcasting to {@code values} + * @throws NotBroadcastableException If static checks determine sampleWeights has an + * incorrect shape that prohibit broadcasting to values */ @SuppressWarnings("unchecked") public static Op assertBroadcastable( @@ -94,7 +79,7 @@ public static Op assertBroadcastable( && !valuesShapeStatic.hasUnknownDimension()) { if (weightsRankStatic == 0) { return tf.withSubScope("staticScalarCheckSuccess") - .withControlDependencies(java.util.Collections.EMPTY_LIST) + .withControlDependencies(Collections.EMPTY_LIST) .noOp(); } if (weightsRankStatic != valuesRankStatic) { @@ -104,8 +89,8 @@ public static Op assertBroadcastable( ASSERT_BROADCAST_ERROR_PREFIX, valuesRankStatic, weightsRankStatic, - valuesShapeStatic, - weightsShapeStatic)); + valuesShapeStatic.toString(), + weightsShapeStatic.toString())); } for (int i = 0; i < valuesRankStatic; i++) { @@ -116,8 +101,8 @@ public static Op assertBroadcastable( "%s Mismatch at dim %d. values.shape=%s weights.shape=%s.", ASSERT_BROADCAST_ERROR_PREFIX, i, - valuesShapeStatic, - weightsShapeStatic)); + valuesShapeStatic.toString(), + weightsShapeStatic.toString())); } } return tf.withSubScope("staticDimsCheckSuccess") @@ -190,24 +175,25 @@ private static Operand canBroadcastNonscalarShapes( private static Operand canBroadcastDims( Ops tf, Operand weightsShape, Operand valuesShape) { tf = tf.withSubScope("canBroadcastDims"); + FrameworkOps fops = FrameworkOps.create(tf); Operand valuesShape2d = tf.expandDims(valuesShape, tf.constant(-1)); Operand validDims = tf.concat(Arrays.asList(valuesShape2d, tf.onesLike(valuesShape2d)), tf.constant(1)); Operand weightsShape2D = tf.expandDims(weightsShape, tf.constant(-1)); - Operand diffResult = SetsOps.difference(tf, weightsShape2D, validDims); + Operand diffResult = fops.sets.difference(weightsShape2D, validDims); Operand numInvalidDims = tf.size(diffResult); return tf.math.equal(tf.constant(0), numInvalidDims); } /** - * Broadcast {@code weights} to the same shape as {@code values}. + * Broadcast weights to the same shape as values. * * @param tf the TensorFlow ops - * @param weights Operand whose shape is broadcastable to {@code values}. + * @param weights Operand whose shape is broadcastable to values. * @param values Operand of any shape * @param the type of Operands - * @return {@code weights} broadcast to {@code values} shape + * @return weights broadcast to values shape */ public static Operand broadcastWeights( Ops tf, Operand weights, Operand values) { @@ -228,473 +214,11 @@ public static Operand broadcastWeights( return ctf.math.mul(weights, tf.onesLike(values)); } - /** - * Checks that all the Symbolic Shapes are consistent. 
- * - * @param tf the TensorFlow Ops - * @param symbols the list of Symbolic Shapes - * @param message the error message if the shapes are not consistent. - * @return a list of Operands to check the consistency of the symbolic shapes ready to add to a - * control dependency. - */ - public static List assertShapes( - Ops tf, List> symbols, String message) { - List updateOperations = new ArrayList<>(); - // check that the symbolic shape rank matches the operands rank. - symbols.forEach( - symbol -> { - Operand operand = symbol.getOperand(); - int rank = symbol.rank(); - Rank tfRank = tf.rank(operand); - Op assertion = - tf.withSubScope("assertShapes-1") - .assertThat( - tf.math.equal(tfRank, tf.constant(rank)), - Collections.singletonList(tf.constant(message))); - updateOperations.add(assertion); - }); - - Map> dict = new HashMap<>(); - - // check that each operand's dimension size equals the corresponding symbolic shape's dimensions - // size - symbols.forEach( - symbol -> { - AtomicLong ll = new AtomicLong(); - symbol - .getSymbols() - .forEach( - s -> { - Operand size = dict.get(s); - if (size == null) { - // save size for later checks - size = - tf.shape.size(symbol.getOperand(), tf.constant(ll.get()), TInt64.class); - dict.put(s, size); - } - Op assertion = - tf.withSubScope("assertShapes-2") - .assertThat( - tf.math.equal( - tf.shape.size( - symbol.getOperand(), - tf.constant(ll.getAndIncrement()), - TInt64.class), - size), - Collections.singletonList(tf.constant(message))); - updateOperations.add(assertion); - }); - }); - - return updateOperations; - } + // aliases for mean /** - * Returns an op to update the given confusion matrix variables. - * - *

For every pair of values in {@code labels} and {@code predictions}: - * - *

-   * TRUE_POSITIVES:  {@code labels} == true and {@code predictions} > thresholds
-   * FALSE_POSITIVES: {@code labels} == true and {@code predictions} <= thresholds
-   * TRUE_NEGATIVES:  {@code labels} == false and {@code predictions} <= thresholds
-   * FALSE_NEGATIVE:  {@code labels} == false and {@code predictions} > thresholds
-   * 
- * - *

The results will be weighted and added together. When multiple thresholds are provided, we - * will repeat the same for every threshold. - * - *

For estimation of these metrics over a stream of data, the function creates an `update_op` - * operation that updates the given variables. - * - *

{@code labels}, {@code predictions}, and {@code sampleWeight} tensors are - * aligned by {@link LossesHelper#removeSqueezableDimensions(Ops, Operand, Operand)}. {@code - * sampleWeight} is then broadcast to the shape of {@code predictions}. - * - * @param tf the TensorFlow Ops - * @param variablesToUpdate map with {@link ConfusionMatrixEnum} values as valid keys and - * corresponding variables to update as values. If {@code multiLabel}, then the variable - * shapes are (T, D), where T is the number of thresholds and D is the number of classes - * (after slicing by {@code classIndex}, if provided). If {@code multiLabels}, then - * the variable shapes are (T). - * @param varInitializers map with {@link ConfusionMatrixEnum} values as valid keys and - * corresponding initializer Operands to for {@code variablesToUpdate}. - * @param labels the labels. Will be cast to {@link TBool}. Shape (N, Cx, L1?), where N is the - * number of examples, Cx is zero or more class dimensions, and L1 is a potential extra - * dimension of size 1 that would be squeezed. - * @param predictions the predictions shape (N, Cx, P1?) - * @param thresholds thresholds in the range {@code [0, 1]}, or {@link #NEG_INF} is used when - * topK is set - * @param topK optional, indicates that only the top k predictions should be considered. Applied - * before possibly slicing by {@code classIndex}. - * @param classIndex optional, limits the prediction and labels to the specified class. This is an - * integer index into the first dimension of Cx. - * @param sampleWeight optional {@code Tensor} that is aligned with labels and predictions as - * explained above. Use weights of 0 to mask values. - * @param multiLabel indicates whether multidimensional prediction/labels should be treated as - * multilabel responses, or flattened into a single label. When true, the values of {@code - * variablesToUpdate} must have a second dimension equal to the number of labels and - * predictions per example, and those tensors must not be RaggedTensors. - * @param labelWeights tensor of non-negative weights for multilabel data. The weights are applied - * when calculating TRUE_POSITIVES, FALSE_POSITIVES, TRUE_NEGATIVES, and FALSE_NEGATIVES - * without explicit multilabel handling (i.e. when the data is to be flattened). Must have - * shape (Dx), which is the same as (Cx) referenced above, except that if {@code classIndex - * } is provided, then the final dimension of Dx is 1. These weights will be broadcast - * across the 0th dimension (the examples dimension) of {@code predictions}. May be null. - * Must be null if {@code multiLabel}. - * @param the data type for the variables - * @throws IllegalArgumentException If {@code predictions} and {@code labels} have - * mismatched shapes, or if {@code sampleWeight} is not null and its shape - * doesn't match {@code predictions}, or if {@code multiLabel && labelWeights != null}.. - * @return an op to update the given confusion matrix variables. 
- */ - @SuppressWarnings({"unchecked", "rawtypes"}) - public static List updateConfusionMatrixVariables( - Ops tf, - Map> variablesToUpdate, - Map> varInitializers, - Operand labels, - Operand predictions, - Operand thresholds, - Integer topK, - Integer classIndex, - Operand sampleWeight, - boolean multiLabel, - Operand labelWeights) { - if (multiLabel && labelWeights != null) - throw new IllegalArgumentException( - "labelWeights for multilabel data should be handled outside of updateConfusionMatrixVariables when multiLabel is true."); - - if (variablesToUpdate == null || variablesToUpdate.isEmpty()) { - return Collections.EMPTY_LIST; - } - - Operand tLabels = labels; - Operand tPredictions = predictions; - Operand tSampleWeight = sampleWeight; - - // We will tile data for threshold comparisons. We want a cross product of thresholds and - // predictions/labels: - // In the multilabel case, we want a data shape of (T, N, D). - // else (T, ND). - // where - // T is numThresholds (the size of the 0th dimension of thresholds) - // N is the number of examples (the 0th dimension of labels and predictions) - // Dx == Cx except that if classIndex != null, - // then the last dimension of Dx is size 1 - // D is the product of all Dx - // ND is N * D - - // size of the 0th dimension of thresholds - // reshape to scalar for operations later. - Operand numThresholds = - tf.reshape(tf.shape.size(thresholds, tf.constant(0)), tf.constant(Shape.scalar())); - - // if multilabel, then (rank(thresholds) == 1) - // else true - Operand oneThresh; - if (multiLabel) { - oneThresh = tf.math.equal(tf.constant(1), tf.rank(thresholds)); - } else { - // TODO handle Ragged Tensors???? - // [y_pred, - // y_true], _ = ragged_assert_compatible_and_get_flat_values([y_pred, y_true], - // sampleWeights) - oneThresh = tf.constant(true); - } - - List controlOps = new ArrayList<>(); - Operand axes = allAxes(tf, tPredictions); - controlOps.add( - tf.withSubScope("updateConfusionMatrixVariables-1") - .assertThat( - tf.reduceAll( - tf.math.greaterEqual( - tPredictions, cast(tf, tf.constant(0), tPredictions.type())), - axes), - Collections.singletonList(tf.constant("predictions must be >= 0")))); - controlOps.add( - tf.withSubScope("updateConfusionMatrixVariables-2") - .assertThat( - tf.reduceAll( - tf.math.lessEqual(tPredictions, cast(tf, tf.constant(1), tPredictions.type())), - axes), - Collections.singletonList(tf.constant("predictions must be <= 1")))); - - LossTuple result = - LossesHelper.squeezeOrExpandDimensions(tf, tLabels, tPredictions, tSampleWeight); - tPredictions = result.getTarget(); // shape (N, Cx) - tLabels = result.getLabels(); // shape (N, Cx) - tSampleWeight = result.getSampleWeights(); // broadcastable to (N, Dx) - - if (!tPredictions.shape().isCompatibleWith(tLabels.shape())) - throw new IllegalArgumentException( - String.format( - "Shapes %s and %s are incompatible)", - tPredictions.shape().toString(), tLabels.shape().toString())); - - if (topK != null) { - tPredictions = filterTopK(tf, tPredictions, topK); - } - - if (classIndex != null) { - // Slice to new shapes (N, Dx) - tLabels = tf.squeeze(tf.gather(tLabels, - tf.constant(new int[] {classIndex}), tf.constant(-1)), - Squeeze.axis(Collections.singletonList(1L))); - tPredictions = tf.squeeze(tf.gather(tPredictions, - tf.constant(new int[] {classIndex}), tf.constant(-1)), - Squeeze.axis(Collections.singletonList(1L))); - } - org.tensorflow.op.core.Shape predShape = tf.shape(tPredictions); - - Operand numExamples = - tf.reshape(tf.shape.size(tPredictions, 
tf.constant(0)), tf.constant(Shape.scalar())); - - // number of labels (and predictions) per example (after possibly slicing by classIndex) - // In the notation we are using for comments, this is D. - Operand numLabels = - tf.select( - tf.math.equal(tf.shape.numDimensions(predShape), tf.constant(1)), - tf.constant(1), - tf.reduceProd( - // take all but the first dimension - tf.shape.takeLast( - predShape, tf.math.sub(tf.shape.numDimensions(predShape), tf.constant(1))), - tf.constant(0))); - - // threshLabelTile == numLabels except in one case: - // if multilabel and rank(thresholds) != 1, then threshLabelTile is 1 - Operand threshLabelTile = tf.select(oneThresh, numLabels, tf.constant(1)); - - // if multilabel, then shape (1, N, Dx) - // else shape (1, ND), - Operand predictionsExtraDim; - Operand labelsExtraDim; - - if (multiLabel) { - predictionsExtraDim = tf.expandDims(tPredictions, tf.constant(0)); - labelsExtraDim = tf.expandDims(cast(tf, tLabels, TBool.class), tf.constant(0)); - } else { - predictionsExtraDim = tf.reshape(tPredictions, tf.constant(Shape.of(1, -1))); - labelsExtraDim = tf.reshape(cast(tf, tLabels, TBool.class), tf.constant(Shape.of(1, -1))); - } - - // the shape of each thresholds tile - // if multilabel, then [T, 1, -1] - // else [T, -1] - List> threshPretileShape; - - // the tiling multiples for thresholds - // We want to repeat the thresholds for each data position. - // if multilabel, then [1, N, threshLabelTile]. (threshLabelTile is typically numLabels) - // else [1, ND] - List> threshTiles; - - // tiling multiples for predictionsExtraDim and labelsExtraDim - // We want to repeat the predictions and labels for each threshold. - // If multilabel, then [T, 1, 1] - // else [T, 1] - List> dataTiles; - - if (multiLabel) { - threshPretileShape = Arrays.asList(numThresholds, tf.constant(1), tf.constant(-1)); - threshTiles = Arrays.asList(tf.constant(1), numExamples, threshLabelTile); - dataTiles = Arrays.asList(numThresholds, tf.constant(1), tf.constant(1)); - } else { - threshPretileShape = - Arrays.asList(tf.reshape(numThresholds, tf.constant(Shape.scalar())), tf.constant(-1)); - Operand mul = tf.math.mul(numExamples, numLabels); - threshTiles = Arrays.asList(tf.constant(1), mul); - dataTiles = Arrays.asList(numThresholds, tf.constant(1)); - } - - // if multilabel, then shape (T, 1, T*) - // else shape (T, T*) - // where T* is the product of all threshold dimension sizes beyond 0 - Operand thresholdsReshaped = - tf.reshape(cast(tf, thresholds, predictions.type()), tf.stack(threshPretileShape)); - - Operand threshTilesShape = tf.stack(threshTiles); - - // if multilabel, then - // if thresholds has rank > 1, then shape (T, N, T*) - // else shape (T, N, D) - // else shape (T, ND) - Operand threshTiled = tf.tile(thresholdsReshaped, threshTilesShape); - - Operand dataTilesShape = tf.stack(dataTiles); - - // if multilabel, then shape (T, N, D) - // else (T, ND) - Operand predsTiled = tf.tile(predictionsExtraDim, dataTilesShape); - - // Compare predictions and threshold. 
- Operand predIsPos = tf.math.greater(predsTiled, threshTiled); - // Tile labels by number of thresholds - Operand labelIsPos = tf.tile(labelsExtraDim, tf.stack(dataTiles)); - Operand weightsTiled; - if (tSampleWeight != null) { - tSampleWeight = tf.broadcastTo(tSampleWeight, tf.shape(tPredictions)); - // if multilabel, then - // reshape tSampleWeight to (1, N, threshLabelTile) - // tile the result into shape (T, N, threshLabelTile) - // where threshLabelTile is typically D - // else - // reshape tSampleWeight to (1, ND) - // tile the result into shape (T, ND) - weightsTiled = tf.tile(tf.reshape(tSampleWeight, threshTilesShape), dataTilesShape); - } else { - weightsTiled = null; - } - - if (labelWeights != null) { - // Change shape to (1, Dx). - Operand lLabelWeights = tf.expandDims(tf.identity(labelWeights), tf.constant(0)); - - // Broadcast to shape (N, Dx). - lLabelWeights = tf.broadcastTo(lLabelWeights, tPredictions); - - // If multilabel: shape (T, N, D) - // else: shape (T, ND) - Operand labelWeightsTiled = - tf.tile(tf.reshape(lLabelWeights, tf.stack(threshTiles)), tf.stack(dataTiles)); - - if (weightsTiled == null) { - weightsTiled = labelWeightsTiled; - } else { - weightsTiled = tf.math.mul(weightsTiled, labelWeightsTiled); - } - } - - Map loopVars = new HashMap<>(); - loopVars.put(ConfusionMatrixEnum.TRUE_POSITIVES, new Operand[] {labelIsPos, predIsPos}); - Variable updateTN = variablesToUpdate.get(ConfusionMatrixEnum.TRUE_NEGATIVES); - Variable updateFP = variablesToUpdate.get(ConfusionMatrixEnum.FALSE_POSITIVES); - Variable updateFN = variablesToUpdate.get(ConfusionMatrixEnum.FALSE_NEGATIVES); - - Operand predIsNeg = null; - Operand labelIsNeg; - if (updateFN != null || updateTN != null) { - predIsNeg = tf.math.logicalNot(predIsPos); - loopVars.put(ConfusionMatrixEnum.FALSE_NEGATIVES, new Operand[] {labelIsPos, predIsNeg}); - } - - if (updateFP != null || updateTN != null) { - labelIsNeg = tf.math.logicalNot(labelIsPos); - loopVars.put(ConfusionMatrixEnum.FALSE_POSITIVES, new Operand[] {labelIsNeg, predIsPos}); - if (updateTN != null) { - loopVars.put(ConfusionMatrixEnum.TRUE_NEGATIVES, new Operand[] {labelIsNeg, predIsNeg}); - } - } - - final Operand weightsTiledF = weightsTiled; - loopVars - .keySet() - .forEach( - (c) -> { - if (variablesToUpdate.containsKey(c)) { - Operand[] op = loopVars.get(c); - // op[0] = label, op[1] == prediction - controlOps.add( - weightedAssignAdd( - tf, - op[0], - op[1], - weightsTiledF, - variablesToUpdate.get(c), - varInitializers.get(c))); - } - }); - - return controlOps; - } - - /** - * Creates an Operand that adds the values by taking the logical and of labels and predictions to - * the specified confusion matrix variable. - * - * @param tf The TensorFlow Ops - * @param labels the labels - * @param predictions the predictions - * @param weights the weights applied to the logical and result, may be null - * @param variable the variable to update - * @param initializer the variable initializer to be applied to the variable, may be null. - * @param the data type for the variable. - * @return an Operand that updates the variable. 
- */ - private static Operand weightedAssignAdd( - Ops tf, - Operand labels, - Operand predictions, - Operand weights, - Variable variable, - Assign initializer) { - Class type = variable.type(); - Operand labelAndPred = cast(tf, tf.math.logicalAnd(labels, predictions), type); - - if (weights != null) { - labelAndPred = tf.math.mul(labelAndPred, weights); - } - // if multilabel: - // sum across examples, leaving shape (T, D) - // else: - // sum across ND, leaving shape (T) - Operand valueSum = tf.reduceSum(labelAndPred, tf.constant(1)); - Operand assignAdd; - if (initializer != null) { - Ops tfc = - tf.withSubScope("weightedAssignAdd") - .withControlDependencies(Collections.singletonList(initializer)); - assignAdd = tfc.assignAdd(variable, valueSum); - } else { - assignAdd = tf.assignAdd(variable, valueSum); - } - return assignAdd; - } - - /** - * Filters top-k values in the last dim of x and set the rest to NEG_INF. - * - *

Used for computing top-k prediction values in dense labels (which has the same shape as - * predictions) for recall and precision top-k metrics. - * - * @param tf The TensorFlow Ops - * @param x the tensor with any dimensions to filter - * @param topK the number of values to keep. - * @param the data type for x and the return value. - * @return the topK prediction values. - */ - private static Operand filterTopK(Ops tf, Operand x, int topK) { - Class type = x.type(); - Shape xShape = x.shape(); - // top has the same rank as x; the last dimension becomes indices of the topK features. - TopK top = tf.nn.topK(x, tf.constant(topK), TopK.sorted(false)); - // oneHot has an additional dimension: the one-hot representation of each topK index. - OneHot oneHot = - tf.oneHot( - top.indices(), - cast(tf, tf.constant(xShape.size(xShape.numDimensions() - 1)), TInt32.class), - tf.constant(1), - tf.constant(0), - OneHot.axis(-1L)); - // Sum the one-hot representations along the last dimension of x. - Operand topKMask = cast(tf, tf.reduceSum(oneHot, tf.constant(-2)), type); - - // x * top_k_mask + NEG_INF * (1 - top_k_mask) - Operand add1 = tf.math.mul(x, topKMask); - Operand add2 = - tf.math.mul( - cast(tf, tf.constant(NEG_INF), type), - tf.math.sub(cast(tf, tf.constant(1), type), topKMask)); - return tf.math.add(add1, add2); - } - - // alias for mean - - /** - * Calculate the mean of the operand, along all axes and {@code keepDims} is {@code false - * } + * Calculate the mean of the operand, along all axes and keepDims is false + * * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -706,8 +230,8 @@ public static Operand mean(Ops tf, Operand x) { } /** - * Calculate the mean of the operand, alongside the specified axis with {@code keepDims} is - * {@code false} + * Calculate the mean of the operand, alongside the specified axis with keepDims is + * false * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -725,12 +249,12 @@ public static Operand mean( * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean - * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is - * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes - * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained + * @param keepDims Indicates whether to keep the dimensions or not. If keepdims is + * false, the rank of the tensor is reduced by 1 for each entry in axes + * . If keepdims is true, the reduced dimensions are retained * with length 1. * @param the type of the operand - * @return the mean of elements of {@code x}. + * @return the mean of elements of x. */ public static Operand mean(Ops tf, Operand x, boolean keepDims) { return mean(tf, x, null, keepDims); @@ -742,12 +266,12 @@ public static Operand mean(Ops tf, Operand x, boolean * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean * @param axes Axes to compute the mean. - * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is - * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes - * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained + * @param keepDims Indicates whether to keep the dimensions or not. If keepdims is + * false, the rank of the tensor is reduced by 1 for each entry in axes + * . If keepdims is true, the reduced dimensions are retained * with length 1. 
* @param the data type of the Operand - * @return the mean of elements of {@code x}. + * @return the mean of elements of x. */ public static Operand mean( Ops tf, Operand x, Operand axes, boolean keepDims) { @@ -757,134 +281,9 @@ public static Operand mean( return tf.math.mean(x, axes, Mean.keepDims(keepDims)); } - public static - LossTuple raggedAssertCompatibleAndGetFlatValues( - Ops tf, Operand labels, Operand predictions) { - // TODO handle ragged Tensors - Operand tLabels = cast(tf, labels, predictions.type()); - return new LossTuple<>(tLabels, predictions); - } - - /** - * Computes the confusion matrix from predictions and labels. - * - *

The matrix columns represent the prediction labels and the rows represent the real labels. - * The confusion matrix is always a 2-D array of shape {@code [n, n]}, where {@code n} is the - * number of valid labels for a given classification task. Both prediction and labels must be 1-D - * arrays of the same shape in order for this function to work. - * - *

If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum - * value in either predictions or labels. Class labels are expected to start at 0. For example, if - * {@code numClasses}` is 3, then the possible labels would be {@code [0, 1, 2]}. - * - *

If {@code weights} is not null, then each prediction contributes its corresponding weight to - * the total value of the confusion matrix cell. - * - *

For example: - * - *

{@code
-   *     confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
-   *          [[0 0 0 0 0]
-   *           [0 0 1 0 0]
-   *           [0 0 1 0 0]
-   *           [0 0 0 0 0]
-   *           [0 0 0 0 1]]
-   * }
- * - * Note that the possible labels are assumed to be {@code [0, 1, 2, 3,4]}, resulting in a 5x5 - * confusion matrix. - * - * @param tf the TensorFlow Ops - * @param labels 1-D {@code Operand} of real labels for the classification task. - * @param predictions 1-D {@code Operand} of predictions for a given classification. - * @param numClasses The possible number of labels the classification task can have. If this value - * is not provided, it will be calculated using both predictions and labels array. - * @param weights optional weights to be applied to the confusion matrix - * @param type Data type of the confusion matrix. - * @param the type of Operands - * @return A {@code Operand} of type {@code type} with shape {@code [n, n]} - * representing the confusion matrix, where {@code n} is the number of possible labels in - * the classification task. - * @throws IllegalArgumentException If both {@code predictions} and {@code labels} do - * not have compatible shapes, or if {@code weights} is not{@code null} and its - * shape is not compatible with {@code predictions}. - */ - // TODO should this be moved to FramnworkOps under math. - public static Operand confusionMatrix( - Ops tf, - Operand labels, - Operand predictions, - Operand numClasses, - Operand weights, - Class type) { - if (!predictions.shape().isCompatibleWith(labels.shape())) - throw new IllegalArgumentException( - String.format( - "Prediction shape %s is not compatible with labels shape %s", - predictions.shape().toString(), labels.shape().toString())); - tf = tf.withSubScope("confusionMatrix"); - LossTuple ops = LossesHelper.squeezeOrExpandDimensions(tf, predictions, labels, null); - Operand tPredictions = cast(tf, ops.getTarget(), TInt64.class); - Operand tLabels = cast(tf, ops.getLabels(), TInt64.class); - - List labelControls = new ArrayList<>(); - List predictionControls = new ArrayList<>(); - - labelControls.add( - tf.assertThat( - tf.reduceAny(tf.math.greaterEqual(tLabels, tf.constant(0L)), allAxes(tf, tLabels)), - Collections.singletonList(tf.constant("`labels` contains negative values")))); - - predictionControls.add( - tf.assertThat( - tf.reduceAny( - tf.math.greaterEqual(tPredictions, tf.constant(0L)), allAxes(tf, tPredictions)), - Collections.singletonList(tf.constant("`predictions` contains negative values")))); - if (numClasses == null) { - numClasses = - tf.math.maximum( - tf.reduceMax(tPredictions, allAxes(tf, tPredictions)), - tf.reduceMax(tLabels, allAxes(tf, tLabels))); - } else { - labelControls.add( - tf.assertThat( - tf.reduceAny(tf.math.less(tLabels, numClasses), allAxes(tf, tLabels)), - Collections.singletonList(tf.constant("``labels` out of bounds")))); - predictionControls.add( - tf.assertThat( - tf.reduceAny(tf.math.less(tPredictions, numClasses), allAxes(tf, tPredictions)), - Collections.singletonList(tf.constant("``predictions` out of bounds")))); - } - - if (weights != null) { - if (!tPredictions.shape().isCompatibleWith(weights.shape())) { - throw new IllegalArgumentException( - String.format( - "Prediction shape %s is not compatible with weights shape %s", - tPredictions.shape().toString(), weights.shape().toString())); - } - } - - Ops tfc = tf.withSubScope("confusionMatrixLabels").withControlDependencies(labelControls); - tLabels = tfc.identity(tLabels); - - tfc = tf.withSubScope("confusionMatrixPredictions").withControlDependencies(predictionControls); - tPredictions = tfc.identity(tPredictions); - - Operand shape = tf.stack(Arrays.asList(numClasses, numClasses)); - Operand indices = 
tf.stack(Arrays.asList(tLabels, tPredictions), Stack.axis(1L)); - Operand values = - weights == null ? cast(tf, tf.onesLike(tPredictions), type) : cast(tf, weights, type); - SparseTensor cmSparse = new SparseTensor<>(indices, values, shape); - Operand zeroMatrix = tf.zeros(shape, type); - - return tf.sparse.sparseTensorDenseAdd( - cmSparse.getIndices(), cmSparse.getValues(), cmSparse.getDenseShape(), zeroMatrix); - } - /** - * Calculate the mean of the operand, along all axes and {@code keepDims} is {@code false - * } + * Calculate the mean of the operand, along all axes and keepDims is false + * * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -895,8 +294,8 @@ public static Operand booleanMean(Ops tf, Operand x) { } /** - * Calculate the mean of the operand, alongside the specified axis with {@code keepDims} is - * {@code false} + * Calculate the mean of the operand, alongside the specified axis with keepDims is + * false * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -913,11 +312,11 @@ public static Operand booleanMean( * * @param tf the TensorFlow Ops * @param x the boolean Operand used to calculate the mean - * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is - * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes - * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained + * @param keepDims Indicates whether to keep the dimensions or not. If keepdims is + * false, the rank of the tensor is reduced by 1 for each entry in axes + * . If keepdims is true, the reduced dimensions are retained * with length 1. - * @return the mean of elements of {@code x} containing floating point numbers + * @return the mean of elements of x containing floating point numbers */ public static Operand booleanMean(Ops tf, Operand x, boolean keepDims) { return booleanMean(tf, x, null, keepDims); @@ -929,11 +328,11 @@ public static Operand booleanMean(Ops tf, Operand x, boolean ke * @param tf the TensorFlow Ops * @param x the boolean Operand used to calculate the mean * @param axes Axes to compute the mean. - * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is - * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes - * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained + * @param keepDims Indicates whether to keep the dimensions or not. If keepdims is + * false, the rank of the tensor is reduced by 1 for each entry in axes + * . If keepdims is true, the reduced dimensions are retained * with length 1. - * @return the mean of elements of {@code x} containing floating point numbers + * @return the mean of elements of x containing floating point numbers */ public static Operand booleanMean( Ops tf, Operand x, Operand axes, boolean keepDims) { diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java deleted file mode 100644 index 68157632557..00000000000 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java +++ /dev/null @@ -1,147 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ -package org.tensorflow.framework.metrics.impl; - -import org.tensorflow.Operand; -import org.tensorflow.op.Ops; -import org.tensorflow.op.SparseOps; -import org.tensorflow.op.sparse.DenseToDenseSetOperation; -import org.tensorflow.types.family.TNumber; - -import static org.tensorflow.framework.utils.CastHelper.cast; - -/** Implementation of set operations */ -public class SetsOps { - - /** - * Computes set difference of elements in last dimension of {@code a} and {@code b} with - * {@code aMinusB} set to true. - * - *

All but the last dimension of {@code a} and {@code b} must match - * - * @param tf the TensorFlow Ops - * @param a The first operand representing set {@code a} - * @param b The other operand representing set {@code b} - * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the - * last dimension the * same. Elements along the last dimension contain the results of the set - * operation. - */ - public static Operand difference(Ops tf, Operand a, Operand b) { - return difference(tf, a, b, true); - } - - /** - * Computes set difference of elements in last dimension of {@code a} and {@code b}. - * - *

All but the last dimension of {@code a} and {@code b} must match - * - * @param tf the TensorFlow Ops - * @param a The first operand representing set {@code a} - * @param b The other operand representing set {@code b} - * @param aMinusB whether to subtract b from a, vs vice versa. - * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the - * last dimension the * same. Elements along the last dimension contain the results of the set - * operation. - */ - public static Operand difference( - Ops tf, Operand a, Operand b, boolean aMinusB) { - return setOperation(tf, a, b, aMinusB ? Operation.A_MINUS_B : Operation.B_MINUS_A); - } - - /** - * Computes set union of elements in last dimension of {@code a} and {@code b}. - * - * @param tf the TensorFlow Ops - * @param a The first operand representing set {@code a} - * @param b The other operand representing set {@code b} - * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the - * last dimension the * same. Elements along the last dimension contain the results of the set - * operation. - */ - public static Operand union(Ops tf, Operand a, Operand b) { - return setOperation(tf, a, b, Operation.UNION); - } - - /** - * Computes set intersection of elements in last dimension of {@code a} and {@code b}. - * - * @param tf the TensorFlow Ops - * @param a The first operand representing set {@code a} - * @param b The other operand representing set {@code b} - * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the - * last dimension the * same. Elements along the last dimension contain the results of the set - * operation. - */ - public static Operand intersection(Ops tf, Operand a, Operand b) { - return setOperation(tf, a, b, Operation.INTERSECTION); - } - - /** - * Compute set operation of elements in last dimension of {@code a} and {@code b}. - * - * @param tf the TensorFlow Ops - * @param a The first set operation operand - * @param b The other et operation operand - * @param setOperation The set operation to perform, {@link Operation}. - * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the - * last dimension the same. Elements along the last dimension contain the results of the set - * operation. 
- */ - public static Operand setOperation( - Ops tf, Operand a, Operand b, Operation setOperation) { - - DenseToDenseSetOperation setOperationResult = - tf.sparse.denseToDenseSetOperation( - a, b, setOperation.getSetOperation(), DenseToDenseSetOperation.validateIndices(true)); - - return tf.sparse.sparseToDense( - setOperationResult.resultIndices(), - setOperationResult.resultShape(), - setOperationResult.resultValues(), - cast(tf, tf.constant(0), a.type())); - } - - /** - * Enumeration containing the string operation values to be passed to the TensorFlow Sparse Ops - * function {@link SparseOps#denseToDenseSetOperation} - */ - public enum Operation { - A_MINUS_B("a-b"), - B_MINUS_A("b-a"), - INTERSECTION("intersection"), - UNION("union"); - - private final String setOperation; - - Operation(String setOperation) { - this.setOperation = setOperation; - } - - /** - * Gets the set operation String value used to pass as the stringOperation value to {@link - * SparseOps#denseToDenseSetOperation} - * - * @return the set operation String value - */ - public String getSetOperation() { - return setOperation; - } - } -} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java new file mode 100644 index 00000000000..cecbecfed15 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -0,0 +1,136 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.DeviceSpec; +import org.tensorflow.EagerSession; +import org.tensorflow.ExecutionEnvironment; +import org.tensorflow.op.Op; +import org.tensorflow.op.Ops; +import org.tensorflow.op.Scope; + +/** + * An API for building framework operations as {@link Op Op}s + * + *

These are higher level ops that may invoke core ops. Higher level Ops may perform the + * operation solely in the TensorFlow framework or do preprocessing of the Operands before invoking + * a core level Op. + */ +public class FrameworkOps { + public final Ops coreOps; + private final Scope scope; + + public final NnOps nn; + public final SetsOps sets; + + /** + * Creates a FrameworkOps instance with the provided scope + * + * @param scope the scope + */ + private FrameworkOps(Scope scope) { + this.coreOps = Ops.create(scope.env()); + this.scope = scope; + nn = new NnOps(this); + sets = new SetsOps(this); + } + + /** + * Creates a FrameworkOps instance based on the provided Core Ops + * + * @param coreOps The TensorFlow Core Ops + */ + private FrameworkOps(Ops coreOps) { + this.coreOps = coreOps; + this.scope = coreOps.scope(); + nn = new NnOps(this); + sets = new SetsOps(this); + } + + + /** Returns the current {@link Scope scope} of this API */ + public final Scope scope() { + return scope; + } + + /** + * Gets the core Ops + * + * @return coreOps + */ + public final Ops coreOps() { + return coreOps; + } + + /** + * Returns an API that builds operations with the provided name prefix. + * + *
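Before the scope-manipulating factories that follow, a usage sketch of how callers are expected to reach the grouped high-level ops (illustrative; Ops.create() stands in for whatever core Ops instance the caller already has):

    Ops tf = Ops.create();                        // core ops, eager environment in this sketch
    FrameworkOps fops = FrameworkOps.create(tf);  // framework ops sharing tf's scope
    // grouped access, mirroring tf.nn / tf.math on the core API:
    //   fops.nn.softmaxCrossEntropyWithLogits(labels, logits, -1)
    //   fops.sets.difference(a, b)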

{@link Scope#withSubScope(String)}
+   */
+  public FrameworkOps withSubScope(String childScopeName) {
+    return new FrameworkOps(scope.withSubScope(childScopeName));
+  }
+
+  /**
+   * Returns an API that uses the provided name for an op.
+   *
+   *

{@link Scope#withName(String)} + */ + public FrameworkOps withName(String opName) { + return new FrameworkOps(scope.withName(opName)); + } + + /** + * Returns an API that places the created operations on the device(s) matching the provided spec. + * + *

{@link Scope#withDevice(DeviceSpec)} + */ + public FrameworkOps withDevice(DeviceSpec deviceSpec) { + return new FrameworkOps(scope.withDevice(deviceSpec)); + } + + /** + * Returns an API that adds operations to the graph with the provided control dependencies. + * + *

{@link Scope#withControlDependencies(Iterable)} + */ + public FrameworkOps withControlDependencies(Iterable controls) { + return new FrameworkOps(scope.withControlDependencies(controls)); + } + + /** Creates an API for building operations in the provided execution environment */ + public static FrameworkOps create(ExecutionEnvironment env) { + return new FrameworkOps(new Scope(env)); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + *

Invoking this method is equivalent to {@code + * FrameworkOps.create(EagerSession.getDefault())}. + */ + public static FrameworkOps create() { + return new FrameworkOps(new Scope(EagerSession.getDefault())); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + * @param coreOps the TensorFlow core Ops + */ + public static FrameworkOps create(Ops coreOps) { + return new FrameworkOps(coreOps); + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java new file mode 100644 index 00000000000..4054f3ddbb5 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java @@ -0,0 +1,197 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.Operand; +import org.tensorflow.framework.op.nn.SigmoidCrossEntropyWithLogits; +import org.tensorflow.framework.op.nn.SoftmaxCrossEntropyWithLogits; +import org.tensorflow.framework.op.nn.SparseSoftmaxCrossEntropyWithLogits; +import org.tensorflow.op.Op; +import org.tensorflow.op.Scope; +import org.tensorflow.types.family.TNumber; + +/** + * An API for building {@code nn} operations as {@link Op Op}s + * + *

These are higher level ops that may invoke core ops. Higher level Ops may perform the + * operation solely in the TensorFlow framework or do preprocessing of the Operands before invoking + * a core level Op. + * + *

{@link FrameworkOps} + */ +public class NnOps { + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * @param frameworkOps the TensorFLow framework Ops + */ + NnOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + + /** + * Computes sigmoid cross entropy given logits. + * + *

Measures the probability error in discrete classification tasks in which each class is + * independent and not mutually exclusive. For instance, one could perform multilabel + * classification where a picture can contain both an elephant and a dog at the same time. + * + *

For brevity, let x = logits, z = labels. The logistic loss in + * pseudo-code is + * + *

+     *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
+     *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
+     *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
+     *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
+     *   = (1 - z) * x + log(1 + exp(-x))
+     *   = x - x * z + log(1 + exp(-x))
+     *  
+ * + *

For x < 0, to avoid overflow in exp(-x), we reformulate the above + * + *

+     *  x - x * z + log(1 + exp(-x))
+     *   = log(exp(x)) - x * z + log(1 + exp(-x))
+     *   = - x * z + log(1 + exp(x))
+     *  
+ * + *

Hence, to ensure stability and avoid overflow, the implementation uses this equivalent + * formulation + * + *

+     *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
+     *  
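A quick plain-Java sanity check of why the max/abs form is preferred (values illustrative; this mirrors the formulas above rather than calling TensorFlow):

    double x = -800.0, z = 1.0;                              // a very negative logit with label 1
    double naive  = x - x * z + Math.log(1 + Math.exp(-x));  // exp(800) overflows, so this is Infinity
    double stable = Math.max(x, 0) - x * z + Math.log(1 + Math.exp(-Math.abs(x)));
    // stable == 800.0, which matches -log(sigmoid(-800)) = log(1 + exp(800)) to double precision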
+ * + *

logits and labels must have the same type and shape. + * + *

+ * + * @param labels the labels + * @param logits the logits of type float32 or float64 + * @param the type of labels and logits + * @return the component-wise logistic losses. + * @throws IllegalArgumentException if logits' and labels' do not have the same shape + */ + public Operand sigmoidCrossEntropyWithLogits(Operand labels, + Operand logits) { + return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); + } + + /** + * Computes softmax cross entropy between logits and labels. + * + *

Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

NOTE: + * + *

While the classes are mutually exclusive, their probabilities need not be. All that is + * required is that each row of labels is a valid probability distribution. If they + * are not, the computation of the gradient will be incorrect. + * + *

If using exclusive labels (wherein one and only one class is true at a time), + * see {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} + * + *

Usage: + * + *

+     *    Operand<TFloat32> logits =
+     *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
+     *    Operand<TFloat32> labels =
+     *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
+     *    Operand<TFloat32> output =
+     *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
+     *    // output Shape = [2]
+     *    // dataType = FLOAT (1)
+     *    // values { 0.169846, 0.824745 }
+     *  
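For reference, the first value above follows from softmax([4, 2, 1]), about [0.844, 0.114, 0.042], and -log(0.844), about 0.1698. With the framework API this patch introduces, the same computation would presumably be reached through FrameworkOps rather than the core tf.nn group (sketch; labels and logits as in the snippet above):

    FrameworkOps fops = FrameworkOps.create(tf);
    Operand<TFloat32> output = fops.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
    // expected values, as above: { 0.169846, 0.824745 }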
+ * + *

Backpropagation will happen into both logits and labels. To + * disallow backpropagation into labels, pass label tensors through + * tf.stopGradient before feeding it to this function. + * + * @param labels Each vector along the class dimension should hold a valid probability + * distribution e.g. for the case in which labels are of shape [batch_size, num_classes] + * , each row of labels[i] must be a valid probability distribution. + * @param logits Per-label activations, typically a linear output. These activation energies are + * interpreted as unnormalized log probabilities. + * @param axis The class dimension. -1 is the last dimension. + * @param the number type of the operands + * @return the softmax cross entropy loss. Its type is the same as logits and its + * shape is the same as labels except that it does not have the last dimension of + * labels. + */ + public Operand softmaxCrossEntropyWithLogits( + Operand labels, Operand logits, int axis) { + return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); + } + + /** + * Computes sparse softmax cross entropy between logits and labels. + * + *

Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

NOTE: + * + *

For this operation, the probability of a given label is considered exclusive. That is, soft + * classes are not allowed, and the labels vector must provide a single specific + * index for the true class for each row of logits (each minibatch entry). For soft + * softmax classification with a probability distribution for each entry, {@link + * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. + * + *

WARNING: + * + *

This op expects unscaled logits, since it performs a softmax on logits + * internally for efficiency. Do not call this op with the output of softmax, + * as it will produce incorrect results. + * + *
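An illustrative call with integer class indices (a sketch; the names are assumptions, fops is a FrameworkOps built from a core Ops tf, and the logits are passed unscaled as the warning above requires):

    Operand<TInt32>   labels = tf.constant(new int[] {0, 2});                            // shape [2]
    Operand<TFloat32> logits = tf.constant(new float[][] {{4f, 2f, 1f}, {0f, 5f, 1f}});  // shape [2, 3]
    Operand<TFloat32> loss   = fops.nn.sparseSoftmaxCrossEntropyWithLogits(labels, logits);
    // loss has shape [2], roughly { 0.17, 4.02 }: -log(softmax(logits)[i][labels[i]]) per row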

A common use case is to have logits of shape [batchSize, numClasses] and have + * labels of shape [batchSize], but higher dimensions are supported, in which case + * the dim-th dimension is assumed to be of size numClasses. + * logits must have the dataType of TFloat16, TFloat32 + * , or TFloat64, and labels must have the dtype of TInt32 + * or TInt64. + * + * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r + * is rank of labels and result) and the dataType is TInt32 + * or TInt64. Each entry in labels must be an index in [0, + * numClasses). Other values will raise an exception when this op is run on CPU, and + * return NaN for corresponding loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., + * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, + * or TFloat64. These activation energies are interpreted as unnormalized log + * probabilities. + * @param The data type for the labels + * @param The data type for the logits and loss + * @return the loss + * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank + * of the labels is not equal to the rank of the logits minus one. + */ + + public Operand sparseSoftmaxCrossEntropyWithLogits( + Operand labels, Operand logits) { + return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits(scope, labels, logits); + } + + +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java new file mode 100644 index 00000000000..d7833cdbb06 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java @@ -0,0 +1,161 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.Scope; +import org.tensorflow.op.SparseOps; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.sparse.DenseToDenseSetOperation; +import org.tensorflow.op.sparse.SparseToDense; +import org.tensorflow.types.family.TNumber; + +/** Implementation of set operations */ +public class SetsOps { + + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + SetsOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + + /** + * Computes set difference of elements in last dimension of a and b with + * aMinusB set to true. + * + *

All but the last dimension of a and b must match + * + * @param a The first operand representing set a + * @param b The other operand representing set b + * @param the data type for the sets + * @return An Operand with the same rank as a and b, and all but the + * last dimension the * same. Elements along the last dimension contain the results of the set + * operation. + */ + public Operand difference(Operand a, Operand b) { + return difference(a, b, true); + } + + /** + * Computes set difference of elements in last dimension of a and b. + * + *

All but the last dimension of a and b must match + * + * @param a The first operand representing set a + * @param b The other operand representing set b + * @param aMinusB whether to subtract b from a, vs vice versa. + * @param the data type for the sets + * @return An Operand with the same rank as a and b, and all but the + * last dimension the * same. Elements along the last dimension contain the results of the set + * operation. + */ + public Operand difference(Operand a, Operand b, boolean aMinusB) { + return setOperation(a, b, aMinusB ? Operation.A_MINUS_B : Operation.B_MINUS_A); + } + + /** + * Computes set union of elements in last dimension of a and b. + * + * @param a The first operand representing set a + * @param b The other operand representing set b + * @param the data type for the sets + * @return An Operand with the same rank as a and b, and all but the + * last dimension the * same. Elements along the last dimension contain the results of the set + * operation. + */ + public Operand union(Operand a, Operand b) { + return setOperation(a, b, Operation.UNION); + } + + /** + * Computes set intersection of elements in last dimension of a and b. + * + * @param a The first operand representing set a + * @param b The other operand representing set b + * @param the data type for the sets + * @return An Operand with the same rank as a and b, and all but the + * last dimension the * same. Elements along the last dimension contain the results of the set + * operation. + */ + public Operand intersection(Operand a, Operand b) { + return setOperation(a, b, Operation.INTERSECTION); + } + + /** + * Compute set operation of elements in last dimension of a and b. + * + * @param a The first set operation operand + * @param b The other et operation operand + * @param setOperation The set operation to perform, {@link Operation}. + * @param the data type for the sets + * @return An Operand with the same rank as a and b, and all but the + * last dimension the same. Elements along the last dimension contain the results of the set + * operation. 
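Before the generic setOperation entry point below, a usage sketch drawn from the values exercised in SetOpsTest later in this patch (fops is assumed to be a FrameworkOps created from a core Ops tf); results come back dense and zero-padded to a common width, as the expected values in those tests show:

    Operand<TInt32> a = tf.constant(new int[][] {{1, 5, 9}, {4, 5, 3}});
    Operand<TInt32> b = tf.constant(new int[][] {{1, 2, 6}, {1, 2, 2}});
    Operand<TInt32> aMinusB = fops.sets.difference(a, b);         // {{5, 9, 0}, {3, 4, 5}}, shape (2, 3)
    Operand<TInt32> bMinusA = fops.sets.difference(a, b, false);  // {{2, 6}, {1, 2}},       shape (2, 2)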
+ */ + public Operand setOperation( + Operand a, Operand b, Operation setOperation) { + + DenseToDenseSetOperation setOperationResult = + DenseToDenseSetOperation.create( + scope, + a, + b, + setOperation.getSetOperation(), + DenseToDenseSetOperation.validateIndices(true)); + + return SparseToDense.create( + scope, + setOperationResult.resultIndices(), + setOperationResult.resultShape(), + setOperationResult.resultValues(), + Cast.create(scope, Constant.scalarOf(scope, 0), a.type())); + } + + /** + * Enumeration containing the string operation values to be passed to the TensorFlow Sparse Ops + * function {@link SparseOps#denseToDenseSetOperation} + */ + public enum Operation { + A_MINUS_B("a-b"), + B_MINUS_A("b-a"), + INTERSECTION("intersection"), + UNION("union"); + + private final String setOperation; + + Operation(String setOperation) { + this.setOperation = setOperation; + } + + /** + * Gets the set operation String value used to pass as the stringOperation value to {@link + * SparseOps#denseToDenseSetOperation} + * + * @return the set operation String value + */ + public String getSetOperation() { + return setOperation; + } + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 64faa7c5d70..75766cf9bfb 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -64,8 +64,7 @@ public class SparseSoftmaxCrossEntropyWithLogits { * probabilities. * @param the data type for the labels * @param the data tyoe for the loss and logits. - * @return A Tensor of the same shape as labels and of the same type as - * logits with the softmax cross entropy loss. + * @return the loss * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank * of the labels is not equal to the rank of the logits minus one. 
*/ diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetsOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java similarity index 86% rename from tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetsOpsTest.java rename to tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java index eceff2797f8..e10f016bd94 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetsOpsTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java @@ -2,6 +2,8 @@ import org.junit.jupiter.api.Test; import org.tensorflow.Operand; +import org.tensorflow.framework.op.FrameworkOps; +import org.tensorflow.framework.op.SetsOps; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; @@ -15,7 +17,7 @@ import static org.tensorflow.framework.utils.CastHelper.cast; -class SetsOpsTest { +class SetOpsTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -28,6 +30,7 @@ public void testSetIntersectionMultirow2() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}}); Operand b = tf.constant(new int[][] {{1, 9}, {1, 5}}); int[][] expected = new int[][] {{1, 9}, {0, 0}}; @@ -35,7 +38,7 @@ public void testSetIntersectionMultirow2() { for (Class type : types) { Operand aa = cast(tf, a, type); Operand bb = cast(tf, b, type); - Operand intersection = SetsOps.intersection(tf, aa, bb); + Operand intersection = fops.sets.intersection(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); } @@ -49,6 +52,7 @@ public void testSetIntersectionDuplicates2d() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{1, 1, 3}}); Operand b = tf.constant(new int[][] {{1, 1}}); int[][] expected = {{1}}; @@ -56,7 +60,7 @@ public void testSetIntersectionDuplicates2d() { for (Class type : types) { Operand aa = cast(tf, a, type); Operand bb = cast(tf, b, type); - Operand intersection = SetsOps.intersection(tf, aa, bb); + Operand intersection = fops.sets.intersection(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); @@ -72,6 +76,7 @@ public void testDenseSetDifferenceMultirow2d() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{1, 5, 9}, {4, 5, 3}}); Operand b = tf.constant(new int[][] {{1, 2, 6}, {1, 2, 2}}); @@ -81,14 +86,14 @@ public void testDenseSetDifferenceMultirow2d() { int[][] expected = {{5, 9, 0}, {3, 4, 5}}; // a- b Shape expectedShape = Shape.of(2, 3); - Operand intersection = SetsOps.difference(tf, aa, bb); + Operand intersection = fops.sets.difference(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); // b - a expected = new int[][] {{2, 6}, {1, 2}}; 
expectedShape = Shape.of(2, 2); - intersection = SetsOps.difference(tf, aa, bb, false); + intersection = fops.sets.difference(aa, bb, false); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); @@ -103,6 +108,7 @@ public void testDenseUnionMultirow2d() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}}); Operand b = tf.constant(new int[][] {{1, 9}, {1, 2}}); int[][] expected = new int[][] {{5, 0}, {3, 4}}; @@ -111,7 +117,7 @@ public void testDenseUnionMultirow2d() { Operand bb = cast(tf, b, type); Shape expectedShape = Shape.of(2, 2); // a- b - Operand intersection = SetsOps.difference(tf, aa, bb); + Operand intersection = fops.sets.difference(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); } From ba24371189a09094c2941540d65ce100c57caf5e Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 15:21:11 -0400 Subject: [PATCH 09/60] Added FrameworkOps analogous to Ops. Added NnOps and SetOps as groups. Fixed MetricsHelper and Losses to use the bew FrameworkOps. Moved SetsOps to framework.op. --- .../src/main/java/org/tensorflow/framework/losses/Losses.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index aa5fa4ada6d..33c8d50409d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -572,7 +572,7 @@ public static Operand sparseCategoricalCrossentropy( tf.constant( new long[] {-1L, predictionsShape.size(predictionsShape.numDimensions() - 1)})); } - + Operand loss = fop.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); if (updateShape && predictionsRank >= 3) { Shape newShape = predictionsShape.take(predictionsShape.numDimensions() - 1); From 4d3f17cf4cff04dee66f2e00756d911eaf12e2bd Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 15:36:41 -0400 Subject: [PATCH 10/60] Move l2Normalize to MathOps --- .../tensorflow/framework/losses/Losses.java | 23 ++----- .../tensorflow/framework/op/FrameworkOps.java | 3 + .../org/tensorflow/framework/op/MathOps.java | 67 +++++++++++++++++++ 3 files changed, 74 insertions(+), 19 deletions(-) create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index 33c8d50409d..398588cee67 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -337,13 +337,14 @@ public static Operand categoricalHinge( */ public static Operand cosineSimilarity( Ops tf, Operand labels, Operand predictions, int[] axis) { + FrameworkOps fops = FrameworkOps.create(tf); Operand tLabels = cast(tf, labels, predictions.type()); LossTuple lossTuple = LossesHelper.squeezeOrExpandDimensions(tf, tLabels, predictions, null); 
predictions = lossTuple.getTarget(); tLabels = lossTuple.getLabels(); - tLabels = l2Normalize(tf, tLabels, axis); - predictions = l2Normalize(tf, predictions, axis); + tLabels = fops.math.l2Normalize(tLabels, axis); + predictions = fops.math.l2Normalize(predictions, axis); Operand mathMul = tf.math.mul(tLabels, predictions); return tf.reduceSum(mathMul, tf.constant(axis), ReduceSum.keepDims(Boolean.FALSE)); } @@ -651,23 +652,7 @@ private static Operand smoothCategoricalLabels( return tf.math.add(tf.math.mul(labels, oneMinusSmoothing), tf.math.div(smoothing, numClasses)); } - // TODO this was tf.math.l2_normalize in TF Python - /** - * Normalizes along dimension axis using an L2 norm. - * - * @param tf The TensorFlow Ops - * @param x the input - * @param axis Dimension along which to normalize. - * @param the data type for the input and the result - * @return the normalized values based on L2 norm - */ - public static Operand l2Normalize(Ops tf, Operand x, int[] axis) { - Operand squareSum = - tf.reduceSum(tf.math.square(x), tf.constant(axis), ReduceSum.keepDims(Boolean.TRUE)); - Operand invNorm = - tf.math.rsqrt(tf.math.maximum(squareSum, cast(tf, tf.constant(1e-12F), x.type()))); - return tf.math.mul(x, invNorm); - } + /** * Converts binary labels into -1/1. diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index cecbecfed15..18fb8ada6b7 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -34,6 +34,7 @@ public class FrameworkOps { public final NnOps nn; public final SetsOps sets; + public final MathOps math; /** * Creates a FrameworkOps instance with the provided scope @@ -45,6 +46,7 @@ private FrameworkOps(Scope scope) { this.scope = scope; nn = new NnOps(this); sets = new SetsOps(this); + math = new MathOps(this); } /** @@ -57,6 +59,7 @@ private FrameworkOps(Ops coreOps) { this.scope = coreOps.scope(); nn = new NnOps(this); sets = new SetsOps(this); + math = new MathOps(this); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java new file mode 100644 index 00000000000..57a18fc63c2 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -0,0 +1,67 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.Ops; +import org.tensorflow.op.Scope; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Maximum; +import org.tensorflow.op.math.Mul; +import org.tensorflow.op.math.Rsqrt; +import org.tensorflow.op.math.Square; +import org.tensorflow.types.family.TNumber; + +import static org.tensorflow.framework.utils.CastHelper.cast; + +public class MathOps { + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + MathOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + + /** + * Normalizes along dimension axis using an L2 norm. + * + * @param x the input + * @param axis Dimension along which to normalize. + * @param the data type for the input and the result + * @return the normalized values based on L2 norm + */ + public Operand l2Normalize(Operand x, int[] axis) { + Operand squareSum = + ReduceSum.create(scope, + Square.create(scope, x), + Constant.vectorOf(scope, axis), + ReduceSum.keepDims(Boolean.TRUE)); + Operand invNorm = + Rsqrt.create(scope, + Maximum.create(scope, squareSum, + Cast.create(scope, + Constant.scalarOf(scope, 1e-12F), x.type()))); + return Mul.create(scope, x, invNorm); + } +} From 9e07483e233df90c0ec7be793b5c7f11700933bf Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 18:50:26 -0400 Subject: [PATCH 11/60] Reformat code, fix javadocs --- .../tensorflow/framework/op/FrameworkOps.java | 76 +++-- .../org/tensorflow/framework/op/MathOps.java | 68 ++-- .../org/tensorflow/framework/op/NnOps.java | 312 +++++++++--------- .../op/nn/SigmoidCrossEntropyWithLogits.java | 14 +- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 3 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 52 +-- 6 files changed, 271 insertions(+), 254 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index 18fb8ada6b7..c8b234f2c51 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -30,11 +30,10 @@ */ public class FrameworkOps { public final Ops coreOps; - private final Scope scope; - public final NnOps nn; public final SetsOps sets; public final MathOps math; + private final Scope scope; /** * Creates a FrameworkOps instance with the provided scope @@ -62,8 +61,43 @@ private FrameworkOps(Ops coreOps) { math = new MathOps(this); } + /** + * Creates an API for building operations in the provided execution environment + * + * @param env the exection environment + * @return the FrameworkOps + */ + public static FrameworkOps create(ExecutionEnvironment env) { + return new FrameworkOps(new Scope(env)); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + *

Invoking this method is equivalent to {@code + * FrameworkOps.create(EagerSession.getDefault())}. + * + * @return the FrameworkOps + */ + public static FrameworkOps create() { + return new FrameworkOps(new Scope(EagerSession.getDefault())); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + * @param coreOps the TensorFlow core Ops + * @return the FrameworkOps + */ + public static FrameworkOps create(Ops coreOps) { + return new FrameworkOps(coreOps); + } - /** Returns the current {@link Scope scope} of this API */ + /** + * Returns the current {@link Scope scope} of this API + * + * @return the current {@link Scope scope} of this API + */ public final Scope scope() { return scope; } @@ -81,6 +115,9 @@ public final Ops coreOps() { * Returns an API that builds operations with the provided name prefix. * *

@link Scope#withSubScope(String)} + * + * @param childScopeName the name of the child scope + * @return the FrameworkOps */ public FrameworkOps withSubScope(String childScopeName) { return new FrameworkOps(scope.withSubScope(childScopeName)); @@ -90,6 +127,9 @@ public FrameworkOps withSubScope(String childScopeName) { * Returns an API that uses the provided name for an op. * *

{@link Scope#withName(String)} + * + * @param opName the name of the scope + * @return the FrameworkOps */ public FrameworkOps withName(String opName) { return new FrameworkOps(scope.withName(opName)); @@ -99,6 +139,9 @@ public FrameworkOps withName(String opName) { * Returns an API that places the created operations on the device(s) matching the provided spec. * *

{@link Scope#withDevice(DeviceSpec)} + * + * @param deviceSpec the device specification for the scope + * @return the FrameworkOps */ public FrameworkOps withDevice(DeviceSpec deviceSpec) { return new FrameworkOps(scope.withDevice(deviceSpec)); @@ -108,32 +151,11 @@ public FrameworkOps withDevice(DeviceSpec deviceSpec) { * Returns an API that adds operations to the graph with the provided control dependencies. * *

{@link Scope#withControlDependencies(Iterable)} + * + * @param controls the operations + * @return the FrameworkOps */ public FrameworkOps withControlDependencies(Iterable controls) { return new FrameworkOps(scope.withControlDependencies(controls)); } - - /** Creates an API for building operations in the provided execution environment */ - public static FrameworkOps create(ExecutionEnvironment env) { - return new FrameworkOps(new Scope(env)); - } - - /** - * Creates an API for building operations in the default eager execution environment - * - *

Invoking this method is equivalent to {@code - * FrameworkOps.create(EagerSession.getDefault())}. - */ - public static FrameworkOps create() { - return new FrameworkOps(new Scope(EagerSession.getDefault())); - } - - /** - * Creates an API for building operations in the default eager execution environment - * - * @param coreOps the TensorFlow core Ops - */ - public static FrameworkOps create(Ops coreOps) { - return new FrameworkOps(coreOps); - } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java index 57a18fc63c2..5208cde98f3 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -15,7 +15,6 @@ package org.tensorflow.framework.op; import org.tensorflow.Operand; -import org.tensorflow.op.Ops; import org.tensorflow.op.Scope; import org.tensorflow.op.core.Constant; import org.tensorflow.op.core.ReduceSum; @@ -26,42 +25,41 @@ import org.tensorflow.op.math.Square; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - public class MathOps { - private final Scope scope; + private final Scope scope; - private final FrameworkOps frameworkOps; + private final FrameworkOps frameworkOps; - /** - * Creates Framework {@code nn} Operations - * - * @param frameworkOps the TensorFLow framework Ops - */ - MathOps(FrameworkOps frameworkOps) { - this.scope = frameworkOps.scope(); - this.frameworkOps = frameworkOps; - } + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + MathOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } - /** - * Normalizes along dimension axis using an L2 norm. - * - * @param x the input - * @param axis Dimension along which to normalize. - * @param the data type for the input and the result - * @return the normalized values based on L2 norm - */ - public Operand l2Normalize(Operand x, int[] axis) { - Operand squareSum = - ReduceSum.create(scope, - Square.create(scope, x), - Constant.vectorOf(scope, axis), - ReduceSum.keepDims(Boolean.TRUE)); - Operand invNorm = - Rsqrt.create(scope, - Maximum.create(scope, squareSum, - Cast.create(scope, - Constant.scalarOf(scope, 1e-12F), x.type()))); - return Mul.create(scope, x, invNorm); - } + /** + * Normalizes along dimension axis using an L2 norm. + * + * @param x the input + * @param axis Dimension along which to normalize. + * @param the data type for the input and the result + * @return the normalized values based on L2 norm + */ + public Operand l2Normalize(Operand x, int[] axis) { + Operand squareSum = + ReduceSum.create( + scope, + Square.create(scope, x), + Constant.vectorOf(scope, axis), + ReduceSum.keepDims(Boolean.TRUE)); + Operand invNorm = + Rsqrt.create( + scope, + Maximum.create( + scope, squareSum, Cast.create(scope, Constant.scalarOf(scope, 1e-12F), x.type()))); + return Mul.create(scope, x, invNorm); + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java index 4054f3ddbb5..0fea3743d95 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java @@ -32,166 +32,164 @@ *

{@link FrameworkOps} */ public class NnOps { - private final Scope scope; + private final Scope scope; - private final FrameworkOps frameworkOps; + private final FrameworkOps frameworkOps; - /** - * Creates Framework {@code nn} Operations - * @param frameworkOps the TensorFLow framework Ops - */ - NnOps(FrameworkOps frameworkOps) { - this.scope = frameworkOps.scope(); - this.frameworkOps = frameworkOps; - } + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + NnOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } - /** - * Computes sigmoid cross entropy given logits. - * - *

Measures the probability error in discrete classification tasks in which each class is - * independent and not mutually exclusive. For instance, one could perform multilabel - * classification where a picture can contain both an elephant and a dog at the same time. - * - *

For brevity, let x = logits, z = labels. The logistic loss in - * pseudo-code is - * - *

-     *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
-     *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
-     *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
-     *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
-     *   = (1 - z) * x + log(1 + exp(-x))
-     *   = x - x * z + log(1 + exp(-x))
-     *  
- * - *

For x < 0, to avoid overflow in exp(-x), we reformulate the above - * - *

-     *  x - x * z + log(1 + exp(-x))
-     *   = log(exp(x)) - x * z + log(1 + exp(-x))
-     *   = - x * z + log(1 + exp(x))
-     *  
- * - *

Hence, to ensure stability and avoid overflow, the implementation uses this equivalent - * formulation - * - *

-     *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
-     *  
- * - *

logits and labels must have the same type and shape. - * - *

- * - * @param labels the labels - * @param logits the logits of type float32 or float64 - * @param the type of labels and logits - * @return the component-wise logistic losses. - * @throws IllegalArgumentException if logits' and labels' do not have the same shape - */ - public Operand sigmoidCrossEntropyWithLogits(Operand labels, - Operand logits) { - return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); - } - - /** - * Computes softmax cross entropy between logits and labels. - * - *

Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - *

NOTE: - * - *

While the classes are mutually exclusive, their probabilities need not be. All that is - * required is that each row of labels is a valid probability distribution. If they - * are not, the computation of the gradient will be incorrect. - * - *

If using exclusive labels (wherein one and only one class is true at a time), - * see {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} - * - *

Usage: - * - *

-     *    Operand<TFloat32> logits =
-     *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
-     *    Operand<TFloat32> labels =
-     *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
-     *    Operand<TFloat32> output =
-     *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
-     *    // output Shape = [2]
-     *    // dataType = FLOAT (1)
-     *    // values { 0.169846, 0.824745 }
-     *  
- * - *

Backpropagation will happen into both logits and labels. To - * disallow backpropagation into labels, pass label tensors through - * tf.stopGradient before feeding it to this function. - * - * @param labels Each vector along the class dimension should hold a valid probability - * distribution e.g. for the case in which labels are of shape [batch_size, num_classes] - * , each row of labels[i] must be a valid probability distribution. - * @param logits Per-label activations, typically a linear output. These activation energies are - * interpreted as unnormalized log probabilities. - * @param axis The class dimension. -1 is the last dimension. - * @param the number type of the operands - * @return the softmax cross entropy loss. Its type is the same as logits and its - * shape is the same as labels except that it does not have the last dimension of - * labels. - */ - public Operand softmaxCrossEntropyWithLogits( - Operand labels, Operand logits, int axis) { - return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); - } - - /** - * Computes sparse softmax cross entropy between logits and labels. - * - *

Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - *

NOTE: - * - *

For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the labels vector must provide a single specific - * index for the true class for each row of logits (each minibatch entry). For soft - * softmax classification with a probability distribution for each entry, {@link - * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. - * - *

WARNING: - * - *

This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of softmax, - * as it will produce incorrect results. - * - *

A common use case is to have logits of shape [batchSize, numClasses] and have - * labels of shape [batchSize], but higher dimensions are supported, in which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, TFloat32 - * , or TFloat64, and labels must have the dtype of TInt32 - * or TInt64. - * - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log - * probabilities. - * @param The data type for the labels - * @param The data type for the logits and loss - * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank - * of the labels is not equal to the rank of the logits minus one. - */ - - public Operand sparseSoftmaxCrossEntropyWithLogits( - Operand labels, Operand logits) { - return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits(scope, labels, logits); - } + /** + * Computes sigmoid cross entropy given {@code logits}. + * + *

Measures the probability error in discrete classification tasks in which each class is + * independent and not mutually exclusive. For instance, one could perform multilabel + * classification where a picture can contain both an elephant and a dog at the same time. + * + *

For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in pseudo-code is + * + *

+   *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
+   *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
+   *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
+   *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
+   *   = (1 - z) * x + log(1 + exp(-x))
+   *   = x - x * z + log(1 + exp(-x))
+   *  
+ * + *

For {@code x < 0}, to avoid overflow in {@code exp(-x)}, we reformulate the above + * + *

+   *  x - x * z + log(1 + exp(-x))
+   *   = log(exp(x)) - x * z + log(1 + exp(-x))
+   *   = - x * z + log(1 + exp(x))
+   *  
+ * + *

Hence, to ensure stability and avoid overflow, the implementation uses this equivalent + * formulation + * + *

+   *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
+   *  
+ * + *
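For illustration only (not part of this patch), a minimal plain-Java sketch of the stable formulation above; the helper name is hypothetical:

    // Element-wise stable sigmoid cross entropy: max(x, 0) - x * z + log(1 + exp(-abs(x)))
    static double[] stableSigmoidCrossEntropy(double[] logits, double[] labels) {
      double[] loss = new double[logits.length];
      for (int i = 0; i < logits.length; i++) {
        double x = logits[i];
        double z = labels[i];
        // Math.log1p(Math.exp(-Math.abs(x))) stays finite for any x,
        // unlike a naive log(1 + exp(-x)) when x is very negative
        loss[i] = Math.max(x, 0.0) - x * z + Math.log1p(Math.exp(-Math.abs(x)));
      }
      return loss;
    }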

{@code logits} and {@code labels} must have the same type and shape. + * + *

+ * + * @param labels the labels + * @param logits the logits of type float32 or float64 + * @param the type of labels and logits + * @return the component-wise logistic losses. + * @throws IllegalArgumentException if logits' and labels' do not have the same shape + */ + public Operand sigmoidCrossEntropyWithLogits( + Operand labels, Operand logits) { + return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); + } + /** + * Computes softmax cross entropy between {@code logits} and {@code labels}. + * + *

Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

NOTE: + * + *

While the classes are mutually exclusive, their probabilities need not be. All that is + * required is that each row of {@code labels} is a valid probability distribution. If they are + * not, the computation of the gradient will be incorrect. + * + *

If using exclusive {@code labels} (wherein one and only one class is true at a time), see + * {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} + * + *

Usage: + * + *

+   *    Operand<TFloat32> logits =
+   *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
+   *    Operand<TFloat32> labels =
+   *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
+   *    Operand<TFloat32> output =
+   *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
+   *    // output Shape = [2]
+   *    // dataType = FLOAT (1)
+   *    // values { 0.169846, 0.824745 }
+   *  
+ * + *
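The quoted values can be checked with a short plain-Java computation of -sum(labels * logSoftmax(logits)) per row; this helper is illustrative only and uses a naive logSumExp, which is adequate for logits this small:

    // Cross entropy of one row: -sum_i labels[i] * (logits[i] - log(sum_j exp(logits[j])))
    static double crossEntropy(double[] labels, double[] logits) {
      double sumExp = 0.0;
      for (double logit : logits) {
        sumExp += Math.exp(logit);
      }
      double logSumExp = Math.log(sumExp);
      double loss = 0.0;
      for (int i = 0; i < labels.length; i++) {
        loss -= labels[i] * (logits[i] - logSumExp);
      }
      return loss;
    }
    // crossEntropy(new double[] {1, 0, 0}, new double[] {4, 2, 1})     is approximately 0.169846
    // crossEntropy(new double[] {0, 0.8, 0.2}, new double[] {0, 5, 1}) is approximately 0.824745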

Backpropagation will happen into both {@code logits} and {@code labels}. To disallow + * backpropagation into {@code labels}, pass label tensors through {@code tf.stopGradient} before + * feeding it to this function. + * + * @param labels Each vector along the class dimension should hold a valid probability + * distribution e.g. for the case in which labels are of shape {@code [batch_size, + * num_classes] }, each row of {@code labels[i]} must be a valid probability distribution. + * @param logits Per-label activations, typically a linear output. These activation energies are + * interpreted as unnormalized log probabilities. + * @param axis The class dimension. -1 is the last dimension. + * @param the number type of the operands + * @param the data type for the labels. + * @return the softmax cross entropy loss. Its type is the same as {@code logits} and its shape is + * the same as {@code labels} except that it does not have the last dimension of {@code + * labels}. + * + */ + public Operand softmaxCrossEntropyWithLogits( + Operand labels, Operand logits, int axis) { + return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); + } + /** + * Computes sparse softmax cross entropy between {@code logits} and {@code labels}. + * + *

Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

NOTE: + * + *

For this operation, the probability of a given label is considered exclusive. That is, soft + * classes are not allowed, and the {@code labels} vector must provide a single specific index for + * the true class for each row of {@code logits} (each minibatch entry). For soft softmax + * classification with a probability distribution for each entry, {@link + * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. + * + *

WARNING: + * + *

This op expects unscaled logits, since it performs a {@code softmax} on {@code logits } + * internally for efficiency. Do not call this op with the output of {@code softmax}, as it will + * produce incorrect results. + * + *

A common use case is to have logits of shape {@code [batchSize, numClasses]} and have labels + * of shape {@code [batchSize]}, but higher dimensions are supported, in which case the {@code + * dim}-th dimension is assumed to be of size {@code numClasses}. {@code logits} must have the + * {@code dataType} of {@code TFloat16}, {@code TFloat32} , or {@code TFloat64}, and {@code + * labels} must have the dtype of {@code TInt32} or {@code TInt64}. + * + * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r } is + * rank of {@code labels} and result) and the dataType is {@code TInt32} or {@code TInt64}. + * Each entry in {@code labels} must be an index in {@code [0, numClasses)}. Other values will + * raise an exception when this op is run on CPU, and return {@code NaN} for corresponding + * loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., + * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, or {@code + * TFloat64}. These activation energies are interpreted as unnormalized log probabilities. + * @param The data type for the labels + * @param The data type for the logits and loss + * @return the loss + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank + * of the labels is not equal to the rank of the logits minus one. + */ + public Operand sparseSoftmaxCrossEntropyWithLogits( + Operand labels, Operand logits) { + return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits( + scope, labels, logits); + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index b55385839d3..fc3f7739363 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -3,8 +3,6 @@ import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; import org.tensorflow.op.core.Select; import org.tensorflow.op.core.ZerosLike; import org.tensorflow.op.dtypes.Cast; @@ -18,17 +16,17 @@ import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -//@Operator(group = "nn") +// @Operator(group = "nn") public class SigmoidCrossEntropyWithLogits { /** - * Computes sigmoid cross entropy given logits. + * Computes sigmoid cross entropy given {@code logits}. * *

Measures the probability error in discrete classification tasks in which each class is * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. * - *

For brevity, let x = logits, z = labels. The logistic loss in + *

For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in * pseudo-code is * *

@@ -40,7 +38,7 @@ public class SigmoidCrossEntropyWithLogits {
    *  = x - x * z + log(1 + exp(-x))
    * 
* - *

For x < 0, to avoid overflow in exp(-x), we reformulate the above + *

For {@code x < 0}, to avoid overflow in {@code exp(-x)}, we reformulate the above * *

    * x - x * z + log(1 + exp(-x))
@@ -55,7 +53,7 @@ public class SigmoidCrossEntropyWithLogits {
    *   max(x, 0) - x * z + log(1 + exp(-abs(x)))
    * 
* - *

logits and labels must have the same type and shape. + *

{@code logits} and {@code labels} must have the same type and shape. * *

* @@ -66,7 +64,7 @@ public class SigmoidCrossEntropyWithLogits { * @return the component-wise logistic losses. * @throws IllegalArgumentException if logits' and labels' do not have the same shape */ - //@Endpoint(name = "sigmoidCrossEntropyWithLogits") + // @Endpoint(name = "sigmoidCrossEntropyWithLogits") public static Operand sigmoidCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits) { if (!isCompatible(labels.shape(), logits.shape())) { diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java index 0f5b8197f1e..7d59941f27a 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -66,7 +66,8 @@ public class SoftmaxCrossEntropyWithLogits { * @param logits Per-label activations, typically a linear output. These activation energies are * interpreted as unnormalized log probabilities. * @param axis The class dimension. -1 is the last dimension. - * @param the number type of the operands + * @param the data type for the logits and return operand + * @param the data type for the labels * @return the softmax cross entropy loss. Its type is the same as logits and its * shape is the same as labels except that it does not have the last dimension of * labels. diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 75766cf9bfb..0b2d29d6092 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -25,7 +25,7 @@ public class SparseSoftmaxCrossEntropyWithLogits { /** - * Computes sparse softmax cross entropy between logits and labels. + * Computes sparse softmax cross entropy between {@code logits} and {@code labels}. * *

Measures the probability error in discrete classification tasks in which the classes are * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is @@ -34,45 +34,45 @@ public class SparseSoftmaxCrossEntropyWithLogits { *

NOTE: * *

For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the labels vector must provide a single specific - * index for the true class for each row of logits (each minibatch entry). For soft + * classes are not allowed, and the {@code labels} vector must provide a single specific + * index for the true class for each row of {@code logits} (each minibatch entry). For soft * softmax classification with a probability distribution for each entry, {@link * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. * *

WARNING: * - *

This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of softmax, + *

This op expects unscaled logits, since it performs a {@code softmax} on {@code logits + * } internally for efficiency. Do not call this op with the output of {@code softmax}, * as it will produce incorrect results. * - *

A common use case is to have logits of shape [batchSize, numClasses] and have - * labels of shape [batchSize], but higher dimensions are supported, in which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, TFloat32 - * , or TFloat64, and labels must have the dtype of TInt32 - * or TInt64. + *

A common use case is to have logits of shape {@code [batchSize, numClasses]} and have + * labels of shape {@code [batchSize]}, but higher dimensions are supported, in which case + * the {@code dim}-th dimension is assumed to be of size {@code numClasses}. {@code + * logits} must have the {@code dataType} of {@code TFloat16}, {@code TFloat32} + * , or {@code TFloat64}, and {@code labels} must have the dtype of {@code TInt32} + * or {@code TInt64}. * * @param scope current scope - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log + * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r + * } is rank of {@code labels} and result) and the dataType is {@code TInt32} + * or {@code TInt64}. Each entry in {@code labels} must be an index in {@code [0, + * numClasses)}. Other values will raise an exception when this op is run on CPU, and + * return {@code NaN} for corresponding loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., + * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, + * or {@code TFloat64}. These activation energies are interpreted as unnormalized log * probabilities. - * @param the data type for the labels - * @param the data tyoe for the loss and logits. + * @param the data type for the labels + * @param the data tyoe for the loss and logits. * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank * of the labels is not equal to the rank of the logits minus one. */ @SuppressWarnings("unchecked") @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits") public static - Operand sparseSoftmaxCrossEntropyWithLogits( - Scope scope, Operand labels, Operand logits) { + Operand sparseSoftmaxCrossEntropyWithLogits( + Scope scope, Operand labels, Operand logits) { scope = scope.withSubScope("SparseSoftmaxCrossEntropyWithLogits"); Operand preciseLogits; if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { @@ -119,7 +119,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( return Cast.create(scope, cost, logits.type()); } else { // Unchecked cast already checked with previous if - return (Operand) cost; + return (Operand) cost; } } @@ -160,7 +160,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( return Cast.create(scope, cost, logits.type()); } else { // Unchecked cast already checked with previous if - return (Operand) cost; + return (Operand) cost; } } } From 790bf3517c93e975ea28f03b72b9b7a6d0dc2bde Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Fri, 16 Apr 2021 18:04:30 -0400 Subject: [PATCH 12/60] Add confusionMatrix() method. 
add Unit test --- .../org/tensorflow/framework/op/MathOps.java | 301 +++++++++++++ .../tensorflow/framework/op/MathOpsTest.java | 413 ++++++++++++++++++ 2 files changed, 714 insertions(+) create mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java index 5208cde98f3..36f5b692cab 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -15,16 +15,37 @@ package org.tensorflow.framework.op; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.LossTuple; +import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; +import org.tensorflow.op.core.AssertThat; import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Identity; +import org.tensorflow.op.core.OnesLike; +import org.tensorflow.op.core.Range; +import org.tensorflow.op.core.Rank; +import org.tensorflow.op.core.ReduceAll; +import org.tensorflow.op.core.ReduceMax; import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.core.ScatterNd; +import org.tensorflow.op.core.Squeeze; +import org.tensorflow.op.core.Stack; +import org.tensorflow.op.core.Zeros; import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.GreaterEqual; +import org.tensorflow.op.math.Less; import org.tensorflow.op.math.Maximum; import org.tensorflow.op.math.Mul; import org.tensorflow.op.math.Rsqrt; import org.tensorflow.op.math.Square; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; +import java.util.Arrays; +import java.util.Collections; + public class MathOps { private final Scope scope; @@ -62,4 +83,284 @@ public Operand l2Normalize(Operand x, int[] axis) { scope, squareSum, Cast.create(scope, Constant.scalarOf(scope, 1e-12F), x.type()))); return Mul.create(scope, x, invNorm); } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * labels for a given classification task. Both prediction and labels must be 1-D arrays of the + * same shape in order for this function to work. + * + *

If `numClasses` is `null`, then `numClasses` will be set to one plus the maximum value in + * either predictions or labels. Class labels are expected to start at 0. For example, if + * `numClasses` is 3, then the possible labels would be `[0, 1, 2]`. + * + *

If `weights` is not `null`, then each prediction contributes its corresponding weight to the + * total value of the confusion matrix cell. + * + *

For example: + * + *

+   *     fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
+   *         [[0 0 0 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 0 0 0]
+   *          [0 0 0 0 1]]
+   * 
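The same counts can be produced by a naive plain-Java loop over (label, prediction) pairs; this sketch is illustrative only and covers just the unweighted case:

    // counts[label][prediction] is incremented once per example
    static int[][] confusionMatrix(int[] labels, int[] predictions, int numClasses) {
      int[][] counts = new int[numClasses][numClasses];
      for (int i = 0; i < labels.length; i++) {
        counts[labels[i]][predictions[i]]++;
      }
      return counts;
    }
    // confusionMatrix(new int[] {1, 2, 4}, new int[] {2, 2, 4}, 5) yields the 5x5 matrix shown above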
+ * + *

Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public Operand confusionMatrix(Operand labels, Operand predictions) { + return confusionMatrix(labels, predictions, null, null, labels.type()); + } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * labels for a given classification task. Both prediction and labels must be 1-D arrays of the + * same shape in order for this function to work. + * + *

If `numClasses` is `null`, then `numClasses` will be set to one plus the maximum value in + * either predictions or labels. Class labels are expected to start at 0. For example, if + * `numClasses` is 3, then the possible labels would be `[0, 1, 2]`. + * + *

If `weights` is not `null`, then each prediction contributes its corresponding weight to the + * total value of the confusion matrix cell. + * + *

For example: + * + *

+   *     fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
+   *         [[0 0 0 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 0 0 0]
+   *          [0 0 0 0 1]]
+   * 
+ * + *

Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param weights An optional Operand whose shape matches {@code predictions}. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public Operand confusionMatrix( + Operand labels, Operand predictions, Operand weights) { + return confusionMatrix(labels, predictions, weights, null, labels.type()); + } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * labels for a given classification task. Both prediction and labels must be 1-D arrays of the + * same shape in order for this function to work. + * + *

If `numClasses` is `null`, then `numClasses` will be set to one plus the maximum value in + * either predictions or labels. Class labels are expected to start at 0. For example, if + * `numClasses` is 3, then the possible labels would be `[0, 1, 2]`. + * + *

If `weights` is not `null`, then each prediction contributes its corresponding weight to the + * total value of the confusion matrix cell. + * + *

For example: + * + *

+   *     fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
+   *         [[0 0 0 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 1 0 0]
+   *          [0 0 0 0 0]
+   *          [0 0 0 0 1]]
+   * 
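When `weights` is supplied, each (label, prediction) pair contributes its weight instead of a unit count; a minimal plain-Java sketch of that accumulation (illustrative only, the helper name is hypothetical):

    // counts[label][prediction] accumulates the example's weight rather than 1
    static double[][] weightedConfusionMatrix(int[] labels, int[] predictions, double[] weights, int numClasses) {
      double[][] counts = new double[numClasses][numClasses];
      for (int i = 0; i < labels.length; i++) {
        counts[labels[i]][predictions[i]] += weights[i];
      }
      return counts;
    }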
+ * + *

Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param weights An optional Operand whose shape matches {@code predictions}. + * @param numClasses The possible number of labels the classification task can have. If this value + * is null, it will be calculated using both predictions and labels. + * @param type Data type of the confusion matrix. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public Operand confusionMatrix( + Operand labels, + Operand predictions, + Operand weights, + Operand numClasses, + Class type) { + Scope lScope = scope.withSubScope("confusionMatrix"); + LossTuple tuple = removeSqueezableDimensions(labels, predictions, 0); + Operand lLabels = Cast.create(lScope, tuple.getLabels(), TInt64.class); + Operand lPredictions = Cast.create(lScope, tuple.getTarget(), TInt64.class); + + Operand zero = Constant.scalarOf(lScope, 0L); + Operand one = Constant.scalarOf(lScope, 1L); + + AssertThat labelsNonNegative = + AssertThat.create( + lScope, + ReduceAll.create(lScope, GreaterEqual.create(lScope, lLabels, zero), allAxes(lLabels)), + Collections.singletonList( + Constant.scalarOf(lScope, "labels contains negative values"))); + lLabels = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(labelsNonNegative)), lLabels); + + AssertThat predictionsNonNegative = + AssertThat.create( + lScope, + ReduceAll.create( + lScope, GreaterEqual.create(lScope, lPredictions, zero), allAxes(lPredictions)), + Collections.singletonList( + Constant.scalarOf(lScope, "predictions contains negative values"))); + lPredictions = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(predictionsNonNegative)), + lPredictions); + + Operand lNumClasses; + if (numClasses == null) { + lNumClasses = + Add.create( + lScope, + Maximum.create( + lScope, + ReduceMax.create(lScope, lPredictions, zero), + ReduceMax.create(lScope, lLabels, zero)), + one); + } else { + lNumClasses = Cast.create(lScope, numClasses, TInt64.class); + AssertThat labelsLess = + AssertThat.create( + lScope, + Less.create(lScope, lLabels, lNumClasses), + Collections.singletonList(Constant.scalarOf(lScope, "labels out of bounds"))); + lLabels = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(labelsLess)), lLabels); + + AssertThat predictionsLess = + AssertThat.create( + lScope, + Less.create(lScope, lPredictions, lNumClasses), + Collections.singletonList(Constant.scalarOf(lScope, "predictions out of bounds"))); + lPredictions = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(predictionsLess)), + lPredictions); + } + + if (weights != null) { + if (!predictions.shape().isCompatibleWith(weights.shape())) { + throw new IllegalArgumentException( + String.format( + "predictions.shape() [%s], is not compatible with weights.shape() [ %s].", + predictions.shape(), weights.shape())); + } + } + + Operand shape = Stack.create(lScope, 
Arrays.asList(lNumClasses, lNumClasses)); + Operand indices = + Stack.create(lScope, Arrays.asList(lLabels, lPredictions), Stack.axis(1L)); + Operand values = weights == null ? OnesLike.create(lScope, predictions) : weights; + Operand zeroMatrix = Zeros.create(lScope, Cast.create(lScope, shape, TInt32.class), type); + + return ScatterNd.create(lScope, indices, values, shape); + } + + /** + * Squeeze last dim if ranks differ from expected by exactly 1. + * + * @param labels Label values, a Operand whose dimensions match predictions + * . + * @param predictions Predicted values, a Tensor of arbitrary dimensions. + * @param expectedRankDiff Expected result of rank(predictions) - rank(labels). + * @param the data type for the labels, predictions and result + * @return labels and predictions, possibly with last dim squeezed. + */ + public LossTuple removeSqueezableDimensions( + Operand labels, Operand predictions, int expectedRankDiff) { + Scope lScope = scope.withSubScope("removeSqueezableDimensions"); + Shape predictionsShape = predictions.shape(); + int predictionsRank = predictionsShape.numDimensions(); + Shape labelsShape = labels.shape(); + int labelsRank = labelsShape.numDimensions(); + + if (predictionsRank != Shape.UNKNOWN_SIZE || labelsRank != Shape.UNKNOWN_SIZE) { + // Use static rank. + int rankDiff = predictionsRank - labelsRank; + if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.size(-1), 1)) { + predictions = Squeeze.create(lScope, predictions); + } else if (rankDiff == expectedRankDiff - 1 && Shape.isCompatible(labelsShape.size(-1), 1)) { + labels = Squeeze.create(lScope, labels); + } + return new LossTuple<>(labels, predictions); + } + // Use dynamic rank. + + // TODO: hold for lazy select feature, + // Operand rankDiff = tf.math.sub(tf.rank(predictions), tf.rank(labels)); + if (predictionsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(predictionsShape.size(-1), 1)) { + /* + * TODO, if we ever get a select that does lazy evaluation, but for now do the tf.squeeze + * predictions = tf.select( tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), + * tf.squeeze(predictions, Squeeze.axis(Arrays.asList(-1L))), predictions ); * + */ + predictions = + Squeeze.create(lScope, predictions, Squeeze.axis(Collections.singletonList(-1L))); + } + if (labelsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(labelsShape.size(-1), 1)) { + /* + * TODO, if we ever get a select that does lazy evaluation labels = tf.select( + * tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), tf.squeeze(labels, + * Squeeze.axis(Arrays.asList(-1L))), predictions ); * + */ + labels = Squeeze.create(lScope, labels, Squeeze.axis(Collections.singletonList(-1L))); + } + return new LossTuple<>(labels, predictions); + } + + public Operand allAxes(Operand op) { + int rank = op.shape().numDimensions(); + if (rank != Shape.UNKNOWN_SIZE) { + int[] axes = new int[rank]; + for (int i = 0; i < rank; i++) { + axes[i] = i; + } + return Constant.vectorOf(scope, axes); + } else { + return Range.create( + scope, Constant.scalarOf(scope, 0), Rank.create(scope, op), Constant.scalarOf(scope, 1)); + } + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java new file mode 100644 index 00000000000..326e3cdc2d1 --- /dev/null +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java @@ -0,0 +1,413 @@ +package org.tensorflow.framework.op; + +import 
org.junit.jupiter.api.Test; +import org.tensorflow.Operand; +import org.tensorflow.framework.utils.TestSession; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Ops; +import org.tensorflow.types.TFloat64; +import org.tensorflow.types.TInt64; + +class MathOpsTest { + + private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; + + double[][][] array = + new double[][][] { + { + {4.17021990e-01, 7.20324516e-01, 1.14374816e-04}, + {3.02332580e-01, 1.46755889e-01, 9.23385918e-02}, + {1.86260208e-01, 3.45560730e-01, 3.96767467e-01}, + {5.38816750e-01, 4.19194520e-01, 6.85219526e-01}, + {2.04452246e-01, 8.78117442e-01, 2.73875929e-02}, + {6.70467496e-01, 4.17304814e-01, 5.58689833e-01}, + {1.40386939e-01, 1.98101491e-01, 8.00744593e-01} + }, + { + {9.68261600e-01, 3.13424170e-01, 6.92322612e-01}, + {8.76389146e-01, 8.94606650e-01, 8.50442126e-02}, + {3.90547849e-02, 1.69830427e-01, 8.78142476e-01}, + {9.83468369e-02, 4.21107620e-01, 9.57889557e-01}, + {5.33165276e-01, 6.91877127e-01, 3.15515637e-01}, + {6.86500907e-01, 8.34625661e-01, 1.82882771e-02}, + {7.50144303e-01, 9.88861084e-01, 7.48165667e-01} + }, + { + {2.80443996e-01, 7.89279342e-01, 1.03226006e-01}, + {4.47893530e-01, 9.08595502e-01, 2.93614149e-01}, + {2.87775338e-01, 1.30028576e-01, 1.93669572e-02}, + {6.78835511e-01, 2.11628109e-01, 2.65546650e-01}, + {4.91573155e-01, 5.33625446e-02, 5.74117601e-01}, + {1.46728575e-01, 5.89305520e-01, 6.99758351e-01}, + {1.02334432e-01, 4.14055973e-01, 6.94400132e-01} + }, + { + {4.14179265e-01, 4.99534607e-02, 5.35896420e-01}, + {6.63794637e-01, 5.14889121e-01, 9.44594741e-01}, + {5.86555064e-01, 9.03401911e-01, 1.37474701e-01}, + {1.39276341e-01, 8.07391286e-01, 3.97676826e-01}, + {1.65354192e-01, 9.27508593e-01, 3.47765863e-01}, + {7.50812113e-01, 7.25997984e-01, 8.83306086e-01}, + {6.23672187e-01, 7.50942409e-01, 3.48898351e-01} + }, + { + {2.69927889e-01, 8.95886242e-01, 4.28091198e-01}, + {9.64840055e-01, 6.63441479e-01, 6.21695697e-01}, + {1.14745975e-01, 9.49489236e-01, 4.49912131e-01}, + {5.78389585e-01, 4.08136815e-01, 2.37026975e-01}, + {9.03379500e-01, 5.73679507e-01, 2.87032709e-03}, + {6.17144942e-01, 3.26644897e-01, 5.27058125e-01}, + {8.85942101e-01, 3.57269764e-01, 9.08535123e-01} + }, + { + {6.23360097e-01, 1.58212427e-02, 9.29437220e-01}, + {6.90896928e-01, 9.97322857e-01, 1.72340512e-01}, + {1.37135744e-01, 9.32595491e-01, 6.96818173e-01}, + {6.60001710e-02, 7.55463064e-01, 7.53876209e-01}, + {9.23024535e-01, 7.11524785e-01, 1.24270961e-01}, + {1.98801346e-02, 2.62109861e-02, 2.83064879e-02}, + {2.46211067e-01, 8.60027969e-01, 5.38831055e-01} + }, + { + {5.52821994e-01, 8.42030883e-01, 1.24173313e-01}, + {2.79183686e-01, 5.85759282e-01, 9.69595730e-01}, + {5.61030209e-01, 1.86472889e-02, 8.00632656e-01}, + {2.32974276e-01, 8.07105184e-01, 3.87860656e-01}, + {8.63541842e-01, 7.47121632e-01, 5.56240261e-01}, + {1.36455223e-01, 5.99176884e-02, 1.21343456e-01}, + {4.45518792e-02, 1.07494131e-01, 2.25709334e-01} + }, + { + {7.12988973e-01, 5.59717000e-01, 1.25559801e-02}, + {7.19742775e-02, 9.67276335e-01, 5.68100452e-01}, + {2.03293234e-01, 2.52325743e-01, 7.43825853e-01}, + {1.95429474e-01, 5.81358910e-01, 9.70019996e-01}, + {8.46828818e-01, 2.39847764e-01, 4.93769705e-01}, + {6.19955719e-01, 8.28980923e-01, 1.56791389e-01}, + {1.85762029e-02, 7.00221434e-02, 4.86345112e-01} + }, + { + {6.06329441e-01, 5.68851411e-01, 3.17362398e-01}, + {9.88616168e-01, 5.79745233e-01, 3.80141169e-01}, + {5.50948203e-01, 7.45334446e-01, 
6.69232905e-01}, + {2.64919549e-01, 6.63348362e-02, 3.70084196e-01}, + {6.29717529e-01, 2.10174009e-01, 7.52755582e-01}, + {6.65364787e-02, 2.60315090e-01, 8.04754555e-01}, + {1.93434283e-01, 6.39460862e-01, 5.24670303e-01} + }, + { + {9.24807966e-01, 2.63296783e-01, 6.59610927e-02}, + {7.35065937e-01, 7.72178054e-01, 9.07815874e-01}, + {9.31972086e-01, 1.39515726e-02, 2.34362081e-01}, + {6.16778374e-01, 9.49016333e-01, 9.50176120e-01}, + {5.56653202e-01, 9.15606380e-01, 6.41566217e-01}, + {3.90007704e-01, 4.85990673e-01, 6.04310513e-01}, + {5.49547911e-01, 9.26181436e-01, 9.18733418e-01} + }, + { + {3.94875616e-01, 9.63262558e-01, 1.73955664e-01}, + {1.26329526e-01, 1.35079160e-01, 5.05662143e-01}, + {2.15248056e-02, 9.47970212e-01, 8.27115476e-01}, + {1.50189810e-02, 1.76196262e-01, 3.32063586e-01}, + {1.30996838e-01, 8.09490681e-01, 3.44736665e-01}, + {9.40107465e-01, 5.82014203e-01, 8.78831983e-01}, + {8.44734430e-01, 9.05392289e-01, 4.59880263e-01} + }, + { + {5.46346843e-01, 7.98603594e-01, 2.85718858e-01}, + {4.90253508e-01, 5.99110305e-01, 1.55332759e-02}, + {5.93481421e-01, 4.33676362e-01, 8.07360530e-01}, + {3.15244794e-01, 8.92888725e-01, 5.77857196e-01}, + {1.84010208e-01, 7.87929237e-01, 6.12031162e-01}, + {5.39092720e-02, 4.20193672e-01, 6.79068863e-01}, + {9.18601751e-01, 4.02024889e-04, 9.76759136e-01} + }, + { + {3.76580328e-01, 9.73783553e-01, 6.04716122e-01}, + {8.28845799e-01, 5.74711502e-01, 6.28076196e-01}, + {2.85576284e-01, 5.86833358e-01, 7.50021756e-01}, + {8.58313859e-01, 7.55082190e-01, 6.98057234e-01}, + {8.64479423e-01, 3.22681010e-01, 6.70788765e-01}, + {4.50873941e-01, 3.82102758e-01, 4.10811365e-01}, + {4.01479572e-01, 3.17383945e-01, 6.21919394e-01} + }, + { + {4.30247277e-01, 9.73802090e-01, 6.77800894e-01}, + {1.98569894e-01, 4.26701009e-01, 3.43346238e-01}, + {7.97638834e-01, 8.79998267e-01, 9.03841972e-01}, + {6.62719786e-01, 2.70208269e-01, 2.52366692e-01}, + {8.54897916e-01, 5.27714670e-01, 8.02161098e-01}, + {5.72488546e-01, 7.33142555e-01, 5.19011617e-01}, + {7.70883918e-01, 5.68857968e-01, 4.65709865e-01} + }, + { + {3.42688918e-01, 6.82093501e-02, 3.77924174e-01}, + {7.96260759e-02, 9.82817113e-01, 1.81612849e-01}, + {8.11858714e-01, 8.74961674e-01, 6.88413262e-01}, + {5.69494426e-01, 1.60971433e-01, 4.66880023e-01}, + {3.45172048e-01, 2.25039959e-01, 5.92511892e-01}, + {3.12269837e-01, 9.16305542e-01, 9.09635544e-01}, + {2.57118285e-01, 1.10891297e-01, 1.92962736e-01} + }, + { + {4.99584168e-01, 7.28585660e-01, 2.08194435e-01}, + {2.48033553e-01, 8.51671875e-01, 4.15848732e-01}, + {6.16685092e-01, 2.33666137e-01, 1.01967260e-01}, + {5.15857041e-01, 4.77140993e-01, 1.52671650e-01}, + {6.21806204e-01, 5.44010103e-01, 6.54137373e-01}, + {1.44545540e-01, 7.51527846e-01, 2.22049147e-01}, + {5.19351840e-01, 7.85296023e-01, 2.23304275e-02} + }, + { + {3.24362457e-01, 8.72922361e-01, 8.44709635e-01}, + {5.38440585e-01, 8.66608262e-01, 9.49805975e-01}, + {8.26407015e-01, 8.54115427e-01, 9.87434015e-02}, + {6.51304305e-01, 7.03516960e-01, 6.10240817e-01}, + {7.99615264e-01, 3.45712192e-02, 7.70238757e-01}, + {7.31728613e-01, 2.59698391e-01, 2.57069290e-01}, + {6.32303298e-01, 3.45297456e-01, 7.96588659e-01} + }, + { + {4.46146220e-01, 7.82749414e-01, 9.90471780e-01}, + {3.00248325e-01, 1.43005833e-01, 9.01308417e-01}, + {5.41559398e-01, 9.74740386e-01, 6.36604428e-01}, + {9.93912995e-01, 5.46070814e-01, 5.26425958e-01}, + {1.35427907e-01, 3.55705172e-01, 2.62185670e-02}, + {1.60395175e-01, 7.45637178e-01, 3.03996895e-02}, + {3.66543084e-01, 
8.62346232e-01, 6.92677736e-01} + }, + { + {6.90942168e-01, 1.88636795e-01, 4.41904277e-01}, + {5.81577420e-01, 9.89751697e-01, 2.03906223e-01}, + {2.47732908e-01, 2.62173086e-01, 7.50172436e-01}, + {4.56975341e-01, 5.69294393e-02, 5.08516252e-01}, + {2.11960167e-01, 7.98604250e-01, 2.97331393e-01}, + {2.76060123e-02, 5.93432426e-01, 8.43840420e-01}, + {3.81016135e-01, 7.49858320e-01, 5.11141479e-01} + }, + { + {5.40951788e-01, 9.59434330e-01, 8.03960919e-01}, + {3.23230661e-02, 7.09387243e-01, 4.65001494e-01}, + {9.47548926e-01, 2.21432731e-01, 2.67072022e-01}, + {8.14739615e-02, 4.28618819e-01, 1.09018765e-01}, + {6.33786738e-01, 8.02963257e-01, 6.96800470e-01}, + {7.66211390e-01, 3.42454106e-01, 8.45851481e-01}, + {4.28768784e-01, 8.24009895e-01, 6.26496136e-01} + } + }; + + double[][][] expectedArray = { + { + {3.45350616e-02, 5.96526116e-02, 9.47178160e-06}, + {2.50372272e-02, 1.21533722e-02, 7.64688430e-03}, + {1.54248644e-02, 2.86171008e-02, 3.28577124e-02}, + {4.46213149e-02, 3.47149745e-02, 5.67454435e-02}, + {1.69314109e-02, 7.27199987e-02, 2.26806314e-03}, + {5.55237755e-02, 3.45584825e-02, 4.62670736e-02}, + {1.16259372e-02, 1.64054818e-02, 6.63124844e-02} + }, + { + {8.01851526e-02, 2.59557609e-02, 5.73336743e-02}, + {7.25768730e-02, 7.40855262e-02, 7.04281079e-03}, + {3.23426444e-03, 1.40642561e-02, 7.27220699e-02}, + {8.14444851e-03, 3.48734073e-02, 7.93262124e-02}, + {4.41532955e-02, 5.72967827e-02, 2.61289626e-02}, + {5.68515584e-02, 6.91182911e-02, 1.51451665e-03}, + {6.21220917e-02, 8.18910673e-02, 6.19582348e-02} + }, + { + {2.32245550e-02, 6.53630048e-02, 8.54850933e-03}, + {3.70916426e-02, 7.52439946e-02, 2.43152231e-02}, + {2.38316897e-02, 1.07681248e-02, 1.60384597e-03}, + {5.62167615e-02, 1.75256692e-02, 2.19908543e-02}, + {4.07089069e-02, 4.41914052e-03, 4.75447029e-02}, + {1.21511100e-02, 4.88024652e-02, 5.79494536e-02}, + {8.47467501e-03, 3.42894346e-02, 5.75057231e-02} + }, + { + {3.42996456e-02, 4.13682219e-03, 4.43794727e-02}, + {5.49711734e-02, 4.26397808e-02, 7.82252178e-02}, + {4.85746935e-02, 7.48138949e-02, 1.13847647e-02}, + {1.15339644e-02, 6.68629184e-02, 3.29330191e-02}, + {1.36935636e-02, 7.68102556e-02, 2.87997164e-02}, + {6.21773973e-02, 6.01224527e-02, 7.31496885e-02}, + {5.16484901e-02, 6.21881858e-02, 2.88935024e-02} + }, + { + {2.23536789e-02, 7.41914958e-02, 3.54517400e-02}, + {7.99018070e-02, 5.49419262e-02, 5.14848121e-02}, + {9.50251892e-03, 7.86305517e-02, 3.72588076e-02}, + {4.78984788e-02, 3.37992460e-02, 1.96290389e-02}, + {7.48120397e-02, 4.75084223e-02, 2.37701897e-04}, + {5.11079468e-02, 2.70506144e-02, 4.36475389e-02}, + {7.33679906e-02, 2.95867678e-02, 7.52389953e-02} + }, + { + {5.16226478e-02, 1.31021289e-03, 7.69699737e-02}, + {5.72156087e-02, 8.25918168e-02, 1.42721254e-02}, + {1.13566946e-02, 7.72315189e-02, 5.77059686e-02}, + {5.46570681e-03, 6.25625551e-02, 6.24311455e-02}, + {7.64389113e-02, 5.89238741e-02, 1.02913165e-02}, + {1.64634397e-03, 2.17062421e-03, 2.34416011e-03}, + {2.03896053e-02, 7.12219477e-02, 4.46224995e-02} + }, + { + {4.57811356e-02, 6.97315410e-02, 1.02832299e-02}, + {2.31201854e-02, 4.85087894e-02, 8.02956372e-02}, + {4.64608893e-02, 1.54424773e-03, 6.63032085e-02}, + {1.92934200e-02, 6.68392256e-02, 3.21201086e-02}, + {7.15129450e-02, 6.18717745e-02, 4.60642166e-02}, + {1.13003375e-02, 4.96199494e-03, 1.00488793e-02}, + {3.68949817e-03, 8.90196767e-03, 1.86917856e-02} + }, + { + {5.90451285e-02, 4.63521369e-02, 1.03980501e-03}, + {5.96044352e-03, 8.01035613e-02, 4.70464006e-02}, + {1.68354288e-02, 
2.08959840e-02, 6.15988411e-02}, + {1.61842033e-02, 4.81443815e-02, 8.03307742e-02}, + {7.01288804e-02, 1.98626388e-02, 4.08908091e-02}, + {5.13407178e-02, 6.86508343e-02, 1.29844472e-02}, + {1.53836084e-03, 5.79878036e-03, 4.02759537e-02} + }, + { + {5.02122790e-02, 4.71085906e-02, 2.62818988e-02}, + {8.18707868e-02, 4.80107442e-02, 3.14808302e-02}, + {4.56259623e-02, 6.17237724e-02, 5.54215349e-02}, + {2.19389219e-02, 5.49342157e-03, 3.06479763e-02}, + {5.21491282e-02, 1.74052510e-02, 6.23383410e-02}, + {5.51012019e-03, 2.15576105e-02, 6.66445568e-02}, + {1.60189737e-02, 5.29560074e-02, 4.34497967e-02} + }, + { + {7.65866041e-02, 2.18045339e-02, 5.46247046e-03}, + {6.08734004e-02, 6.39467835e-02, 7.51794279e-02}, + {7.71798939e-02, 1.15537888e-03, 1.94083489e-02}, + {5.10775894e-02, 7.85913840e-02, 7.86874294e-02}, + {4.60984148e-02, 7.58245885e-02, 5.31303585e-02}, + {3.22979130e-02, 4.02465984e-02, 5.00450842e-02}, + {4.55099978e-02, 7.67003447e-02, 7.60835484e-02} + }, + { + {3.27010415e-02, 7.97711685e-02, 1.44058811e-02}, + {1.04617933e-02, 1.11863809e-02, 4.18756641e-02}, + {1.78254500e-03, 7.85047561e-02, 6.84963465e-02}, + {1.24377478e-03, 1.45914331e-02, 2.74993554e-02}, + {1.08483098e-02, 6.70367777e-02, 2.85488572e-02}, + {7.78536126e-02, 4.81986478e-02, 7.27791712e-02}, + {6.99554384e-02, 7.49787241e-02, 3.80843058e-02} + }, + { + {4.52449061e-02, 6.61351755e-02, 2.36613862e-02}, + {4.05996218e-02, 4.96144369e-02, 1.28636532e-03}, + {4.91482876e-02, 3.59142683e-02, 6.68603703e-02}, + {2.61065327e-02, 7.39432648e-02, 4.78543900e-02}, + {1.52385337e-02, 6.52511939e-02, 5.06844558e-02}, + {4.46441676e-03, 3.47977169e-02, 5.62360846e-02}, + {7.60726482e-02, 3.32930977e-05, 8.08888674e-02} + }, + { + {3.11859436e-02, 8.06424469e-02, 5.00786714e-02}, + {6.86396435e-02, 4.75938842e-02, 5.20132035e-02}, + {2.36495789e-02, 4.85977381e-02, 6.21119440e-02}, + {7.10799918e-02, 6.25310168e-02, 5.78085780e-02}, + {7.15905875e-02, 2.67223511e-02, 5.55503815e-02}, + {3.73384580e-02, 3.16432752e-02, 3.40207368e-02}, + {3.32479365e-02, 2.62836833e-02, 5.15033379e-02} + }, + { + {3.56302932e-02, 8.06439817e-02, 5.61310798e-02}, + {1.64442733e-02, 3.53366137e-02, 2.84337122e-02}, + {6.60552830e-02, 7.28757605e-02, 7.48503357e-02}, + {5.48821613e-02, 2.23768987e-02, 2.08993759e-02}, + {7.07971081e-02, 4.37019095e-02, 6.64297864e-02}, + {4.74097952e-02, 6.07141182e-02, 4.29811813e-02}, + {6.38396144e-02, 4.71091345e-02, 3.85670736e-02} + }, + { + {2.83792764e-02, 5.64865675e-03, 3.12972330e-02}, + {6.59411587e-03, 8.13905448e-02, 1.50400000e-02}, + {6.72328845e-02, 7.24586621e-02, 5.70099279e-02}, + {4.71618399e-02, 1.33306114e-02, 3.86639796e-02}, + {2.85849143e-02, 1.86363515e-02, 4.90679964e-02}, + {2.58601662e-02, 7.58824944e-02, 7.53301233e-02}, + {2.12928709e-02, 9.18329880e-03, 1.59799233e-02} + }, + { + {4.13723253e-02, 6.03367463e-02, 1.72413141e-02}, + {2.05405317e-02, 7.05299526e-02, 3.44378985e-02}, + {5.10698669e-02, 1.93507168e-02, 8.44426826e-03}, + {4.27199379e-02, 3.95137258e-02, 1.26432776e-02}, + {5.14939614e-02, 4.50513922e-02, 5.41714206e-02}, + {1.19703254e-02, 6.22366704e-02, 1.83886718e-02}, + {4.30093557e-02, 6.50331303e-02, 1.84926135e-03} + }, + { + {2.68615987e-02, 7.22897798e-02, 6.99533820e-02}, + {4.45901640e-02, 7.17668831e-02, 7.86567777e-02}, + {6.84376806e-02, 7.07323104e-02, 8.17728881e-03}, + {5.39368056e-02, 5.82607202e-02, 5.05361930e-02}, + {6.62189573e-02, 2.86296452e-03, 6.37861863e-02}, + {6.05970249e-02, 2.15065386e-02, 2.12888140e-02}, + 
{5.23632653e-02, 2.85952985e-02, 6.59683123e-02} + }, + { + {3.69469412e-02, 6.48222342e-02, 8.20244551e-02}, + {2.48646215e-02, 1.18428171e-02, 7.46405274e-02}, + {4.48484421e-02, 8.07216838e-02, 5.27194552e-02}, + {8.23094398e-02, 4.52220477e-02, 4.35951874e-02}, + {1.12152621e-02, 2.94571985e-02, 2.17125192e-03}, + {1.32828895e-02, 6.17488436e-02, 2.51750532e-03}, + {3.03547252e-02, 7.14139268e-02, 5.73630854e-02} + }, + { + {5.72193563e-02, 1.56216780e-02, 3.65956500e-02}, + {4.81624752e-02, 8.19648281e-02, 1.68861933e-02}, + {2.05156356e-02, 2.17114780e-02, 6.21244237e-02}, + {3.78437378e-02, 4.71452763e-03, 4.21120226e-02}, + {1.75531674e-02, 6.61352351e-02, 2.46230606e-02}, + {2.28615105e-03, 4.91442308e-02, 6.98814020e-02}, + {3.15532871e-02, 6.20984100e-02, 4.23294269e-02} + }, + { + {4.47981246e-02, 7.94541389e-02, 6.65788352e-02}, + {2.67678709e-03, 5.87468557e-02, 3.85084115e-02}, + {7.84698650e-02, 1.83376241e-02, 2.21171752e-02}, + {6.74714567e-03, 3.54954340e-02, 9.02822800e-03}, + {5.24861142e-02, 6.64962158e-02, 5.77045009e-02}, + {6.34526685e-02, 2.83598304e-02, 7.00479448e-02}, + {3.55078541e-02, 6.82391599e-02, 5.18823527e-02} + } + }; + + @Test + public void testL2Normalize() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand input = tf.constant(array); + Operand result = fops.math.l2Normalize(tf.constant(array), new int[]{ 0,1,2}); + session.evaluate(tf.constant(expectedArray), result); + } + } + + @Test + public void testConfusionMatrix() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + long[] labels = new long[] {2, 0, 2, 2, 0, 1}; + long[] predictions = new long[] {0, 0, 2, 2, 0, 2}; + Operand result = + fops.math.confusionMatrix(tf.constant(labels), tf.constant(predictions)); + long[][] expected = + new long[][] { + {2, 0, 0}, + {0, 0, 1}, + {1, 0, 2} + }; + session.evaluate(tf.constant(expected), result); + } + } +} From b4ca97a025645a227aae7c306ab397a383f0a6d9 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:37:19 -0400 Subject: [PATCH 13/60] Added linalg methods for matmul --- .../tensorflow/framework/op/LinalgOps.java | 306 ++++++++++++++++++ .../framework/op/LinalgOpsTest.java | 60 ++++ 2 files changed, 366 insertions(+) create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java create mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/op/LinalgOpsTest.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java new file mode 100644 index 00000000000..eb069a2db22 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java @@ -0,0 +1,306 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.Operand; +import org.tensorflow.framework.utils.SparseTensor; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Conj; +import org.tensorflow.op.sparse.SparseMatMul; +import org.tensorflow.op.train.BatchMatMul; +import org.tensorflow.types.TBfloat16; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; + +public class LinalgOps { + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + LinalgOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + + /** + * Multiplies matrix a by matrix b, producing a * b + * . + * + *

The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions + * specify matching batch size. + * + *

Both matrices must be of the same type. The supported types are: TFloat16, + * TFloat32, TFloat64, TInt32. + * + *

Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by + * setting one of the corresponding flags to true. These are false by default. + * + *

A simple 2-D tensor matrix multiplication: + * + *

{@code
+   * Operand a = tf.constant(new double[][] {
+   *         {-8.944851},
+   *         {4.1711287},
+   *         {-0.22380222}
+   *     });
+   * Operand b = tf.constant( new double[][] {
+   *         {-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}
+   *     });
+   * Operand result = fops.linalg.matmul(a, b);
+   * // result = {
+   * //     {127.69746,  111.21564,  20.078575,  14.111271,  -16.62731},
+   * //     {-59.547394, -51.861652, -9.362965,  -6.580314,    7.753584},
+   * //     {  3.1950197,  2.7826407, 0.50237054, 0.35306725, -0.4160191}
+   * //  }
+   *
+   * }
+ * + *

Note: This is matrix product, not element-wise product. + * + * @param a an Operand of type TFloat16, TFloat32, TFloat64 + * , TInt32, with a rank > 1 + * @param b an Operand with the same type and rank as a. + * @param the data type of the Operands + * @return an Operand of the same type as a and b where each inner-most + * matrix is the product of the corresponding matrices in a and b. + * This is the matrix product, not an element-wise product. + * @throws java.lang.IllegalArgumentException If transposeA and adjointA + * , or transposeB and adjointB are both set to `true`. + */ + @Endpoint(name = "matmul") + public Operand matmul(Operand a, Operand b) { + return matmul(a, b, false, false, false, false, false, false); + } + + /** + * Multiplies matrix a by matrix b, producing a * b + * . + * + *

The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions + * specify matching batch size. + * + *

Both matrices must be of the same type. The supported types are: TFloat16, + * TFloat32, TFloat64, TInt32. + * + *

Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by + * setting one of the corresponding flags to true. These are false by default; a transposed variant is sketched after the example below. + * + *

+ * + *

Note: This is matrix product, not element-wise product. + * + *

A simple 2-D tensor matrix multiplication: + * + *

{@code
+   * Operand a = tf.constant(new double[][] {
+   *         {-8.944851},
+   *         {4.1711287},
+   *         {-0.22380222}
+   *     });
+   * Operand b = tf.constant( new double[][] {
+   *         {-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}
+   *     });
+   * Operand result = fops.linalg.matmul(a, b);
+   * // result = {
+   * //     {127.69746,  111.21564,  20.078575,  14.111271,  -16.62731},
+   * //     {-59.547394, -51.861652, -9.362965,  -6.580314,    7.753584},
+   * //     {  3.1950197,  2.7826407, 0.50237054, 0.35306725, -0.4160191}
+   * //  }
+   *
+   * }
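+   * A hypothetical follow-on to the example above (an illustrative sketch only; the names
+   * aT and result2 are introduced here for the sketch): since this overload exposes
+   * transposeA and transposeB, the same product can be formed from the transposed a.
+   *
+   * {@code
+   * // aT is the 1x3 transpose of the 3x1 matrix a used above
+   * Operand<TFloat64> aT = tf.constant(new double[][] {
+   *         {-8.944851, 4.1711287, -0.22380222}
+   *     });
+   * // transposeA = true restores the 3x1 shape inside matmul, so result2 equals result
+   * Operand<TFloat64> result2 = fops.linalg.matmul(aT, b, true, false);
+   * }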
+ * + * @param a an Operand of type TFloat16, TFloat32, TFloat64 + * , TInt32, with a rank > 1 + * @param b an Operand with the same type and rank as a. + * @param transposeA If `true`, a is transposed before multiplication. + * @param transposeB If `true`, b is transposed before multiplication. + * @param the data type of the Operands + * @return an Operand of the same type as a and b where each inner-most + * matrix is the product of the corresponding matrices in a and b. + * This is the matrix product, not an element-wise product. + * @throws java.lang.IllegalArgumentException If transposeA and adjointA + * , or transposeB and adjointB are both set to `true`. + */ + @Endpoint(name = "matmul") + public Operand matmul( + Operand a, Operand b, boolean transposeA, boolean transposeB) { + return matmul(a, b, transposeA, transposeB, false, false, false, false); + } + + /** + * Multiplies matrix a by matrix b, producing a * b + * .

The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions + * specify matching batch size. + * + *

Both matrices must be of the same type. The supported types are: TFloat16, + * TFloat32, TFloat64, TInt32. + * + *

Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by + * setting one of the corresponding flags to true. These are false by default. + * + *

Note: This is matrix product, not element-wise product. + * + *

A simple 2-D tensor matrix multiplication: + * + *

{@code
+   * Operand a = tf.constant(new double[][] {
+   *         {-8.944851},
+   *         {4.1711287},
+   *         {-0.22380222}
+   *     });
+   * Operand b = tf.constant( new double[][] {
+   *         {-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}
+   *     });
+   * Operand result = fops.linalg.matmul(a, b);
+   * // result = {
+   * //     {127.69746,  111.21564,  20.078575,  14.111271,  -16.62731},
+   * //     {-59.547394, -51.861652, -9.362965,  -6.580314,    7.753584},
+   * //     {  3.1950197,  2.7826407, 0.50237054, 0.35306725, -0.4160191}
+   * //  }
+   *
+   * }
+ * + * @param a an Operand of of type TFloat16, TFloat32, TFloat64 + * , TInt32. with a rank > 1 + * @param b an Operand with same type and rank as a. + * @param transposeA If true, a is transposed before multiplication. + * @param transposeB If True, b is transposed before multiplication + * @param adjointA If true, a is conjugated and transposed before multiplication. + * @param adjointB If true, b is conjugated and transposed before multiplication. + * @param aIsSparse If true, a is treated as a sparse matrix. Notice, this does + * not support {@link SparseTensor}, it just makes optimizations that assume most values + * in a are zero. + * @param bIsSparse If true, b is treated as a sparse matrix. Notice, this does + * not support {@link SparseTensor}, it just makes optimizations that assume most values + * in b are zero. + * @param the data type of the Operands + * @return A Operand of the same type as a and b where each inner-most + * matrix is the product of the corresponding matrices in a and b. + * This is the matrix product not an element-wise product. + * @throws java.lang.IllegalArgumentException If transposeA and adjointA + * , or transposeB and adjointB are both set to `true`. + */ + @SuppressWarnings("unchecked") + @Endpoint(name = "matmul") + public Operand matmul( + Operand a, + Operand b, + boolean transposeA, + boolean transposeB, + boolean adjointA, + boolean adjointB, + boolean aIsSparse, + boolean bIsSparse) { + Scope lscope = scope.withSubScope("MatMul"); + if (transposeA && adjointA) + throw new IllegalArgumentException("Only one of transposeA and adjointA can be true."); + if (transposeB && adjointB) + throw new IllegalArgumentException("Only one of transposeB and adjointB can be true."); + if (!(TFloating.class.isAssignableFrom(a.type()) || a.type().equals(TInt32.class))) + throw new IllegalArgumentException( + String.format( + "Operand 'a' must be of type 'TBfloat16','TFloat16', 'TFloat32', 'TFloat64' or 'TInt32'. found type : %s", + a.type().getSimpleName())); + if (!(TFloating.class.isAssignableFrom(a.type()) || b.type().equals(TInt32.class))) + throw new IllegalArgumentException( + String.format( + "Operand 'b' must be of type 'TBfloat16', 'TFloat32', 'TFloat64' or 'TInt32'. found type : %s", + b.type().getSimpleName())); + + Shape aShape = a.shape(); + Shape bShape = b.shape(); + if (aShape.numDimensions() != bShape.numDimensions()) + throw new IllegalArgumentException( + String.format( + "Parameters 'a' and 'b' must the same rank: found a rank = %d, b rank = %d", + aShape.numDimensions(), bShape.numDimensions())); + boolean outputMayHaveNonEmptyBatchShape = + aShape.numDimensions() == Shape.UNKNOWN_SIZE + || aShape.numDimensions() > 2 + || bShape.numDimensions() == Shape.UNKNOWN_SIZE; + + if ((!aIsSparse && !bIsSparse) && outputMayHaveNonEmptyBatchShape) { + // BatchMatmul does not support transpose, so we conjugate the matrix and + // use adjoint instead. Conj() is a noop for real matrices. + if (transposeA) { + a = Conj.create(scope, a); + adjointA = true; + } + if (transposeB) { + b = Conj.create(scope, b); + adjointB = true; + } + return BatchMatMul.create( + lscope, a, b, BatchMatMul.adjX(adjointA), BatchMatMul.adjY(adjointB)); + } + + // Neither matmul nor sparse_matmul support adjoint, so we conjugate + // the matrix and use transpose instead. Conj() is a noop for real + // matrices. 
+ if (adjointA) { + a = Conj.create(scope, a); + transposeA = true; + } + if (adjointB) { + b = Conj.create(scope, b); + transposeB = true; + } + + boolean useSparseMatmul = false; + if (aIsSparse || bIsSparse) { + useSparseMatmul = + (a.type().equals(TBfloat16.class) || a.type().equals(TFloat32.class)) + && (b.type().equals(TBfloat16.class) || b.type().equals(TFloat32.class)); + } + if ((a.type().equals(TBfloat16.class) || b.type().equals(TBfloat16.class)) + && !a.type().equals(b.type())) useSparseMatmul = true; + + if (useSparseMatmul) { + Operand result = + SparseMatMul.create( + lscope, + a, + b, + SparseMatMul.transposeA(transposeA), + SparseMatMul.transposeB(transposeB), + SparseMatMul.aIsSparse(aIsSparse), + SparseMatMul.bIsSparse(bIsSparse)); + if (a.type().equals(TFloat32.class)) return (Operand) result; + else return Cast.create(scope, result, a.type()); + } + + return org.tensorflow.op.linalg.MatMul.create( + lscope, + a, + b, + org.tensorflow.op.linalg.MatMul.transposeA(transposeA), + org.tensorflow.op.linalg.MatMul.transposeB(transposeB)); + } +} diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/LinalgOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/LinalgOpsTest.java new file mode 100644 index 00000000000..f2c297ce032 --- /dev/null +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/LinalgOpsTest.java @@ -0,0 +1,60 @@ +package org.tensorflow.framework.op; + +import org.junit.jupiter.api.Test; +import org.tensorflow.Operand; +import org.tensorflow.framework.utils.TestSession; +import org.tensorflow.op.Ops; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; + +class LinalgOpsTest { + private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; + + @Test + public void test2D() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand a = tf.constant(new float[][] {{3.7213619f}}); + Operand b = tf.constant(new float[][] {{8.153921f}}); + + Operand ans = fops.linalg.matmul(a, b); + Operand expected = tf.constant(new float[][] {{30.34369f}}); + session.evaluate(expected, ans); + + Operand a64 = + tf.constant(new double[][] {{-8.944851}, {4.1711287}, {-0.22380222}}); + Operand b64 = + tf.constant( + new double[][] {{-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}}); + + Operand ans64 = fops.linalg.matmul(a64, b64); + Operand expected64 = + tf.constant( + new double[][] { + {127.69746, 111.21564, 20.078575, 14.111271, -16.62731}, + {-59.547394, -51.861652, -9.362965, -6.580314, 7.753584}, + {3.1950197, 2.7826407, 0.50237054, 0.35306725, -0.4160191} + }); + session.evaluate(expected64, ans64); + + a64 = + tf.constant( + new double[][] { + {-9.189821, -1.588742, -8.684379}, + {-10.953391, -8.473055, -6.8909864}, + {-11.712155, -6.6350083, -2.4441578}, + {1.4037079, -11.279383, 0.9129576}, + {0.11368857, 2.3792067, -11.218701}, + }); + b64 = tf.constant(new double[][] {{-4.933953}, {-12.692161}, {-10.192119}}); + ans64 = fops.linalg.matmul(a64, b64); + expected64 = + tf.constant( + new double[][] {{154.01892}, {231.81863}, {166.91096}, {126.92895}, {83.58413}}); + session.setEpsilon(1e-4f); + session.evaluate(expected64, ans64); + } + } +} From e83d26b6cb7e4d44616efb1df249a310cabaebe2 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:47:53 -0400 Subject: [PATCH 14/60] add nn ops for 
sigmoidCrossEntropyWithLogits, softmaxCrossEntropyWithLogits and sparseSoftmaxCrossEntropyWithLogits --- .../annotations/org/tensorflow/op/NnOps.java | 13 ++-- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 57 +++++++++------ .../SparseSoftmaxCrossEntropyWithLogits.java | 62 +++++++++------- .../org/tensorflow/framework/op/NnOps.java | 15 ++-- .../op/nn/SigmoidCrossEntropyWithLogits.java | 3 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 70 +++++++++++-------- .../tensorflow/framework/op/NnOpsTest.java | 68 ++++++++++++++++++ 7 files changed, 192 insertions(+), 96 deletions(-) create mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/op/NnOpsTest.java diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 1cf8b910297..2bd4d13145f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -1811,14 +1811,14 @@ public Softmax softmax(Operand logits) { /** * Computes softmax cross entropy cost and gradients to backpropagate. - *

* Inputs are the logits, not probabilities. * - * @param data type for {@code loss()} output + * @param data type for {@code loss} output * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. + * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits */ public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyWithLogits( @@ -2011,18 +2011,17 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo /** * Computes softmax cross entropy cost and gradients to backpropagate. - *

- * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - *

- * Inputs are the logits, not probabilities. + *

Inputs are the logits, not probabilities. * - * @param data type for {@code loss()} output + * @param data type for {@code loss} output * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. + * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java index 5d3ab3c1100..d6eed5cbe28 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -29,57 +29,68 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. - *

* Inputs are the logits, not probabilities. - * - * @param data type for {@code loss()} output + * + * @param data type for {@code loss} output */ -@Operator(group = "nn") +@Operator( + group = "nn" +) public final class SoftmaxCrossEntropyWithLogits extends RawOp { - + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; + + private Output loss; + + private Output backprop; + + private SoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx++); + } + /** * Factory method to create a class wrapping a new SoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. + * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits */ - @Endpoint(describeByClass = true) - public static SoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { + @Endpoint( + describeByClass = true + ) + public static SoftmaxCrossEntropyWithLogits create(Scope scope, + Operand features, Operand labels) { OperationBuilder opBuilder = scope.env().opBuilder("SoftmaxCrossEntropyWithLogits", scope.makeOpName("SoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** + * Gets loss. * Per example loss (batch_size vector). + * @return loss. */ public Output loss() { return loss; } - + /** + * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). + * @return backprop. */ public Output backprop() { return backprop; } - - /** The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; - - private Output loss; - private Output backprop; - - private SoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx); - } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 794beab4ded..26498cdce7a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -29,61 +29,71 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. - *

- * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - *

- * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss()} output + *

Inputs are the logits, not probabilities. + * + * @param data type for {@code loss} output */ -@Operator(group = "nn") +@Operator( + group = "nn" +) public final class SparseSoftmaxCrossEntropyWithLogits extends RawOp { - + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; + + private Output loss; + + private Output backprop; + + private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx++); + } + /** * Factory method to create a class wrapping a new SparseSoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. + * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ - @Endpoint(describeByClass = true) - public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { + @Endpoint( + describeByClass = true + ) + public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, + Operand features, Operand labels) { OperationBuilder opBuilder = scope.env().opBuilder("SparseSoftmaxCrossEntropyWithLogits", scope.makeOpName("SparseSoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SparseSoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** + * Gets loss. * Per example loss (batch_size vector). + * @return loss. */ public Output loss() { return loss; } - + /** + * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). + * @return backprop. */ public Output backprop() { return backprop; } - - /** The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; - - private Output loss; - private Output backprop; - - private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx); - } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java index 0fea3743d95..4f5120a3dbf 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java @@ -87,7 +87,7 @@ public class NnOps { * @param logits the logits of type float32 or float64 * @param the type of labels and logits * @return the component-wise logistic losses. - * @throws IllegalArgumentException if logits' and labels' do not have the same shape + * @throws IllegalArgumentException if logits and labels do not have the same shape */ public Operand sigmoidCrossEntropyWithLogits( Operand labels, Operand logits) { @@ -139,7 +139,6 @@ public Operand sigmoidCrossEntropyWithLogits( * @return the softmax cross entropy loss. Its type is the same as {@code logits} and its shape is * the same as {@code labels} except that it does not have the last dimension of {@code * labels}. 
- * */ public Operand softmaxCrossEntropyWithLogits( Operand labels, Operand logits, int axis) { @@ -181,14 +180,14 @@ public Operand softmaxCrossEntropyWith * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, or {@code * TFloat64}. These activation energies are interpreted as unnormalized log probabilities. - * @param The data type for the labels - * @param The data type for the logits and loss + * @param the data type for the labels + * @param the data tyoe for the loss and logits. * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank - * of the labels is not equal to the rank of the logits minus one. + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if + * the rank of the labels is not equal to the rank of the logits minus one. */ - public Operand sparseSoftmaxCrossEntropyWithLogits( - Operand labels, Operand logits) { + public Operand sparseSoftmaxCrossEntropyWithLogits( + Operand labels, Operand logits) { return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits( scope, labels, logits); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index fc3f7739363..432e1b47a3f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -26,8 +26,7 @@ public class SigmoidCrossEntropyWithLogits { * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. * - *

For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in - * pseudo-code is + *

For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in pseudo-code is * *

    * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
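As a quick sanity check of the pseudo-code above, here is a minimal standalone sketch in plain Java
(the class and method names are hypothetical and not taken from this patch series; only
java.lang.Math is used) that evaluates the loss for a single logit/label pair:

    // Logistic loss from the javadoc above:
    //   z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
    public final class SigmoidLossSketch {
      static double sigmoid(double x) {
        return 1.0 / (1.0 + Math.exp(-x));
      }

      static double loss(double x, double z) {
        // x is the logit, z is the label in [0, 1]
        return z * -Math.log(sigmoid(x)) + (1 - z) * -Math.log(1 - sigmoid(x));
      }

      public static void main(String[] args) {
        // For x = 2, z = 1 this prints ~0.126928, matching the expected values used later
        // in NnOpsTest.testSigmoidCrossEntropyWithLogits.
        System.out.println(loss(2, 1));
      }
    }

(The direct form above mirrors the pseudo-code for clarity; implementations typically rewrite it
into a numerically stable equivalent before evaluating it on tensors.)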
diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java
index 0b2d29d6092..553adf90aad 100644
--- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java
+++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java
@@ -14,7 +14,11 @@
 import org.tensorflow.types.TBfloat16;
 import org.tensorflow.types.TFloat16;
 import org.tensorflow.types.TFloat32;
+import org.tensorflow.types.TFloat64;
 import org.tensorflow.types.TInt32;
+import org.tensorflow.types.TInt64;
+import org.tensorflow.types.family.TFloating;
+import org.tensorflow.types.family.TIntegral;
 import org.tensorflow.types.family.TNumber;
 
 import java.util.ArrayList;
@@ -34,39 +38,37 @@ public class SparseSoftmaxCrossEntropyWithLogits {
    * 

NOTE: * *

For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the {@code labels} vector must provide a single specific - * index for the true class for each row of {@code logits} (each minibatch entry). For soft - * softmax classification with a probability distribution for each entry, {@link + * classes are not allowed, and the {@code labels} vector must provide a single specific index for + * the true class for each row of {@code logits} (each minibatch entry). For soft softmax + * classification with a probability distribution for each entry, {@link * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. * *

WARNING: * - *

This op expects unscaled logits, since it performs a {@code softmax} on {@code logits - * } internally for efficiency. Do not call this op with the output of {@code softmax}, - * as it will produce incorrect results. + *

This op expects unscaled logits, since it performs a {@code softmax} on {@code logits } + * internally for efficiency. Do not call this op with the output of {@code softmax}, as it will + * produce incorrect results. * - *

A common use case is to have logits of shape {@code [batchSize, numClasses]} and have - * labels of shape {@code [batchSize]}, but higher dimensions are supported, in which case - * the {@code dim}-th dimension is assumed to be of size {@code numClasses}. {@code - * logits} must have the {@code dataType} of {@code TFloat16}, {@code TFloat32} - * , or {@code TFloat64}, and {@code labels} must have the dtype of {@code TInt32} - * or {@code TInt64}. + *

A common use case is to have logits of shape {@code [batchSize, numClasses]} and have labels + * of shape {@code [batchSize]}, but higher dimensions are supported, in which case the {@code + * dim}-th dimension is assumed to be of size {@code numClasses}. {@code logits} must have the + * {@code dataType} of {@code TFloat16}, {@code TFloat32} , or {@code TFloat64}, and {@code + * labels} must have the dtype of {@code TInt32} or {@code TInt64}. * * @param scope current scope - * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r - * } is rank of {@code labels} and result) and the dataType is {@code TInt32} - * or {@code TInt64}. Each entry in {@code labels} must be an index in {@code [0, - * numClasses)}. Other values will raise an exception when this op is run on CPU, and - * return {@code NaN} for corresponding loss and gradient rows on GPU. + * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r } is + * rank of {@code labels} and result) and the dataType is {@code TInt32} or {@code TInt64}. + * Each entry in {@code labels} must be an index in {@code [0, numClasses)}. Other values will + * raise an exception when this op is run on CPU, and return {@code NaN} for corresponding + * loss and gradient rows on GPU. * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., - * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, - * or {@code TFloat64}. These activation energies are interpreted as unnormalized log - * probabilities. + * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, or {@code + * TFloat64}. These activation energies are interpreted as unnormalized log probabilities. * @param the data type for the labels * @param the data tyoe for the loss and logits. * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank - * of the labels is not equal to the rank of the logits minus one. + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if + * the rank of the labels is not equal to the rank of the logits minus one. 
*/ @SuppressWarnings("unchecked") @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits") @@ -74,15 +76,23 @@ public class SparseSoftmaxCrossEntropyWithLogits { Operand sparseSoftmaxCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits) { scope = scope.withSubScope("SparseSoftmaxCrossEntropyWithLogits"); - Operand preciseLogits; + Operand preciseLogits; if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { preciseLogits = Cast.create(scope, logits, TFloat32.class); + } else if (TFloating.class.isAssignableFrom(logits.type())) { + preciseLogits = (Operand) logits; } else { - preciseLogits = logits; + preciseLogits = Cast.create(scope, logits, TFloat64.class); } - Shape labelsStaticShape = labels.shape(); + Operand iLabels; + if (TIntegral.class.isAssignableFrom(labels.type())) { + iLabels = (Operand) labels; + } else { + iLabels = Cast.create(scope, labels, TInt64.class); + } + Shape labelsStaticShape = iLabels.shape(); org.tensorflow.op.core.Shape labelsShape = - org.tensorflow.op.core.Shape.create(scope, labels); + org.tensorflow.op.core.Shape.create(scope, iLabels); Shape logitsShape = logits.shape(); Shape logitsShortened = logitsShape.take(logitsShape.numDimensions() - 1); @@ -113,7 +123,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( if (logitsShape.numDimensions() == 2) { org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( - scope, preciseLogits, labels); + scope, preciseLogits, iLabels); Operand cost = smax.loss(); if (cost.type() != logits.type()) { return Cast.create(scope, cost, logits.type()); @@ -131,7 +141,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( scope, Equal.create( scope, - org.tensorflow.op.core.Shape.create(scope, labels), + org.tensorflow.op.core.Shape.create(scope, iLabels), Shapes.take( scope, org.tensorflow.op.core.Shape.create(scope, logits), @@ -148,12 +158,12 @@ Operand sparseSoftmaxCrossEntropyWithLogits( long numClassses = logitsShape.size(-1); preciseLogits = Reshape.create(scope, preciseLogits, Constant.arrayOf(scope, -1L, numClassses)); - labels = Reshape.create(scope, labels, Constant.scalarOf(scope, -1)); + iLabels = Reshape.create(scope, iLabels, Constant.scalarOf(scope, -1)); scope.withControlDependencies(shapeChecks); // call raw op org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( - scope, preciseLogits, labels); + scope, preciseLogits, iLabels); Operand cost = smax.loss(); cost = Reshape.create(scope, cost, labelsShape); if (cost.type() != logits.type()) { diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/NnOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/NnOpsTest.java new file mode 100644 index 00000000000..0436fdd57cf --- /dev/null +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/NnOpsTest.java @@ -0,0 +1,68 @@ +package org.tensorflow.framework.op; + +import org.junit.jupiter.api.Test; +import org.tensorflow.Operand; +import org.tensorflow.framework.utils.TestSession; +import org.tensorflow.op.Ops; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +class NnOpsTest { + private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; + + @Test + public void testSigmoidCrossEntropyWithLogits() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { 
+ Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + float[] x = new float[] {-100, -2, -2, 0, 2, 2, 2, 100}; + float[] y = new float[] {0, 0, 1, 0, 0, 1, 0.5f, 1}; + + Operand logits = tf.constant(x); + Operand targets = tf.constant(y); + Operand loss = fops.nn.sigmoidCrossEntropyWithLogits(targets, logits); + Operand expected = + tf.constant( + new float[] { + 0.f, 0.126928f, 2.126928f, 0.6931472f, + 2.126928f, 0.126928f, 1.126928f, 0.f + }); + session.evaluate(expected, loss); + } + } + + @Test + public void testSoftmaxCrossEntropyWithLogits() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + float[] x = new float[] {-100, -2, -2, 0, 2, 2, 2, 100}; + float[] y = new float[] {0, 0, 1, 0, 0, 1, 0.5f, 1}; + + Operand logits = tf.constant(x); + Operand targets = tf.constant(y); + Operand loss = fops.nn.softmaxCrossEntropyWithLogits(targets, logits, 0); + + session.evaluate(249.0f, loss); + } + } + + @Test + public void testSparseSoftmaxCrossEntropyWithLogits() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + float[][] x = new float[][] {{0, 0}}; + int[] y = new int[] {0}; + + Operand logits = tf.constant(x); + Operand labels = tf.constant(y); + Operand loss = fops.nn.sparseSoftmaxCrossEntropyWithLogits(labels, logits); + + session.evaluate(0.69314718f, loss); + } + } +} From e4e65f2a09ffb980cf0c6881f90369f9d87633f5 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:48:53 -0400 Subject: [PATCH 15/60] Moved SetOps to FrameworkOps --- .../org/tensorflow/framework/op/{SetsOps.java => SetOps.java} | 4 ++-- .../tensorflow/framework/{metrics/impl => op}/SetOpsTest.java | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) rename tensorflow-framework/src/main/java/org/tensorflow/framework/op/{SetsOps.java => SetOps.java} (98%) rename tensorflow-framework/src/test/java/org/tensorflow/framework/{metrics/impl => op}/SetOpsTest.java (97%) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetOps.java similarity index 98% rename from tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetOps.java index d7833cdbb06..f76947018b5 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetOps.java @@ -24,7 +24,7 @@ import org.tensorflow.types.family.TNumber; /** Implementation of set operations */ -public class SetsOps { +public class SetOps { private final Scope scope; @@ -35,7 +35,7 @@ public class SetsOps { * * @param frameworkOps the TensorFLow framework Ops */ - SetsOps(FrameworkOps frameworkOps) { + SetOps(FrameworkOps frameworkOps) { this.scope = frameworkOps.scope(); this.frameworkOps = frameworkOps; } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/SetOpsTest.java similarity index 97% rename from tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java rename to 
tensorflow-framework/src/test/java/org/tensorflow/framework/op/SetOpsTest.java index e10f016bd94..7dee866abf2 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/SetOpsTest.java @@ -1,9 +1,7 @@ -package org.tensorflow.framework.metrics.impl; +package org.tensorflow.framework.op; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; -import org.tensorflow.framework.op.FrameworkOps; -import org.tensorflow.framework.op.SetsOps; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; From a2ed723aa7fa040e58439e4e17b1799747a68001 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:51:05 -0400 Subject: [PATCH 16/60] Added tensordot and reduceLogSumExp --- .../org/tensorflow/framework/op/MathOps.java | 796 +++++++++++++++++- .../tensorflow/framework/op/MathOpsTest.java | 90 +- 2 files changed, 874 insertions(+), 12 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java index 36f5b692cab..4c2210feb9c 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -14,37 +14,59 @@ =======================================================================*/ package org.tensorflow.framework.op; +import org.tensorflow.Graph; import org.tensorflow.Operand; +import org.tensorflow.Session; import org.tensorflow.framework.losses.impl.LossTuple; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.core.AssertThat; +import org.tensorflow.op.core.Concat; import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Gather; import org.tensorflow.op.core.Identity; import org.tensorflow.op.core.OnesLike; import org.tensorflow.op.core.Range; import org.tensorflow.op.core.Rank; import org.tensorflow.op.core.ReduceAll; import org.tensorflow.op.core.ReduceMax; +import org.tensorflow.op.core.ReduceProd; import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.core.Reshape; import org.tensorflow.op.core.ScatterNd; +import org.tensorflow.op.core.Select; +import org.tensorflow.op.core.SetDiff1d; +import org.tensorflow.op.core.Slice; import org.tensorflow.op.core.Squeeze; import org.tensorflow.op.core.Stack; -import org.tensorflow.op.core.Zeros; +import org.tensorflow.op.core.StopGradient; +import org.tensorflow.op.core.ZerosLike; import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.linalg.Transpose; import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.Exp; import org.tensorflow.op.math.GreaterEqual; +import org.tensorflow.op.math.IsFinite; import org.tensorflow.op.math.Less; +import org.tensorflow.op.math.Log; import org.tensorflow.op.math.Maximum; import org.tensorflow.op.math.Mul; import org.tensorflow.op.math.Rsqrt; import org.tensorflow.op.math.Square; +import org.tensorflow.op.math.Sub; +import org.tensorflow.types.TBfloat16; +import org.tensorflow.types.TFloat16; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TFloating; import org.tensorflow.types.family.TNumber; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.List; +import 
java.util.stream.Collectors; public class MathOps { private final Scope scope; @@ -123,7 +145,7 @@ public Operand l2Normalize(Operand x, int[] axis) { * predictions}. */ public Operand confusionMatrix(Operand labels, Operand predictions) { - return confusionMatrix(labels, predictions, null, null, labels.type()); + return confusionMatrix(labels, predictions, null, null); } /** @@ -167,7 +189,7 @@ public Operand confusionMatrix(Operand labels, Operand */ public Operand confusionMatrix( Operand labels, Operand predictions, Operand weights) { - return confusionMatrix(labels, predictions, weights, null, labels.type()); + return confusionMatrix(labels, predictions, weights, null); } /** @@ -204,7 +226,6 @@ public Operand confusionMatrix( * @param weights An optional Operand whose shape matches {@code predictions}. * @param numClasses The possible number of labels the classification task can have. If this value * is null, it will be calculated using both predictions and labels. - * @param type Data type of the confusion matrix. * @param Data type of the confusion matrix. * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion * matrix, where {@code n} is the number of possible labels in the classification task. @@ -213,11 +234,7 @@ public Operand confusionMatrix( * predictions}. */ public Operand confusionMatrix( - Operand labels, - Operand predictions, - Operand weights, - Operand numClasses, - Class type) { + Operand labels, Operand predictions, Operand weights, Operand numClasses) { Scope lScope = scope.withSubScope("confusionMatrix"); LossTuple tuple = removeSqueezableDimensions(labels, predictions, 0); Operand lLabels = Cast.create(lScope, tuple.getLabels(), TInt64.class); @@ -293,7 +310,8 @@ public Operand confusionMatrix( Operand indices = Stack.create(lScope, Arrays.asList(lLabels, lPredictions), Stack.axis(1L)); Operand values = weights == null ? OnesLike.create(lScope, predictions) : weights; - Operand zeroMatrix = Zeros.create(lScope, Cast.create(lScope, shape, TInt32.class), type); + /// Operand zeroMatrix = Zeros.create(lScope, Cast.create(lScope, shape, TInt32.class), + // type); return ScatterNd.create(lScope, indices, values, shape); } @@ -317,7 +335,7 @@ public LossTuple removeSqueezableDimensions( int labelsRank = labelsShape.numDimensions(); if (predictionsRank != Shape.UNKNOWN_SIZE || labelsRank != Shape.UNKNOWN_SIZE) { - // Use static rank. + // Use rank. int rankDiff = predictionsRank - labelsRank; if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.size(-1), 1)) { predictions = Squeeze.create(lScope, predictions); @@ -350,6 +368,13 @@ public LossTuple removeSqueezableDimensions( return new LossTuple<>(labels, predictions); } + /** + * Creates an Operand that has all axes contained in the Operand's shape. + * + * @param op the Operand + * @param THe Data type for the Operand + * @return an Operand that has all axes contained in the Operand's shape.. + */ public Operand allAxes(Operand op) { int rank = op.shape().numDimensions(); if (rank != Shape.UNKNOWN_SIZE) { @@ -363,4 +388,753 @@ public Operand allAxes(Operand op) { scope, Constant.scalarOf(scope, 0), Rank.create(scope, op), Constant.scalarOf(scope, 1)); } } + + /** + * Transpose and reshape the input for contraction op. + * + *

This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul` using + * `array_ops.transpose` and `array_ops.reshape`. The method takes a tensor and performs the + * correct transpose and reshape operation for a given set of indices. It returns the reshaped + * tensor as well as a list of indices necessary to reshape the tensor again after matrix + * multiplication. + * + * @param the type of Operand + * @param a the Tensor + * @param axis unique indices specifying valid axes of `a`. + * @param flipped whether to flip the dimensions or not + * @return A tuple (reshapedA, freeDims, freeDimsStatic) where reshapedA is a reshaped to allow + * contraction via matmul, freeDims` is a TInt32 Operand, depending on whether the shape of a + * is fully specified, and freeDimsStatic is either a list of integers and null values, or + * None, representing the inferred shape of the free dimensions + */ + private Object[] tensordotReshape( + Operand a, Operand axis, boolean flipped) { + Shape aShape = a.shape(); + + if (!aShape.hasUnknownDimension()) { // calculate using values + long[] aShapeDims = aShape.asArray(); + if (aShapeDims == null) aShapeDims = new long[0]; + long[] aDimsIndex = new long[aShapeDims.length]; + for (int i = 0; i < aDimsIndex.length; i++) aDimsIndex[i] = i; + + // get int array from axis Operand + int[] iAxes = getIntArray(axis); + // Convert negative axes to positive + for (int i = 0; i < iAxes.length; i++) + iAxes[i] = iAxes[i] >= 0 ? iAxes[i] : Math.floorMod(iAxes[i], iAxes.length); + + // convert integer axis to long axis + long[] lAxes = Arrays.stream(iAxes).mapToLong(i -> i).toArray(); + + // create list of the axes, dims, and free axes + List axesList = Arrays.stream(lAxes).boxed().collect(Collectors.toList()); + List freeList = Arrays.stream(aDimsIndex).boxed().collect(Collectors.toList()); + freeList.removeAll(axesList); + + // create array of free dims + long[] free = freeList.stream().mapToLong(i -> i).toArray(); + long[] freeDims = new long[free.length]; + for (int i = 0; i < free.length; i++) freeDims[i] = aShapeDims[(int) free[i]]; + + // Calculate the free dim by doing a reduce prod + long prodFree = 1; + for (long i : freeDims) { + prodFree *= i; + } + + // calculate the used dims by doing a reduce prod + long prodAxis = 1; + for (long i : lAxes) { + prodAxis *= aShapeDims[(int) i]; + } + + // setup the permutations array for the transpose + long[] perm = new long[freeDims.length + lAxes.length]; + Shape newShape; + if (flipped) { + System.arraycopy(lAxes, 0, perm, 0, lAxes.length); + System.arraycopy(free, 0, perm, lAxes.length, free.length); + newShape = Shape.of(prodAxis, prodFree); + } else { + System.arraycopy(free, 0, perm, 0, free.length); + System.arraycopy(lAxes, 0, perm, freeDims.length, lAxes.length); + newShape = Shape.of(prodFree, prodAxis); + } + + Operand aTrans; + long[] arrange = new long[lAxes.length]; + for (int i = 0; i < arrange.length; i++) arrange[i] = i; + + // if the permutations is not equals to the natural order of the dims, then do a transpose + if (!Arrays.equals(perm, arrange)) { + aTrans = Transpose.create(scope, a, Constant.vectorOf(scope, perm)); + } else { + aTrans = a; + } + + // reshape the final result to the new Shape, if necessary + Operand aReshaped = + aTrans.asOutput().shape().equals(newShape) + ? 
aTrans + : Reshape.create(scope, aTrans, Constant.vectorOf(scope, newShape.asArray())); + // return a tuple for the reshaped Operand, and Operand for the free dimensions, and a long + // array for the free dimensions + return new Object[] {aReshaped, Constant.vectorOf(scope, freeDims), freeDims}; + + } else { // calculate dynamically + + long[] freeDimsStatic = null; + Operand one = Constant.scalarOf(scope, 1); + Operand minusOne = Constant.scalarOf(scope, -1); + Operand zero = Constant.scalarOf(scope, 0); + org.tensorflow.op.core.Shape tShape = org.tensorflow.op.core.Shape.create(scope, a); + Operand axesT; + Operand freeT; + if (aShape.numDimensions() + != Shape.UNKNOWN_SIZE) { // we know the rank, but there are unknown dimensions + long[] aShapeDims = aShape.asArray(); + if (aShapeDims == null) aShapeDims = new long[0]; + + // get int array from axis Operand + int[] iAxes = getIntArray(axis); + // Convert negative axes to positive + for (int i = 0; i < iAxes.length; i++) + iAxes[i] = iAxes[i] >= 0 ? iAxes[i] : Math.floorMod(iAxes[i], iAxes.length); + + // convert integer axis to long axis + long[] lAxes = Arrays.stream(iAxes).mapToLong(i -> i).toArray(); + + // create list of the axes, dims, and free axes + List axesList = Arrays.stream(lAxes).boxed().collect(Collectors.toList()); + List dimsList = Arrays.stream(aShapeDims).boxed().collect(Collectors.toList()); + List freeList = new ArrayList<>(axesList); + freeList.removeAll(dimsList); + + // create array of free dims + long[] freeDims = freeList.stream().mapToLong(i -> i).toArray(); + freeDimsStatic = freeDims; + + axesT = Constant.vectorOf(scope, iAxes); + freeT = Cast.create(scope, Constant.vectorOf(scope, freeDims), TInt32.class); + + } else { // we don't know the rank yet + Rank rank = Rank.create(scope, a); + + // convert axis to positive + axesT = + Select.create( + scope, + GreaterEqual.create(scope, axis, Constant.scalarOf(scope, 0)), + axis, + Add.create(scope, axis, rank)); + + SetDiff1d diff = + SetDiff1d.create( + scope, Range.create(scope, Constant.scalarOf(scope, 0), rank, one), axesT); + freeT = diff.out(); + } + Operand freeDims = Gather.create(scope, tShape, freeT, zero); + Operand axesDims = Gather.create(scope, tShape, axesT, zero); + Operand prodFreeDims = ReduceProd.create(scope, freeDims, minusOne); + Operand prodAxesDims = ReduceProd.create(scope, axesDims, minusOne); + Operand perm; + Operand newShape; + if (flipped) { + perm = Concat.create(scope, Arrays.asList(axesT, freeT), zero); + newShape = Stack.create(scope, Arrays.asList(prodAxesDims, prodFreeDims)); + } else { + perm = Concat.create(scope, Arrays.asList(freeT, axesT), zero); + newShape = Stack.create(scope, Arrays.asList(prodFreeDims, prodAxesDims)); + } + Operand aReshaped = Reshape.create(scope, Transpose.create(scope, a, perm), newShape); + return new Object[] {aReshaped, freeDims, freeDimsStatic}; + } + } + + /** + * Gets an int array from an Operand<TInt32> operand. 
+ * + * @param axes the Operand to fetch the values + * @return the int array from an Operand<TInt32> + */ + private int[] getIntArray(Operand axes) { + List result = new ArrayList<>(); + if (scope.env().isEager()) { + axes.asTensor().scalars().forEach(s -> result.add(s.getInt())); + } else { + try (Session session = new Session((Graph) scope.env()); + TInt32 tensor = (TInt32) session.runner().fetch(axes).run().get(0)) { + tensor.scalars().forEach(s -> result.add(s.getInt())); + } + } + return result.stream().mapToInt(i -> i).toArray(); + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. + * + * @param a the Operand to analyze + * @param axis the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings("unchecked") + private Operand[] tensordotAxes(Operand a, int axis) { + Shape aShape = a.asOutput().shape(); + if (axis < 0) { + throw new IllegalArgumentException("'axis' must be at least 0."); + } + int rank = aShape.numDimensions(); + Operand[] result = new Operand[2]; + if (rank != Shape.UNKNOWN_SIZE) { + if (axis > rank) { + throw new IllegalArgumentException( + String.format( + "'axis' must not be larger than the number of dimensions of tensor %s.", rank)); + } + int min = rank - axis; + int postRange = rank - min; + int[] postAxis = new int[postRange]; + for (int i = 0; i < postRange; i++) postAxis[i] = i + min; + + int[] preAxis = new int[axis]; + for (int i = 0; i < axis; i++) preAxis[i] = i; + + result[0] = Constant.vectorOf(scope, postAxis); + result[1] = Constant.vectorOf(scope, preAxis); + } else { + Rank rankT = Rank.create(scope, a); + Constant axisT = Constant.scalarOf(scope, axis); + Constant one = Constant.scalarOf(scope, 1); + Constant zero = Constant.scalarOf(scope, 0); + AssertThat assertion = + AssertThat.create( + scope, + Less.create(scope, axisT, rankT), + Arrays.asList( + Constant.scalarOf( + scope, "'axes' must not be larger than the number of dimensions of tensor "), + rankT)); + Scope scope1 = scope.withControlDependencies(Collections.singletonList(assertion)); + result[0] = Range.create(scope1, Sub.create(scope, rankT, axisT), rankT, one); + result[1] = Range.create(scope1, zero, axisT, one); + } + return result; + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. + * + * @param a the Operand to analyze + * @param axes the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings({"unchecked", "unused"}) + private Operand[] tensordotAxes(Operand a, int[] axes) { + if (axes.length != 2) + throw new IllegalArgumentException( + "'axes' must have length 1 or 2, provided with " + axes.length); + int[] aAxis = new int[] {axes[0]}; + int[] bAxis = new int[] {axes[1]}; + Operand[] result = new Operand[2]; + result[0] = Constant.vectorOf(scope, aAxis); + result[1] = Constant.vectorOf(scope, bAxis); + + return result; + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. 
+ * + * @param a the Operand to analyze + * @param axes the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings({"unchecked", "unused"}) + private Operand[] tensordotAxes(Operand a, int[][] axes) { + if (axes.length != 2) + throw new IllegalArgumentException( + "'axes' must have length 1 or 2, provided with " + axes.length); + int[] aAxis = axes[0]; + int[] bAxis = axes[1]; + if (aAxis.length != bAxis.length) + throw new IllegalArgumentException( + String.format( + "Different number of contraction axes 'a' and 'b', %d != %d", + aAxis.length, bAxis.length)); + Operand[] result = new Operand[2]; + result[0] = Constant.vectorOf(scope, aAxis); + result[1] = Constant.vectorOf(scope, bAxis); + return result; + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. + * + * @param a the Operand to analyze + * @param axes the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings({"unchecked", "unused"}) + private Operand[] tensordotAxes(Operand a, Operand axes) { + + Constant one = Constant.scalarOf(scope, 1); + Constant zero = Constant.scalarOf(scope, 0); + Operand[] result = new Operand[2]; + result[0] = + Slice.create( + scope, + axes, + Cast.create(scope, zero, TInt32.class), + Cast.create(scope, one, TInt32.class)); + result[1] = + Slice.create( + scope, + axes, + Cast.create(scope, one, TInt32.class), + Cast.create(scope, one, TInt32.class)); + return result; + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + *

+ * Tensordot (also known as tensor contraction) sums the product of elements + * from a and b` over the indices specified by + * a_axes and b_axes. The lists + * a_axes and b_axes specify those pairs of axes + * along which to contract the tensors. The axis a_axes[i] of + * a must have the same dimension as axis + * b_axes[i] of b for all i in + * range(0, len(a_axes)). The lists + * a_axes and b_axes must have identical length + * and consist of unique integers that specify valid axes for each of the + * tensors. Additionally outer product is supported by passing + * axes=0. + *

+ * This operation corresponds to numpy.tensordot(a, b, axes). + *

+ * Example 1: When a and b are matrices (order 2), + * the case axes = 1 is equivalent to matrix multiplication. + *

+ * Example 2: When a and b are matrices (order 2), + * the case + * axes = [[1], [0]] is equivalent to matrix multiplication. + *

+ * Example 3: When a and b are matrices (order 2), + * the case axes=0 gives the outer product, a tensor of order + * 4. + *

+ * Example 4: Suppose that a_{ijk} and b_{lmn} + * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * c_{jklm} whose entry corresponding to the indices + * (j,k,l,m) is given by: + *

+ * c_{jklm} = Σ_i a_{ijk} b_{lmi}. + *

+ * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + *
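A brief usage sketch may help here (hedged: the tf, fops, a and b names are illustrative only, and the call path assumes the FrameworkOps facade exercised by the tests later in this patch, i.e. FrameworkOps.create(tf).math). For rank-2 operands the scalar-axis form behaves like a matrix product, and axis 0 gives the outer product:

    // Minimal sketch, assuming an eager environment and the fops.math API added by this patch.
    Ops tf = Ops.create();
    FrameworkOps fops = FrameworkOps.create(tf);

    Operand<TFloat32> a = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
    Operand<TFloat32> b = tf.constant(new float[][] {{5f, 6f}, {7f, 8f}});

    // axis = 1 contracts the last axis of a with the first axis of b,
    // i.e. an ordinary matrix product: [[19, 22], [43, 50]].
    Operand<TFloat32> matmulLike = fops.math.tensordot(a, b, 1);

    // axis = 0 performs no contraction and yields the outer product, shape (2, 2, 2, 2).
    Operand<TFloat32> outer = fops.math.tensordot(a, b, 0);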

+ * + * @param a `Operand` of type `float32` or `float64`. + * @param b `Operand` with the same type as `a`. + * @param axis sum over the last N axes of a and the + * first N axes of b in order. If `axes=0`, computes the outer + * product between `a` and `b`. + * @param the datatype of the Operands, must be either TFloat32 or + * TFloat64 + * @return A `Operand` with the same type as `a`. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + */ + @Endpoint(name = "tensordot") + public Operand tensordot(Operand a, Operand b, int axis) { + + Operand[] abAxis = tensordotAxes(a, axis); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + return tensordot(a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + *

+ * Tensordot (also known as tensor contraction) sums the product of elements + * from a and b` over the indices specified by + * a_axes and b_axes. The lists + * a_axes and b_axes specify those pairs of axes + * along which to contract the tensors. The axis a_axes[i] of + * a must have the same dimension as axis + * b_axes[i] of b for all i in + * range(0, len(a_axes)). The lists + * a_axes and b_axes must have identical length + * and consist of unique integers that specify valid axes for each of the + * tensors. Additionally outer product is supported by passing + * axes=0. + *

+ * This operation corresponds to numpy.tensordot(a, b, axes). + *

+ * Example 1: When a and b are matrices (order 2), + * the case axes = 1 is equivalent to matrix multiplication. + *

+ * Example 2: When a and b are matrices (order 2), + * the case + * axes = [[1], [0]] is equivalent to matrix multiplication. + *

+ * Example 3: When a and b are matrices (order 2), + * the case axes=0 gives the outer product, a tensor of order + * 4. + *

+ * Example 4: Suppose that a_{ijk} and b_{lmn} + * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * c_{jklm} whose entry corresponding to the indices + * (j,k,l,m) is given by: + *

+ * c_{jklm} = Σ_i a_{ijk} b_{lmi}. + *

+ * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + *

+ * + * @param a `Operand` of type `float32` or `float64`. + * @param b `Operand` with the same type as `a`. + * @param axes If axes is a scalar, sum over the last N axes of a and the + * first N axes of b in order. If axes is a list, the first and second row + * contain the set of unique integers specifying axes along which the + * contraction is computed, for `a` and `b`, respectively. The number of + * axes for `a` and `b` must be equal. If `axes=0`, computes the outer + * product between `a` and `b`. + * @param the datatype of the Operands, must be either TFloat32 or + * TFloat64 + * @return A `Operand` with the same type as `a`. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + */ + @Endpoint(name = "tensordot") + public Operand tensordot( + Operand a, Operand b, Operand axes) { + + Operand[] abAxis = tensordotAxes(a, axes); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + + return tensordot(a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + *

+ * Tensordot (also known as tensor contraction) sums the product of elements + * from a and b` over the indices specified by + * a_axes and b_axes. The lists + * a_axes and b_axes specify those pairs of axes + * along which to contract the tensors. The axis a_axes[i] of + * a must have the same dimension as axis + * b_axes[i] of b for all i in + * range(0, len(a_axes)). The lists + * a_axes and b_axes must have identical length + * and consist of unique integers that specify valid axes for each of the + * tensors. Additionally outer product is supported by passing + * axes=0. + *

+ * This operation corresponds to numpy.tensordot(a, b, axes). + *

+ * Example 1: When a and b are matrices (order 2), + * the case axes = 1 is equivalent to matrix multiplication. + *

+ * Example 2: When a and b are matrices (order 2), + * the case + * axes = [[1], [0]] is equivalent to matrix multiplication. + *

+ * Example 3: When a and b are matrices (order 2), + * the case axes=0 gives the outer product, a tensor of order + * 4. + *

+ * Example 4: Suppose that a_{ijk} and b_{lmn} + * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * c_{jklm} whose entry corresponding to the indices + * (j,k,l,m) is given by: + *

+ * c_{jklm} = Σ_i a_{ijk} b_{lmi}. + *

+ * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + *

+ * + * @param a `Operand` of type `float32` or `float64`. + * @param b `Operand` with the same type as `a`. + * @param axes the first and second row + * contain the set of unique integers specifying axes along which the + * contraction is computed, for `a` and `b`, respectively. The number of + * axes for `a` and `b` must be equal. I + * @param the datatype of the Operands, must be either TFloat32 or + * TFloat64 + * @return A `Operand` with the same type as `a`. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + */ + @Endpoint(name = "tensordot") + public Operand tensordot(Operand a, Operand b, int[] axes) { + + Operand[] abAxis = tensordotAxes(a, axes); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + + return tensordot(a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + *

+ * Tensordot (also known as tensor contraction) sums the product of elements + * from a and b` over the indices specified by + * a_axes and b_axes. The lists + * a_axes and b_axes specify those pairs of axes + * along which to contract the tensors. The axis a_axes[i] of + * a must have the same dimension as axis + * b_axes[i] of b for all i in + * range(0, len(a_axes)). The lists + * a_axes and b_axes must have identical length + * and consist of unique integers that specify valid axes for each of the + * tensors. Additionally outer product is supported by passing + * axes=0. + *

+ * This operation corresponds to numpy.tensordot(a, b, axes). + *

+ * Example 1: When a and b are matrices (order 2), + * the case axes = 1 is equivalent to matrix multiplication. + *

+ * Example 2: When a and b are matrices (order 2), + * the case + * axes = [[1], [0]] is equivalent to matrix multiplication. + *

+ * Example 3: When a and b are matrices (order 2), + * the case axes=0 gives the outer product, a tensor of order + * 4. + *

+ * Example 4: Suppose that a_{ijk} and b_{lmn} + * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * c_{jklm} whose entry corresponding to the indices + * (j,k,l,m) is given by: + *

+ * c_{jklm} = Σ_i a_{ijk} b_{lmi}. + *

+ * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + *
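Continuing the sketch above (same assumed tf, fops, a and b names), the pairwise form lists the contraction axes for a and b row by row; for matrices, {{1}, {0}} reproduces the ordinary matrix product:

    // Hedged sketch: row 0 holds the contraction axes of a, row 1 those of b.
    int[][] pairedAxes = new int[][] {{1}, {0}};
    Operand<TFloat32> product = fops.math.tensordot(a, b, pairedAxes); // same result as tensordot(a, b, 1)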

+ * + * @param a `Operand` of type `float32` or `float64`. + * @param b `Operand` with the same type as `a`. + * @param axes the first and second row + * contain the set of unique integers specifying axes along which the + * contraction is computed, for `a` and `b`, respectively. The number of + * axes for `a` and `b` must be equal. I + * @param the datatype of the Operands, must be either TFloat32 or + * TFloat64 + * @return A `Operand` with the same type as `a`. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + */ + @Endpoint(name = "tensordot") + public Operand tensordot(Operand a, Operand b, int[][] axes) { + + Operand[] abAxis = tensordotAxes(a, axes); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + + return tensordot(a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + *

+ * Tensordot (also known as tensor contraction) sums the product of elements + * from a and b` over the indices specified by + * a_axes and b_axes. The lists + * a_axes and b_axes specify those pairs of axes + * along which to contract the tensors. The axis a_axes[i] of + * a must have the same dimension as axis + * b_axes[i] of b for all i in + * range(0, len(a_axes)). The lists + * a_axes and b_axes must have identical length + * and consist of unique integers that specify valid axes for each of the + * tensors. Additionally outer product is supported by passing + * axes=0. + *

+ * This operation corresponds to numpy.tensordot(a, b, axes). + *

+ * Example 1: When a and b are matrices (order 2), + * the case axes = 1 is equivalent to matrix multiplication. + *

+ * Example 2: When a and b are matrices (order 2), + * the case + * axes = [[1], [0]] is equivalent to matrix multiplication. + *

+ * Example 3: When a and b are matrices (order 2), + * the case axes=0 gives the outer product, a tensor of order + * 4. + *

+ * Example 4: Suppose that a_{ijk} and b_{lmn} + * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * c_{jklm} whose entry corresponding to the indices + * (j,k,l,m) is given by: + *

+ * c_{jklm} = Σ_i a_{ijk} b_{lmi}. + *

+ * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + *

+ * + * @param a `Operand` of type `float32` or `float64`. + * @param b `Operand` with the same type as `a`. + * @param aAxis axes for the a Operand + * @param bAxis axes for the b Operand + * @param the datatype of the Operands, must be either TFloat32 or + * TFloat64 + * @return A `Operand` with the same type as `a`. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + */ + @SuppressWarnings({"unchecked", "unused"}) + @Endpoint(name = "tensordot") + public Operand tensordot( + Operand a, Operand b, Operand aAxis, Operand bAxis) { + + if (a.type().equals(TBfloat16.class) || a.type().equals(TFloat16.class)) { + throw new IllegalArgumentException( + String.format( + "Operand 'a' must be either TFloat32 or TFloat64 DataType, 'a' is a %s DataType", + a.type().getSimpleName())); + } + if (!a.type().equals(b.type())) { + throw new IllegalArgumentException( + String.format( + "Operands a and b must be the same data type, a is %s DataType, b is %s DataType", + a.type().getSimpleName(), b.type().getSimpleName())); + } + + // first result is Operand, second result is Operand, third result is long[] and it is + // ignored here. + Object[] aResult = tensordotReshape(a, aAxis, false); + Operand reshapedA = (Operand) aResult[0]; + Operand aFreeDims = (Operand) aResult[1]; + long[] aFreeDimsStatic = (long[]) aResult[2]; + + // first result is Operand, second result is Operand, third result is long[] and it is + // ignored here. + Object[] bResult = tensordotReshape(b, bAxis, true); + Operand reshapedB = (Operand) bResult[0]; + Operand bFreeDims = (Operand) bResult[1]; + long[] bFreeDimsStatic = (long[]) bResult[2]; + + Operand abMatmul = frameworkOps.linalg.matmul(reshapedA, reshapedB); + long[] abDimsStatic = new long[aFreeDimsStatic.length + bFreeDimsStatic.length]; + System.arraycopy(aFreeDimsStatic, 0, abDimsStatic, 0, aFreeDimsStatic.length); + System.arraycopy( + bFreeDimsStatic, 0, abDimsStatic, aFreeDimsStatic.length, bFreeDimsStatic.length); + if (!abMatmul.shape().hasUnknownDimension() + && abMatmul.shape().equals(Shape.of(abDimsStatic))) { + return abMatmul; + } else { + return Reshape.create(scope, abMatmul, Constant.vectorOf(scope, abDimsStatic)); + } + } + + /** + * Computes log(sum(exp(elements across dimensions of a tensor))). Reduces {@code input_tensor} + * along the dimensions given in {@code axes}. + * + *

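Put differently, the implementation relies on the max-shifted identity log Σ_i exp(x_i) = m + log Σ_i exp(x_i - m) with m = max_i x_i, so no exponent exceeds 0 and exp cannot overflow. A hedged usage sketch matching the testReduceLogSumExp case further down (x, tf and fops are assumed names, with x a TFloat32 operand):

    // Reduce over both axes without keeping dims; the result is a scalar.
    Operand<TFloat32> lse = fops.math.reduceLogSumExp(x, new int[] {0, 1}, false);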
Reduces `{@code input} along the dimensions given in {@code axes}. Unless {@code keepdims} + * is true, the rank of the tensor is reduced by 1 for each of the entries in {@code axes}, which + * must be unique. If {@code keepdims} is true, the reduced dimensions are retained with length 1. + * If {@code axes} has no entries, all dimensions are reduced, and a tensor with a single element + * is returned. This function is more numerically stable than {@code log(sum(exp(input)))}. It + * avoids overflows caused by taking the exp of large inputs and underflows caused by taking the + * log of small inputs. + * + * @param input The tensor to reduce. + * @param axes The dimensions to reduce. If null, reduces all dimensions. Must be in the range + * {@link [-rank(input_tensor), rank(input_tensor)]}. + * @param keepDims If true, retains reduced dimensions with length 1. + * @return The reduced tensor. + */ + @Endpoint(name = "reduceLogSumExp") + public Operand reduceLogSumExp( + Operand input, int[] axes, boolean keepDims) { + Operand reduceDims = reductionDims(input, axes); + Operand rawMax = reduceMaxWithDims(input, axes, keepDims, reduceDims); + Operand myMax = + StopGradient.create( + scope, + Select.create( + scope, IsFinite.create(scope, rawMax), rawMax, ZerosLike.create(scope, rawMax))); + + Operand result = + Log.create( + scope, + reduceSumWithDims( + Exp.create(scope, Sub.create(scope, input, myMax)), axes, keepDims, reduceDims)); + + if (!keepDims) { + myMax = Reshape.create(scope, myMax, org.tensorflow.op.core.Shape.create(scope, result)); + } + result = Add.create(scope, result, myMax); + return mayReduceToScalar(keepDims, axes, result); + } + + private Operand reduceSumWithDims( + Operand input, int[] axes, boolean keepDims, Operand dims) { + return mayReduceToScalar( + keepDims, axes, ReduceSum.create(scope, input, dims, ReduceSum.keepDims(keepDims))); + } + + private Operand reduceMaxWithDims( + Operand input, int[] axes, boolean keepDims, Operand dims) { + return mayReduceToScalar( + keepDims, axes, ReduceMax.create(scope, input, dims, ReduceMax.keepDims(keepDims))); + } + + /** + * Sets a reduction's output shape to be a scalar if possible. + * + * @return the operand, possibly reduced to a scalar. + */ + private Operand mayReduceToScalar( + boolean keepDims, int[] axes, Operand output) { + + if ((output.shape().numDimensions() == Shape.UNKNOWN_SIZE + || output.shape().hasUnknownDimension()) + && !keepDims + && axes == null) { + return Reshape.create(scope, output, Constant.tensorOf(scope, Shape.scalar())); + } else { + return output; + } + } + + /** + * Reduce dimensions based on axis + * + * @param input the input + * @param axes he dimensions to reduce, may be null + * @return the dimensions to be reduced. 
+ */ + private Operand reductionDims(Operand input, int[] axes) { + if (axes != null) { + return Constant.vectorOf(scope, axes); + } + long rank = input.shape().numDimensions(); + if (rank != Shape.UNKNOWN_SIZE) { + int[] dims = new int[(int) rank]; + for (int i = 0; i < rank; i++) { + dims[i] = i; + } + return Constant.vectorOf(scope, dims); + + } else { + return Range.create( + scope, + Constant.scalarOf(scope, 0), + Rank.create(scope, input), + Constant.scalarOf(scope, 1)); + } + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java index 326e3cdc2d1..dda5a7c6eaa 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java @@ -5,9 +5,12 @@ import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; +import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt64; +import static org.junit.jupiter.api.Assertions.assertThrows; + class MathOpsTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -386,7 +389,7 @@ public void testL2Normalize() { Ops tf = session.getTF(); FrameworkOps fops = FrameworkOps.create(tf); Operand input = tf.constant(array); - Operand result = fops.math.l2Normalize(tf.constant(array), new int[]{ 0,1,2}); + Operand result = fops.math.l2Normalize(tf.constant(array), new int[] {0, 1, 2}); session.evaluate(tf.constant(expectedArray), result); } } @@ -410,4 +413,89 @@ public void testConfusionMatrix() { session.evaluate(tf.constant(expected), result); } } + + @Test + public void testTensorDotValid() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + int[] axes1 = new int[] {1, 2}; + int[][] axes2 = new int[][] {{1}, {2}}; + int[][] axes3 = new int[2][0]; + int axes4 = 0; + + Operand a = tf.ones(tf.constant(Shape.of(3, 3)), TFloat32.class); + Operand b = tf.constant(new float[][][] {{{2, 3, 1}}}); + + Operand ans = fops.math.tensordot(a, b, axes1); + Operand expected = tf.constant(new float[][][] {{{6}}, {{6}}, {{6}}}); + session.evaluate(expected, ans); + + ans = fops.math.tensordot(a, b, axes2); + expected = tf.constant(new float[][][] {{{6}}, {{6}}, {{6}}}); + session.evaluate(expected, ans); + + ans = fops.math.tensordot(a, b, axes3); + + float[][][][][] expectedArray = + new float[][][][][] { + {{{{2, 3, 1}}}, {{{2, 3, 1}}}, {{{2, 3, 1}}}}, + {{{{2, 3, 1}}}, {{{2, 3, 1}}}, {{{2, 3, 1}}}}, + {{{{2, 3, 1}}}, {{{2, 3, 1}}}, {{{2, 3, 1}}}} + }; + ans = fops.math.tensordot(a, b, axes3); + expected = tf.constant(expectedArray); + session.evaluate(expected, ans); + + ans = fops.math.tensordot(a, b, axes4); + expected = tf.constant(expectedArray); + session.evaluate(expected, ans); + } + } + + @Test + public void testTensorDotInValidAxis() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand a = tf.constant(new float[][] {{1, 2}, {3, 4}}); + Operand b = tf.constant(new float[][] {{1, 2}, {3, 4}}); + assertThrows(IllegalArgumentException.class, () -> fops.math.tensordot(a, b, -1)); + 
assertThrows(IllegalArgumentException.class, () -> fops.math.tensordot(a, b, 3)); + assertThrows( + IllegalArgumentException.class, () -> fops.math.tensordot(a, b, new int[] {1})); + assertThrows( + IllegalArgumentException.class, () -> fops.math.tensordot(a, b, new int[][] {{1}})); + assertThrows( + IllegalArgumentException.class, + () -> fops.math.tensordot(a, b, new int[][] {{1}, {0, 1}})); + + assertThrows( + ArrayIndexOutOfBoundsException.class, + () -> fops.math.tensordot(a, b, new int[][] {{0}, {7}})); + } + } + + @Test + public void testReduceLogSumExp() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand x = + tf.constant( + new float[][] { + {0.43346116f, 0.8569728f, 0.57155997f, 0.0743812f, 0.63846475f}, + {0.8165283f, 0.26554802f, 0.37025765f, 0.8255019f, 0.45682374f}, + {0.93511814f, 0.52291054f, 0.80983895f, 0.11580781f, 0.8111686f}, + {0.49967498f, 0.27537802f, 0.48554695f, 0.28238368f, 0.7989301f}, + {0.8958915f, 0.84870094f, 0.56874424f, 0.08818512f, 0.13915819f} + }); + + Operand result = fops.math.reduceLogSumExp(x, new int[] {0, 1}, false); + session.evaluate(3.7911222f, result); + } + } } From be1fe6678c7e8d44fdb81d8bc47bba250c909b1f Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:52:43 -0400 Subject: [PATCH 17/60] Added frameworkOps for nn and linalg --- .../java/org/tensorflow/framework/op/FrameworkOps.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index c8b234f2c51..d9e3eec4b21 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -31,8 +31,9 @@ public class FrameworkOps { public final Ops coreOps; public final NnOps nn; - public final SetsOps sets; + public final SetOps sets; public final MathOps math; + public final LinalgOps linalg; private final Scope scope; /** @@ -44,8 +45,9 @@ private FrameworkOps(Scope scope) { this.coreOps = Ops.create(scope.env()); this.scope = scope; nn = new NnOps(this); - sets = new SetsOps(this); + sets = new SetOps(this); math = new MathOps(this); + linalg = new LinalgOps(this); } /** @@ -57,8 +59,9 @@ private FrameworkOps(Ops coreOps) { this.coreOps = coreOps; this.scope = coreOps.scope(); nn = new NnOps(this); - sets = new SetsOps(this); + sets = new SetOps(this); math = new MathOps(this); + linalg = new LinalgOps(this); } /** From 7b51e7fea5ae7e738c7360d4da6c8f7994266a96 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:53:15 -0400 Subject: [PATCH 18/60] Modified to use FrameworkOps --- .../tensorflow/framework/losses/Losses.java | 1 + .../tensorflow/framework/metrics/MeanIoU.java | 11 +- .../framework/metrics/impl/MetricsHelper.java | 561 ++++++++++++++++-- .../metrics/impl/WeightsBroadcastOps.java | 4 +- 4 files changed, 532 insertions(+), 45 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index 398588cee67..6700f2569f0 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java 
@@ -27,6 +27,7 @@ import org.tensorflow.op.math.Softplus; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TFloating; import org.tensorflow.types.family.TNumber; import static org.tensorflow.framework.utils.CastHelper.cast; diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java index 22baab3d6cb..70cd826f625 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java @@ -16,7 +16,7 @@ import org.tensorflow.Operand; import org.tensorflow.framework.initializers.Zeros; -import org.tensorflow.framework.metrics.impl.MetricsHelper; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; @@ -124,8 +124,8 @@ public Assign getInitializer() { * @param sampleWeights Optional weighting of each example. Defaults to 1, if null. Rank is either * 0, or the same rank as labels, and must be broadcastable to labels. * @return the Operands that updates totalConfusionMatrix variable - * @throws IllegalArgumentException if the weights rank is not 0, and weights rank @{code !=} labels rank, - * and if the predictions size is not equal to the labels size + * @throws IllegalArgumentException if the weights rank is not 0, and weights rank @{code !=} + * labels rank, and if the predictions size is not equal to the labels size */ @Override public List updateStateList( @@ -167,10 +167,11 @@ public List updateStateList( tSampleWeights = getTF().shape.flatten(tSampleWeights); } + FrameworkOps fops = FrameworkOps.create(getTF()); // Accumulate the prediction to current confusion matrix. 
Operand currentCM = - MetricsHelper.confusionMatrix( - getTF(), tLabels, tPredictions, getTF().constant(numClasses), tSampleWeights, type); + fops.math.confusionMatrix( + tLabels, tPredictions, tSampleWeights, getTF().constant(numClasses)); return Collections.singletonList(getTF().assignAdd(totalConfusionMatrix, currentCM)); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java index a82e1760d1f..a4e19d58bcb 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java @@ -15,21 +15,35 @@ package org.tensorflow.framework.metrics.impl; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.LossTuple; +import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.framework.metrics.exceptions.NotBroadcastableException; import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; +import org.tensorflow.op.core.Assign; +import org.tensorflow.op.core.OneHot; +import org.tensorflow.op.core.Rank; +import org.tensorflow.op.core.Squeeze; +import org.tensorflow.op.core.Variable; import org.tensorflow.op.math.Mean; +import org.tensorflow.op.nn.TopK; import org.tensorflow.types.TBool; +import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TIntegral; import org.tensorflow.types.family.TNumber; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; import static org.tensorflow.framework.utils.CastHelper.cast; @@ -44,8 +58,8 @@ public class MetricsHelper { "weights can not be broadcast to values."; /** - * Asserts that the sampleWeights can be broadcast to the same shape as values - * + * Asserts that the {@code sampleWeights} can be broadcast to the same shape as {@code values + * } * *

In losses and metrics, limited weight broadcasting is supported. Weights must be either * scalar, or the same rank as the target values, with each dimension either 1, or the same as the @@ -54,11 +68,11 @@ public class MetricsHelper { * @param tf the TensorFlow Ops * @param sampleWeights the sample weights. * @param values the values to which weights are applied. - * @return Operation with control dependencies to ensure sampleWeight - * can be broadcast to values + * @return {@code Operation} with control dependencies to ensure {@code sampleWeight} + * can be broadcast to {@code values} * @param the type of Operand - * @throws NotBroadcastableException If static checks determine sampleWeights has an - * incorrect shape that prohibit broadcasting to values + * @throws NotBroadcastableException If static checks determine {@code sampleWeights} has an + * incorrect shape that prohibit broadcasting to {@code values} */ @SuppressWarnings("unchecked") public static Op assertBroadcastable( @@ -79,7 +93,7 @@ public static Op assertBroadcastable( && !valuesShapeStatic.hasUnknownDimension()) { if (weightsRankStatic == 0) { return tf.withSubScope("staticScalarCheckSuccess") - .withControlDependencies(Collections.EMPTY_LIST) + .withControlDependencies(Collections.emptyList()) .noOp(); } if (weightsRankStatic != valuesRankStatic) { @@ -89,8 +103,8 @@ public static Op assertBroadcastable( ASSERT_BROADCAST_ERROR_PREFIX, valuesRankStatic, weightsRankStatic, - valuesShapeStatic.toString(), - weightsShapeStatic.toString())); + valuesShapeStatic, + weightsShapeStatic)); } for (int i = 0; i < valuesRankStatic; i++) { @@ -101,8 +115,8 @@ public static Op assertBroadcastable( "%s Mismatch at dim %d. values.shape=%s weights.shape=%s.", ASSERT_BROADCAST_ERROR_PREFIX, i, - valuesShapeStatic.toString(), - weightsShapeStatic.toString())); + valuesShapeStatic, + weightsShapeStatic)); } } return tf.withSubScope("staticDimsCheckSuccess") @@ -187,13 +201,13 @@ private static Operand canBroadcastDims( } /** - * Broadcast weights to the same shape as values. + * Broadcast {@code weights} to the same shape as {@code values}. * * @param tf the TensorFlow ops - * @param weights Operand whose shape is broadcastable to values. + * @param weights Operand whose shape is broadcastable to {@code values}. * @param values Operand of any shape * @param the type of Operands - * @return weights broadcast to values shape + * @return {@code weights} broadcast to {@code values} shape */ public static Operand broadcastWeights( Ops tf, Operand weights, Operand values) { @@ -214,11 +228,473 @@ public static Operand broadcastWeights( return ctf.math.mul(weights, tf.onesLike(values)); } - // aliases for mean + /** + * Checks that all the Symbolic Shapes are consistent. + * + * @param tf the TensorFlow Ops + * @param symbols the list of Symbolic Shapes + * @param message the error message if the shapes are not consistent. + * @return a list of Operands to check the consistency of the symbolic shapes ready to add to a + * control dependency. + */ + public static List assertShapes( + Ops tf, List> symbols, String message) { + List updateOperations = new ArrayList<>(); + // check that the symbolic shape rank matches the operands rank. 
+ symbols.forEach( + symbol -> { + Operand operand = symbol.getOperand(); + int rank = symbol.rank(); + Rank tfRank = tf.rank(operand); + Op assertion = + tf.withSubScope("assertShapes-1") + .assertThat( + tf.math.equal(tfRank, tf.constant(rank)), + Collections.singletonList(tf.constant(message))); + updateOperations.add(assertion); + }); + + Map> dict = new HashMap<>(); + + // check that each operand's dimension size equals the corresponding symbolic shape's dimensions + // size + symbols.forEach( + symbol -> { + AtomicLong ll = new AtomicLong(); + symbol + .getSymbols() + .forEach( + s -> { + Operand size = dict.get(s); + if (size == null) { + // save size for later checks + size = + tf.shape.size(symbol.getOperand(), tf.constant(ll.get()), TInt64.class); + dict.put(s, size); + } + Op assertion = + tf.withSubScope("assertShapes-2") + .assertThat( + tf.math.equal( + tf.shape.size( + symbol.getOperand(), + tf.constant(ll.getAndIncrement()), + TInt64.class), + size), + Collections.singletonList(tf.constant(message))); + updateOperations.add(assertion); + }); + }); + + return updateOperations; + } + + /** + * Returns an op to update the given confusion matrix variables. + * + *

For every pair of values in {@code labels} and {@code predictions}: + * + *

+   * TRUE_POSITIVES:  {@code labels} == true and {@code predictions} > thresholds
+   * FALSE_NEGATIVES: {@code labels} == true and {@code predictions} <= thresholds
+   * TRUE_NEGATIVES:  {@code labels} == false and {@code predictions} <= thresholds
+   * FALSE_POSITIVES: {@code labels} == false and {@code predictions} > thresholds
+   * 
+ * + *
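As a small unweighted illustration with a single threshold of 0.5: for labels [1, 0, 1, 0] and predictions [0.9, 0.8, 0.3, 0.2], the comparisons above give TRUE_POSITIVES = 1 (index 0), FALSE_POSITIVES = 1 (index 1), FALSE_NEGATIVES = 1 (index 2) and TRUE_NEGATIVES = 1 (index 3).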

The results will be weighted and added together. When multiple thresholds are provided, we + * will repeat the same for every threshold. + * + *

For estimation of these metrics over a stream of data, the function creates an {@code update_op} + * operation that updates the given variables. + *

{@code labels}, {@code predictions}, and {@code sampleWeight} tensors are + * aligned by {@link LossesHelper#removeSqueezableDimensions(Ops, Operand, Operand)}. {@code + * sampleWeight} is then broadcast to the shape of {@code predictions}. + * + * @param tf the TensorFlow Ops + * @param variablesToUpdate map with {@link ConfusionMatrixEnum} values as valid keys and + * corresponding variables to update as values. If {@code multiLabel}, then the variable + * shapes are (T, D), where T is the number of thresholds and D is the number of classes + * (after slicing by {@code classIndex}, if provided). If {@code multiLabels}, then + * the variable shapes are (T). + * @param varInitializers map with {@link ConfusionMatrixEnum} values as valid keys and + * corresponding initializer Operands to for {@code variablesToUpdate}. + * @param labels the labels. Will be cast to {@link TBool}. Shape (N, Cx, L1?), where N is the + * number of examples, Cx is zero or more class dimensions, and L1 is a potential extra + * dimension of size 1 that would be squeezed. + * @param predictions the predictions shape (N, Cx, P1?) + * @param thresholds thresholds in the range {@code [0, 1]}, or {@link #NEG_INF} is used when + * topK is set + * @param topK optional, indicates that only the top k predictions should be considered. Applied + * before possibly slicing by {@code classIndex}. + * @param classIndex optional, limits the prediction and labels to the specified class. This is an + * integer index into the first dimension of Cx. + * @param sampleWeight optional {@code Tensor} that is aligned with labels and predictions as + * explained above. Use weights of 0 to mask values. + * @param multiLabel indicates whether multidimensional prediction/labels should be treated as + * multilabel responses, or flattened into a single label. When true, the values of {@code + * variablesToUpdate} must have a second dimension equal to the number of labels and + * predictions per example, and those tensors must not be RaggedTensors. + * @param labelWeights tensor of non-negative weights for multilabel data. The weights are applied + * when calculating TRUE_POSITIVES, FALSE_POSITIVES, TRUE_NEGATIVES, and FALSE_NEGATIVES + * without explicit multilabel handling (i.e. when the data is to be flattened). Must have + * shape (Dx), which is the same as (Cx) referenced above, except that if {@code classIndex + * } is provided, then the final dimension of Dx is 1. These weights will be broadcast + * across the 0th dimension (the examples dimension) of {@code predictions}. May be null. + * Must be null if {@code multiLabel}. + * @param the data type for the variables + * @throws IllegalArgumentException If {@code predictions} and {@code labels} have + * mismatched shapes, or if {@code sampleWeight} is not null and its shape + * doesn't match {@code predictions}, or if {@code multiLabel && labelWeights != null}.. + * @return an op to update the given confusion matrix variables. 
+ */ + @SuppressWarnings({"unchecked", "rawtypes"}) + public static List updateConfusionMatrixVariables( + Ops tf, + Map> variablesToUpdate, + Map> varInitializers, + Operand labels, + Operand predictions, + Operand thresholds, + Integer topK, + Integer classIndex, + Operand sampleWeight, + boolean multiLabel, + Operand labelWeights) { + if (multiLabel && labelWeights != null) + throw new IllegalArgumentException( + "labelWeights for multilabel data should be handled outside of updateConfusionMatrixVariables when multiLabel is true."); + + if (variablesToUpdate == null || variablesToUpdate.isEmpty()) { + return Collections.EMPTY_LIST; + } + + Operand tLabels = labels; + Operand tPredictions = predictions; + Operand tSampleWeight = sampleWeight; + + // We will tile data for threshold comparisons. We want a cross product of thresholds and + // predictions/labels: + // In the multilabel case, we want a data shape of (T, N, D). + // else (T, ND). + // where + // T is numThresholds (the size of the 0th dimension of thresholds) + // N is the number of examples (the 0th dimension of labels and predictions) + // Dx == Cx except that if classIndex != null, + // then the last dimension of Dx is size 1 + // D is the product of all Dx + // ND is N * D + + // size of the 0th dimension of thresholds + // reshape to scalar for operations later. + Operand numThresholds = + tf.reshape(tf.shape.size(thresholds, tf.constant(0)), tf.constant(Shape.scalar())); + + // if multilabel, then (rank(thresholds) == 1) + // else true + Operand oneThresh; + if (multiLabel) { + oneThresh = tf.math.equal(tf.constant(1), tf.rank(thresholds)); + } else { + // TODO handle Ragged Tensors???? + // [y_pred, + // y_true], _ = ragged_assert_compatible_and_get_flat_values([y_pred, y_true], + // sampleWeights) + oneThresh = tf.constant(true); + } + + List controlOps = new ArrayList<>(); + Operand axes = allAxes(tf, tPredictions); + controlOps.add( + tf.withSubScope("updateConfusionMatrixVariables-1") + .assertThat( + tf.reduceAll( + tf.math.greaterEqual( + tPredictions, cast(tf, tf.constant(0), tPredictions.type())), + axes), + Collections.singletonList(tf.constant("predictions must be >= 0")))); + controlOps.add( + tf.withSubScope("updateConfusionMatrixVariables-2") + .assertThat( + tf.reduceAll( + tf.math.lessEqual(tPredictions, cast(tf, tf.constant(1), tPredictions.type())), + axes), + Collections.singletonList(tf.constant("predictions must be <= 1")))); + + LossTuple result = + LossesHelper.squeezeOrExpandDimensions(tf, tLabels, tPredictions, tSampleWeight); + tPredictions = result.getTarget(); // shape (N, Cx) + tLabels = result.getLabels(); // shape (N, Cx) + tSampleWeight = result.getSampleWeights(); // broadcastable to (N, Dx) + + if (!tPredictions.shape().isCompatibleWith(tLabels.shape())) + throw new IllegalArgumentException( + String.format( + "Shapes %s and %s are incompatible)", + tPredictions.shape().toString(), tLabels.shape().toString())); + + if (topK != null) { + tPredictions = filterTopK(tf, tPredictions, topK); + } + + if (classIndex != null) { + // Slice to new shapes (N, Dx) + tLabels = tf.squeeze(tf.gather(tLabels, + tf.constant(new int[] {classIndex}), tf.constant(-1)), + Squeeze.axis(Collections.singletonList(1L))); + tPredictions = tf.squeeze(tf.gather(tPredictions, + tf.constant(new int[] {classIndex}), tf.constant(-1)), + Squeeze.axis(Collections.singletonList(1L))); + } + org.tensorflow.op.core.Shape predShape = tf.shape(tPredictions); + + Operand numExamples = + tf.reshape(tf.shape.size(tPredictions, 
tf.constant(0)), tf.constant(Shape.scalar())); + + // number of labels (and predictions) per example (after possibly slicing by classIndex) + // In the notation we are using for comments, this is D. + Operand numLabels = + tf.select( + tf.math.equal(tf.shape.numDimensions(predShape), tf.constant(1)), + tf.constant(1), + tf.reduceProd( + // take all but the first dimension + tf.shape.takeLast( + predShape, tf.math.sub(tf.shape.numDimensions(predShape), tf.constant(1))), + tf.constant(0))); + + // threshLabelTile == numLabels except in one case: + // if multilabel and rank(thresholds) != 1, then threshLabelTile is 1 + Operand threshLabelTile = tf.select(oneThresh, numLabels, tf.constant(1)); + + // if multilabel, then shape (1, N, Dx) + // else shape (1, ND), + Operand predictionsExtraDim; + Operand labelsExtraDim; + + if (multiLabel) { + predictionsExtraDim = tf.expandDims(tPredictions, tf.constant(0)); + labelsExtraDim = tf.expandDims(cast(tf, tLabels, TBool.class), tf.constant(0)); + } else { + predictionsExtraDim = tf.reshape(tPredictions, tf.constant(Shape.of(1, -1))); + labelsExtraDim = tf.reshape(cast(tf, tLabels, TBool.class), tf.constant(Shape.of(1, -1))); + } + + // the shape of each thresholds tile + // if multilabel, then [T, 1, -1] + // else [T, -1] + List> threshPretileShape; + + // the tiling multiples for thresholds + // We want to repeat the thresholds for each data position. + // if multilabel, then [1, N, threshLabelTile]. (threshLabelTile is typically numLabels) + // else [1, ND] + List> threshTiles; + + // tiling multiples for predictionsExtraDim and labelsExtraDim + // We want to repeat the predictions and labels for each threshold. + // If multilabel, then [T, 1, 1] + // else [T, 1] + List> dataTiles; + + if (multiLabel) { + threshPretileShape = Arrays.asList(numThresholds, tf.constant(1), tf.constant(-1)); + threshTiles = Arrays.asList(tf.constant(1), numExamples, threshLabelTile); + dataTiles = Arrays.asList(numThresholds, tf.constant(1), tf.constant(1)); + } else { + threshPretileShape = + Arrays.asList(tf.reshape(numThresholds, tf.constant(Shape.scalar())), tf.constant(-1)); + Operand mul = tf.math.mul(numExamples, numLabels); + threshTiles = Arrays.asList(tf.constant(1), mul); + dataTiles = Arrays.asList(numThresholds, tf.constant(1)); + } + + // if multilabel, then shape (T, 1, T*) + // else shape (T, T*) + // where T* is the product of all threshold dimension sizes beyond 0 + Operand thresholdsReshaped = + tf.reshape(cast(tf, thresholds, predictions.type()), tf.stack(threshPretileShape)); + + Operand threshTilesShape = tf.stack(threshTiles); + + // if multilabel, then + // if thresholds has rank > 1, then shape (T, N, T*) + // else shape (T, N, D) + // else shape (T, ND) + Operand threshTiled = tf.tile(thresholdsReshaped, threshTilesShape); + + Operand dataTilesShape = tf.stack(dataTiles); + + // if multilabel, then shape (T, N, D) + // else (T, ND) + Operand predsTiled = tf.tile(predictionsExtraDim, dataTilesShape); + + // Compare predictions and threshold. 
+ Operand predIsPos = tf.math.greater(predsTiled, threshTiled); + // Tile labels by number of thresholds + Operand labelIsPos = tf.tile(labelsExtraDim, tf.stack(dataTiles)); + Operand weightsTiled; + if (tSampleWeight != null) { + tSampleWeight = tf.broadcastTo(tSampleWeight, tf.shape(tPredictions)); + // if multilabel, then + // reshape tSampleWeight to (1, N, threshLabelTile) + // tile the result into shape (T, N, threshLabelTile) + // where threshLabelTile is typically D + // else + // reshape tSampleWeight to (1, ND) + // tile the result into shape (T, ND) + weightsTiled = tf.tile(tf.reshape(tSampleWeight, threshTilesShape), dataTilesShape); + } else { + weightsTiled = null; + } + + if (labelWeights != null) { + // Change shape to (1, Dx). + Operand lLabelWeights = tf.expandDims(tf.identity(labelWeights), tf.constant(0)); + + // Broadcast to shape (N, Dx). + lLabelWeights = tf.broadcastTo(lLabelWeights, tPredictions); + + // If multilabel: shape (T, N, D) + // else: shape (T, ND) + Operand labelWeightsTiled = + tf.tile(tf.reshape(lLabelWeights, tf.stack(threshTiles)), tf.stack(dataTiles)); + + if (weightsTiled == null) { + weightsTiled = labelWeightsTiled; + } else { + weightsTiled = tf.math.mul(weightsTiled, labelWeightsTiled); + } + } + + Map loopVars = new HashMap<>(); + loopVars.put(ConfusionMatrixEnum.TRUE_POSITIVES, new Operand[] {labelIsPos, predIsPos}); + Variable updateTN = variablesToUpdate.get(ConfusionMatrixEnum.TRUE_NEGATIVES); + Variable updateFP = variablesToUpdate.get(ConfusionMatrixEnum.FALSE_POSITIVES); + Variable updateFN = variablesToUpdate.get(ConfusionMatrixEnum.FALSE_NEGATIVES); + + Operand predIsNeg = null; + Operand labelIsNeg; + if (updateFN != null || updateTN != null) { + predIsNeg = tf.math.logicalNot(predIsPos); + loopVars.put(ConfusionMatrixEnum.FALSE_NEGATIVES, new Operand[] {labelIsPos, predIsNeg}); + } + + if (updateFP != null || updateTN != null) { + labelIsNeg = tf.math.logicalNot(labelIsPos); + loopVars.put(ConfusionMatrixEnum.FALSE_POSITIVES, new Operand[] {labelIsNeg, predIsPos}); + if (updateTN != null) { + loopVars.put(ConfusionMatrixEnum.TRUE_NEGATIVES, new Operand[] {labelIsNeg, predIsNeg}); + } + } + + final Operand weightsTiledF = weightsTiled; + loopVars + .keySet() + .forEach( + (c) -> { + if (variablesToUpdate.containsKey(c)) { + Operand[] op = loopVars.get(c); + // op[0] = label, op[1] == prediction + controlOps.add( + weightedAssignAdd( + tf, + op[0], + op[1], + weightsTiledF, + variablesToUpdate.get(c), + varInitializers.get(c))); + } + }); + + return controlOps; + } /** - * Calculate the mean of the operand, along all axes and keepDims is false - * + * Creates an Operand that adds the values by taking the logical and of labels and predictions to + * the specified confusion matrix variable. + * + * @param tf The TensorFlow Ops + * @param labels the labels + * @param predictions the predictions + * @param weights the weights applied to the logical and result, may be null + * @param variable the variable to update + * @param initializer the variable initializer to be applied to the variable, may be null. + * @param the data type for the variable. + * @return an Operand that updates the variable. 
+ */ + private static Operand weightedAssignAdd( + Ops tf, + Operand labels, + Operand predictions, + Operand weights, + Variable variable, + Assign initializer) { + Class type = variable.type(); + Operand labelAndPred = cast(tf, tf.math.logicalAnd(labels, predictions), type); + + if (weights != null) { + labelAndPred = tf.math.mul(labelAndPred, weights); + } + // if multilabel: + // sum across examples, leaving shape (T, D) + // else: + // sum across ND, leaving shape (T) + Operand valueSum = tf.reduceSum(labelAndPred, tf.constant(1)); + Operand assignAdd; + if (initializer != null) { + Ops tfc = + tf.withSubScope("weightedAssignAdd") + .withControlDependencies(Collections.singletonList(initializer)); + assignAdd = tfc.assignAdd(variable, valueSum); + } else { + assignAdd = tf.assignAdd(variable, valueSum); + } + return assignAdd; + } + + /** + * Filters top-k values in the last dim of x and set the rest to NEG_INF. + * + *

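For example, with x = [0.2, 0.9, 0.4, 0.7] and topK = 2, only the two largest entries survive and the rest are pushed to NEG_INF, giving [NEG_INF, 0.9, NEG_INF, 0.7].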
Used for computing top-k prediction values in dense labels (which has the same shape as + * predictions) for recall and precision top-k metrics. + * + * @param tf The TensorFlow Ops + * @param x the tensor with any dimensions to filter + * @param topK the number of values to keep. + * @param the data type for x and the return value. + * @return the topK prediction values. + */ + private static Operand filterTopK(Ops tf, Operand x, int topK) { + Class type = x.type(); + Shape xShape = x.shape(); + // top has the same rank as x; the last dimension becomes indices of the topK features. + TopK top = tf.nn.topK(x, tf.constant(topK), TopK.sorted(false)); + // oneHot has an additional dimension: the one-hot representation of each topK index. + OneHot oneHot = + tf.oneHot( + top.indices(), + cast(tf, tf.constant(xShape.size(xShape.numDimensions() - 1)), TInt32.class), + tf.constant(1), + tf.constant(0), + OneHot.axis(-1L)); + // Sum the one-hot representations along the last dimension of x. + Operand topKMask = cast(tf, tf.reduceSum(oneHot, tf.constant(-2)), type); + + // x * top_k_mask + NEG_INF * (1 - top_k_mask) + Operand add1 = tf.math.mul(x, topKMask); + Operand add2 = + tf.math.mul( + cast(tf, tf.constant(NEG_INF), type), + tf.math.sub(cast(tf, tf.constant(1), type), topKMask)); + return tf.math.add(add1, add2); + } + + // alias for mean + + /** + * Calculate the mean of the operand, along all axes and {@code keepDims} is {@code false + * } * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -230,8 +706,8 @@ public static Operand mean(Ops tf, Operand x) { } /** - * Calculate the mean of the operand, alongside the specified axis with keepDims is - * false + * Calculate the mean of the operand, alongside the specified axis with {@code keepDims} is + * {@code false} * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -249,12 +725,12 @@ public static Operand mean( * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean - * @param keepDims Indicates whether to keep the dimensions or not. If keepdims is - * false, the rank of the tensor is reduced by 1 for each entry in axes - * . If keepdims is true, the reduced dimensions are retained + * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is + * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes + * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained * with length 1. * @param the type of the operand - * @return the mean of elements of x. + * @return the mean of elements of {@code x}. */ public static Operand mean(Ops tf, Operand x, boolean keepDims) { return mean(tf, x, null, keepDims); @@ -266,12 +742,12 @@ public static Operand mean(Ops tf, Operand x, boolean * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean * @param axes Axes to compute the mean. - * @param keepDims Indicates whether to keep the dimensions or not. If keepdims is - * false, the rank of the tensor is reduced by 1 for each entry in axes - * . If keepdims is true, the reduced dimensions are retained + * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is + * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes + * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained * with length 1. * @param the data type of the Operand - * @return the mean of elements of x. 
+ * @return the mean of elements of {@code x}. */ public static Operand mean( Ops tf, Operand x, Operand axes, boolean keepDims) { @@ -281,9 +757,16 @@ public static Operand mean( return tf.math.mean(x, axes, Mean.keepDims(keepDims)); } + public static + LossTuple raggedAssertCompatibleAndGetFlatValues( + Ops tf, Operand labels, Operand predictions) { + // TODO handle ragged Tensors + Operand tLabels = cast(tf, labels, predictions.type()); + return new LossTuple<>(tLabels, predictions); + } + /** - * Calculate the mean of the operand, along all axes and keepDims is false - * + * Calculate the mean of the operand, along all axes and {@code keepDims} is false * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -294,8 +777,8 @@ public static Operand booleanMean(Ops tf, Operand x) { } /** - * Calculate the mean of the operand, alongside the specified axis with keepDims is - * false + * Calculate the mean of the operand, alongside the specified axis with {@code keepDims} is + * {@code false} * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -312,11 +795,11 @@ public static Operand booleanMean( * * @param tf the TensorFlow Ops * @param x the boolean Operand used to calculate the mean - * @param keepDims Indicates whether to keep the dimensions or not. If keepdims is - * false, the rank of the tensor is reduced by 1 for each entry in axes - * . If keepdims is true, the reduced dimensions are retained + * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is + * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes + * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained * with length 1. - * @return the mean of elements of x containing floating point numbers + * @return the mean of elements of {@code x} containing floating point numbers */ public static Operand booleanMean(Ops tf, Operand x, boolean keepDims) { return booleanMean(tf, x, null, keepDims); @@ -328,11 +811,11 @@ public static Operand booleanMean(Ops tf, Operand x, boolean ke * @param tf the TensorFlow Ops * @param x the boolean Operand used to calculate the mean * @param axes Axes to compute the mean. - * @param keepDims Indicates whether to keep the dimensions or not. If keepdims is - * false, the rank of the tensor is reduced by 1 for each entry in axes - * . If keepdims is true, the reduced dimensions are retained + * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is + * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes + * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained * with length 1. 
- * @return the mean of elements of x containing floating point numbers + * @return the mean of elements of {@code x} containing floating point numbers */ public static Operand booleanMean( Ops tf, Operand x, Operand axes, boolean keepDims) { diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java index 6583465da2e..47d7f8ab737 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.metrics.impl; import org.tensorflow.Operand; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; @@ -150,12 +151,13 @@ private static Operand hasValidNonscalarShape( private static Operand hasValidDims( Ops tf, Operand weightsShape, Operand valuesShape) { tf = tf.withSubScope("hasInvalidDims"); + FrameworkOps fops = FrameworkOps.create(tf); Operand valuesShape2d = tf.expandDims(valuesShape, tf.constant(-1)); Operand validDims = tf.concat(Arrays.asList(valuesShape2d, tf.onesLike(valuesShape2d)), tf.constant(1)); Operand weightsShape2d = tf.expandDims(weightsShape, tf.constant(-1)); - Operand invalidDims = SetsOps.difference(tf, weightsShape2d, validDims); + Operand invalidDims = fops.sets.difference(weightsShape2d, validDims); Operand numInvalidDims = tf.size(invalidDims, TInt32.class); return tf.math.equal(tf.constant(0), numInvalidDims); } From f1c63c049dcad74d7926f333f381f98a47416276 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:54:36 -0400 Subject: [PATCH 19/60] move nn.raw classes to nn in core, remove nn.raw --- .../src/gen/resources/ops.pb | Bin 1462296 -> 1462288 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb index 5472f5f883985bf6f4f266c9cc29668e59833462..fbcecceb5bd35e3681296ef57e58ae324733949a 100644 GIT binary patch delta 142 zcmbQSAacTj$c7fi7N!>F7M2#)7Pc1l7LFFq7OocV7M>Q~7QPn#7J)5-@?wmA(-p)7 zbD0*fOji^Wl;;#+lv3hS$jj54K2c0icKX?3A&Yhaal!2Z;zH-68Rt!ZP$V>sshw^5 n!y+M7exPCvE;XRy;DW@W;#9DfcAyp@76xLG?PrTcz3%`3&nztB delta 138 zcmbQRAacfn$c7fi7N!>F7M2#)7Pc1l7LFFq7OocV7M>Q~7QPn#7J)5-@?wmWrYndE z<}xi|nXV`%D9LKf{j;)2_G#D&gBGcKBbr$}fT kQ#af6yG25(JU|UPTv`wf?F);AfLIuaMYb<27R|o{05Q}nM*si- From 043654b8448fe97a626cbddf678c7befbba3756d Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 19:13:00 -0400 Subject: [PATCH 20/60] Update FrameworkOps.java --- .../src/main/java/org/tensorflow/framework/op/FrameworkOps.java | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index d9e3eec4b21..f182d9d7b80 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -62,6 +62,7 @@ private FrameworkOps(Ops coreOps) { sets = new SetOps(this); math = new MathOps(this); linalg = new LinalgOps(this); + } /** From 06c28df060a961f83a1ace310c5831010d5cd918 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Mon, 3 May 2021 10:05:28 
-0400
Subject: [PATCH 21/60] Fix unusual regression error in confusion matrix.
 Needed to apply ReduceAll to the AssertThat conditions. This change is
 unrelated to this PR, but the bug showed up here.

---
 .../org/tensorflow/framework/op/MathOps.java | 300 +++++++++---------
 1 file changed, 151 insertions(+), 149 deletions(-)

diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java
index 4c2210feb9c..8fda58806ca 100644
--- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java
+++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java
@@ -56,11 +56,13 @@ import org.tensorflow.op.math.Square;
 import org.tensorflow.op.math.Sub;
 import org.tensorflow.types.TBfloat16;
+import org.tensorflow.types.TBool;
 import org.tensorflow.types.TFloat16;
 import org.tensorflow.types.TInt32;
 import org.tensorflow.types.TInt64;
 import org.tensorflow.types.family.TFloating;
 import org.tensorflow.types.family.TNumber;
+import org.tensorflow.types.family.TType;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -110,27 +112,27 @@ public Operand l2Normalize(Operand x, int[] axis) {
 * Computes the confusion matrix from predictions and labels.
 *
 *

The matrix columns represent the prediction labels and the rows represent the real labels. - * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the number of valid * labels for a given classification task. Both prediction and labels must be 1-D arrays of the * same shape in order for this function to work. * - *

If `num_classes` is `None`, then `num_classes` will be set to one plus the maximum value in + *

If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in * either predictions or labels. Class labels are expected to start at 0. For example, if - * `num_classes` is 3, then the possible labels would be `[0, 1, 2]`. + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. * - *

If `weights` is not `None`, then each prediction contributes its corresponding weight to the + *

If {@code weights} is not null, then each prediction contributes its corresponding weight to the * total value of the confusion matrix cell. * *

For example: * - *

+   * 
{@code
    *     fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
    *         [[0 0 0 0 0]
    *          [0 0 1 0 0]
    *          [0 0 1 0 0]
    *          [0 0 0 0 0]
    *          [0 0 0 0 1]]
-   * 
+ * }
* *

Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 * confusion matrix. @@ -152,27 +154,27 @@ public Operand confusionMatrix(Operand labels, Operand * Computes the confusion matrix from predictions and labels. * *

The matrix columns represent the prediction labels and the rows represent the real labels. - * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the number of valid * labels for a given classification task. Both prediction and labels must be 1-D arrays of the * same shape in order for this function to work. * - *

If `num_classes` is `None`, then `num_classes` will be set to one plus the maximum value in + *

If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in * either predictions or labels. Class labels are expected to start at 0. For example, if - * `num_classes` is 3, then the possible labels would be `[0, 1, 2]`. + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. * - *

If `weights` is not `None`, then each prediction contributes its corresponding weight to the + *

If {@code weights} is not null, then each prediction contributes its corresponding weight to the * total value of the confusion matrix cell. * *

For example: * - *

+   * 
{@code
    *     fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
    *         [[0 0 0 0 0]
    *          [0 0 1 0 0]
    *          [0 0 1 0 0]
    *          [0 0 0 0 0]
    *          [0 0 0 0 1]]
-   * 
+ * }
* *

Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 * confusion matrix. @@ -196,27 +198,27 @@ public Operand confusionMatrix( * Computes the confusion matrix from predictions and labels. * *

The matrix columns represent the prediction labels and the rows represent the real labels. - * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the number of valid * labels for a given classification task. Both prediction and labels must be 1-D arrays of the * same shape in order for this function to work. * - *

If `num_classes` is `None`, then `num_classes` will be set to one plus the maximum value in + *

If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in * either predictions or labels. Class labels are expected to start at 0. For example, if - * `num_classes` is 3, then the possible labels would be `[0, 1, 2]`. + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. * - *

If `weights` is not `None`, then each prediction contributes its corresponding weight to the + *

If {@code weights} is not null, then each prediction contributes its corresponding weight to the * total value of the confusion matrix cell. * *

For example: * - *

+   * 
{@code
    *     fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
    *         [[0 0 0 0 0]
    *          [0 0 1 0 0]
    *          [0 0 1 0 0]
    *          [0 0 0 0 0]
    *          [0 0 0 0 1]]
-   * 
+ * }
* *

Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 * confusion matrix. @@ -277,19 +279,21 @@ public Operand confusionMatrix( one); } else { lNumClasses = Cast.create(lScope, numClasses, TInt64.class); + Operand less = Less.create(lScope, lLabels, lNumClasses); AssertThat labelsLess = AssertThat.create( lScope, - Less.create(lScope, lLabels, lNumClasses), + ReduceAll.create(scope, less, allAxes(less), ReduceAll.keepDims(false)), Collections.singletonList(Constant.scalarOf(lScope, "labels out of bounds"))); lLabels = Identity.create( lScope.withControlDependencies(Collections.singletonList(labelsLess)), lLabels); + less = Less.create(lScope, lPredictions, lNumClasses); AssertThat predictionsLess = AssertThat.create( lScope, - Less.create(lScope, lPredictions, lNumClasses), + ReduceAll.create(scope, less, allAxes(less), ReduceAll.keepDims(false)), Collections.singletonList(Constant.scalarOf(lScope, "predictions out of bounds"))); lPredictions = Identity.create( @@ -319,12 +323,12 @@ public Operand confusionMatrix( /** * Squeeze last dim if ranks differ from expected by exactly 1. * - * @param labels Label values, a Operand whose dimensions match predictions - * . - * @param predictions Predicted values, a Tensor of arbitrary dimensions. - * @param expectedRankDiff Expected result of rank(predictions) - rank(labels). + * @param labels Label values, a {@code Operand} whose dimensions match {@code predictions + * }. + * @param predictions Predicted values, a {@code Tensor} of arbitrary dimensions. + * @param expectedRankDiff Expected result of {@code rank(predictions) - rank(labels)}. * @param the data type for the labels, predictions and result - * @return labels and predictions, possibly with last dim squeezed. + * @return {@code labels} and {@code predictions}, possibly with last dim squeezed. */ public LossTuple removeSqueezableDimensions( Operand labels, Operand predictions, int expectedRankDiff) { @@ -372,10 +376,9 @@ public LossTuple removeSqueezableDimensions( * Creates an Operand that has all axes contained in the Operand's shape. * * @param op the Operand - * @param THe Data type for the Operand * @return an Operand that has all axes contained in the Operand's shape.. */ - public Operand allAxes(Operand op) { + public Operand allAxes(Operand op) { int rank = op.shape().numDimensions(); if (rank != Shape.UNKNOWN_SIZE) { int[] axes = new int[rank]; @@ -392,18 +395,18 @@ public Operand allAxes(Operand op) { /** * Transpose and reshape the input for contraction op. * - *

This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul` using - * `array_ops.transpose` and `array_ops.reshape`. The method takes a tensor and performs the + *

This method is helpful in reducing {@code math.tensordot} to {@code math_ops.matmul} using + * {@code array_ops.transpose} and {@code array_ops.reshape}. The method takes a tensor and performs the * correct transpose and reshape operation for a given set of indices. It returns the reshaped * tensor as well as a list of indices necessary to reshape the tensor again after matrix * multiplication. * * @param the type of Operand * @param a the Tensor - * @param axis unique indices specifying valid axes of `a`. + * @param axis unique indices specifying valid axes of {@code a}. * @param flipped whether to flip the dimensions or not * @return A tuple (reshapedA, freeDims, freeDimsStatic) where reshapedA is a reshaped to allow - * contraction via matmul, freeDims` is a TInt32 Operand, depending on whether the shape of a + * contraction via matmul, freeDims is a TInt32 Operand, depending on whether the shape of a * is fully specified, and freeDimsStatic is either a list of integers and null values, or * None, representing the inferred shape of the free dimensions */ @@ -703,50 +706,48 @@ private Operand[] tensordotAxes(Operand a, Operan * Tensor contraction of a and b along specified axes and outer product. *

* Tensordot (also known as tensor contraction) sums the product of elements - * from a and b` over the indices specified by - * a_axes and b_axes. The lists - * a_axes and b_axes specify those pairs of axes - * along which to contract the tensors. The axis a_axes[i] of - * a must have the same dimension as axis - * b_axes[i] of b for all i in - * range(0, len(a_axes)). The lists - * a_axes and b_axes must have identical length + * from {@code a} and {@code b} over the indices specified by + * {@code a_axes} and {@code b_axes}. The lists + * {@code a_axes} and {@code b_axes} specify those pairs of axes + * along which to contract the tensors. The axis {@code a_axes[i]} of + * {@code a} must have the same dimension as axis + * {@code b_axes[i]} of {@code b} for all {@code i} in + * {@code range(0, len(a_axes))}. The lists + * {@code a_axes} and {@code b_axes} must have identical length * and consist of unique integers that specify valid axes for each of the * tensors. Additionally outer product is supported by passing - * axes=0. + * {@code axes=0}. *

- * This operation corresponds to numpy.tensordot(a, b, axes). + * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. *

- * Example 1: When a and b are matrices (order 2), - * the case axes = 1 is equivalent to matrix multiplication. + * Example 1: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes = 1} is equivalent to matrix multiplication. *

- * Example 2: When a and`b are matrices (order 2), + * Example 2: When {@code a} and {@code b} are matrices (order 2), * the case - * axes = [[1], [0]] is equivalent to matrix multiplication. + * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. *

- * Example 3: When a and b are matrices (order 2), - * the case axes=0 gives the outer product, a tensor of order + * Example 3: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes=0} gives the outer product, a tensor of order * 4. *

* Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor * cjklm whose entry corresponding to the indices * (j,k,l,m) is given by: - *

* cjklm = Σi aijk * blmi . *

- * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). - *

+ * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. * - * @param a `Operand` of type `float32` or `float64`. - * @param b `Operand` with the same type as `a`. + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. * @param axis sum over the last N axes of a and the - * first N axes of b in order. If `axes=0`, computes the outer - * product between `a` and `b`. + * first N axes of b in order. If {@code axis=0}, computes the outer + * product between {@code a} and {@code b}. * @param the datatype of the Operands, must be either TFloat32 or * TFloat64 - * @return A `Operand` with the same type as `a`. + * @return A {@code Operand} with the same type as {@code a}. * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type */ @Endpoint(name = "tensordot") @@ -762,53 +763,53 @@ public Operand tensordot(Operand a, Operand b, in * Tensor contraction of a and b along specified axes and outer product. *

* Tensordot (also known as tensor contraction) sums the product of elements - * from a and b` over the indices specified by - * a_axes and b_axes. The lists - * a_axes and b_axes specify those pairs of axes - * along which to contract the tensors. The axis a_axes[i] of - * a must have the same dimension as axis - * b_axes[i] of b for all i in - * range(0, len(a_axes)). The lists - * a_axes and b_axes must have identical length + * from {@code a} and {@code b} over the indices specified by + * {@code a_axes} and {@code b_axes}. The lists + * {@code a_axes} and {@code b_axes} specify those pairs of axes + * along which to contract the tensors. The axis {@code a_axes[i]} of + * {@code a} must have the same dimension as axis + * {@code b_axes[i]} of {@code b} for all {@code i} in + * {@code range(0, len(a_axes))}. The lists + * {@code a_axes} and {@code b_axes} must have identical length * and consist of unique integers that specify valid axes for each of the * tensors. Additionally outer product is supported by passing - * axes=0. + * {@code axes=0}. *

- * This operation corresponds to numpy.tensordot(a, b, axes). + * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. *

- * Example 1: When a and b are matrices (order 2), - * the case axes = 1 is equivalent to matrix multiplication. + * Example 1: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes = 1} is equivalent to matrix multiplication. *

- * Example 2: When a and`b are matrices (order 2), + * Example 2: When {@code a} and {@code b} are matrices (order 2), * the case - * axes = [[1], [0]] is equivalent to matrix multiplication. + * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. *

- * Example 3: When a and b are matrices (order 2), - * the case axes=0 gives the outer product, a tensor of order + * Example 3: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes=0} gives the outer product, a tensor of order * 4. *

* Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor * cjklm whose entry corresponding to the indices * (j,k,l,m) is given by: *

* cjklm = Σi aijk * blmi . *

- * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. *

* - * @param a `Operand` of type `float32` or `float64`. - * @param b `Operand` with the same type as `a`. + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. * @param axes If axes is a scalar, sum over the last N axes of a and the * first N axes of b in order. If axes is a list, the first and second row * contain the set of unique integers specifying axes along which the - * contraction is computed, for `a` and `b`, respectively. The number of - * axes for `a` and `b` must be equal. If `axes=0`, computes the outer - * product between `a` and `b`. + * contraction is computed, for {@code a} and {@code b}, respectively. The number of + * axes for {@code a} and {@code b} must be equal. If {@code axis=0}, computes the outer + * product between {@code a} and {@code b}. * @param the datatype of the Operands, must be either TFloat32 or * TFloat64 - * @return A `Operand` with the same type as `a`. + * @return A {@code Operand} with the same type as {@code a}. * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type */ @Endpoint(name = "tensordot") @@ -826,51 +827,51 @@ public Operand tensordot( * Tensor contraction of a and b along specified axes and outer product. *

* Tensordot (also known as tensor contraction) sums the product of elements - * from a and b` over the indices specified by - * a_axes and b_axes. The lists - * a_axes and b_axes specify those pairs of axes - * along which to contract the tensors. The axis a_axes[i] of - * a must have the same dimension as axis - * b_axes[i] of b for all i in - * range(0, len(a_axes)). The lists - * a_axes and b_axes must have identical length + * from {@code a} and {@code b} over the indices specified by + * {@code a_axes} and {@code b_axes}. The lists + * {@code a_axes} and {@code b_axes} specify those pairs of axes + * along which to contract the tensors. The axis {@code a_axes[i]} of + * {@code a} must have the same dimension as axis + * {@code b_axes[i]} of {@code b} for all {@code i} in + * {@code range(0, len(a_axes))}. The lists + * {@code a_axes} and {@code b_axes} must have identical length * and consist of unique integers that specify valid axes for each of the * tensors. Additionally outer product is supported by passing - * axes=0. + * {@code axes=0}. *

- * This operation corresponds to numpy.tensordot(a, b, axes). + * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. *

- * Example 1: When a and b are matrices (order 2), - * the case axes = 1 is equivalent to matrix multiplication. + * Example 1: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes = 1} is equivalent to matrix multiplication. *

- * Example 2: When a and`b are matrices (order 2), + * Example 2: When {@code a} and{@code b} are matrices (order 2), * the case - * axes = [[1], [0]] is equivalent to matrix multiplication. + * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. *

- * Example 3: When a and b are matrices (order 2), - * the case axes=0 gives the outer product, a tensor of order + * Example 3: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes=0} gives the outer product, a tensor of order * 4. *

* Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor * cjklm whose entry corresponding to the indices * (j,k,l,m) is given by: *

* cjklm = Σi aijk * blmi . *

- * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. *

* - * @param a `Operand` of type `float32` or `float64`. - * @param b `Operand` with the same type as `a`. + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. * @param axes the first and second row * contain the set of unique integers specifying axes along which the - * contraction is computed, for `a` and `b`, respectively. The number of - * axes for `a` and `b` must be equal. I + * contraction is computed, for {@code a} and {@code b}, respectively. The number of + * axes for {@code a} and {@code b} must be equal. I * @param the datatype of the Operands, must be either TFloat32 or * TFloat64 - * @return A `Operand` with the same type as `a`. + * @return A {@code Operand} with the same type as {@code a}. * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type */ @Endpoint(name = "tensordot") @@ -887,51 +888,51 @@ public Operand tensordot(Operand a, Operand b, in * Tensor contraction of a and b along specified axes and outer product. *

* Tensordot (also known as tensor contraction) sums the product of elements - * from a and b` over the indices specified by - * a_axes and b_axes. The lists - * a_axes and b_axes specify those pairs of axes - * along which to contract the tensors. The axis a_axes[i] of - * a must have the same dimension as axis - * b_axes[i] of b for all i in - * range(0, len(a_axes)). The lists - * a_axes and b_axes must have identical length + * from {@code a} and {@code b} over the indices specified by + * {@code a_axes} and {@code b_axes}. The lists + * {@code a_axes} and {@code b_axes} specify those pairs of axes + * along which to contract the tensors. The axis {@code a_axes[i]} of + * {@code a} must have the same dimension as axis + * {@code b_axes[i]} of {@code b} for all {@code i} in + * {@code range(0, len(a_axes))}. The lists + * {@code a_axes} and {@code b_axes} must have identical length * and consist of unique integers that specify valid axes for each of the * tensors. Additionally outer product is supported by passing - * axes=0. + * {@code axes=0}. *

- * This operation corresponds to numpy.tensordot(a, b, axes). + * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. *

- * Example 1: When a and b are matrices (order 2), - * the case axes = 1 is equivalent to matrix multiplication. + * Example 1: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes = 1} is equivalent to matrix multiplication. *

- * Example 2: When a and`b are matrices (order 2), + * Example 2: When {@code a} and{@code b} are matrices (order 2), * the case - * axes = [[1], [0]] is equivalent to matrix multiplication. + * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. *

- * Example 3: When a and b are matrices (order 2), - * the case axes=0 gives the outer product, a tensor of order + * Example 3: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes=0} gives the outer product, a tensor of order * 4. *

* Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor * cjklm whose entry corresponding to the indices * (j,k,l,m) is given by: *

* cjklm = Σi aijk * blmi . *

- * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. *

* - * @param a `Operand` of type `float32` or `float64`. - * @param b `Operand` with the same type as `a`. + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. * @param axes the first and second row * contain the set of unique integers specifying axes along which the - * contraction is computed, for `a` and `b`, respectively. The number of - * axes for `a` and `b` must be equal. I + * contraction is computed, for {@code a} and {@code b}, respectively. The number of + * axes for {@code a} and {@code b} must be equal. I * @param the datatype of the Operands, must be either TFloat32 or * TFloat64 - * @return A `Operand` with the same type as `a`. + * @return A {@code Operand} with the same type as {@code a}. * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type */ @Endpoint(name = "tensordot") @@ -948,49 +949,49 @@ public Operand tensordot(Operand a, Operand b, in * Tensor contraction of a and b along specified axes and outer product. *

* Tensordot (also known as tensor contraction) sums the product of elements - * from a and b` over the indices specified by - * a_axes and b_axes. The lists - * a_axes and b_axes specify those pairs of axes - * along which to contract the tensors. The axis a_axes[i] of - * a must have the same dimension as axis - * b_axes[i] of b for all i in - * range(0, len(a_axes)). The lists - * a_axes and b_axes must have identical length + * from {@code a} and {@code b} over the indices specified by + * {@code a_axes} and {@code b_axes}. The lists + * {@code a_axes} and {@code b_axes} specify those pairs of axes + * along which to contract the tensors. The axis {@code a_axes[i]} of + * {@code a} must have the same dimension as axis + * {@code b_axes[i]} of {@code b} for all {@code i} in + * {@code range(0, len(a_axes))}. The lists + * {@code a_axes} and {@code b_axes} must have identical length * and consist of unique integers that specify valid axes for each of the * tensors. Additionally outer product is supported by passing - * axes=0. + * {@code axes=0}. *

- * This operation corresponds to numpy.tensordot(a, b, axes). + * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. *

- * Example 1: When a and b are matrices (order 2), - * the case axes = 1 is equivalent to matrix multiplication. + * Example 1: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes = 1} is equivalent to matrix multiplication. *

- * Example 2: When a and`b are matrices (order 2), + * Example 2: When {@code a} and{@code b} are matrices (order 2), * the case - * axes = [[1], [0]] is equivalent to matrix multiplication. + * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. *

- * Example 3: When a and b are matrices (order 2), - * the case axes=0 gives the outer product, a tensor of order + * Example 3: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes=0} gives the outer product, a tensor of order * 4. *

* Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor * cjklm whose entry corresponding to the indices * (j,k,l,m) is given by: *

* cjklm = Σi aijk * blmi . *

- * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. *

* - * @param a `Operand` of type `float32` or `float64`. - * @param b `Operand` with the same type as `a`. + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. * @param aAxis axes for the a Operand * @param bAxis axes for the b Operand * @param the datatype of the Operands, must be either TFloat32 or * TFloat64 - * @return A `Operand` with the same type as `a`. + * @return A {@code Operand} with the same type as {@code a}. * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type */ @SuppressWarnings({"unchecked", "unused"}) @@ -1042,7 +1043,7 @@ public Operand tensordot( * Computes log(sum(exp(elements across dimensions of a tensor))). Reduces {@code input_tensor} * along the dimensions given in {@code axes}. * - *

Reduces `{@code input} along the dimensions given in {@code axes}. Unless {@code keepdims} + *

Reduces {@code input} along the dimensions given in {@code axes}. Unless {@code keepdims} * is true, the rank of the tensor is reduced by 1 for each of the entries in {@code axes}, which * must be unique. If {@code keepdims} is true, the reduced dimensions are retained with length 1. * If {@code axes} has no entries, all dimensions are reduced, and a tensor with a single element @@ -1052,8 +1053,9 @@ public Operand tensordot( * * @param input The tensor to reduce. * @param axes The dimensions to reduce. If null, reduces all dimensions. Must be in the range - * {@link [-rank(input_tensor), rank(input_tensor)]}. + * {@code [-rank(input_tensor), rank(input_tensor)]}. * @param keepDims If true, retains reduced dimensions with length 1. + * @param the data type for the input and the result * @return The reduced tensor. */ @Endpoint(name = "reduceLogSumExp") From 8f33d21c2a79fa554138cface5d771905ff597e8 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Mon, 3 May 2021 10:05:53 -0400 Subject: [PATCH 22/60] javadoc fixes --- .../tensorflow/framework/op/LinalgOps.java | 94 +++++++++---------- 1 file changed, 46 insertions(+), 48 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java index eb069a2db22..931f7f851c2 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java @@ -45,15 +45,15 @@ public class LinalgOps { } /** - * Multiplies matrix a by matrix b, producing a * b - * . + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b + * }. * - *

The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + *

The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the inner 2 * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions * specify matching batch size. * - *

Both matrices must be of the same type. The supported types are: TFloat16, - * TFloat32, TFloat64, TInt32. + *

Both matrices must be of the same type. The supported types are: {@code TFloat16}, + * {@code TFloat32}, {@code TFloat64}, {@code TInt32}. * *

Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by * setting one of the corresponding flag to true. These are false by default. @@ -80,15 +80,15 @@ public class LinalgOps { * *

Note: This is matrix product, not element-wise product. * - * @param a an Operand of of type TFloat16, TFloat32, TFloat64 - * , TInt32. with a rank > 1 - * @param b an Operand with same type and rank as a. + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 + * }, {@code TInt32}. with a {@code rank > 1} + * @param b an Operand with same type and rank as {@code a}. * @param the data type of the Operands - * @return A Operand of the same type as a and b where each inner-most - * matrix is the product of the corresponding matrices in a and b. + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most + * matrix is the product of the corresponding matrices in {@code a} and {@code b}. * This is the matrix product not an element-wise product. - * @throws java.lang.IllegalArgumentException If transposeA and adjointA - * , or transposeB and adjointB are both set to `true`. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} + * , or {@code transposeB} and {@code adjointB} are both set to `true`. */ @Endpoint(name = "matmul") public Operand matmul(Operand a, Operand b) { @@ -96,21 +96,19 @@ public Operand matmul(Operand a, Operand b) { } /** - * Multiplies matrix a by matrix b, producing a * b - * . + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b + * }. * - *

The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + *

The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the inner 2 * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions * specify matching batch size. * - *

Both matrices must be of the same type. The supported types are: TFloat16, - * TFloat32, TFloat64, TInt32. + *

Both matrices must be of the same type. The supported types are: {@code TFloat16}, + * {@code TFloat32}, {@code TFloat64}, {@code TInt32}. * *

Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by * setting one of the corresponding flag to true. These are false by default. * - *

- * *

Note: This is matrix product, not element-wise product. * *

A simple 2-D tensor matrix multiplication: @@ -133,17 +131,17 @@ public Operand matmul(Operand a, Operand b) { * * }

* - * @param a an Operand of of type TFloat16, TFloat32, TFloat64 - * , TInt32. with a rank > 1 - * @param b an Operand with same type and rank as a. - * @param transposeA If `true`, a is transposed before multiplication. - * @param transposeB If `True`, b is transposed before multiplication + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 + * }, {@code TInt32}. with a {@code rank > 1} + * @param b an Operand with same type and rank as {@code a}. + * @param transposeA If true, {@code a} is transposed before multiplication. + * @param transposeB If true, {@code b} is transposed before multiplication * @param the data type of the Operands - * @return A Operand of the same type as a and b where each inner-most - * matrix is the product of the corresponding matrices in a and b. + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most + * matrix is the product of the corresponding matrices in {@code a} and {@code b}. * This is the matrix product not an element-wise product. - * @throws java.lang.IllegalArgumentException If transposeA and adjointA - * , or transposeB and adjointB are both set to `true`. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} + * , or {@code transposeB} and {@code adjointB} are both set to `true`. */ @Endpoint(name = "matmul") public Operand matmul( @@ -152,15 +150,15 @@ public Operand matmul( } /** - * Multiplies matrix a by matrix b, producing a * b - * . + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b + * }. * - *

The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + *

The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the inner 2 * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions * specify matching batch size. * - *

Both matrices must be of the same type. The supported types are: TFloat16, - * TFloat32, TFloat64, TInt32. + *

Both matrices must be of the same type. The supported types are: {@code TFloat16}, + * {@code TFloat32}, {@code TFloat64}, {@code TInt32}. * *

Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by * setting one of the corresponding flag to true. These are false by default. @@ -187,25 +185,25 @@ public Operand matmul( * * } * - * @param a an Operand of of type TFloat16, TFloat32, TFloat64 - * , TInt32. with a rank > 1 - * @param b an Operand with same type and rank as a. - * @param transposeA If true, a is transposed before multiplication. - * @param transposeB If True, b is transposed before multiplication - * @param adjointA If true, a is conjugated and transposed before multiplication. - * @param adjointB If true, b is conjugated and transposed before multiplication. - * @param aIsSparse If true, a is treated as a sparse matrix. Notice, this does + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 + * }, {@code TInt32}. with a {@code rank > 1} + * @param b an Operand with same type and rank as {@code a}. + * @param transposeA If true, {@code a} is transposed before multiplication. + * @param transposeB If True, {@code b} is transposed before multiplication + * @param adjointA If true, {@code a} is conjugated and transposed before multiplication. + * @param adjointB If true, {@code b} is conjugated and transposed before multiplication. + * @param aIsSparse If true, {@code a} is treated as a sparse matrix. Notice, this does * not support {@link SparseTensor}, it just makes optimizations that assume most values - * in a are zero. - * @param bIsSparse If true, b is treated as a sparse matrix. Notice, this does + * in {@code a} are zero. + * @param bIsSparse If true, {@code b} is treated as a sparse matrix. Notice, this does * not support {@link SparseTensor}, it just makes optimizations that assume most values - * in b are zero. + * in {@code b} are zero. * @param the data type of the Operands - * @return A Operand of the same type as a and b where each inner-most - * matrix is the product of the corresponding matrices in a and b. + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most + * matrix is the product of the corresponding matrices in {@code a} and {@code b}. * This is the matrix product not an element-wise product. - * @throws java.lang.IllegalArgumentException If transposeA and adjointA - * , or transposeB and adjointB are both set to `true`. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} + * , or {@code transposeB} and {@code adjointB} are both set to `true`. */ @SuppressWarnings("unchecked") @Endpoint(name = "matmul") From a24b8cae2481b4ed75d4444b344c385bcf1b2a15 Mon Sep 17 00:00:00 2001 From: Adam Pocock Date: Mon, 3 May 2021 23:06:55 -0400 Subject: [PATCH 23/60] Setting all the optimizers to have useLocking = True (#310) * Setting all the optimizers to have useLocking = True, like Keras. Adding a determinism test that's currently failing. * More work on the GradientDescentTest. * Tidying up the test. 
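The change is the same across all of these optimizers: each applyDense implementation now passes the
op-specific useLocking(true) option, so the kernel holds a lock while it reads and updates the variable
(mirroring the Keras behaviour mentioned above). As a rough, self-contained sketch of that pattern outside
any optimizer class (not part of this patch; the class name, variable, gradient and learning-rate values
below are invented purely for illustration):

    import org.tensorflow.Graph;
    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Variable;
    import org.tensorflow.op.train.ApplyGradientDescent;
    import org.tensorflow.types.TFloat32;

    public class UseLockingSketch {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          // A toy variable and a hand-written "gradient"; a real optimizer obtains
          // the gradient from Graph.addGradients instead.
          Variable<TFloat32> var = tf.variable(tf.constant(new float[] {1f, 2f, 3f}));
          Operand<TFloat32> grad = tf.constant(new float[] {0.1f, 0.2f, 0.3f});
          // The option added by this patch: ask the kernel to hold a lock while it
          // applies var -= learningRate * grad, instead of an unsynchronized update.
          tf.train.applyGradientDescent(
              var, tf.constant(0.01f), grad, ApplyGradientDescent.useLocking(true));
        }
      }
    }

The diffs that follow apply the same option through each op's own options class (ApplyAdadelta, ApplyAdagrad,
ApplyAdagradDa, ApplyAdam, ApplyAdaMax, ApplyMomentum, ApplyCenteredRmsProp, ApplyRmsProp).
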
--- .../framework/optimizers/AdaDelta.java | 4 +- .../framework/optimizers/AdaGrad.java | 6 +- .../framework/optimizers/AdaGradDA.java | 4 +- .../tensorflow/framework/optimizers/Adam.java | 4 +- .../framework/optimizers/Adamax.java | 3 +- .../framework/optimizers/GradientDescent.java | 6 +- .../framework/optimizers/Momentum.java | 3 +- .../framework/optimizers/RMSProp.java | 8 +- .../optimizers/GradientDescentTest.java | 160 ++++++++++++++++++ 9 files changed, 189 insertions(+), 9 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaDelta.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaDelta.java index aadbfeea54b..e5bab9228b4 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaDelta.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaDelta.java @@ -20,6 +20,7 @@ import org.tensorflow.Output; import org.tensorflow.op.Op; import org.tensorflow.op.core.Variable; +import org.tensorflow.op.train.ApplyAdadelta; import org.tensorflow.types.family.TType; import java.util.List; @@ -160,7 +161,8 @@ protected Op applyDense(Output gradient, Output variable tf.dtypes.cast(tf.constant(learningRate), gradient.type()), tf.dtypes.cast(tf.constant(rho), gradient.type()), tf.dtypes.cast(tf.constant(epsilon), gradient.type()), - gradient); + gradient, + ApplyAdadelta.useLocking(true)); } /** {@inheritDoc} */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaGrad.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaGrad.java index 2dd05ef31b3..66a170efcc2 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaGrad.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaGrad.java @@ -19,6 +19,7 @@ import org.tensorflow.Operand; import org.tensorflow.Output; import org.tensorflow.op.Op; +import org.tensorflow.op.train.ApplyAdagrad; import org.tensorflow.op.core.Variable; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ public class AdaGrad extends Optimizer { public static final float LEARNING_RATE_DEFAULT = 0.001f; public static final float INITIAL_ACCUMULATOR_DEFAULT = 0.01f; + private static final ApplyAdagrad.Options[] opts = new ApplyAdagrad.Options[]{ + ApplyAdagrad.updateSlots(true), ApplyAdagrad.useLocking(true)}; + private final float learningRate; private final float initialAccumulatorValue; @@ -140,7 +144,7 @@ private void createAdaGradSlot(Output v) { protected Op applyDense(Output gradient, Output variable) { Variable slot = getSlot(variable, ACCUMULATOR).get(); return tf.train.applyAdagrad( - variable, slot, tf.dtypes.cast(tf.constant(learningRate), gradient.type()), gradient); + variable, slot, tf.dtypes.cast(tf.constant(learningRate), gradient.type()), gradient, opts); } /** {@inheritDoc} */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaGradDA.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaGradDA.java index 7114c33339f..64473b00f69 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaGradDA.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/AdaGradDA.java @@ -22,6 +22,7 @@ import org.tensorflow.op.Op; import org.tensorflow.op.core.Assign; import org.tensorflow.op.core.Variable; +import org.tensorflow.op.train.ApplyAdagradDa; import org.tensorflow.types.TInt64; import 
org.tensorflow.types.family.TType; @@ -219,7 +220,8 @@ protected Op applyDense(Output gradient, Output variable tf.dtypes.cast(tf.constant(learningRate), gradient.type()), tf.dtypes.cast(tf.constant(l1Strength), gradient.type()), tf.dtypes.cast(tf.constant(l2Strength), gradient.type()), - globalStep); + globalStep, + ApplyAdagradDa.useLocking(true)); } /** diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Adam.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Adam.java index 72598d12543..ce581e41397 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Adam.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Adam.java @@ -26,6 +26,7 @@ import org.tensorflow.op.core.Assign; import org.tensorflow.op.core.Constant; import org.tensorflow.op.core.Variable; +import org.tensorflow.op.train.ApplyAdam; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TType; @@ -237,7 +238,8 @@ protected Op applyDense(Output gradient, Output variable tf.dtypes.cast(betaOneConst, gradient.type()), tf.dtypes.cast(betaTwoConst, gradient.type()), tf.dtypes.cast(epsilonConst, gradient.type()), - gradient); + gradient, + ApplyAdam.useLocking(true)); } /** diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Adamax.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Adamax.java index 0ecc1ac1451..70b1497c2d8 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Adamax.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Adamax.java @@ -170,7 +170,8 @@ protected Op applyDense(Output gradient, Output variable tf.dtypes.cast(betaOneConst, gradient.type()), tf.dtypes.cast(betaTwoConst, gradient.type()), tf.dtypes.cast(epsilonConst, gradient.type()), - gradient); + gradient, + ApplyAdaMax.useLocking(true)); } /** {@inheritDoc} */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/GradientDescent.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/GradientDescent.java index a373b2e5b55..7e2ec9593ed 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/GradientDescent.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/GradientDescent.java @@ -18,6 +18,7 @@ import org.tensorflow.Graph; import org.tensorflow.Output; import org.tensorflow.op.Op; +import org.tensorflow.op.train.ApplyGradientDescent; import org.tensorflow.types.family.TType; /** @@ -66,7 +67,10 @@ public GradientDescent(Graph graph, String name, float learningRate) { @Override protected Op applyDense(Output gradient, Output variable) { return tf.train.applyGradientDescent( - variable, tf.dtypes.cast(tf.constant(learningRate), gradient.type()), gradient); + variable, + tf.dtypes.cast(tf.constant(learningRate), gradient.type()), + gradient, + ApplyGradientDescent.useLocking(true)); } /** {@inheritDoc} */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Momentum.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Momentum.java index f6640409d60..ca53bd0c7e8 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Momentum.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Momentum.java @@ -139,7 +139,8 @@ protected Op applyDense(Output gradient, Output variable 
tf.dtypes.cast(tf.constant(learningRate), gradient.type()), gradient, tf.dtypes.cast(tf.constant(momentum), gradient.type()), - ApplyMomentum.useNesterov(useNesterov)); + ApplyMomentum.useNesterov(useNesterov), + ApplyMomentum.useLocking(true)); } /** {@inheritDoc} */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/RMSProp.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/RMSProp.java index e86e64971a4..79ced52dc08 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/RMSProp.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/RMSProp.java @@ -20,6 +20,8 @@ import org.tensorflow.Output; import org.tensorflow.op.Op; import org.tensorflow.op.core.Variable; +import org.tensorflow.op.train.ApplyCenteredRmsProp; +import org.tensorflow.op.train.ApplyRmsProp; import org.tensorflow.types.family.TType; import java.util.List; @@ -202,7 +204,8 @@ protected Op applyDense(Output gradient, Output variable tf.dtypes.cast(tf.constant(decay), gradient.type()), tf.dtypes.cast(tf.constant(momentum), gradient.type()), tf.dtypes.cast(tf.constant(epsilon), gradient.type()), - gradient); + gradient, + ApplyCenteredRmsProp.useLocking(true)); } return tf.train.applyRmsProp( variable, @@ -212,7 +215,8 @@ protected Op applyDense(Output gradient, Output variable tf.dtypes.cast(tf.constant(decay), gradient.type()), tf.dtypes.cast(tf.constant(momentum), gradient.type()), tf.dtypes.cast(tf.constant(epsilon), gradient.type()), - gradient); + gradient, + ApplyRmsProp.useLocking(true)); } /** {@inheritDoc} */ diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java index aefcc537979..d6786b71972 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java @@ -2,19 +2,34 @@ import org.junit.jupiter.api.*; import org.tensorflow.Graph; +import org.tensorflow.Session; +import org.tensorflow.Tensor; +import org.tensorflow.framework.initializers.Glorot; +import org.tensorflow.framework.initializers.VarianceScaling; import org.tensorflow.framework.utils.TestSession; +import org.tensorflow.ndarray.FloatNdArray; import org.tensorflow.ndarray.Shape; +import org.tensorflow.ndarray.buffer.DataBuffers; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; import org.tensorflow.op.core.Assign; import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Init; +import org.tensorflow.op.core.Placeholder; import org.tensorflow.op.core.Variable; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.Mean; +import org.tensorflow.op.nn.Relu; +import org.tensorflow.proto.framework.ConfigProto; +import org.tensorflow.proto.framework.GraphDef; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TType; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; /** Test cases for GradientDescent Optimizer */ @@ -97,4 +112,149 @@ public void testBasic() { session.evaluate(expectedVar1, var1); } } + + // This test fails due to incorrect gradients being generated some of the time, when + // using an identical graph on identical data. 
It should not, but it seems to be a + // problem in TF-core. + @Disabled + @Test + public void testDeterminism() { + ConfigProto config = + ConfigProto.newBuilder() + .setIntraOpParallelismThreads(1) + .setInterOpParallelismThreads(1) + .build(); + + GraphDef def; + String initName; + String trainName; + String lossName; + + String fcWeightName, fcBiasName, outputWeightName, outputBiasName; + + try (Graph g = new Graph()) { + Ops tf = Ops.create(g); + + Glorot initializer = + new Glorot<>(tf, VarianceScaling.Distribution.TRUNCATED_NORMAL, 1L); + // Inputs + Placeholder input = + tf.withName("input").placeholder(TFloat32.class, Placeholder.shape(Shape.of(-1, 20))); + + // Fully connected layer + Variable fcWeights = + tf.variable(initializer.call(tf.array(20L, 200L), TFloat32.class)); + fcWeightName = fcWeights.op().name(); + Variable fcBiases = tf.variable(tf.fill(tf.array(200), tf.constant(0.1f))); + fcBiasName = fcBiases.op().name(); + Relu relu = tf.nn.relu(tf.math.add(tf.linalg.matMul(input, fcWeights), fcBiases)); + + // Output layer + Variable outputWeights = + tf.variable(initializer.call(tf.array(200L, 2L), TFloat32.class)); + outputWeightName = outputWeights.op().name(); + Variable outputBiases = tf.variable(tf.fill(tf.array(2L), tf.constant(0.1f))); + outputBiasName = outputBiases.op().name(); + Add output = tf.math.add(tf.linalg.matMul(relu, outputWeights), outputBiases); + + // Loss + Placeholder placeholder = + tf.withName("output").placeholder(TFloat32.class, Placeholder.shape(Shape.of(-1, 2))); + Mean loss = + tf.math.mean( + tf.nn.raw.softmaxCrossEntropyWithLogits(output, placeholder).loss(), tf.constant(0)); + lossName = loss.op().name(); + + GradientDescent gd = new GradientDescent(g, 10.0f); + Op trainingOp = gd.minimize(loss); + trainName = trainingOp.op().name(); + + // Create the init op + Init init = tf.init(); + initName = init.op().name(); + + def = g.toGraphDef(); + } + + float[] data = + new float[] { + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, -8.0f, -9.0f, 10.0f, 11.0f, 12.0f, 13.0f, + -14.0f, -15.0f, 0.16f, 0.17f, 0.18f, 1.9f, 0.2f + }; + TFloat32 dataTensor = TFloat32.tensorOf(Shape.of(1, 20), DataBuffers.of(data)); + float[] target = new float[] {0.2f, 0.8f}; + TFloat32 targetTensor = TFloat32.tensorOf(Shape.of(1, 2), DataBuffers.of(target)); + + int numRuns = 20; + List> initialized = new ArrayList<>(numRuns); + List> trained = new ArrayList<>(numRuns); + float[] initialLoss = new float[numRuns]; + float[] postTrainingLoss = new float[numRuns]; + + for (int i = 0; i < numRuns; i++) { + try (Graph g = new Graph(); + Session s = new Session(g, config)) { + g.importGraphDef(def); + s.run(initName); + + initialized.add( + s.runner() + .fetch(fcWeightName) + .fetch(fcBiasName) + .fetch(outputWeightName) + .fetch(outputBiasName) + .run()); + + TFloat32 lossVal = (TFloat32) s.runner() + .addTarget(trainName) + .feed("input", dataTensor) + .feed("output", targetTensor) + .fetch(lossName) + .run().get(0); + initialLoss[i] = lossVal.getFloat(); + lossVal.close(); + + trained.add( + s.runner() + .fetch(fcWeightName) + .fetch(fcBiasName) + .fetch(outputWeightName) + .fetch(outputBiasName) + .run()); + + lossVal = (TFloat32) s.runner() + .addTarget(trainName) + .feed("input", dataTensor) + .feed("output", targetTensor) + .fetch(lossName) + .run().get(0); + postTrainingLoss[i] = lossVal.getFloat(); + lossVal.close(); + } + } + + for (int i = 1; i < numRuns; i++) { + assertEquals(initialLoss[0], initialLoss[i]); + assertEquals(postTrainingLoss[0], postTrainingLoss[i]); + 
// Because the weights are references not copies. + assertEquals(initialized.get(i), trained.get(i)); + assertEquals( + initialized.get(0), + initialized.get(i), + "Variables not initialized identically (0," + i + ")"); + assertEquals( + trained.get(0), trained.get(i), "Variables not trained identically (0," + i + ")"); + } + + for (List curInit : initialized) { + for (Tensor t : curInit) { + t.close(); + } + } + for (List curTrained : trained) { + for (Tensor t : curTrained) { + t.close(); + } + } + } } From 94f5b151b17d76ebf286efb13869b34758468719 Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Mon, 17 May 2021 16:20:19 -0400 Subject: [PATCH 24/60] Load TF library before computing TString size (#322) --- .../java/org/tensorflow/EagerSession.java | 7 ++++- .../src/main/java/org/tensorflow/Graph.java | 7 ++++- .../main/java/org/tensorflow/RawTensor.java | 7 ++++- .../java/org/tensorflow/SavedModelBundle.java | 7 ++++- .../src/main/java/org/tensorflow/Server.java | 7 ++++- .../main/java/org/tensorflow/TensorFlow.java | 7 ++--- .../buffer/ByteSequenceTensorBuffer.java | 10 +++++++ .../java/org/tensorflow/WrongEnvTest.java | 2 +- .../tensorflow/op/core/BooleanMaskTest.java | 29 ++++++++++--------- .../op/core/BooleanMaskUpdateTest.java | 3 +- .../org/tensorflow/op/core/IndexingTest.java | 2 +- .../org/tensorflow/types/TStringTest.java | 20 +++++++------ 12 files changed, 72 insertions(+), 36 deletions(-) diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java index 8e7465388a8..dad842f7038 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java @@ -404,6 +404,11 @@ private static void delete(TFE_Context handle) { } static { - TensorFlow.init(); + try { + // Ensure that TensorFlow native library and classes are ready to be used + Class.forName("org.tensorflow.TensorFlow"); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java index ff805c73b53..7f659b262a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java @@ -1070,6 +1070,11 @@ private static SaverDef addVariableSaver(Graph graph) { } static { - TensorFlow.init(); + try { + // Ensure that TensorFlow native library and classes are ready to be used + Class.forName("org.tensorflow.TensorFlow"); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java index c332fd7f1d1..2a4a21face3 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java @@ -228,6 +228,11 @@ private static long[] shape(TF_Tensor handle) { private ByteDataBuffer buffer = null; static { - TensorFlow.init(); + try { + // Ensure that TensorFlow native library and classes are ready to be used + Class.forName("org.tensorflow.TensorFlow"); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } 
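The static blocks added in the hunks above all rely on the same idiom: loading the org.tensorflow.TensorFlow class triggers its static initializer, which in turn loads the native library. A minimal sketch of that idiom, using a hypothetical class name that is not part of this patch:

// Hypothetical illustration only; any class that issues JNI calls can use it.
final class NativeDependentComponent {
  static {
    try {
      // Class.forName initializes org.tensorflow.TensorFlow, whose own static
      // block calls NativeLibrary.load(), so the TensorFlow runtime is loaded
      // before any native call is made from this class.
      Class.forName("org.tensorflow.TensorFlow");
    } catch (ClassNotFoundException e) {
      throw new RuntimeException(e);
    }
  }

  private NativeDependentComponent() {}
}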
} } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java index 0974cc94a24..6992e5eee37 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java @@ -435,6 +435,11 @@ private static void validateTags(String[] tags) { } static { - TensorFlow.init(); + try { + // Ensure that TensorFlow native library and classes are ready to be used + Class.forName("org.tensorflow.TensorFlow"); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Server.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Server.java index e3b685889e1..2488a93c929 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Server.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Server.java @@ -178,6 +178,11 @@ private static void delete(TF_Server nativeHandle) { private int numJoining; static { - TensorFlow.init(); + try { + // Ensure that TensorFlow native library and classes are ready to be used + Class.forName("org.tensorflow.TensorFlow"); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java index 338101c962b..de481d256a3 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java @@ -32,6 +32,7 @@ /** Static utility methods describing the TensorFlow runtime. */ public final class TensorFlow { + /** Returns the version of the underlying TensorFlow runtime. */ public static String version() { return TF_Version().getString(); @@ -106,7 +107,7 @@ private static OpList libraryOpList(TF_Library handle) { private TensorFlow() {} /** Load the TensorFlow runtime C library. 
*/ - static void init() { + static { try { NativeLibrary.load(); } catch (Exception e) { @@ -121,8 +122,4 @@ static void init() { throw e; } } - - static { - init(); - } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/buffer/ByteSequenceTensorBuffer.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/buffer/ByteSequenceTensorBuffer.java index acaeaedbc11..48ee4f72bee 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/buffer/ByteSequenceTensorBuffer.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/buffer/ByteSequenceTensorBuffer.java @@ -28,6 +28,7 @@ import org.bytedeco.javacpp.Loader; import org.bytedeco.javacpp.Pointer; import org.bytedeco.javacpp.PointerScope; +import org.tensorflow.TensorFlow; import org.tensorflow.ndarray.buffer.DataBuffer; import org.tensorflow.internal.c_api.TF_TString; import org.tensorflow.ndarray.impl.buffer.AbstractDataBuffer; @@ -132,4 +133,13 @@ void writeNext(byte[] bytes) { } private final TF_TString data; + + static { + try { + // Ensure that TensorFlow native library and classes are ready to be used + Class.forName("org.tensorflow.TensorFlow"); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/WrongEnvTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/WrongEnvTest.java index b2fbc1e794a..18bdeb40e83 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/WrongEnvTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/WrongEnvTest.java @@ -20,7 +20,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.tensorflow.op.Ops; import org.tensorflow.types.TInt32; diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java index a4d9293ccf8..7c5210c0f2d 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java @@ -18,7 +18,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.tensorflow.Graph; import org.tensorflow.Operand; import org.tensorflow.Session; @@ -29,6 +29,7 @@ import org.tensorflow.types.TInt32; public class BooleanMaskTest { + @Test public void testBooleanMask(){ try (Graph g = new Graph(); @@ -43,24 +44,24 @@ public void testBooleanMask(){ Operand output1 = BooleanMask.create(scope, input, mask); Operand output2 = BooleanMask.create(scope, input2, mask, BooleanMask.axis(1)); - try (TFloat32 result = (TFloat32) sess.runner().fetch(output1).run().get(0)) { + try (TInt32 result = (TInt32) sess.runner().fetch(output1).run().get(0)) { // expected shape from Python tensorflow assertEquals(Shape.of(5), result.shape()); - assertEquals(0, result.getFloat(0)); - assertEquals(1, result.getFloat(1)); - assertEquals(4, result.getFloat(2)); - assertEquals(5, result.getFloat(3)); - assertEquals(6, result.getFloat(4)); + assertEquals(0, result.getInt(0)); + assertEquals(1, result.getInt(1)); + assertEquals(4, result.getInt(2)); + assertEquals(5, 
result.getInt(3)); + assertEquals(6, result.getInt(4)); } - try (TFloat32 result = (TFloat32) sess.runner().fetch(output2).run().get(0)) { + try (TInt32 result = (TInt32) sess.runner().fetch(output2).run().get(0)) { // expected shape from Python tensorflow - assertEquals(Shape.of(5), result.shape()); - assertEquals(0, result.getFloat(0)); - assertEquals(1, result.getFloat(1)); - assertEquals(4, result.getFloat(2)); - assertEquals(5, result.getFloat(3)); - assertEquals(6, result.getFloat(4)); + assertEquals(Shape.of(1, 5), result.shape()); + assertEquals(0, result.getInt(0, 0)); + assertEquals(1, result.getInt(0, 1)); + assertEquals(4, result.getInt(0, 2)); + assertEquals(5, result.getInt(0, 3)); + assertEquals(6, result.getInt(0, 4)); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java index ab852bbffb2..c2b514bfdb6 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java @@ -19,7 +19,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.List; -import org.junit.Test; + +import org.junit.jupiter.api.Test; import org.tensorflow.Graph; import org.tensorflow.Operand; import org.tensorflow.Session; diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IndexingTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IndexingTest.java index 6e86573b7cf..9a66d2445d2 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IndexingTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IndexingTest.java @@ -17,7 +17,7 @@ import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.tensorflow.Graph; import org.tensorflow.Session; import org.tensorflow.ndarray.Shape; diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TStringTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TStringTest.java index 7efeb93f0d8..c8182ec8d57 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TStringTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TStringTest.java @@ -23,7 +23,9 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import java.nio.charset.StandardCharsets; + import org.bytedeco.javacpp.Pointer; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import org.tensorflow.ndarray.NdArray; import org.tensorflow.ndarray.NdArrays; @@ -39,16 +41,15 @@ public void createScalar() { assertEquals("Pretty vacant", tensor.getObject()); } - @Test - public void createrScalarLongerThan127() { - TString tensor = TString.scalarOf("Long String 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 !"); - assertNotNull(tensor); - assertEquals(Shape.scalar(), tensor.shape()); - assertEquals("Long String 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 !", tensor.getObject()); - } - + @Test + public 
void createrScalarLongerThan127() { + TString tensor = TString.scalarOf("Long String 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 !"); + assertNotNull(tensor); + assertEquals(Shape.scalar(), tensor.shape()); + assertEquals("Long String 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 !", tensor.getObject()); + } - @Test + @Test public void createVector() { TString tensor = TString.vectorOf("Pretty", "vacant"); assertNotNull(tensor); @@ -106,6 +107,7 @@ public void initializingTensorWithRawBytes() { } @Test + @Disabled // FIXME This test does not deterministically succeed, so skip it by default public void testNoLeaks() throws Exception { // warm up and try to get all JIT compilation done to stabilize memory usage... for (int i = 0; i < 1000; i++) { From 743475dfdaf2d42ed05d7f34394294a19f5641c3 Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Wed, 19 May 2021 09:42:06 -0400 Subject: [PATCH 25/60] Update README.md --- tensorflow-framework/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow-framework/README.md b/tensorflow-framework/README.md index b70ee51ec65..57edf2b0180 100644 --- a/tensorflow-framework/README.md +++ b/tensorflow-framework/README.md @@ -1,4 +1,4 @@ -# Framework API +# TensorFlow Framework API This is the primary Java API for building and training neural networks with TensorFlow. This API deliberately mirrors the overall structure of Python Keras. However, it @@ -25,4 +25,4 @@ More specifically, the following goals drive API evolution: * Also, the framework API should support fine control over all aspects of modeling, training, and inference. Unlike with Python Keras, we want this to feel like staying in the same API rather than diving into a separate layer. But here again, if we are ever unable to reconcile this goal - with easy translation from Python Keras, we may split the framework API into two layers. \ No newline at end of file + with easy translation from Python Keras, we may split the framework API into two layers. From 3648a96aa567a7ea21d84934a7fde59fcea8fb74 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 21 May 2021 15:25:38 -0700 Subject: [PATCH 26/60] Fix sometimes generating Javadoc for scope param in Ops (#291) Signed-off-by: Ryan Nett --- .../annotations/org/tensorflow/op/NnOps.java | 3 - .../annotations/org/tensorflow/op/Ops.java | 555 ++++++++---------- .../org/tensorflow/op/ShapeOps.java | 32 - .../processor/operator/OperatorProcessor.java | 16 +- 4 files changed, 248 insertions(+), 358 deletions(-) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 8958b4fe2ff..acab988384a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -1835,7 +1835,6 @@ public Selu selu(Operand features) { * *

* - * @param scope The TensorFlow scope * @param labels the labels * @param logits the logits of type float32 or float64 * @param the type of labels and logits @@ -1897,7 +1896,6 @@ public Softmax softmax(Operand logits) { * disallow backpropagation into labels, pass label tensors through * tf.stopGradient before feeding it to this function. * - * @param scope current scope * @param labels Each vector along the class dimension should hold a valid probability * distribution e.g. for the case in which labels are of shape [batch_size, num_classes] * , each row of labels[i] must be a valid probability distribution. @@ -2125,7 +2123,6 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo * , or TFloat64, and labels must have the dtype of TInt32 * or TInt64. * - * @param scope current scope * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r * is rank of labels and result) and the dataType is TInt32 * or TInt64. Each entry in labels must be an index in [0, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 733e7ca7051..4044838de87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -442,42 +442,38 @@ public Any any(Operand input, Operand axis, Any.Option } /** - * Creates a constant of {@code int} elements. + * Creates a constant of {@code String} elements, using the default UTF-8 charset. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a float constant + * @return the {@code String} constant */ - public Constant array(int... data) { + public Constant array(String... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code String} elements, using the default UTF-8 charset. + * Creates a constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return the {@code String} constant + * @return a float constant */ - public Constant array(String... data) { + public Constant array(int... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code boolean} elements. + * Creates a constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a boolean constant + * @return a double constant */ - public Constant array(boolean... data) { + public Constant array(double... data) { return Constant.arrayOf(scope, data); } /** * Creates a constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a long constant */ @@ -486,42 +482,38 @@ public Constant array(long... data) { } /** - * Creates a constant of {@code float} elements. + * Creates a constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a float constant + * @return a byte constant */ - public Constant array(float... data) { + public Constant array(byte... 
data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code double} elements. + * Creates a constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a double constant + * @return a boolean constant */ - public Constant array(double... data) { + public Constant array(boolean... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code byte} elements. + * Creates a constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a byte constant + * @return a float constant */ - public Constant array(byte... data) { + public Constant array(float... data) { return Constant.arrayOf(scope, data); } /** * Creates a constant of {@code String} elements, using the given charset. * - * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are * sequences of bytes from the last array dimension. @@ -1006,7 +998,6 @@ public Bitcast bitcast(Operand input, Clas * In that case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape must match * the first {@code axis + dim(mask)} dimensions of {@code tensor}'s shape. * - * @param scope * @param tensor The tensor to mask. * @param mask The mask to apply. * @param options carries optional attributes values @@ -1163,184 +1154,222 @@ public Concat concat(Iterable> values, } /** - * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. + * Creates a constant containing a single {@code int} element. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code long} elements. - * @return a long constant + * @param data The value to put into the new constant. + * @return an integer constant */ - public Constant constant(LongNdArray data) { + public Constant constant(int data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-3 constant of {@code double} elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant + */ + public Constant constant(double[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-1 constant of {@code int} elements. + * Creates a rank-5 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a byte constant */ - public Constant constant(int[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(byte[][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code int} elements. + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. + * + * @param data an n-dimensional array of {@code String} elements. 
+ * @return a string constant + */ + public Constant constant(NdArray data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-4 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return an integer constant */ - public Constant constant(int[][][] data) { + public Constant constant(int[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code double} element. + * Creates a constant containing a single {@code byte} element. * - * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. - * @return a double constant + * @return a byte constant */ - public Constant constant(double data) { + public Constant constant(byte data) { return Constant.scalarOf(scope, data); } /** - * Creates a rank-5 constant of {@code long} elements. + * Creates a rank-2 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a long constant */ - public Constant constant(long[][][][][] data) { + public Constant constant(long[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code boolean} elements. + * Creates a rank-6 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant + * @return a float constant */ - public Constant constant(boolean[][][][][] data) { + public Constant constant(float[][][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. + * Creates a rank-6 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code int} elements. - * @return an integer constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public Constant constant(IntNdArray data) { + public Constant constant(boolean[][][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code double} elements that is a copy of a given n-dimensional array. + * Creates a rank-4 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code double} elements. - * @return a double constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public Constant constant(DoubleNdArray data) { + public Constant constant(boolean[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-4 constant of {@code int} elements. + * Creates a rank-3 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. 
* @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a float constant */ - public Constant constant(int[][][][] data) { + public Constant constant(float[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-6 constant of {@code float} elements. + * Creates a rank-5 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a float constant */ - public Constant constant(float[][][][][][] data) { + public Constant constant(float[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code byte} element. + * Creates a rank-5 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a byte constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public Constant constant(byte data) { - return Constant.scalarOf(scope, data); + public Constant constant(long[][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code boolean} elements. + * Creates a rank-1 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant + * @return an integer constant */ - public Constant constant(boolean[][][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(int[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a rank-4 constant of {@code float} elements. + * Creates a rank-2 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a float constant */ - public Constant constant(float[][][][] data) { + public Constant constant(float[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code long} elements. + * Creates a rank-2 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a long constant + * @return a boolean constant */ - public Constant constant(long[][] data) { + public Constant constant(boolean[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code byte} elements. + * Creates a constant containing a single {@code double} element. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a byte constant + * @param data The value to put into the new constant. 
+ * @return a double constant */ - public Constant constant(byte[][][][][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(double data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code boolean} element. + * + * @param data The value to put into the new constant. + * @return a boolean constant + */ + public Constant constant(boolean data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code long} element. + * + * @param data The value to put into the new constant. + * @return a long constant + */ + public Constant constant(long data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a {@code String} constant using the default, UTF-8 encoding. + * + * @param data The string to put into the new constant. + * @return a string constant + */ + public Constant constant(String data) { + return Constant.scalarOf(scope, data); } /** * Creates a constant of {@code boolean} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of {@code boolean} elements. * @return a boolean constant */ @@ -1349,68 +1378,62 @@ public Constant constant(BooleanNdArray data) { } /** - * Creates a rank-2 constant of {@code float} elements. + * Creates a rank-1 constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return a double constant */ - public Constant constant(float[][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(double[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a constant of {@code byte} elements that is a copy of a given n-dimensional array. + * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code byte} elements. - * @return a byte constant + * @param data an n-dimensional array of {@code long} elements. + * @return a long constant */ - public Constant constant(ByteNdArray data) { + public Constant constant(LongNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code byte} elements. + * Creates a rank-1 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a byte constant + * @return a float constant */ - public Constant constant(byte[][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(float[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a rank-5 constant of {@code double} elements. + * Creates a rank-3 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @return a double constant + * @return a long constant */ - public Constant constant(double[][][][][] data) { + public Constant constant(long[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code float} elements. + * Creates a rank-3 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return a boolean constant */ - public Constant constant(float[][][] data) { + public Constant constant(boolean[][][] data) { return Constant.tensorOf(scope, data); } /** * Creates a rank-1 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a byte constant @@ -1420,103 +1443,83 @@ public Constant constant(byte[] data) { } /** - * Creates a rank-1 constant of {@code float} elements. + * Creates a rank-3 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return an integer constant */ - public Constant constant(float[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(int[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code boolean} elements. + * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a boolean constant + * @param data an n-dimensional array of {@code int} elements. + * @return an integer constant */ - public Constant constant(boolean[][] data) { + public Constant constant(IntNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, - * using the default UTF-8 encoding. + * Creates a rank-1 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code String} elements. - * @return a string constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public Constant constant(NdArray data) { - return Constant.tensorOf(scope, data); + public Constant constant(long[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a {@code String} constant using the default, UTF-8 encoding. + * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data The string to put into the new constant. - * @return a string constant + * @param data an n-dimensional array of {@code float} elements. 
+ * @return a float constant */ - public Constant constant(String data) { - return Constant.scalarOf(scope, data); + public Constant constant(FloatNdArray data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-4 constant of {@code double} elements. + * Creates a rank-5 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a double constant + * @return an integer constant */ - public Constant constant(double[][][][] data) { + public Constant constant(int[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code double} elements. + * Creates a rank-5 constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a double constant */ - public Constant constant(double[][] data) { + public Constant constant(double[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code int} element. - * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return an integer constant - */ - public Constant constant(int data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a rank-4 constant of {@code byte} elements. + * Creates a rank-5 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a byte constant + * @return a boolean constant */ - public Constant constant(byte[][][][] data) { + public Constant constant(boolean[][][][][] data) { return Constant.tensorOf(scope, data); } /** * Creates a rank-6 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return an integer constant @@ -1526,55 +1529,29 @@ public Constant constant(int[][][][][][] data) { } /** - * Creates a constant containing a single {@code long} element. - * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a long constant - */ - public Constant constant(long data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a constant containing a single {@code float} element. - * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a float constant - */ - public Constant constant(float data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a rank-5 constant of {@code float} elements. + * Creates a constant of {@code double} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. 
- * @return a float constant + * @param data an n-dimensional array of {@code double} elements. + * @return a double constant */ - public Constant constant(float[][][][][] data) { + public Constant constant(DoubleNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code double} elements. + * Creates a rank-6 constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a double constant */ - public Constant constant(double[][][] data) { + public Constant constant(double[][][][][][] data) { return Constant.tensorOf(scope, data); } /** * Creates a rank-6 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a long constant @@ -1584,33 +1561,19 @@ public Constant constant(long[][][][][][] data) { } /** - * Creates a rank-4 constant of {@code long} elements. + * Creates a rank-2 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a long constant + * @return an integer constant */ - public Constant constant(long[][][][] data) { + public Constant constant(int[][] data) { return Constant.tensorOf(scope, data); } - /** - * Creates a rank-1 constant of {@code long} elements. - * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a long constant - */ - public Constant constant(long[] data) { - return Constant.vectorOf(scope, data); - } - /** * Creates a rank-1 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a boolean constant @@ -1620,132 +1583,110 @@ public Constant constant(boolean[] data) { } /** - * Creates a rank-3 constant of {@code byte} elements. + * Creates a constant containing a single {@code float} element. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a byte constant + * @param data The value to put into the new constant. + * @return a float constant */ - public Constant constant(byte[][][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(float data) { + return Constant.scalarOf(scope, data); } /** - * Creates a rank-6 constant of {@code byte} elements. + * Creates a rank-4 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
* @return a byte constant */ - public Constant constant(byte[][][][][][] data) { + public Constant constant(byte[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code int} elements. + * Creates a rank-4 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a float constant */ - public Constant constant(int[][] data) { + public Constant constant(float[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. + * Creates a constant of {@code byte} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code float} elements. - * @return a float constant + * @param data an n-dimensional array of {@code byte} elements. + * @return a byte constant */ - public Constant constant(FloatNdArray data) { + public Constant constant(ByteNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code int} elements. + * Creates a rank-6 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a byte constant */ - public Constant constant(int[][][][][] data) { + public Constant constant(byte[][][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-1 constant of {@code double} elements. + * Creates a rank-4 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a double constant + * @return a long constant */ - public Constant constant(double[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(long[][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-6 constant of {@code boolean} elements. + * Creates a rank-2 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant + * @return a byte constant */ - public Constant constant(boolean[][][][][][] data) { + public Constant constant(byte[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-6 constant of {@code double} elements. + * Creates a rank-2 constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a double constant */ - public Constant constant(double[][][][][][] data) { + public Constant constant(double[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code boolean} element. 
- * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a boolean constant - */ - public Constant constant(boolean data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a rank-4 constant of {@code boolean} elements. + * Creates a rank-3 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant + * @return a byte constant */ - public Constant constant(boolean[][][][] data) { + public Constant constant(byte[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code long} elements. + * Creates a rank-4 constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a long constant + * @return a double constant */ - public Constant constant(long[][][] data) { + public Constant constant(double[][][][] data) { return Constant.tensorOf(scope, data); } @@ -1753,7 +1694,6 @@ public Constant constant(long[][][] data) { * Creates a rank-1 constant of {@code long} elements representing the size of each dimensions of * the given shape. * - * @param scope is a scope used to add the underlying operation. * @param shape a shape * @return a long constant */ @@ -1761,10 +1701,21 @@ public Constant constant(Shape shape) { return Constant.tensorOf(scope, shape); } + /** + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the given encoding. + * + * @param charset charset used to encode/decode string bytes. + * @param data an n-dimensional array of {@code String} elements. + * @return a string constant + */ + public Constant constant(Charset charset, NdArray data) { + return Constant.tensorOf(scope, charset, data); + } + /** * Creates a constant of {@code String} elements, using the given charset. * - * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are * sequences of bytes from the last array dimension. @@ -1777,7 +1728,6 @@ public Constant constant(Charset charset, String[] data) { /** * Creates a {@code String} constant using a specified encoding. * - * @param scope is a scope used to add the underlying operation. * @param charset The encoding from String to bytes. * @param data The string to put into the new constant. * @return a string constant @@ -1787,48 +1737,33 @@ public Constant constant(Charset charset, String data) { } /** - * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, - * using the given encoding. - * - * @param scope is a scope used to add the underlying operation. - * @param charset charset used to encode/decode string bytes. - * @param data an n-dimensional array of {@code String} elements. - * @return a string constant - */ - public Constant constant(Charset charset, NdArray data) { - return Constant.tensorOf(scope, charset, data); - } - - /** - * Create a {@link TFloat32} constant with data from the given buffer. 
+ * Create a {@link TBool} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a float constant + * @return an boolean constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, FloatDataBuffer data) { + public Constant constant(Shape shape, BooleanDataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** - * Create a {@link TBool} constant with data from the given buffer. + * Create a {@link TString} constant with data from the given buffer, using the default UTF-8 + * encoding. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an boolean constant + * @return a string constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, BooleanDataBuffer data) { + public Constant constant(Shape shape, DataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** * Create a {@link TUint8} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. * @return a byte constant @@ -1839,36 +1774,32 @@ public Constant constant(Shape shape, ByteDataBuffer data) { } /** - * Create a {@link TInt64} constant with data from the given buffer. + * Create a {@link TInt32} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a long constant + * @return an integer constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, LongDataBuffer data) { + public Constant constant(Shape shape, IntDataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** - * Create a {@link TString} constant with data from the given buffer, using the default UTF-8 - * encoding. + * Create a {@link TInt64} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a string constant + * @return a long constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, DataBuffer data) { + public Constant constant(Shape shape, LongDataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** * Create a {@link TFloat64} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. * @return a double constant @@ -1879,15 +1810,14 @@ public Constant constant(Shape shape, DoubleDataBuffer data) { } /** - * Create a {@link TInt32} constant with data from the given buffer. + * Create a {@link TFloat32} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. 
- * @return an integer constant + * @return a float constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, IntDataBuffer data) { + public Constant constant(Shape shape, FloatDataBuffer data) { return Constant.tensorOf(scope, shape, data); } @@ -1908,7 +1838,6 @@ public Constant constant(Class type, Number number) { /** * Create a {@link TString} constant with data from the given buffer, using the given encoding. * - * @param scope is a scope used to add the underlying operation. * @param charset charset used to encode/decode string bytes. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -1923,7 +1852,6 @@ public Constant constant(Charset charset, Shape shape, DataBuffer the tensor type - * @param scope is a scope used to add the underlying operation. * @param type the tensor type class * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -1942,7 +1870,6 @@ public Constant constant(Class type, Shape shape, ByteDa *

Note: this endpoint cannot be simply called {@code constant} since it will conflict with * other endpoints accepting an NdArray in parameter {e.g. {@link #tensorOf(Scope, FloatNdArray)}}. * - * @param scope is a scope used to add the underlying operation. * @param tensor a Tensor holding the constant value * @return a constant of the same data type as `tensor` */ @@ -2690,7 +2617,6 @@ public GetSessionTensor getSessionTensor(Operand h /** * Adds gradients computation ops to the graph according to scope. * - * @param scope current graph scope * @param y outputs of the function to derive * @param x inputs of the function for which partial derivatives are computed * @param options carries optional attributes values @@ -2936,7 +2862,6 @@ public ImmutableConst immutableConst(Class dtype, Shape * } * } * - * @param scope current scope * @return an op grouping all initializers added to the graph * @throws IllegalArgumentException if the execution environment in scope is not a graph */ @@ -2951,7 +2876,6 @@ public Init init() { * and executing an {@link org.tensorflow.op.core.Init#create(Scope) init} operation from a graph * session. This is a no-op if executed in an eager session. * - * @param scope * @param initializer * @see org.tensorflow.op.core.Init#create(Scope) init */ @@ -3655,7 +3579,6 @@ public OneHot oneHot(Operand indices, /** * Creates a one valued tensor given its type and shape. * - * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones @@ -6039,7 +5962,6 @@ public StopGradient stopGradient(Operand input) { * Requirements: * `0 != strides[i] for i in [0, m)` Only one ellipsis. * - * @param scope current scope * @param data type for {@code output()} output * @param indices The indices to slice. See {@link Indices}. * @return a new instance of StridedSlice @@ -6183,7 +6105,6 @@ public StridedSlice stridedSlice(Operand * the slice of `ref`. * * @param data type for {@code outputRef()} output - * @param scope current scope * @param ref the tensor to assign to. * @param value the value to assign. * @param indices The indices to slice. See {@link Indices}. @@ -7726,7 +7647,6 @@ public VarIsInitializedOp varIsInitializedOp(Operand resource) * Only supported on Graph sessions as the {@link org.tensorflow.op.core.Assign} op * does not work in an EagerSession. * - * @param scope current scope * @param init The op to use to initialise this variable. * @param options carries optional attributes values * @return a new instance of Variable @@ -7896,7 +7816,6 @@ public XlaSpmdShardToFullShape xlaSpmdShardToFullShape(Oper /** * Creates a zeroed tensor given its type and shape. 
* - * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor datatype * @return a constant tensor initialized with zeros diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java index ac5ec77a7fb..24a7cdca0e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java @@ -44,7 +44,6 @@ public final class ShapeOps { * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append * @return a 1-dimensional operand containing the dimensions of a shape followed by the last @@ -58,7 +57,6 @@ public Operand append(Shape shape, long lastDimension) { * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append * @return a 1-dimensional operand containing the dimensions of a shape followed by the last @@ -73,7 +71,6 @@ public Operand append(Shape shape, int lastDimension) { * operand representing a shape, followed by the dimensions of an operand representing a shape to * append. * - * @param scope current scope * @param shape the TensorFlow shape * @param shapeToAppend the other shape to append * @return a 1-dimensional operand that represents a new shape containing the dimensions of the @@ -88,7 +85,6 @@ public Operand append(Operand shape, Operand shapeT * Flatten the operand to 1 dimension. * * @param the type of operand - * @param scope current scope * @param operand the operand to flatten * @return the reshaped operand */ @@ -99,7 +95,6 @@ public Operand flatten(Operand operand) { /** * Flatten the shape to 1 dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @return the flattened shape */ @@ -112,7 +107,6 @@ public Operand flatten(Shape shape) { * * @param the type of operand * @param the shape datatype - * @param scope current scope * @param operand the operand to flatten * @param type the shape datatype * @return the reshaped operand @@ -126,7 +120,6 @@ public Operand flatten(Operand operan * Flatten the shape to 1 dimension. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype * @return the flattened shape @@ -138,7 +131,6 @@ public Operand flatten(Shape shape, Class type) { /** * Creates a 1-dimensional Operand containing the Shape's first dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand containing the Shape's first dimension */ @@ -149,7 +141,6 @@ public Operand head(Shape shape) { /** * Creates a 1-dimensional Operand containing the Shape's first dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @param the shape datatype. @@ -162,7 +153,6 @@ public Operand head(Shape shape, Class type) { /** * Get the number of dimensions of the shape object. 
* - * @param scope current scope * @param shape the shape * @return the number of dimensions */ @@ -174,7 +164,6 @@ public Operand numDimensions(Shape shape) { * Get the number of dimensions of the shape object. * * @param the shape datatype - * @param scope the curren scope * @param shape the shape * @param type the shape datatype * @return the number of dimensions @@ -187,7 +176,6 @@ public Operand numDimensions(Shape shape, Class typ * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend * @return a 1-dimensional operand containing the first dimension followed by the dimensions of @@ -201,7 +189,6 @@ public Operand prepend(Shape shape, long firstDimension) { * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend * @return a 1-dimensional operand containing the first dimension followed by the dimensions of @@ -216,7 +203,6 @@ public Operand prepend(Shape shape, int firstDimension) { * operand representing the shape to prepend, followed by the dimensions of an operand * representing a shape. * - * @param scope current scope * @param shape an operand containing the dimensions of a shape * @param shapeToPrepend an operand containing the dimensions of the shape to prepend * @return a 1-dimensional operand that represents a new shape containing the dimensions of an @@ -231,7 +217,6 @@ public Operand prepend(Operand shape, Operand shape * Reshapes the operand by reducing the shape to the specified axis. * * @param the type of Operand - * @param scope current scope * @param operand the operand * @param axis the axis * @return the reshaped operand @@ -243,7 +228,6 @@ public Operand reduceDims(Operand operand, Operand reduceDims(Shape shape, Operand axis) { * * @param the type of Operand * @param the shape datatype - * @param scope current scope * @param operand the operand * @param axis the axis * @param type the shape datatype @@ -272,7 +255,6 @@ public Operand reduceDims(Operand ope * Reduces the shape to the specified axis. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis * @param type the shape datatype @@ -285,7 +267,6 @@ public Operand reduceDims(Shape shape, Operand axis /** * Get the size represented by the TensorFlow shape. * - * @param scope current scope * @param shape the TensorFlow shape * @return the size */ @@ -296,7 +277,6 @@ public Operand size(Shape shape) { /** * Get the size of the specified dimension for the shape of the tensor. * - * @param scope current scope * @param input the operand * @param dim the dimension * @return the size of the specified dimension @@ -308,7 +288,6 @@ public Operand size(Operand input, Operand /** * Get the size of the specified dimension in the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @return the size of the specified dimension @@ -321,7 +300,6 @@ public Operand size(Shape shape, Operand dim) { * Get the size represented by the TensorFlow shape. 
* * @param the type of the shape - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype * @return the size @@ -334,7 +312,6 @@ public Operand size(Shape shape, Class type) { * Get the size of the specified dimension for the shape of the tensor. * * @param the shape datatype - * @param scope current scope * @param input the operand * @param dim the dimension * @param type the shape datatype @@ -349,7 +326,6 @@ public Operand size(Operand input, Op * Get the size of the specified dimension in the shape. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @param type the shape datatype @@ -362,7 +338,6 @@ public Operand size(Shape shape, Operand dim, Class /** * Removes dimensions of size 1 from the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @return the squeezed shape */ @@ -374,7 +349,6 @@ public Operand squeeze(Shape shape) { * Removes dimensions of size 1 from the shape. * * @param the shape datatype. - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @return the squeezed shape @@ -387,7 +361,6 @@ public Operand squeeze(Shape shape, Class type) { * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of the * Shape. * - * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand that contains the dimension matching the last dimension of the * Shape @@ -400,7 +373,6 @@ public Operand tail(Shape shape) { * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * * the Shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @param the shape datatype. @@ -415,7 +387,6 @@ public Operand tail(Shape shape, Class type) { * Creates a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the @@ -429,7 +400,6 @@ public Operand take(Shape shape, Operand n) { * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. @@ -445,7 +415,6 @@ public Operand take(Shape shape, Operand n, Class Operand takeLast(Shape shape, Operand * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. 
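The ShapeOps hunks above strip the `@param scope` javadoc tags because the scope is now carried by the `Ops` instance rather than passed explicitly. For readers unfamiliar with these shape helpers (flatten, size, head, take, ...), here is a minimal sketch of how they are typically invoked after this change; the `tf.shape` group accessor, the placeholder input, and the class name `ShapeOpsSketch` are illustrative assumptions, not part of this patch:

```java
import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Shape;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

public class ShapeOpsSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);

      // Hypothetical input tensor; any operand would do here.
      Operand<TFloat32> x = tf.placeholder(TFloat32.class);

      // Shape of x as a rank-1 TInt32 tensor (the core Shape op).
      Shape<TInt32> shape = tf.shape(x);

      // Shape helpers take no Scope argument; it is inferred from the Ops instance.
      Operand<TFloat32> flat = tf.shape.flatten(x);        // reshape x to 1-D
      Operand<TInt32> numElements = tf.shape.size(shape);  // total element count
      Operand<TInt32> firstDim = tf.shape.head(shape);     // leading dimension
    }
  }
}
```

Note that no `Scope` object appears in user code in this sketch; it is created from the `Graph` when `Ops.create(g)` is called, which is exactly why the generated javadoc no longer documents a `scope` parameter.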
diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index afb4050c366..bea817e9011 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -363,18 +363,24 @@ private MethodSpec buildOpMethod( private String buildOpMethodJavadoc( TypeElement opClass, ExecutableElement endpointMethod, boolean copyClassDescription) { Javadoc methodJavadoc = parseJavadoc(endpointMethod); + + Javadoc javadoc; + if (!copyClassDescription) { - return methodJavadoc.toText(); + javadoc = new Javadoc(methodJavadoc.getDescription()); + } else { + javadoc = parseJavadoc(opClass); } - Javadoc classJavadoc = parseJavadoc(opClass); + // Copy all endpoint method tags to the description, except for the `scope` parameter which // will be inferred by the Ops class methodJavadoc.getBlockTags().forEach(t -> { - if (!t.getTagName().equals("param") || t.getName().map(s -> !s.equals("scope")).orElse(true)) { - classJavadoc.addBlockTag(t); + if (!(t.getTagName().equals("param") && t.getName().map(s -> s.equals("scope")).orElse(false))) { + javadoc.addBlockTag(t); } }); - return classJavadoc.toText(); + + return javadoc.toText(); } private static Collection collectGroupOps(OpsSpec ops, Multimap groupedMethods) { From ceae4897095218b9293bdce5b107f0ab02aa3e72 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sat, 22 May 2021 19:10:13 -0700 Subject: [PATCH 27/60] Use spotless plugin for formating (#308) --- .mvn/jvm.config | 5 ++ license-header | 14 +++++ pom.xml | 57 ++++++++++++++----- .../tensorflow-core-generator/pom.xml | 12 ++++ 4 files changed, 75 insertions(+), 13 deletions(-) create mode 100644 .mvn/jvm.config create mode 100644 license-header diff --git a/.mvn/jvm.config b/.mvn/jvm.config new file mode 100644 index 00000000000..faf08556636 --- /dev/null +++ b/.mvn/jvm.config @@ -0,0 +1,5 @@ +--add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED \ No newline at end of file diff --git a/license-header b/license-header new file mode 100644 index 00000000000..12bf9309e9a --- /dev/null +++ b/license-header @@ -0,0 +1,14 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ======================================================================= + */ \ No newline at end of file diff --git a/pom.xml b/pom.xml index 728f39fcb3a..f9272d26421 100644 --- a/pom.xml +++ b/pom.xml @@ -1,5 +1,5 @@ + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> 4.0.0 @@ -44,6 +44,7 @@ true true true + 2.10.3 @@ -102,19 +103,19 @@ junit-jupiter-engine ${junit.version} test - - - org.openjdk.jmh - jmh-core - ${jmh.version} - test - - - org.openjdk.jmh - jmh-generator-annprocess - ${jmh.version} + + + org.openjdk.jmh + jmh-core + ${jmh.version} + test + + + org.openjdk.jmh + jmh-generator-annprocess + ${jmh.version} test - + @@ -299,6 +300,36 @@ true + + com.diffplug.spotless + spotless-maven-plugin + ${spotless.version} + + origin/master + + + + + + + + + ./license-header + + + + + + + + spotless-check + initialize + + check + + + + diff --git a/tensorflow-core/tensorflow-core-generator/pom.xml b/tensorflow-core/tensorflow-core-generator/pom.xml index 2c9cca5f924..25608fe7e24 100644 --- a/tensorflow-core/tensorflow-core-generator/pom.xml +++ b/tensorflow-core/tensorflow-core-generator/pom.xml @@ -59,6 +59,18 @@ + + com.diffplug.spotless + spotless-maven-plugin + ${spotless.version} + + + + src/main/java/org/tensorflow/proto/framework/** + + + + From 0f7274e89fc0c27c96d95940f9155ec4ed1f9182 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 23 May 2021 19:21:43 -0700 Subject: [PATCH 28/60] Quick fix for spotless (#324) --- .mvn/jvm.config | 5 ----- CONTRIBUTING.md | 14 ++++++++++++++ pom.xml | 41 +++++++++++++++++++++++++++++------------ 3 files changed, 43 insertions(+), 17 deletions(-) delete mode 100644 .mvn/jvm.config diff --git a/.mvn/jvm.config b/.mvn/jvm.config deleted file mode 100644 index faf08556636..00000000000 --- a/.mvn/jvm.config +++ /dev/null @@ -1,5 +0,0 @@ ---add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 06cde403bee..8a5fff2dcb6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,6 +21,20 @@ the `dev` profile in your Maven command to use those artifacts instead of buildi Modifying the native op generation code (not the annotation processor) or the JavaCPP configuration (not the abstract Pointers) will require a complete build could be required to reflect the changes, otherwise `-Pdev` should be fine. +## JDK 16+ + +If you're using JDK 16+, you need to add some exports for the formatter plugin: + +``` +--add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED +``` + +This can be done in `.mvn/jvm.config` or `MAVEN_OPTS`. + ### Native Builds In some cases, like when adding GPU support or re-generating op classes, you will need to re-build the native library. 
99% of this is building diff --git a/pom.xml b/pom.xml index f9272d26421..76504524ae9 100644 --- a/pom.xml +++ b/pom.xml @@ -44,7 +44,7 @@ true true true - 2.10.3 + 2.11.1 @@ -229,6 +229,33 @@ + + + format + + (,16) + + + + + com.diffplug.spotless + spotless-maven-plugin + ${spotless.version} + + + + + spotless-check + initialize + + check + + + + + + + @@ -305,6 +332,7 @@ spotless-maven-plugin ${spotless.version} + origin/master @@ -318,17 +346,6 @@ - - - - - spotless-check - initialize - - check - - - From ace917bf30d50a8414d666cd1bafc7808e43aa19 Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Wed, 26 May 2021 08:58:00 -0400 Subject: [PATCH 29/60] Temporarily disabling Linux MKL-GPU ... which refuses to build for the last 3 days --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 563aebd7055..f993a57e48e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -63,7 +63,7 @@ jobs: needs: prepare strategy: matrix: - ext: ["", -mkl, -gpu, -mkl-gpu] + ext: ["", -mkl, -gpu] #, -mkl-gpu] steps: - name: Install environment run: | From 3b4533c97eb51b0b653d2bfb1810851795c81e82 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Thu, 27 May 2021 18:18:02 -0700 Subject: [PATCH 30/60] Fix Scope name collisions (#248) --- license-header | 14 - pom.xml | 18 +- .../annotations/org/tensorflow/op/Ops.java | 10 +- .../java/org/tensorflow/EagerSession.java | 61 ++-- .../org/tensorflow/ExecutionEnvironment.java | 42 +-- .../src/main/java/org/tensorflow/Graph.java | 259 +++++++++-------- .../java/org/tensorflow/op/NameScope.java | 83 +++++- .../main/java/org/tensorflow/op/Scope.java | 88 +++--- .../java/org/tensorflow/op/ScopeTest.java | 46 ++- .../processor/operator/OperatorProcessor.java | 271 ++++++++++-------- 10 files changed, 537 insertions(+), 355 deletions(-) delete mode 100644 license-header diff --git a/license-header b/license-header deleted file mode 100644 index 12bf9309e9a..00000000000 --- a/license-header +++ /dev/null @@ -1,14 +0,0 @@ -/* - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= - */ \ No newline at end of file diff --git a/pom.xml b/pom.xml index 76504524ae9..7288f3661b3 100644 --- a/pom.xml +++ b/pom.xml @@ -342,7 +342,23 @@ - ./license-header + +/* Copyright $YEAR The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ======================================================================= + */ + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 4044838de87..92e4cabdbd1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -354,10 +354,10 @@ public final class Ops { public final SparseOps sparse; - public final TpuOps tpu; - public final BitwiseOps bitwise; + public final TpuOps tpu; + public final MathOps math; public final AudioOps audio; @@ -385,8 +385,8 @@ private Ops(Scope scope) { random = new RandomOps(this); strings = new StringsOps(this); sparse = new SparseOps(this); - tpu = new TpuOps(this); bitwise = new BitwiseOps(this); + tpu = new TpuOps(this); math = new MathOps(this); audio = new AudioOps(this); signal = new SignalOps(this); @@ -7884,7 +7884,7 @@ public final Scope scope() { * Creates an API for building operations in the provided execution environment */ public static Ops create(ExecutionEnvironment env) { - return new Ops(new Scope(env)); + return new Ops(env.baseScope()); } /** @@ -7893,6 +7893,6 @@ public static Ops create(ExecutionEnvironment env) { *

Invoking this method is equivalent to {@code Ops.create(EagerSession.getDefault())}. */ public static Ops create() { - return new Ops(new Scope(EagerSession.getDefault())); + return create(EagerSession.getDefault()); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java index dad842f7038..c5d67128406 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java @@ -1,18 +1,18 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import static org.tensorflow.internal.c_api.global.tensorflow.TFE_ContextOptionsSetAsync; @@ -29,6 +29,7 @@ import org.tensorflow.internal.c_api.TFE_ContextOptions; import org.tensorflow.internal.c_api.TF_Status; import org.tensorflow.op.Op; +import org.tensorflow.op.Scope; import org.tensorflow.op.core.Assign; import org.tensorflow.op.core.Placeholder; import org.tensorflow.op.core.Variable; @@ -112,7 +113,8 @@ public Options devicePlacementPolicy(DevicePlacementPolicy value) { * Configures the session based on the data found in the provided configuration. * * @param config a config protocol buffer - * @see config.proto + * @see config.proto */ public Options config(ConfigProto config) { this.config = config; @@ -306,6 +308,11 @@ public void checkInput(Op input) { } } + @Override + public Scope baseScope() { + return baseScope; + } + TFE_Context nativeHandle() { checkSession(); return nativeHandle; @@ -314,17 +321,16 @@ TFE_Context nativeHandle() { /** * Attach the list of native resources to this eager session scope. * - *

When the eager session is closed (i.e. by calling {@link #close()} explicitly or - * implicitly via try-with-resources), all native resources attached to the session will be - * released as well, unless so other references are {@link Pointer#retainReference() retaining} - * them.

+ *

When the eager session is closed (i.e. by calling {@link #close()} explicitly or implicitly + * via try-with-resources), all native resources attached to the session will be released as well, + * unless so other references are {@link Pointer#retainReference() retaining} them. * *

Attached resources can still be garbage collected though if their associated {@link Pointer} * is no longer reachable in Java, independently of their reference count. Therefore, it is * assumed that these resources are not required by the native library once the Java client no - * longer needs them.

+ * longer needs them. * - *

Attaching a resource already attached to this session will have no effect.

+ *

Attaching a resource already attached to this session will have no effect. * * @param resources resources to attach to the session */ @@ -339,14 +345,14 @@ void attach(Pointer... resources) { * Detach a list of resources from this eager session scope. * *

Detached native resources will prevent them to be automatically released when the session is - * closed.

+ * closed. * *

Note though that this method will decrement the reference count of each resources being - * detached, which may automatically released them if that count reaches 0. Therefore, - * invoking {@link Pointer#retainReference()} prior to this call on any resource that must remain - * valid after being detached might be required.

+ * detached, which may automatically released them if that count reaches 0. Therefore, invoking + * {@link Pointer#retainReference()} prior to this call on any resource that must remain valid + * after being detached might be required. * - *

Detaching a resource that is not attached to this session will have no effect.

+ *

Detaching a resource that is not attached to this session will have no effect. * * @param resources resources to detach from the session */ @@ -362,6 +368,8 @@ void detach(Pointer... resources) { private final WeakPointerScope nativeResources; private TFE_Context nativeHandle; + private final Scope baseScope = new Scope(this); + private EagerSession(Options options) { this.nativeResources = new WeakPointerScope(); this.nativeHandle = allocate(options.async, options.devicePlacementPolicy.code, options.config); @@ -381,7 +389,8 @@ private synchronized void doClose() { } } - private static TFE_Context allocate(boolean async, int devicePlacementPolicy, ConfigProto config) { + private static TFE_Context allocate( + boolean async, int devicePlacementPolicy, ConfigProto config) { try (PointerScope scope = new PointerScope()) { TFE_ContextOptions opts = TFE_ContextOptions.newContextOptions(); TF_Status status = TF_Status.newStatus(); @@ -390,7 +399,7 @@ private static TFE_Context allocate(boolean async, int devicePlacementPolicy, Co TFE_ContextOptionsSetConfig(opts, configBytes, configBytes.capacity(), status); status.throwExceptionIfNotOK(); } - TFE_ContextOptionsSetAsync(opts, (byte)(async ? 1 : 0)); + TFE_ContextOptionsSetAsync(opts, (byte) (async ? 1 : 0)); TFE_ContextOptionsSetDevicePlacementPolicy(opts, devicePlacementPolicy); TFE_Context context = TFE_NewContext(opts, status); status.throwExceptionIfNotOK(); diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java index d5389bcd0ad..a18c7fff38b 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java @@ -1,25 +1,24 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import org.tensorflow.op.Op; +import org.tensorflow.op.Scope; -/** - * Defines an environment for creating and executing TensorFlow {@link Operation}s. - */ +/** Defines an environment for creating and executing TensorFlow {@link Operation}s. 
*/ public interface ExecutionEnvironment { enum Types { @@ -49,11 +48,12 @@ default boolean isOpEnabled(String opType) { } /** - * Checks that {@code input} is valid to use as an input in this execution environment. Throws {@link - * IllegalArgumentException} if not. + * Checks that {@code input} is valid to use as an input in this execution environment. Throws + * {@link IllegalArgumentException} if not. * * @param input The op to check - * @throws IllegalArgumentException if input can't be used as an input in this execution environment. + * @throws IllegalArgumentException if input can't be used as an input in this execution + * environment. */ void checkInput(Op input); @@ -71,4 +71,10 @@ default boolean isEager() { default boolean isGraph() { return environmentType() == Types.GRAPH; } + + /** + * Get the top level scope for this execution environment. Is cached, which is necessary to + * prevent name collisions. + */ + Scope baseScope(); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java index 7f659b262a6..b69fe89da0a 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java @@ -1,18 +1,18 @@ -/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import static org.tensorflow.internal.c_api.global.tensorflow.TF_AddGradientsWithPrefix; @@ -52,6 +52,7 @@ import org.tensorflow.ndarray.StdArrays; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; +import org.tensorflow.op.Scope; import org.tensorflow.op.core.Constant; import org.tensorflow.op.core.Identity; import org.tensorflow.op.core.NoOp; @@ -63,7 +64,6 @@ import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; - /** * A data flow graph representing a TensorFlow computation. * @@ -74,18 +74,16 @@ */ public final class Graph implements ExecutionEnvironment, AutoCloseable { - /** - * Create an empty Graph. - */ + /** Create an empty Graph. 
*/ public Graph() { nativeHandle = allocate(); + this.baseScope = new Scope(this); } - /** - * Create a Graph from an existing handle (takes ownership). - */ + /** Create a Graph from an existing handle (takes ownership). */ Graph(TF_Graph nativeHandle) { this.nativeHandle = nativeHandle; + this.baseScope = new Scope(this); } Graph(TF_Graph nativeHandle, SaverDef saverDef) { @@ -138,8 +136,8 @@ public GraphOperation operation(String name) { } /** - * Returns the operation (node in the Graph) with the provided name, or throws {@link IllegalArgumentException} if - * there isn't one. + * Returns the operation (node in the Graph) with the provided name, or throws {@link + * IllegalArgumentException} if there isn't one. * * @param name name of the operation to look for * @return operation in the graph with this name @@ -155,9 +153,9 @@ public GraphOperation operationOrThrow(String name) { /** * Returns the output with the provided name, or {@code null} if there is no such output. - *

Names should be of the - * format {@code /scope/op}, with an optional index: {@code /scope/op:1}. {@code 0} is used if the index is not - * specified. + * + *

Names should be of the format {@code /scope/op}, with an optional index: {@code + * /scope/op:1}. {@code 0} is used if the index is not specified. * * @param output the output to get * @return the output with this name, or null if there isn't one @@ -181,15 +179,17 @@ public Output output(String output) { } return new Output(operation, index); } catch (NumberFormatException e) { - throw new IllegalArgumentException("Could not get output for badly formatted output name: \"" + output + "\"", e); + throw new IllegalArgumentException( + "Could not get output for badly formatted output name: \"" + output + "\"", e); } } /** - * Returns the output with the provided name, or throws {@link IllegalArgumentException} if there isn't one. - *

Names should be of the - * format {@code /scope/op}, with an optional index: {@code /scope/op:1}. {@code 0} is used if the index is not - * specified. + * Returns the output with the provided name, or throws {@link IllegalArgumentException} if there + * isn't one. + * + *

Names should be of the format {@code /scope/op}, with an optional index: {@code + * /scope/op:1}. {@code 0} is used if the index is not specified. * * @param output the output to get * @return the output with this name @@ -220,16 +220,20 @@ private GraphOperation graphOp(Operand operand) { } /** - * Finds the operations used to produce {@code outputs}, assuming {@code inputs} are provided. Includes control dependencies. - *

- * Note that this function can easily return ops upstream of inputs as part of the body. Depending on your use, the - * returned body should probably be filtered for {@code Placeholder}s, at least. + * Finds the operations used to produce {@code outputs}, assuming {@code inputs} are provided. + * Includes control dependencies. + * + *

Note that this function can easily return ops upstream of inputs as part of the body. + * Depending on your use, the returned body should probably be filtered for {@code Placeholder}s, + * at least. * - * @param inputs the inputs of the subgraph. Must be from single output ops. May not be null. - * @param outputs the outputs of the subgraph. May not be null. - * @return the set of operations needed to calculate outputs from inputs, including outputs and inputs + * @param inputs the inputs of the subgraph. Must be from single output ops. May not be null. + * @param outputs the outputs of the subgraph. May not be null. + * @return the set of operations needed to calculate outputs from inputs, including outputs and + * inputs */ - public synchronized Set completeSubgraph(Set> inputs, Set> outputs) { + public synchronized Set completeSubgraph( + Set> inputs, Set> outputs) { if (inputs == null) { throw new IllegalArgumentException("Inputs can't be null."); @@ -245,7 +249,8 @@ public synchronized Set completeSubgraph(Set> inputs, for (Operand input : inputs) { if (input.op().numOutputs() > 1) { - throw new IllegalStateException("Only ops with one output are supported as subgraph inputs"); + throw new IllegalStateException( + "Only ops with one output are supported as subgraph inputs"); } GraphOperation op = graphOp(input); inputOps.add(op); @@ -277,15 +282,14 @@ public synchronized Set completeSubgraph(Set> inputs, currents.add(inputOp); } } - } return seen; } /** - * Get all ops directly or indirectly required to calculate {@code outputs} (not including {@code outputs}), including - * control dependencies. + * Get all ops directly or indirectly required to calculate {@code outputs} (not including {@code + * outputs}), including control dependencies. * * @param outputs the starting points of the traversal. * @return the ops needed to calculate {@code outputs}, not including {@code outputs} @@ -306,8 +310,8 @@ public Set subgraphToOps(Set outputs) { } /** - * Get all ops that use one of {@code inputs} directly or indirectly (not including {@code inputs}), including control - * dependencies. + * Get all ops that use one of {@code inputs} directly or indirectly (not including {@code + * inputs}), including control dependencies. * * @param inputs the starting points of the traversal. * @return the ops that depend on {@code inputs}, not including {@code inputs} @@ -328,8 +332,8 @@ public synchronized Set subgraphFromOps(Set inpu } /** - * Get all ops directly or indirectly required to calculate {@code outputs} (not including {@code outputs}), including - * control dependencies. + * Get all ops directly or indirectly required to calculate {@code outputs} (not including {@code + * outputs}), including control dependencies. * * @param outputs the starting points of the traversal. * @return the ops needed to calculate {@code outputs}, not including {@code outputs} @@ -339,8 +343,8 @@ public Set subgraphTo(Set> outputs) { } /** - * Get all ops that use one of {@code inputs} directly or indirectly (not including {@code inputs}), including control - * dependencies. + * Get all ops that use one of {@code inputs} directly or indirectly (not including {@code + * inputs}), including control dependencies. * * @param inputs the starting points of the traversal. 
* @return the ops that depend on {@code inputs}, not including {@code inputs} @@ -363,8 +367,8 @@ public synchronized Set subgraphFrom(Set> inputs) { * @param type of the Operation (i.e., identifies the computation to be performed) * @param name to refer to the created Operation in the graph. * @return an {@link OperationBuilder}, which will add the Operation to the graph when {@link - * OperationBuilder#build()} is invoked. If {@link OperationBuilder#build()} is not invoked, then some resources may - * leak. + * OperationBuilder#build()} is invoked. If {@link OperationBuilder#build()} is not invoked, + * then some resources may leak. */ @Override public GraphOperationBuilder opBuilder(String type, String name) { @@ -383,18 +387,26 @@ public Types environmentType() { public void checkInput(Op input) { if (input.env().isEager()) { throw new IllegalArgumentException( - "Input " + input + " was from an eager session, can't use in a graph. Use tf.constantOf(input.asTensor())"); + "Input " + + input + + " was from an eager session, can't use in a graph. Use tf.constantOf(input.asTensor())"); } if (input.env() != this) { - throw new IllegalArgumentException("Input " + input + " was from a different graph, can't use."); + throw new IllegalArgumentException( + "Input " + input + " was from a different graph, can't use."); } } + @Override + public Scope baseScope() { + return baseScope; + } + /** * Import a representation of a TensorFlow graph. * - *

The representation of the graph, referred to as a {@code GraphDef}, can be - * generated by {@link #toGraphDef()} and equivalents in other language APIs. + *

The representation of the graph, referred to as a {@code GraphDef}, can be generated by + * {@link #toGraphDef()} and equivalents in other language APIs. * * @param graphDef {@code GraphDef} proto to import * @throws IllegalArgumentException if graphDef is not a recognized serialization of a graph. @@ -442,19 +454,18 @@ public synchronized void addInitializer(Op initializer) { initializers.add(initializer); } - /** - * Returns all initializers added to the graph via {@link #addInitializer(Op)} - */ + /** Returns all initializers added to the graph via {@link #addInitializer(Op)} */ public List initializers() { return Collections.unmodifiableList(initializers); } /** - * Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, i.e., {@code d(y_1 + y_2 - * + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...} + * Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, i.e., + * {@code d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...} * *

{@code dx} are used as initial gradients (which represent the symbolic partial derivatives - * of some loss function {@code L} w.r.t. {@code y}). {@code dx} must be null or have size of {@code y}. + * of some loss function {@code L} w.r.t. {@code y}). {@code dx} must be null or have size of + * {@code y}. * *

If {@code dx} is null, the implementation will use dx of {@link * org.tensorflow.op.core.OnesLike OnesLike} for all shapes in {@code y}. @@ -464,8 +475,8 @@ public List initializers() { * *

If {@code prefix} is null, then one will be chosen automatically. * - * @param prefix unique string prefix applied before the names of nodes added to the graph to compute gradients. If - * null, a default one will be chosen. + * @param prefix unique string prefix applied before the names of nodes added to the graph to + * compute gradients. If null, a default one will be chosen. * @param y output of the function to derive * @param x inputs of the function for which partial derivatives are computed * @param dx if not null, the partial derivatives of some loss function {@code L} w.r.t. {@code y} @@ -515,8 +526,11 @@ public Output[] addGradients(String prefix, Output[] y, Output[] x, Out dxIndices); int ndy = dyHandlesAndIndices.length >> 1; if (ndy != dy.length) { - throw new IllegalStateException(String.valueOf(ndy) + " gradients were added to the graph when " + dy.length - + " were expected"); + throw new IllegalStateException( + String.valueOf(ndy) + + " gradients were added to the graph when " + + dy.length + + " were expected"); } for (int i = 0, j = ndy; i < ndy; ++i, ++j) { GraphOperation op = new GraphOperation(this, (TF_Operation) dyHandlesAndIndices[i]); @@ -527,23 +541,24 @@ public Output[] addGradients(String prefix, Output[] y, Output[] x, Out } /** - * Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, i.e., {@code dy/dx_1, - * dy/dx_2...} - *

- * This is a simplified version of {@link #addGradients(String, Output[], Output[], Output[])} where {@code y} is a - * single output, {@code dx} is null and {@code prefix} is null. + * Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, i.e., + * {@code dy/dx_1, dy/dx_2...} + * + *

This is a simplified version of {@link #addGradients(String, Output[], Output[], Output[])} + * where {@code y} is a single output, {@code dx} is null and {@code prefix} is null. * * @param y output of the function to derive * @param x inputs of the function for which partial derivatives are computed * @return the partial derivatives {@code dy} with the size of {@code x} */ public Output[] addGradients(Output y, Output[] x) { - return addGradients(null, new Output[]{y}, x, null); + return addGradients(null, new Output[] {y}, x, null); } /** - * Used to instantiate an abstract class which overrides the buildSubgraph method to build a conditional or body - * subgraph for a while loop. After Java 8, this can alternatively be used to create a lambda for the same purpose. + * Used to instantiate an abstract class which overrides the buildSubgraph method to build a + * conditional or body subgraph for a while loop. After Java 8, this can alternatively be used to + * create a lambda for the same purpose. * *

To be used when calling {@link #whileLoop(Output[], * org.tensorflow.Graph.WhileSubgraphBuilder, org.tensorflow.Graph.WhileSubgraphBuilder, String)} @@ -558,7 +573,9 @@ public Output[] addGradients(Output y, Output[] x) { * } * }; * + * * Example usage (after Java 8): + * *

    * WhileSubgraphBuilder bodyGraphBuilder = (bodyGraph, bodyInputs, bodyOutputs) -> { //
    *   build body subgraph
@@ -657,13 +674,15 @@ public Output[] whileLoop(
   }
 
   /**
-   * Return the {@link SaverDef} instance used to save the state of all variables present in this graph.
+   * Return the {@link SaverDef} instance used to save the state of all variables present in this
+   * graph.
    *
-   * 

The first time this method is called it builds the {@link SaverDef}. If this graph already contains a - * "save/restore_all" operation then it is assumed to contain all necessary saving and restoring operations. If that - * operation does not exist then the graph is mutated to add all the nodes necessary to save and restore the state of - * the graph. Consequently, any variables that are added to the graph after this call will not be saved nor restored - * using this {@link SaverDef}. + *

The first time this method is called it builds the {@link SaverDef}. If this graph already + * contains a "save/restore_all" operation then it is assumed to contain all necessary saving and + * restoring operations. If that operation does not exist then the graph is mutated to add all the + * nodes necessary to save and restore the state of the graph. Consequently, any variables that + * are added to the graph after this call will not be saved nor restored using this {@link + * SaverDef}. * * @return a {@link SaverDef} instance */ @@ -678,11 +697,12 @@ synchronized SaverDef saverDef() { // regenerate SaverDef without mutating. The names mirror // the python implementation for compatibility. // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/training/saver.py - saverDef = SaverDef.newBuilder() - .setFilenameTensorName("save/filename") - .setSaveTensorName("save/control_dependency") - .setRestoreOpName("save/restore_all") - .build(); + saverDef = + SaverDef.newBuilder() + .setFilenameTensorName("save/filename") + .setSaveTensorName("save/control_dependency") + .setRestoreOpName("save/restore_all") + .build(); } } return saverDef; @@ -692,6 +712,7 @@ synchronized SaverDef saverDef() { private TF_Graph nativeHandle; private int refcount = 0; private SaverDef saverDef; + private final Scope baseScope; private final List initializers = new ArrayList<>(); @@ -757,7 +778,9 @@ private final void advance() { try { Object[] nativeReturn = nextOperation(reference.nativeHandle(), this.position); - if (nativeReturn != null && nativeReturn[0] != null && !((TF_Operation) nativeReturn[0]).isNull()) { + if (nativeReturn != null + && nativeReturn[0] != null + && !((TF_Operation) nativeReturn[0]).isNull()) { this.operation = new GraphOperation(this.graph, (TF_Operation) nativeReturn[0]); this.position = (Integer) nativeReturn[1]; } @@ -863,14 +886,21 @@ private static GraphDef toGraphDef(TF_Graph handle) { } } - static void resolveOutputs(String type, TF_Operation[] srcOps, - int[] srcIndices, TF_Output dst, int n) { + static void resolveOutputs( + String type, TF_Operation[] srcOps, int[] srcIndices, TF_Output dst, int n) { if (srcOps.length != n) { - throw new IllegalArgumentException("expected " + n + ", got " + srcOps.length + " " + type + " Operations"); + throw new IllegalArgumentException( + "expected " + n + ", got " + srcOps.length + " " + type + " Operations"); } if (srcIndices.length != n) { throw new IllegalArgumentException( - "expected " + n + ", got " + srcIndices.length + " " + type + " Operation output indices"); + "expected " + + n + + ", got " + + srcIndices.length + + " " + + type + + " Operation output indices"); } for (int i = 0; i < n; ++i) { if (srcOps[i] == null || srcOps[i].isNull()) { @@ -905,7 +935,8 @@ private static Object[] addGradients( resolveOutputs("x", outputHandles, outputIndices, x, nx); if (gradInputHandles != null) { if (gradInputHandles.length != ny) { - throw new IllegalArgumentException("expected " + ny + ", got " + gradInputHandles.length + " handles"); + throw new IllegalArgumentException( + "expected " + ny + ", got " + gradInputHandles.length + " handles"); } dx = new TF_Output(ny); resolveOutputs("dx", gradInputHandles, gradInputIndices, dx, ny); @@ -961,9 +992,13 @@ private static Object[] whileLoop( condOutputIndices[0] = condOutputOutput.index(); Object[] condOutputHandlesAndIndices = - buildSubgraph(condGraphBuilder, params.cond_graph(), - condInputHandles, condInputIndices, - condOutputHandles, condOutputIndices); + buildSubgraph( + 
condGraphBuilder, + params.cond_graph(), + condInputHandles, + condInputIndices, + condOutputHandles, + condOutputIndices); // build body subgraph TF_Output bodyInputsOutput = params.body_inputs(); @@ -980,22 +1015,28 @@ private static Object[] whileLoop( } Object[] bodyOutputHandlesAndIndices = - buildSubgraph(bodyGraphBuilder, params.body_graph(), - bodyInputHandles, bodyInputIndices, - bodyOutputHandles, bodyOutputIndices); - - if (condOutputHandlesAndIndices == null || - bodyOutputHandlesAndIndices == null) { + buildSubgraph( + bodyGraphBuilder, + params.body_graph(), + bodyInputHandles, + bodyInputIndices, + bodyOutputHandles, + bodyOutputIndices); + + if (condOutputHandlesAndIndices == null || bodyOutputHandlesAndIndices == null) { return null; } // set cond_output param to output of the conditional subgraph - condOutputOutput.oper((TF_Operation) condOutputHandlesAndIndices[0]) + condOutputOutput + .oper((TF_Operation) condOutputHandlesAndIndices[0]) .index((Integer) condOutputHandlesAndIndices[1]); // set body_outputs param to outputs of the body subgraph for (int i = 0, j = ninputs; i < ninputs; ++i, ++j) { - bodyOutputsOutput.position(i).oper((TF_Operation) bodyOutputHandlesAndIndices[i]) + bodyOutputsOutput + .position(i) + .oper((TF_Operation) bodyOutputHandlesAndIndices[i]) .index((Integer) bodyOutputHandlesAndIndices[j]); } @@ -1042,20 +1083,12 @@ private static SaverDef addVariableSaver(Graph graph) { Operand varSlices = tf.zerosLike(varNamesTensor); Placeholder saveFilename = tf.withName("filename").placeholder(TString.class); - Save saveVariables = tf.train.save( - saveFilename, - varNamesTensor, - varSlices, - varOutputs - ); - Identity id = tf.withControlDependencies(Arrays.asList(saveFilename, saveVariables)) - .withName("control_dependency").identity(saveFilename); - Restore restoreVariables = tf.train.restore( - saveFilename, - varNamesTensor, - varSlices, - varTypes - ); + Save saveVariables = tf.train.save(saveFilename, varNamesTensor, varSlices, varOutputs); + Identity id = + tf.withControlDependencies(Arrays.asList(saveFilename, saveVariables)) + .withName("control_dependency") + .identity(saveFilename); + Restore restoreVariables = tf.train.restore(saveFilename, varNamesTensor, varSlices, varTypes); List restoreOps = new ArrayList<>(varOutputs.size()); for (int i = 0; i < varOutputs.size(); ++i) { restoreOps.add(tf.assign(varOutputs.get(i), (Operand) restoreVariables.tensors().get(i))); diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/NameScope.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/NameScope.java index 2e84cac1ac7..903a12f66b2 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/NameScope.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/NameScope.java @@ -1,23 +1,26 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow.op; import java.util.HashMap; import java.util.Map; +import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.tensorflow.ExecutionEnvironment; +import org.tensorflow.Graph; /** * A class to manage scoped (hierarchical) names for operators. @@ -36,12 +39,12 @@ */ final class NameScope { - NameScope withSubScope(String scopeName) { + NameScope withSubScope(String scopeName, ExecutionEnvironment env) { checkPattern(NAME_REGEX, scopeName); // Override with opName if it exists. String actualName = (opName != null) ? opName : scopeName; String newPrefix = fullyQualify(makeUnique(actualName)); - return new NameScope(newPrefix, null, null); + return new NameScope(newPrefix, null, null).withUsedFrom(env); } NameScope withName(String name) { @@ -50,6 +53,46 @@ NameScope withName(String name) { return new NameScope(opPrefix, name, ids); } + private static final Pattern NAME_PATTERN = Pattern.compile("(.+)_(\\d+)", Pattern.DOTALL); + + /** "Import" used names from a graph. Useful when adding to a loaded graph. */ + private NameScope withUsedFrom(ExecutionEnvironment env) { + + if (env instanceof Graph) { + ((Graph) env) + .operations() + .forEachRemaining( + op -> { + if (op.name().startsWith(opPrefix != null ? opPrefix : "")) { + String name = op.name(); + + if (opPrefix != null) { + name = name.substring(opPrefix.length() + 1); + } + + if (!name.contains("/")) { + Matcher matcher = NAME_PATTERN.matcher(name); + if (matcher.find()) { + String realName = matcher.group(1); + int num = Integer.parseInt(matcher.group(2)) + 1; + + if (!(ids.containsKey(realName) && ids.get(realName) > num)) { + ids.put(realName, num); + } + } else { + if (!ids.containsKey(name)) { + ids.put(name, 1); + } else { + ids.put(name, ids.get(name) + 1); + } + } + } + } + }); + } + return this; + } + String makeOpName(String name) { checkPattern(NAME_REGEX, name); // Override with opName if it exists. @@ -62,9 +105,12 @@ String makeOpName(String name) { * *

A root-level namescope generates operator names with no components, like {@code Const_72} * and {@code result}. + * + * @param env */ - NameScope() { + NameScope(ExecutionEnvironment env) { this(null, null, null); + withUsedFrom(env); } private NameScope(String opPrefix, String opName, Map ids) { @@ -120,6 +166,13 @@ private String fullyQualify(String name) { // instance mapped to the next available numeric suffix for it. private final Map ids; + static boolean isValidName(String name) { + if (name == null) { + return false; + } + return NAME_REGEX.matcher(name).matches(); + } + private static void checkPattern(Pattern pattern, String name) { if (name == null) { throw new IllegalArgumentException("Names cannot be null"); diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/Scope.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/Scope.java index 85e283d9260..2aef70f6af0 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/Scope.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/Scope.java @@ -1,18 +1,18 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow.op; import java.util.ArrayList; @@ -21,7 +21,8 @@ import org.tensorflow.OperationBuilder; /** - * Manages groups of related properties when creating Tensorflow Operations, such as a common name prefix. + * Manages groups of related properties when creating Tensorflow Operations, such as a common name + * prefix. * *

A {@code Scope} is a container for common properties applied to TensorFlow Ops. Normal user * code initializes a {@code Scope} and provides it to Operation building classes. For example: @@ -80,15 +81,16 @@ public final class Scope { /** * Create a new top-level scope. * + *

For internal use only, use {@link ExecutionEnvironment#baseScope()} if you need a + * base level scope. + * * @param env The execution environment used by the scope. */ public Scope(ExecutionEnvironment env) { - this(env, new NameScope(), new ArrayList<>(), DeviceSpec.newBuilder().build()); + this(env, new NameScope(env), new ArrayList<>(), DeviceSpec.newBuilder().build()); } - /** - * Returns the execution environment used by this scope. - */ + /** Returns the execution environment used by this scope. */ public ExecutionEnvironment env() { return env; } @@ -97,7 +99,8 @@ public ExecutionEnvironment env() { * Returns a new scope where added operations will have the provided name prefix. * *

Ops created with this scope will have {@code name/childScopeName/} as the prefix. The actual - * name will be unique in the returned scope. All other properties are inherited from the current scope. + * name will be unique in the returned scope. All other properties are inherited from the current + * scope. * *

The child scope name must match the regular expression {@code [A-Za-z0-9.][A-Za-z0-9_.\-]*} * @@ -106,7 +109,8 @@ public ExecutionEnvironment env() { * @throws IllegalArgumentException if the name is invalid */ public Scope withSubScope(String childScopeName) { - return new Scope(env, nameScope.withSubScope(childScopeName), controlDependencies, deviceSpec); + return new Scope( + env, nameScope.withSubScope(childScopeName, env), controlDependencies, deviceSpec); } /** @@ -126,29 +130,34 @@ public Scope withName(String opName) { } /** - * Returns a new scope where added operations will be prefixed by this scope's op name - * (set by {@link #withName(String)}), or the given default if it is unset. This is intended to be used for - * composite ops. + * Returns a new scope where added operations will be prefixed by this scope's op name (set by + * {@link #withName(String)}), or the given default if it is unset. This is intended to be used + * for composite ops. * - *

Ops created with this scope will have {@code name/opName/} as the prefix. The actual - * name will be unique in the returned scope. All other properties are inherited from the current + *

Ops created with this scope will have {@code name/opName/} as the prefix. The actual name + * will be unique in the returned scope. All other properties are inherited from the current * scope. * - *

The default child scope name must match the regular expression {@code [A-Za-z0-9.][A-Za-z0-9_.\-]*} + *

The default child scope name must match the regular expression {@code + * [A-Za-z0-9.][A-Za-z0-9_.\-]*} * * @param defaultName name of the sub scope if this scope's name hasn't been set. * @return a new subscope * @throws IllegalArgumentException if the name is invalid */ - public Scope withNameAsSubScope(String defaultName){ - return new Scope(env, nameScope.withSubScope(nameScope.makeOpName(defaultName)), controlDependencies, deviceSpec); + public Scope withNameAsSubScope(String defaultName) { + return new Scope( + env, + nameScope.withSubScope(nameScope.makeOpName(defaultName), env), + controlDependencies, + deviceSpec); } /** * Return a new scope that uses the provided device specification for an op. * - *

Operations created within this scope will place the created operations on the device(s) matching the provided - * spec. + *

Operations created within this scope will place the created operations on the device(s) + * matching the provided spec. * * @param deviceSpec device specification for an operator in the returned scope * @return a new Scope that uses opName for operations. @@ -170,8 +179,8 @@ public Scope withDevice(DeviceSpec deviceSpec) { * }

* *

Note: if you provide a composite operator building class (i.e, a class that creates a - * set of related operations by calling other operator building code), the provided name will act as a subscope to all - * underlying operators. + * set of related operations by calling other operator building code), the provided name will act + * as a subscope to all underlying operators. * * @param defaultName name for the underlying operator. * @return unique name for the operator. @@ -181,8 +190,15 @@ public String makeOpName(String defaultName) { return nameScope.makeOpName(defaultName); } + public static boolean isValidOpName(String name) { + return NameScope.isValidName(name); + } + private Scope( - ExecutionEnvironment env, NameScope nameScope, Iterable controlDependencies, DeviceSpec deviceSpec) { + ExecutionEnvironment env, + NameScope nameScope, + Iterable controlDependencies, + DeviceSpec deviceSpec) { this.env = env; this.nameScope = nameScope; this.controlDependencies = controlDependencies; @@ -206,8 +222,8 @@ public Scope withControlDependencies(Iterable controls) { } /** - * Applies device specification and adds each Operand in controlDependencies as a control input to the provided - * builder. + * Applies device specification and adds each Operand in controlDependencies as a control input to + * the provided builder. * * @param builder OperationBuilder to add control inputs and device specification to */ @@ -233,9 +249,7 @@ public OperationBuilder applyControlDependencies(OperationBuilder builder) { private final NameScope nameScope; private final DeviceSpec deviceSpec; - /** - * Returns device string from the scope. - */ + /** Returns device string from the scope. */ public String getDeviceString() { return deviceSpec.toString(); } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/ScopeTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/ScopeTest.java index 62881dcee8c..84eabd3da1a 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/ScopeTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/ScopeTest.java @@ -1,18 +1,18 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2017-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ======================================================================= + */ package org.tensorflow.op; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -29,6 +29,24 @@ /** Unit tests for {@link org.tensorflow.op.Scope}. */ public class ScopeTest { + @Test + public void testSeparateOps() { + try (Graph g = new Graph()) { + Ops tf1 = Ops.create(g); + Ops tf2 = Ops.create(g); + + tf1.constant(2); + tf1.withName("Constant2").constant(2); + tf1.withSubScope("Scope").constant(2); + tf1.withSubScope("Scope").withName("Constant4").constant(2); + + tf2.constant(2); + tf2.withName("Constant2").constant(2); + tf2.withSubScope("Scope").constant(2); + tf2.withSubScope("Scope").withName("Constant4").constant(2); + } + } + @Test public void basicNames() { try (Graph g = new Graph()) { @@ -168,9 +186,9 @@ public void composite() { // assertNotNull(g.operation("variance/zero")); // Verify correct results as well. - TInt32 result = (TInt32)sess.runner().fetch(var1.output()).run().get(0); + TInt32 result = (TInt32) sess.runner().fetch(var1.output()).run().get(0); assertEquals(21704, result.getInt()); - result = (TInt32)sess.runner().fetch(var2.output()).run().get(0); + result = (TInt32) sess.runner().fetch(var2.output()).run().get(0); assertEquals(21704, result.getInt()); } } diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index bea817e9011..1b1d5cb0fb3 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -1,19 +1,18 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ======================================================================= + */ package org.tensorflow.processor.operator; import com.github.javaparser.ast.comments.JavadocComment; @@ -156,26 +155,28 @@ public Set getSupportedAnnotationTypes() { } private static class OpsSpec { - private static final Comparator PARAMETER_SPEC_COMPARATOR = (o1, o2) -> { - if (o1.parameters.size() > o2.parameters.size()) { - return 1; - } - if (o1.parameters.size() < o2.parameters.size()) { - return -1; - } - List firstParams = o1.parameters; - List secondParams = o2.parameters; - for (int i = 0; i < firstParams.size(); i++) { - ParameterSpec first = firstParams.get(i); - ParameterSpec second = secondParams.get(i); - int compare = first.name.compareTo(second.name); - if (compare != 0) { - return compare; - } - } - return 0; - }; - private static final Comparator METHOD_SPEC_COMPARATOR = Comparator.comparing((MethodSpec m) -> m.name).thenComparing(PARAMETER_SPEC_COMPARATOR); + private static final Comparator PARAMETER_SPEC_COMPARATOR = + (o1, o2) -> { + if (o1.parameters.size() > o2.parameters.size()) { + return 1; + } + if (o1.parameters.size() < o2.parameters.size()) { + return -1; + } + List firstParams = o1.parameters; + List secondParams = o2.parameters; + for (int i = 0; i < firstParams.size(); i++) { + ParameterSpec first = firstParams.get(i); + ParameterSpec second = secondParams.get(i); + int compare = first.name.compareTo(second.name); + if (compare != 0) { + return compare; + } + } + return 0; + }; + private static final Comparator METHOD_SPEC_COMPARATOR = + Comparator.comparing((MethodSpec m) -> m.name).thenComparing(PARAMETER_SPEC_COMPARATOR); final String groupName; final String fieldName; @@ -183,7 +184,8 @@ private static class OpsSpec { final List methods; final List subGroups = new ArrayList<>(); - OpsSpec(String groupName, String fieldName, ClassName className, Collection methods) { + OpsSpec( + String groupName, String fieldName, ClassName className, Collection methods) { this.groupName = groupName; this.fieldName = fieldName; this.className = className; @@ -227,11 +229,11 @@ private void error(Element e, String message, Object... 
args) { private void write(TypeSpec spec) { try { JavaFile.builder("org.tensorflow.op", spec) - .addFileComment(LICENSE) - .addFileComment("\nThis class has been generated, DO NOT EDIT!\n") - .skipJavaLangImports(true) - .build() - .writeTo(filer); + .addFileComment(LICENSE) + .addFileComment("\nThis class has been generated, DO NOT EDIT!\n") + .skipJavaLangImports(true) + .build() + .writeTo(filer); } catch (IOException e) { throw new AssertionError(e); } @@ -262,7 +264,7 @@ private boolean collectOpsMethods( result = false; continue; } - collectOpMethods(groupedMethods, (TypeElement)e, annotation); + collectOpMethods(groupedMethods, (TypeElement) e, annotation); } return result; } @@ -281,7 +283,8 @@ private void collectOpMethods( String opGroup = getAnnotationElementValueAsString("group", operatorAnnot); String opName = getAnnotationElementValueAsString("name", operatorAnnot); if (Strings.isNullOrEmpty(opName)) { - opName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, ClassName.get(opClass).simpleName()); + opName = + CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, ClassName.get(opClass).simpleName()); } // Build an endpoint for each method annotated with @Endpoint, which takes in parameter a scope // and, optionally, a list of arguments @@ -293,11 +296,17 @@ private void collectOpMethods( throw new IllegalArgumentException( "Endpoint " + opMethod + " of class " + opClass + " must be static and public"); } - if (opMethod.getParameters().isEmpty() || - !((TypeElement)types.asElement(opMethod.getParameters().get(0).asType())).getQualifiedName() + if (opMethod.getParameters().isEmpty() + || !((TypeElement) types.asElement(opMethod.getParameters().get(0).asType())) + .getQualifiedName() .equals(elements.getName(Names.Scope.toString()))) { throw new IllegalArgumentException( - "Endpoint " + opMethod + " of class " + opClass + " must take an instance of " + Names.Scope + "Endpoint " + + opMethod + + " of class " + + opClass + + " must take an instance of " + + Names.Scope + " as its first parameter"); } String endpointGroup = getAnnotationElementValueAsString("group", endpointAnnot); @@ -311,15 +320,19 @@ private void collectOpMethods( boolean describeByClass = getAnnotationElementValueAsBoolean("describeByClass", endpointAnnot, false); boolean deprecated = opMethod.getAnnotation(Deprecated.class) != null || opClassDeprecated; - MethodSpec method = buildOpMethod(endpointName, opClass, opMethod, describeByClass, deprecated); + MethodSpec method = + buildOpMethod(endpointName, opClass, opMethod, describeByClass, deprecated); groupedMethods.put(endpointGroup, method); } } } private MethodSpec buildOpMethod( - String methodName, TypeElement opClass, ExecutableElement endpointMethod, - boolean describeByClass, boolean deprecated) { + String methodName, + TypeElement opClass, + ExecutableElement endpointMethod, + boolean describeByClass, + boolean deprecated) { MethodSpec.Builder builder = MethodSpec.methodBuilder(methodName) .addModifiers(Modifier.PUBLIC) @@ -341,9 +354,7 @@ private MethodSpec buildOpMethod( if (!NoType.class.isAssignableFrom(endpointMethod.getReturnType().getClass())) { call.append("return "); } - call.append("$T.") - .append(endpointMethod.getSimpleName()) - .append("(scope"); + call.append("$T.").append(endpointMethod.getSimpleName()).append("(scope"); boolean first = true; for (VariableElement param : endpointMethod.getParameters()) { ParameterSpec p = ParameterSpec.get(param); @@ -374,50 +385,68 @@ private String buildOpMethodJavadoc( // Copy all endpoint method tags 
to the description, except for the `scope` parameter which // will be inferred by the Ops class - methodJavadoc.getBlockTags().forEach(t -> { - if (!(t.getTagName().equals("param") && t.getName().map(s -> s.equals("scope")).orElse(false))) { - javadoc.addBlockTag(t); - } - }); + methodJavadoc + .getBlockTags() + .forEach( + t -> { + if (!(t.getTagName().equals("param") + && t.getName().map(s -> s.equals("scope")).orElse(false))) { + javadoc.addBlockTag(t); + } + }); return javadoc.toText(); } - private static Collection collectGroupOps(OpsSpec ops, Multimap groupedMethods) { + private static Collection collectGroupOps( + OpsSpec ops, Multimap groupedMethods) { Map groups = new HashMap<>(); - // The `group` label added in the `@Operator` annotation has the same syntax as a package name, which (in most - // case) consists of a simple label but could also be a deeper tree, like `linalg.sparse`. In this case, - // the `LinalgSparseOps` group should be added as the `sparse` field of the `LinalgOps` group, and the latter + // The `group` label added in the `@Operator` annotation has the same syntax as a package name, + // which (in most + // case) consists of a simple label but could also be a deeper tree, like `linalg.sparse`. In + // this case, + // the `LinalgSparseOps` group should be added as the `sparse` field of the `LinalgOps` group, + // and the latter // should be added as the `linalg` field of the `Ops` root class. - groupedMethods.keys().forEach(group -> { - OpsSpec parentClass = ops; - int startPos = 0; - do { - int delimiterPos = group.indexOf('.', startPos); - String groupName = delimiterPos < 0 ? group : group.substring(0, delimiterPos); - OpsSpec groupOps = groups.get(groupName); - - // Create spec for this group if we have not encountered it yet in our iteration - if (groupOps == null) { - String fieldName = delimiterPos < 0 ? - group.substring(startPos) : group.substring(startPos, delimiterPos); - ClassName className = ClassName.get("org.tensorflow.op", - CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, groupName.replace('.', '_')) + "Ops"); - groupOps = new OpsSpec(groupName, fieldName, className, groupedMethods.get(groupName)); - parentClass.subGroups.add(groupOps); - groups.put(groupName, groupOps); - } - parentClass = groupOps; - startPos = delimiterPos + 1; - } while (startPos > 0); - }); + groupedMethods + .keys() + .forEach( + group -> { + OpsSpec parentClass = ops; + int startPos = 0; + do { + int delimiterPos = group.indexOf('.', startPos); + String groupName = delimiterPos < 0 ? group : group.substring(0, delimiterPos); + OpsSpec groupOps = groups.get(groupName); + + // Create spec for this group if we have not encountered it yet in our iteration + if (groupOps == null) { + String fieldName = + delimiterPos < 0 + ? 
group.substring(startPos) + : group.substring(startPos, delimiterPos); + ClassName className = + ClassName.get( + "org.tensorflow.op", + CaseFormat.LOWER_UNDERSCORE.to( + CaseFormat.UPPER_CAMEL, groupName.replace('.', '_')) + + "Ops"); + groupOps = + new OpsSpec(groupName, fieldName, className, groupedMethods.get(groupName)); + parentClass.subGroups.add(groupOps); + groups.put(groupName, groupOps); + } + parentClass = groupOps; + startPos = delimiterPos + 1; + } while (startPos > 0); + }); return groups.values(); } private static TypeSpec buildGroupClass(OpsSpec spec) { - //System.out.println("Generating " + spec.className + " class"); + // System.out.println("Generating " + spec.className + " class"); MethodSpec.Builder ctorBuilder = MethodSpec.constructorBuilder() @@ -436,7 +465,8 @@ private static TypeSpec buildGroupClass(OpsSpec spec) { Names.Ops) .addMethods(spec.methods); - MethodSpec.Builder opsBuilder = MethodSpec.methodBuilder("ops") + MethodSpec.Builder opsBuilder = + MethodSpec.methodBuilder("ops") .addModifiers(Modifier.PUBLIC, Modifier.FINAL) .returns(Names.Ops) .addJavadoc("Get the parent {@link " + Names.Ops.simpleName() + "} object.") @@ -449,21 +479,23 @@ private static TypeSpec buildGroupClass(OpsSpec spec) { builder.addMethod(ctorBuilder.build()); builder.addField( - FieldSpec.builder(Names.Scope, "scope").addModifiers(Modifier.PRIVATE, Modifier.FINAL).build()); + FieldSpec.builder(Names.Scope, "scope") + .addModifiers(Modifier.PRIVATE, Modifier.FINAL) + .build()); builder.addField( - FieldSpec.builder(Names.Ops, "ops").addModifiers(Modifier.PRIVATE, Modifier.FINAL).build()); + FieldSpec.builder(Names.Ops, "ops").addModifiers(Modifier.PRIVATE, Modifier.FINAL).build()); return builder.build(); } private static TypeSpec buildTopClass(OpsSpec spec) { - //System.out.println("Generating " + spec.className + " class"); + // System.out.println("Generating " + spec.className + " class"); MethodSpec.Builder ctorBuilder = MethodSpec.constructorBuilder() - .addModifiers(Modifier.PRIVATE) .addParameter(Names.Scope, "scope") + .addModifiers(Modifier.PRIVATE) .addStatement("this.scope = scope", Names.Scope); TypeSpec.Builder opsBuilder = @@ -531,16 +563,16 @@ private static TypeSpec buildTopClass(OpsSpec spec) { .build()); opsBuilder.addMethod( - MethodSpec.methodBuilder("withDevice") - .addModifiers(Modifier.PUBLIC) - .addParameter(Names.DeviceSpec, "deviceSpec") - .returns(Names.Ops) - .addStatement("return new Ops(scope.withDevice(deviceSpec))") - .addJavadoc( - "Returns an API that places the created operations on the device(s) matching the provided spec.\n\n" - + "@see {@link $T#withDevice(DeviceSpec)}\n", - Names.Scope) - .build()); + MethodSpec.methodBuilder("withDevice") + .addModifiers(Modifier.PUBLIC) + .addParameter(Names.DeviceSpec, "deviceSpec") + .returns(Names.Ops) + .addStatement("return new Ops(scope.withDevice(deviceSpec))") + .addJavadoc( + "Returns an API that places the created operations on the device(s) matching the provided spec.\n\n" + + "@see {@link $T#withDevice(DeviceSpec)}\n", + Names.Scope) + .build()); opsBuilder.addMethod( MethodSpec.methodBuilder("withControlDependencies") @@ -555,7 +587,9 @@ private static TypeSpec buildTopClass(OpsSpec spec) { .build()); opsBuilder.addField( - FieldSpec.builder(Names.Scope, "scope").addModifiers(Modifier.PRIVATE, Modifier.FINAL).build()); + FieldSpec.builder(Names.Scope, "scope") + .addModifiers(Modifier.PRIVATE, Modifier.FINAL) + .build()); opsBuilder.addMethod( MethodSpec.methodBuilder("scope") @@ -570,7 +604,7 @@ 
private static TypeSpec buildTopClass(OpsSpec spec) { .addModifiers(Modifier.PUBLIC, Modifier.STATIC) .addParameter(Names.ExecutionEnvironment, "env") .returns(Names.Ops) - .addStatement("return new Ops(new $T(env))", Names.Scope) + .addStatement("return new Ops(env.baseScope())", Names.Scope) .addJavadoc( "Creates an API for building operations in the provided execution environment\n") .build()); @@ -579,7 +613,7 @@ private static TypeSpec buildTopClass(OpsSpec spec) { MethodSpec.methodBuilder("create") .addModifiers(Modifier.PUBLIC, Modifier.STATIC) .returns(Names.Ops) - .addStatement("return new Ops(new $T($T.getDefault()))", Names.Scope, Names.EagerSession) + .addStatement("return create($T.getDefault())", Names.EagerSession) .addJavadoc( "Creates an API for building operations in the default eager execution environment\n\n" + "

Invoking this method is equivalent to {@code Ops.create(EagerSession.getDefault())}.\n") @@ -588,27 +622,39 @@ private static TypeSpec buildTopClass(OpsSpec spec) { return opsBuilder.build(); } - private static void addGroupFields(TypeSpec.Builder classBuilder, MethodSpec.Builder ctorBuilder, List groups, boolean isTopClass) { - groups.forEach(group -> { - classBuilder.addField( - FieldSpec.builder(group.className, group.fieldName) - .addModifiers(Modifier.PUBLIC, Modifier.FINAL) - .build() - ); - ctorBuilder.addStatement("$L = new $T(" + (isTopClass ? "this" : "ops") + ")", group.fieldName, group.className).build(); - }); + private static void addGroupFields( + TypeSpec.Builder classBuilder, + MethodSpec.Builder ctorBuilder, + List groups, + boolean isTopClass) { + groups.forEach( + group -> { + classBuilder.addField( + FieldSpec.builder(group.className, group.fieldName) + .addModifiers(Modifier.PUBLIC, Modifier.FINAL) + .build()); + ctorBuilder + .addStatement( + "$L = new $T(" + (isTopClass ? "this" : "ops") + ")", + group.fieldName, + group.className) + .build(); + }); } private static AnnotationMirror getAnnotationMirror(Element element, Name annotationName) { for (AnnotationMirror am : element.getAnnotationMirrors()) { - if (((TypeElement)am.getAnnotationType().asElement()).getQualifiedName().equals(annotationName)) { + if (((TypeElement) am.getAnnotationType().asElement()) + .getQualifiedName() + .equals(annotationName)) { return am; } } return null; } - private static AnnotationValue getAnnotationElementValue(String elementName, AnnotationMirror am) { + private static AnnotationValue getAnnotationElementValue( + String elementName, AnnotationMirror am) { for (Map.Entry entry : am.getElementValues().entrySet()) { if (entry.getKey().getSimpleName().contentEquals(elementName)) { @@ -623,7 +669,8 @@ private static String getAnnotationElementValueAsString(String elementName, Anno return value != null ? value.getValue().toString() : ""; } - private static boolean getAnnotationElementValueAsBoolean(String elementName, AnnotationMirror am, boolean defaultValue) { + private static boolean getAnnotationElementValueAsBoolean( + String elementName, AnnotationMirror am, boolean defaultValue) { AnnotationValue value = getAnnotationElementValue(elementName, am); return value != null ? 
Boolean.parseBoolean(value.toString()) : defaultValue; } From daeb25740207d5699450ed92ab8ddb8ec24b012c Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 30 May 2021 17:42:56 -0700 Subject: [PATCH 31/60] Native functions v2 (#233) * Initial native function use Signed-off-by: Ryan Nett * Allow body constants Signed-off-by: Ryan Nett * Fix body forbids Signed-off-by: Ryan Nett * Use default eager session for tensor calls Signed-off-by: Ryan Nett * Use default eager for single tensor call too Signed-off-by: Ryan Nett * Get functions from graph Signed-off-by: Ryan Nett * Start of saver support Signed-off-by: Ryan Nett * Update loading, detect statefulness, use PartitionedCall Signed-off-by: Ryan Nett * Start of dependencies Signed-off-by: Ryan Nett * Support dependencies Signed-off-by: Ryan Nett * Remove unwrapping Signed-off-by: Ryan Nett * Proper attribute setters Signed-off-by: Ryan Nett * Add ignored gradient test Signed-off-by: Ryan Nett * Rebase fix Signed-off-by: Ryan Nett * Op generation for functions Signed-off-by: Ryan Nett * Rebase fix Signed-off-by: Ryan Nett * SavedFunction for running functions from SavedModelBundles Signed-off-by: Ryan Nett * Review fixes Signed-off-by: Ryan Nett * Generation and better javadoc Signed-off-by: Ryan Nett * Rework pointer scopes Signed-off-by: Ryan Nett * SessionFunction instead of SavedModelBundle specific class Signed-off-by: Ryan Nett * Add CallableFunction javadoc Signed-off-by: Ryan Nett * Remove obsolete test Signed-off-by: Ryan Nett * Rebase fix Signed-off-by: Ryan Nett * Formatting fixes and nits Signed-off-by: Ryan Nett * Add session function test, Signature.builder with name Signed-off-by: Ryan Nett * Remove extra synchronization Signed-off-by: Ryan Nett * Formatting Signed-off-by: Ryan Nett * New names Signed-off-by: Ryan Nett * Note on SavedModel functions Signed-off-by: Ryan Nett * Fix tests Signed-off-by: Ryan Nett * Rename name method Signed-off-by: Ryan Nett * Re-add tests w/ SessionFunction Signed-off-by: Ryan Nett * Helper methods for saving Signed-off-by: Ryan Nett --- .../annotations/org/tensorflow/op/Ops.java | 28 + .../internal/c_api/TF_Function.java | 2 +- .../java/org/tensorflow/ConcreteFunction.java | 609 ++++++++++++++---- .../org/tensorflow/EagerOperationBuilder.java | 99 ++- .../java/org/tensorflow/EagerSession.java | 20 + .../org/tensorflow/ExecutionEnvironment.java | 9 + .../src/main/java/org/tensorflow/Graph.java | 106 ++- .../org/tensorflow/GraphOperationBuilder.java | 132 +++- .../java/org/tensorflow/NativeFunction.java | 155 +++++ .../java/org/tensorflow/OperationBuilder.java | 84 ++- .../java/org/tensorflow/SavedModelBundle.java | 225 +++++-- .../src/main/java/org/tensorflow/Session.java | 215 ++++--- .../java/org/tensorflow/SessionFunction.java | 127 ++++ .../main/java/org/tensorflow/Signature.java | 189 ++++-- .../main/java/org/tensorflow/TensorFlow.java | 40 +- .../java/org/tensorflow/TensorFunction.java | 129 ++++ .../internal/c_api/AbstractTF_Function.java | 53 ++ .../internal/c_api/presets/tensorflow.java | 531 +++++++++------ .../java/org/tensorflow/op/core/Function.java | 58 ++ .../org/tensorflow/ConcreteFunctionTest.java | 163 +++-- .../tensorflow/EagerOperationBuilderTest.java | 27 +- .../tensorflow/GraphOperationBuilderTest.java | 27 +- .../org/tensorflow/SavedModelBundleTest.java | 243 +++---- .../test/java/org/tensorflow/SessionTest.java | 114 ++-- .../org/tensorflow/op/core/FunctionTest.java | 67 ++ 25 files changed, 2588 insertions(+), 864 deletions(-) create mode 100644 
tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/NativeFunction.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SessionFunction.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFunction.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/AbstractTF_Function.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Function.java create mode 100644 tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/FunctionTest.java diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 92e4cabdbd1..c68b6ee8ff7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -19,6 +19,8 @@ import java.nio.charset.Charset; import java.util.List; +import java.util.Map; +import org.tensorflow.ConcreteFunction; import org.tensorflow.DeviceSpec; import org.tensorflow.EagerSession; import org.tensorflow.ExecutionEnvironment; @@ -87,6 +89,7 @@ import org.tensorflow.op.core.ExtractVolumePatches; import org.tensorflow.op.core.Fill; import org.tensorflow.op.core.Fingerprint; +import org.tensorflow.op.core.Function; import org.tensorflow.op.core.Gather; import org.tensorflow.op.core.GatherNd; import org.tensorflow.op.core.GetSessionHandle; @@ -1116,6 +1119,31 @@ public Bucketize bucketize(Operand input, List boundar return Bucketize.create(scope, input, boundaries); } + /** + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. Only works for functions with a single input and output. + * + * @param argument the argument to the call + * @return the output of the function + * @see ConcreteFunction#call(Ops, Operand) + */ + public Operand call(ConcreteFunction function, Operand argument) { + return Function.call(scope, function, argument); + } + + /** + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. The inputs and outputs are keyed by the names set in the {@code Signature}. + * + * @param arguments the arguments to the call + * @return the outputs of the function + * @see ConcreteFunction#call(Ops, Map) + */ + public Map> call(ConcreteFunction function, + Map> arguments) { + return Function.call(scope, function, arguments); + } + /** * Clips tensor values to a specified min and max. * Given a tensor {@code t}, this operation returns a tensor of the same type and diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java index e370b2f9f08..829d1cede3c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java @@ -13,7 +13,7 @@ // Once created and added to graphs, functions can be invoked by creating an // operation whose operation type matches the function name. 
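A rough usage sketch for the call endpoints added to Ops above. The helper class and the "x"/"y" names are hypothetical; they stand for whatever names the function's Signature declares, and the function itself is assumed to come from ConcreteFunction.create or a loaded model.

import java.util.HashMap;
import java.util.Map;
import org.tensorflow.ConcreteFunction;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;

public final class CallEndpoints {

  // Map-based call: keys follow the function's Signature ("x" in, "y" out here).
  static Operand<?> callByName(Ops tf, ConcreteFunction fn, Operand<?> x) {
    Map<String, Operand<?>> args = new HashMap<>();
    args.put("x", x);
    return tf.call(fn, args).get("y");
  }

  // Single input/output shortcut for functions with exactly one of each.
  static Operand<?> callSingle(Ops tf, ConcreteFunction fn, Operand<?> x) {
    return tf.call(fn, x);
  }
}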
@Opaque @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) -public class TF_Function extends Pointer { +public class TF_Function extends org.tensorflow.internal.c_api.AbstractTF_Function { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public TF_Function() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ConcreteFunction.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ConcreteFunction.java index 71dc0f7cefc..3e264e0e25d 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ConcreteFunction.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ConcreteFunction.java @@ -1,54 +1,81 @@ -/* - * Copyright 2020 The TensorFlow Authors. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. +/* Copyright 2020-2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ======================================================================= */ package org.tensorflow; -import java.io.IOException; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_FunctionSetAttrValueProto; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_GraphToFunction; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; -import java.util.ListIterator; -import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; +import org.bytedeco.javacpp.BytePointer; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.PointerPointer; +import org.bytedeco.javacpp.PointerScope; +import org.tensorflow.Graph.Reference; +import org.tensorflow.internal.c_api.TF_Function; +import org.tensorflow.internal.c_api.TF_Operation; +import org.tensorflow.internal.c_api.TF_Output; +import org.tensorflow.internal.c_api.TF_Status; import org.tensorflow.op.Ops; +import org.tensorflow.op.Scope; +import org.tensorflow.op.core.Placeholder; +import org.tensorflow.op.core.PlaceholderWithDefault; +import org.tensorflow.proto.framework.AttrValue; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.proto.framework.FunctionDef; +import org.tensorflow.proto.framework.OpDef.ArgDef; import org.tensorflow.proto.framework.SignatureDef; import org.tensorflow.proto.framework.TensorInfo; +import org.tensorflow.proto.framework.TensorShapeProto; +import org.tensorflow.types.TBool; +import org.tensorflow.types.family.TType; /** * A graph that can be invoked as a single function, with an input and output signature. * - *

A function can also invoke a - * tf.function - * defined in a {@link SavedModelBundle}. + *

A function can also invoke a tf.function defined in a {@link + * SavedModelBundle}. * *

{@code
  * ConcreteFunction myFunction = savedModelBundle.function("myFunctionSignatureName");
  * Map<String, Tensor> outputTensorMap = myFunction.call(inputTensorMap);
  * }
*/ -public class ConcreteFunction implements AutoCloseable { +public class ConcreteFunction implements AutoCloseable, TensorFunction { /** * Creates a function by building a new graph. * - *

The {@code functionBuilder} must initialize the function graph from the provided - * {@link Ops} instance and return a valid signature that will be used to feed the input tensors - * and fetch the output tensors on execution. + *

The {@code functionBuilder} must initialize the function graph from the provided {@link Ops} + * instance and return a valid signature that will be used to feed the input tensors and fetch the + * output tensors on execution. * - *

The function will be the owner of the new graph and its resulting session. Therefore, - * the function must be enclosed properly with a try-with-resources block to guarantee that - * all native resources will be freed once the function is discarded. For example: + *

The function will be the owner of the new graph and its resulting session. Therefore, the + * function must be enclosed properly with a try-with-resources block to guarantee that all native + * resources will be freed once the function is discarded. For example: * *

{@code
    * public class MyModel {
@@ -72,23 +99,19 @@ public class ConcreteFunction implements AutoCloseable {
    * @return the new function
    */
   public static ConcreteFunction create(Function<Ops, Signature> functionBuilder) {
-    Graph graph = new Graph();
-    try {
+    try (Graph graph = new Graph()) {
       Ops tf = Ops.create(graph);
       Signature signature = functionBuilder.apply(tf);
-      return new ConcreteFunction(signature, graph, new Session(graph), Ownership.GRAPH_AND_SESSION);
-    } catch (Exception e) {
-      graph.close();
-      throw e;
+      return buildFromGraph(graph, signature);
     }
   }
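A compact sketch of how this factory is typically used, assuming the Signature builder accepts named operands and the tensor-based call(Map) shown further down in this class; the "x"/"y" names are arbitrary.

import java.util.Collections;
import java.util.Map;
import org.tensorflow.ConcreteFunction;
import org.tensorflow.Operand;
import org.tensorflow.Signature;
import org.tensorflow.Tensor;
import org.tensorflow.types.TFloat32;

public class SquareExample {
  public static void main(String[] args) {
    // Build a tiny graph computing y = x * x and wrap it as a function.
    try (ConcreteFunction square =
        ConcreteFunction.create(
            tf -> {
              Operand<TFloat32> x = tf.placeholder(TFloat32.class);
              Operand<TFloat32> y = tf.math.square(x);
              return Signature.builder().input("x", x).output("y", y).build();
            })) {
      try (TFloat32 input = TFloat32.scalarOf(3f)) {
        Map<String, Tensor> args = Collections.singletonMap("x", input);
        Map<String, Tensor> outputs = square.call(args);
        try (Tensor y = outputs.get("y")) {
          // y is a scalar TFloat32 holding 9.0f
        }
      }
    }
  }
}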
 
   /**
    * Create a function from a signature and an existing graph.
    *
-   * 

The function will keep the ownership of the session used to run the graph but not - * the graph itself, meaning that the lifetime of the latter can extend beyond the scope - * of the function. For example: + *

The function will keep the ownership of the session used to run the graph but not the graph + * itself, meaning that the lifetime of the latter can extend beyond the scope of the function. + * For example: * *

{@code
    * try (Graph g = new Graph()) {
@@ -109,15 +132,15 @@ public static ConcreteFunction create(Function<Ops, Signature> functionBuilder)
    * @return a new function
    */
   public static ConcreteFunction create(Signature signature, Graph graph) {
-    return new ConcreteFunction(signature, graph, new Session(graph), Ownership.SESSION_ONLY);
+    return buildFromGraph(graph, signature);
   }
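A similar sketch for this graph-based factory, emphasizing that the graph's lifetime is managed outside the function; the ops and names are again hypothetical.

import org.tensorflow.ConcreteFunction;
import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.Signature;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class FromGraphExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      Operand<TFloat32> x = tf.placeholder(TFloat32.class);
      Operand<TFloat32> y = tf.math.mul(x, tf.constant(2.0f));
      Signature signature = Signature.builder().input("x", x).output("y", y).build();

      // The enclosing try block owns the graph; the function only refers to it
      // and can be closed (or kept) independently.
      try (ConcreteFunction doubler = ConcreteFunction.create(signature, g)) {
        // doubler can be called here while g is still open
      }
    }
  }
}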
 
   /**
    * Create a function from a signature and a valid graph session.
    *
-   * 

The function will not own the session nor its graph, meaning that their lifetime - * can extend beyond the scope of the function. Therefore the function does not need to be - * closed after its usage. For example: + *

The function will not own the session nor its graph, meaning that their lifetime can extend + * beyond the scope of the function. Therefore the function does not need to be closed after its + * usage. For example: * *

{@code
    * try (Graph g = new Graph()) {
@@ -143,152 +166,480 @@ public static ConcreteFunction create(Signature signature, Graph graph) {
    * @return a new function
    */
   public static ConcreteFunction create(Signature signature, Session session) {
-    return new ConcreteFunction(signature, session.graph(), session, Ownership.NONE);
+    return buildFromGraph(session.graph(), signature);
   }
 
-  /**
-   * Returns the signature of this function
-   */
+  /** Returns the signature of this function */
+  @Override
   public Signature signature() {
     return signature;
   }
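For illustration, the returned Signature can be inspected for the declared input and output names; the helper below is a hypothetical sketch using accessors referenced elsewhere in this class.

import org.tensorflow.ConcreteFunction;
import org.tensorflow.Signature;

final class SignatureInfo {
  // Prints the signature key and the I/O names the function was built with.
  static void describe(ConcreteFunction fn) {
    Signature sig = fn.signature();
    System.out.println(
        sig.key() + ": inputs=" + sig.inputNames() + ", outputs=" + sig.outputNames());
  }
}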
 
   /**
-   * Invokes a function.
-   *
-   * 

Caller is responsible for closing all Tensors. + * Get the name of the function definition. This is what it will show up under in the graph and + * any exported GraphDefs, and should be used for anything using tensorflow core directly. + */ + public String getDefinedName() { + return nativeFunction.getName(); + } + + /** Get the {@link FunctionDef} proto. */ + public FunctionDef getFunctionDef() { + return nativeFunction.getFunctionDef(); + } + + /** Get whether the function is stateful. */ + public boolean isStateful() { + return nativeFunction.isStateful(); + } + + Set getDependencies() { + return dependencies; + } + + @Override + public void close() { + scope.close(); + } + + @Override + public String toString() { + return signature.toString(); + } + + // TODO migrate to the actual ops once they are generated + public static final String CALL_OP = "PartitionedCall"; + // TODO migrate to the actual ops once they are generated + public static final String STATEFUL_CALL_OP = "StatefulPartitionedCall"; + + /** + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. The inputs and outputs are keyed by the names set in the {@code Signature}. * - * @param arguments list of tensors to pass in input to the function, - * mapped by their signature name - * @return output tensors resulting from the execution of the function, - * mapped by their signature name + * @param scope the scope to call the function in + * @param arguments the arguments to the call + * @return the outputs of the function */ - public Map call(Map arguments) - throws IllegalArgumentException { + public Map> call(Scope scope, Map> arguments) { + List> inputList = new ArrayList<>(); - final SignatureDef signatureDef = signature.asSignatureDef(); - final Session.Runner runner = session.runner(); + Output[] inputs = new Output[signature().inputNames().size()]; - signatureDef.getInputsMap().forEach((argName, t) -> { - Tensor tensor = arguments.get(argName); - if (tensor == null) { - throw new IllegalArgumentException(String.format("Missing argument [%s]", argName)); + int i = 0; + for (String inputName : signature().inputNames()) { + if (!arguments.containsKey(inputName)) { + throw new IllegalArgumentException( + "Function " + + signature().methodName() + + " has parameter \"" + + inputName + + "\", but no argument was passed for it."); } - runner.feed(t.getName(), tensor); - }); - Map outputToNode = signatureDef.getOutputsMap(); - outputToNode.values().forEach(t -> runner.fetch(t.getName())); + Operand input = arguments.get(inputName); + if (input == null) { + throw new IllegalArgumentException( + "Can't pass null as an argument to a function. Argument \"" + + inputName + + "\" was null."); + } + inputs[i] = input.asOutput(); + i++; + } - List resultTensors = runner.run(); - try { - ListIterator resultTensorIter = resultTensors.listIterator(); - Map returnMap = new HashMap(); + scope.env().attachFunction(this); + String name = getDefinedName(); - // Use the output names as present in the signature definition - for (String nodeName: outputToNode.keySet()) { - returnMap.put(nodeName, resultTensorIter.next()); - } - return returnMap; + String displayName = Scope.isValidOpName(name) ? name : "FunctionCall"; + + OperationBuilder opBuilder = + scope + .env() + .opBuilder(isStateful() ? 
STATEFUL_CALL_OP : CALL_OP, scope.makeOpName(displayName)); + + opBuilder.addInputList(inputs); + + opBuilder.setAttr("f", this); + opBuilder.setAttr("Tin", inputDtypes); + opBuilder.setAttr("Tout", outputDtypes); + + opBuilder = scope.apply(opBuilder); + Operation op = opBuilder.build(); + + int numOutputs1 = op.numOutputs(); + List> outputList = new ArrayList<>(signature().outputNames().size()); + + for (i = 0; i < numOutputs1; i++) { + outputList.add(op.output(i)); + } + + Map> namedOutputs = new LinkedHashMap<>(signature().outputNames().size()); - } catch (Exception e) { - // Release tensors before throwing exception - for (Tensor t : resultTensors) { - t.close(); + List outputNames = new ArrayList<>(signature().outputNames()); + for (i = 0; i < outputNames.size(); i++) { + String outputName = outputNames.get(i); + + if (i > outputList.size()) { + throw new IllegalStateException( + "Somehow, not all required outputs were returned from the function"); } - throw e; + + Operand output = outputList.get(i); + namedOutputs.put(outputName, output); } + + return Collections.unmodifiableMap(namedOutputs); } /** - * Invokes a function with a single input and output. - * - *

Caller is responsible for closing all Tensors. + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. Only works for functions with a single input and output. * - * @param tensor input tensor - * @return output tensor - * @throws IllegalArgumentException if there are multiple input or output parameters defined - * in the function + * @param scope the scope to call the function in + * @param argument the argument to the call + * @return the output of the function */ - public Tensor call(Tensor tensor) throws IllegalArgumentException { + public Operand call(Scope scope, Operand argument) { final SignatureDef signatureDef = signature.asSignatureDef(); if (signatureDef.getInputsCount() != 1) { throw new IllegalArgumentException( - String.format("Function [%s] requires multiple inputs", signatureDef.getMethodName())); + String.format("Function [%s] requires multiple inputs", signatureDef.getMethodName())); } - String inputNodeName = signatureDef.getInputsMap().values().iterator().next().getName(); + String inputName = signatureDef.getInputsMap().keySet().iterator().next(); if (signatureDef.getOutputsCount() != 1) { throw new IllegalArgumentException( - String.format("Function [%s] has multiple outputs", signatureDef.getMethodName())); + String.format("Function [%s] has multiple outputs", signatureDef.getMethodName())); } - String outputNodeName = signatureDef.getOutputsMap().values().iterator().next().getName(); + String outputName = signatureDef.getOutputsMap().keySet().iterator().next(); + + Map> inputMap = new LinkedHashMap<>(); + inputMap.put(inputName, argument); + + return call(scope, inputMap).get(outputName); + } + + @Override + public Map call(Map arguments) { + // FIXME need to manage input/output operand lifetimes + Ops tf = Ops.create(); + Map> inputs = new LinkedHashMap<>(arguments.size()); - return session.runner().feed(inputNodeName, tensor).fetch(outputNodeName).run().get(0); + for (String inputName : arguments.keySet()) { + Tensor argument = arguments.get(inputName); + inputs.put(inputName, tf.constantOf((TType) argument)); + } + Map> outputs = tf.call(this, inputs); + Map tensorOutputs = new LinkedHashMap<>(outputs.size()); + for (String outputName : outputs.keySet()) { + tensorOutputs.put(outputName, outputs.get(outputName).asTensor()); + } + return tensorOutputs; } /** - * Export this function as a saved model. - * - *

This method is convenient shortcut equivalent to - * {@code SavedModel.exporter(exportDir).withFunction(this).export()} + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. The inputs and outputs are keyed by the names set in the {@code Signature}. * - * @param exportDir directory where to export the saved model - * @throws IOException if saved model or variable state cannot be written on disk + * @param tf the scope to call the function in + * @param arguments the arguments to the call + * @return the outputs of the function */ - public void save(String exportDir) throws IOException { - SavedModelBundle.exporter(exportDir).withFunction(this).export(); + public Map> call(Ops tf, Map> arguments) { + return tf.call(this, arguments); } /** - * Returns the session used to execute the graph when calling this function + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. Only works for functions with a single input and output. * - *

In general, a user does not need to handle directly the session of a function and rely - * on {@link #call(Map)} to execute the graph instead. But in some cases, direct access to - * the session might be necessary, as it allows more running options. - * - * @return the function session + * @param tf the scope to call the function in + * @param argument the argument to the call + * @return the output of the function + */ + public Operand call(Ops tf, Operand argument) { + return tf.call(this, argument); + } + + TF_Function nativeHandle() { + if (nativeFunction.getNativeHandle().isNull()) { + throw new IllegalStateException("Function has been closed"); + } + return nativeFunction.getNativeHandle(); + } + + /** All native functions should have deallocators registered */ + ConcreteFunction( + Signature signature, + NativeFunction nativeFunction, + Collection availableFunctions) { + this(signature, nativeFunction, nativeFunction.getAllDependencies(availableFunctions)); + } + + /** + * Detects the signature from the handle. Does not close passed functions. All passed functions + * should have deallocators. */ - public Session session() { - return session; + static ConcreteFunction fromNativeHandle( + NativeFunction nativeFunction, Collection availableFunctions) { + + Signature.Builder builder = + Signature.builder() + .methodName(nativeFunction.getFunctionDef().getSignature().getName()) + .key(nativeFunction.getName()); + + for (ArgDef input : nativeFunction.getFunctionDef().getSignature().getInputArgList()) { + TensorInfo info = + TensorInfo.newBuilder() + .setDtype(input.getType()) + .setTensorShape(TensorShapeProto.newBuilder().setUnknownRank(true).build()) + .setName(input.getName()) + .build(); + + builder.input(input.getName(), info); + } + + for (ArgDef outputDef : nativeFunction.getFunctionDef().getSignature().getOutputArgList()) { + TensorInfo info = + TensorInfo.newBuilder() + .setDtype(outputDef.getType()) + .setTensorShape(TensorShapeProto.newBuilder().setUnknownRank(true).build()) + .setName(outputDef.getName()) + .build(); + + builder.output(outputDef.getName(), info); + } + + return new ConcreteFunction(builder.build(), nativeFunction, availableFunctions); + } + + private final Signature signature; + private final NativeFunction nativeFunction; + private final PointerScope scope; + private final Set dependencies; + private final DataType[] inputDtypes; + private final DataType[] outputDtypes; + + /** All native functions should have deallocators registered */ + private ConcreteFunction( + Signature signature, NativeFunction nativeFunction, Set dependencies) { + this.signature = signature; + this.nativeFunction = nativeFunction; + this.dependencies = Collections.unmodifiableSet(dependencies); + + if (this.signature.getInputs().size() + != nativeFunction.getFunctionDef().getSignature().getInputArgCount()) { + throw new IllegalArgumentException( + "Signature must have the same number of inputs as the native function. Expected " + + nativeFunction.getFunctionDef().getSignature().getInputArgCount() + + ", got " + + this.signature.getInputs().size()); + } + + if (this.signature.getOutputs().size() + != nativeFunction.getFunctionDef().getSignature().getOutputArgCount()) { + throw new IllegalArgumentException( + "New signature must have the same number of outputs as the native function. 
Expected " + + nativeFunction.getFunctionDef().getSignature().getOutputArgCount() + + ", got " + + this.signature.getOutputs().size()); + } + + inputDtypes = + this.signature.getInputs().values().stream().map(x -> x.dataType).toArray(DataType[]::new); + + List inputs = Arrays.asList(inputDtypes); + List nativeInputs = + nativeFunction.getFunctionDef().getSignature().getInputArgList().stream() + .map(ArgDef::getType) + .collect(Collectors.toList()); + + if (!dataTypesMatch(inputs, nativeInputs)) { + throw new IllegalArgumentException( + "Data types of the signature's inputs must match the native function's (in order). Expected " + + nativeInputs + + ", got " + + inputs); + } + + outputDtypes = + signature().getOutputs().values().stream().map(x -> x.dataType).toArray(DataType[]::new); + + List outputs = Arrays.asList(outputDtypes); + List nativeOutputs = + nativeFunction.getFunctionDef().getSignature().getOutputArgList().stream() + .map(ArgDef::getType) + .collect(Collectors.toList()); + + if (!dataTypesMatch(outputs, nativeOutputs)) { + throw new IllegalArgumentException( + "Data types of the signature's outputs must match the native function's (in order). Expected " + + nativeOutputs + + ", got " + + outputs); + } + + try (PointerScope scope = new PointerScope()) { + this.scope = scope; + scope.extend(); + scope.attach(this.nativeFunction.getNativeHandle()); + this.dependencies.forEach(scope::attach); + } } /** - * Returns the graph of this function + * FIXME: This causes native errors when I use it (Linux GPU, 6.1 CC), but I'm leaving it because + * how to enable XLA JIT is extremely non-obvious. + * + *

Causes {@code OP_REQUIRES failed at xla_ops.cc:363 : Not found: could not find registered + * platform with id: 0x7f75af03e6e8} (it's a warning, but the resulting TF_Status fails). */ - public Graph graph() { - return graph; + private void makeJit() { + try (PointerScope scope = new PointerScope()) { + byte[] bytes = AttrValue.newBuilder().setB(true).build().toByteArray(); + BytePointer trueValue = new BytePointer(bytes); + + TF_Status status1 = TF_Status.newStatus(); + TF_FunctionSetAttrValueProto( + nativeHandle(), "_XlaMustCompile", trueValue, bytes.length, status1); + status1.throwExceptionIfNotOK(); + + TF_Status status2 = TF_Status.newStatus(); + TF_FunctionSetAttrValueProto(nativeHandle(), "_noinline", trueValue, bytes.length, status2); + status2.throwExceptionIfNotOK(); + } } - @Override - public void close() { - if (ownership != Ownership.NONE) { - session.close(); - if (ownership == Ownership.GRAPH_AND_SESSION) { - graph.close(); + private static boolean dataTypesMatch(List a, List b) { + if (a.size() != b.size()) { + return false; + } + + for (int i = 0; i < a.size(); i++) { + DataType aType = a.get(i); + DataType bType = b.get(i); + + if (aType != DataType.DT_INVALID && bType != DataType.DT_INVALID && !a.equals(b)) { + return false; } } + + return true; } - @Override - public String toString() { - return signature.toString(); + private static TF_Operation outputHandle(Operand operand) { + if (operand == null) { + throw new NullPointerException("Can't get output handle for null operand"); + } + + Pointer handle = operand.asOutput().getUnsafeNativeHandle(); + if (handle.isNull()) { + throw new NullPointerException("Native handle of operand is null, has it been closed?"); + } + + if (!(handle instanceof TF_Operation)) { + throw new IllegalArgumentException("Operand was not a graph operand"); + } + + return (TF_Operation) handle; } - private enum Ownership { - GRAPH_AND_SESSION, SESSION_ONLY, NONE; + private static TF_Output resolveToOutput(Graph graph, List> operands) { + TF_Output handles = new TF_Output(operands.size()); + for (int i = 0; i < operands.size(); i++) { + Operand input = operands.get(i); + graph.checkInput(input); + TF_Operation handle = outputHandle(input); + + handles.position(i).oper(handle).index(input.asOutput().index()); + } + handles.position(0); + return handles; } - private final Graph graph; - private final Session session; - private final Signature signature; - private final Ownership ownership; + private static ConcreteFunction buildFromGraph(Graph graph, Signature signature) { + try (PointerScope scope = new PointerScope(); + Reference ref = graph.ref()) { + TF_Status status = TF_Status.newStatus(); - ConcreteFunction(Signature signature, Graph graph, Session session, Ownership ownership) { - this.graph = graph; - this.session = session; - this.signature = signature; - this.ownership = ownership; + List> inputs = + signature.getInputs().entrySet().stream() + .map( + (x) -> + TensorFunction.validateDescription(x.getValue(), graph, x.getKey(), "Input")) + .collect(Collectors.toList()); + + List> outputs = + signature.getOutputs().entrySet().stream() + .map( + (x) -> + TensorFunction.validateDescription(x.getValue(), graph, x.getKey(), "Output")) + .collect(Collectors.toList()); + + List ops = + new ArrayList<>(graph.completeSubgraph(new HashSet<>(inputs), new HashSet<>(outputs))); + + inputs.forEach(input -> ops.remove((GraphOperation) input.op())); + + ops.forEach( + x -> { + if (x.type().equals(Placeholder.OP_NAME) + || 
x.type().equals(PlaceholderWithDefault.OP_NAME)) { + throw new IllegalArgumentException( + "Can't calculate outputs (" + + outputs + + ") from inputs (" + + inputs + + "), " + + "they also depend on \"" + + x + + "\""); + } + }); + + // Python sometimes has NoOps as outputs + Ops tf = Ops.create(graph).withSubScope("functionControlOutputs"); + for (int i = 0; i < outputs.size(); i++) { + Operand output = outputs.get(i); + if (output.op().numOutputs() < 1) { + Operand realOutput = + tf.withControlDependencies(Collections.singletonList(output)) + .withName(output.op().name() + "_control") + .constant(false); + ops.add((GraphOperation) realOutput.op()); + outputs.set(i, realOutput); + } + } + + PointerPointer operations = new PointerPointer<>(ops.size()); + for (int i = 0; i < ops.size(); i++) { + operations.put(i, ops.get(i).getUnsafeNativeHandle()); + } + + TF_Function handle = + TF_GraphToFunction( + ref.nativeHandle(), + new BytePointer(signature.key()), + (byte) 1, + ops.size(), + operations, + inputs.size(), + resolveToOutput(graph, inputs), + outputs.size(), + resolveToOutput(graph, outputs), + null, + null, + new BytePointer( + signature.methodName() != null + ? signature.methodName() + : "Method " + signature.key()), + status); + + handle.withDeallocator(); + status.throwExceptionIfNotOK(); + return new ConcreteFunction( + signature, new NativeFunction(handle), graph.getNativeFunctions(scope)); + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperationBuilder.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperationBuilder.java index f1dd6216a79..e3283ee2ab3 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperationBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperationBuilder.java @@ -1,18 +1,18 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ======================================================================= + */ package org.tensorflow; import static org.tensorflow.internal.c_api.global.tensorflow.TFE_Execute; @@ -22,6 +22,8 @@ import static org.tensorflow.internal.c_api.global.tensorflow.TFE_OpSetAttrBoolList; import static org.tensorflow.internal.c_api.global.tensorflow.TFE_OpSetAttrFloat; import static org.tensorflow.internal.c_api.global.tensorflow.TFE_OpSetAttrFloatList; +import static org.tensorflow.internal.c_api.global.tensorflow.TFE_OpSetAttrFunctionList; +import static org.tensorflow.internal.c_api.global.tensorflow.TFE_OpSetAttrFunctionName; import static org.tensorflow.internal.c_api.global.tensorflow.TFE_OpSetAttrInt; import static org.tensorflow.internal.c_api.global.tensorflow.TFE_OpSetAttrIntList; import static org.tensorflow.internal.c_api.global.tensorflow.TFE_OpSetAttrShape; @@ -35,6 +37,9 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; import org.bytedeco.javacpp.BooleanPointer; import org.bytedeco.javacpp.BytePointer; import org.bytedeco.javacpp.IntPointer; @@ -88,7 +93,8 @@ public EagerOperationBuilder addInputList(Output[] inputs) { @Override public OperationBuilder addControlInput(Operation control) { - // No-op. Any operations passed to this method will already be evaluated (b/c eager evaluation). + // No-op. Any operations passed to this method will already be evaluated (b/c eager + // evaluation). return this; } @@ -217,15 +223,35 @@ public EagerOperationBuilder setAttr(String name, Shape[] values) { return this; } + @Override + public OperationBuilder setAttr(String name, ConcreteFunction value) { + session.attachFunction(value); + setAttrFunctionName(opHandle, name, value.getDefinedName()); + return this; + } + + @Override + public OperationBuilder setAttr(String name, ConcreteFunction[] value) { + for (ConcreteFunction fn : value) { + session.attachFunction(fn); + } + + setAttrFunctionList( + opHandle, + session.nativeHandle(), + name, + Arrays.stream(value).map(ConcreteFunction::getDefinedName).collect(Collectors.toList())); + + return this; + } + private TFE_Op opHandle; private final EagerSession session; private final String type; private final String name; - /** - * This value should be >= to the maximum number of outputs in any op - */ + /** This value should be >= to the maximum number of outputs in any op */ private static final int MAX_OUTPUTS_PER_OP = 1000; private static void requireOp(TFE_Op handle) { @@ -267,7 +293,8 @@ private static TFE_TensorHandle[] execute(TFE_Op opHandle, EagerSession session) requireOp(opHandle); try (PointerScope scope = new PointerScope()) { IntPointer numRetvals = new IntPointer(1).put(MAX_OUTPUTS_PER_OP); - PointerPointer retvals = new PointerPointer(MAX_OUTPUTS_PER_OP); + PointerPointer retvals = + new PointerPointer(MAX_OUTPUTS_PER_OP); TF_Status status = TF_Status.newStatus(); TFE_Execute(opHandle, retvals, numRetvals, status); status.throwExceptionIfNotOK(); @@ -294,7 +321,8 @@ private static void addInput(TFE_Op opHandle, TFE_TensorHandle tensorHandle) { private static void addInputList(TFE_Op opHandle, TFE_TensorHandle[] tensorHandles) { requireOp(opHandle); try (PointerScope scope = new PointerScope()) { - PointerPointer tensorPointers = new PointerPointer(tensorHandles.length); + PointerPointer tensorPointers = + new PointerPointer(tensorHandles.length); for (int i = 0; i < tensorHandles.length; ++i) { 
requireTensorHandle(tensorHandles[i]); tensorPointers.put(i, tensorHandles[i]); @@ -363,7 +391,8 @@ private static void setAttrBool(TFE_Op opHandle, String name, boolean value) { private static void setAttrBoolList(TFE_Op opHandle, String name, boolean[] values) { requireOp(opHandle); try (PointerScope scope = new PointerScope()) { - TFE_OpSetAttrBoolList(opHandle, name, new BytePointer(new BooleanPointer(values)), values.length); + TFE_OpSetAttrBoolList( + opHandle, name, new BytePointer(new BooleanPointer(values)), values.length); } } @@ -408,8 +437,36 @@ private static void setAttrShapeList(TFE_Op opHandle, String name, long[] shapes shapesPointer.position(shapesPointer.position() + numDims[i] * 8); } TF_Status status = TF_Status.newStatus(); - TFE_OpSetAttrShapeList(opHandle, new BytePointer(name), shapesPointers, new IntPointer(numDims), - numDims.length, status); + TFE_OpSetAttrShapeList( + opHandle, + new BytePointer(name), + shapesPointers, + new IntPointer(numDims), + numDims.length, + status); + } + } + + private static void setAttrFunctionName(TFE_Op opHandle, String attrName, String functionName) { + requireOp(opHandle); + try (PointerScope scope = new PointerScope()) { + TFE_OpSetAttrFunctionName(opHandle, attrName, functionName, functionName.length()); + } + } + + private static void setAttrFunctionList( + TFE_Op opHandle, TFE_Context context, String attrName, List functionNames) { + requireOp(opHandle); + requireContext(context); + try (PointerScope scope = new PointerScope()) { + PointerPointer fns = new PointerPointer<>(functionNames.size()); + for (int i = 0; i < functionNames.size(); i++) { + TF_Status status = TF_Status.newStatus(); + TFE_Op op = TFE_Op.newOp(context, functionNames.get(i), status); + status.throwExceptionIfNotOK(); + fns.put(i, op); + } + TFE_OpSetAttrFunctionList(opHandle, new BytePointer(attrName), fns, functionNames.size()); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java index c5d67128406..84fe7675c40 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java @@ -15,6 +15,7 @@ */ package org.tensorflow; +import static org.tensorflow.internal.c_api.global.tensorflow.TFE_ContextAddFunction; import static org.tensorflow.internal.c_api.global.tensorflow.TFE_ContextOptionsSetAsync; import static org.tensorflow.internal.c_api.global.tensorflow.TFE_ContextOptionsSetConfig; import static org.tensorflow.internal.c_api.global.tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy; @@ -284,6 +285,25 @@ public OperationBuilder opBuilder(String type, String name) { return new EagerOperationBuilder(this, type, name); } + @Override + public void attachFunction(ConcreteFunction function) { + checkSession(); + try (PointerScope scope = new PointerScope()) { + TF_Status status = TF_Status.newStatus(); + TFE_ContextAddFunction(nativeHandle, function.nativeHandle(), status); + status.throwExceptionIfNotOK(); + + function + .getDependencies() + .forEach( + fn -> { + TF_Status status2 = TF_Status.newStatus(); + TFE_ContextAddFunction(nativeHandle, fn, status2); + status2.throwExceptionIfNotOK(); + }); + } + } + @Override public Types environmentType() { return Types.EAGER; diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java 
b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java index a18c7fff38b..6f50aeafe98 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java @@ -37,6 +37,15 @@ enum Types { */ OperationBuilder opBuilder(String type, String name); + /** + * Attach the function and its dependencies to this execution environment, allowing it to be + * called. + * + *

Done automatically in the {@link org.tensorflow.op.Ops#call(ConcreteFunction, + * java.util.Map)} ops. + */ + void attachFunction(ConcreteFunction function); + /** * Returns true if the given operation is valid in this execution environment. * diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java index b69fe89da0a..f3e712492b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java @@ -18,8 +18,11 @@ import static org.tensorflow.internal.c_api.global.tensorflow.TF_AddGradientsWithPrefix; import static org.tensorflow.internal.c_api.global.tensorflow.TF_DeleteGraph; import static org.tensorflow.internal.c_api.global.tensorflow.TF_FinishWhile; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_GraphCopyFunction; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_GraphGetFunctions; import static org.tensorflow.internal.c_api.global.tensorflow.TF_GraphImportGraphDef; import static org.tensorflow.internal.c_api.global.tensorflow.TF_GraphNextOperation; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_GraphNumFunctions; import static org.tensorflow.internal.c_api.global.tensorflow.TF_GraphOperationByName; import static org.tensorflow.internal.c_api.global.tensorflow.TF_GraphToGraphDef; import static org.tensorflow.internal.c_api.global.tensorflow.TF_ImportGraphDefOptionsSetPrefix; @@ -39,10 +42,12 @@ import java.util.stream.Collectors; import org.bytedeco.javacpp.BytePointer; import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.PointerPointer; import org.bytedeco.javacpp.PointerScope; import org.bytedeco.javacpp.SizeTPointer; import org.tensorflow.exceptions.TensorFlowException; import org.tensorflow.internal.c_api.TF_Buffer; +import org.tensorflow.internal.c_api.TF_Function; import org.tensorflow.internal.c_api.TF_Graph; import org.tensorflow.internal.c_api.TF_ImportGraphDefOptions; import org.tensorflow.internal.c_api.TF_Operation; @@ -378,6 +383,95 @@ public GraphOperationBuilder opBuilder(String type, String name) { return new GraphOperationBuilder(this, type, name); } + @Override + public void attachFunction(ConcreteFunction function) { + try (Reference ref = ref(); + PointerScope scope = new PointerScope()) { + TF_Status status = TF_Status.newStatus(); + TF_GraphCopyFunction(ref.nativeHandle(), function.nativeHandle(), null, status); + status.throwExceptionIfNotOK(); + + function + .getDependencies() + .forEach( + x -> { + TF_Status status2 = TF_Status.newStatus(); + TF_GraphCopyFunction(ref.nativeHandle(), x, null, status2); + status2.throwExceptionIfNotOK(); + }); + } + } + + /** + * Get the graph's functions. + * + * @param outerScope the pointer scope to attach the functions to. 
+ */ + List getNativeFunctions(PointerScope outerScope) { + try (Reference ref = ref(); + PointerScope scope = new PointerScope()) { + TF_Status status = TF_Status.newStatus(); + + int numFunctions = TF_GraphNumFunctions(ref.nativeHandle()); + + PointerPointer output = new PointerPointer<>(numFunctions); + + TF_GraphGetFunctions(ref.nativeHandle(), output, numFunctions, status); + status.throwExceptionIfNotOK(); + + List funcs = new ArrayList<>(numFunctions); + for (int i = 0; i < numFunctions; i++) { + TF_Function function = output.get(TF_Function.class, i); + + function.withDeallocator(); + outerScope.attach(function); + + funcs.add(new NativeFunction(function)); + } + + return funcs; + } + } + + /** + * Get the function attached to the graph with the given native name. Returns {@code null} if none + * found. + * + * @param key the name of the native function. Note that this may include an argument hash. + * @return the found {@link ConcreteFunction}, or {@code null} if none were found with the correct + * name + */ + public ConcreteFunction getFunction(String key) { + try (Reference ref = ref(); + PointerScope scope = new PointerScope()) { + List funcs = getNativeFunctions(scope); + + for (NativeFunction f : funcs) { + + if (f.getName().equals(key)) { + return ConcreteFunction.fromNativeHandle(f, funcs); + } + } + } + return null; + } + + /** + * Get the functions attached to the graph. + * + * @return all functions attached to this graph. + */ + public List getFunctions() { + try (Reference ref = ref(); + PointerScope scope = new PointerScope()) { + List funcs = getNativeFunctions(scope); + + return funcs.stream() + .map(x -> ConcreteFunction.fromNativeHandle(x, funcs)) + .collect(Collectors.toList()); + } + } + @Override public Types environmentType() { return Types.GRAPH; @@ -1077,12 +1171,20 @@ private static SaverDef addVariableSaver(Graph graph) { } } + Placeholder saveFilename = tf.withName("filename").placeholder(TString.class); + + if (varNames.isEmpty()) { + return SaverDef.newBuilder() + .setFilenameTensorName(saveFilename.op().name()) + .setSaveTensorName(tf.withName("empty_save").identity(saveFilename).op().name()) + .setRestoreOpName(tf.withName("restore_all").noOp().op().name()) + .build(); + } + // FIXME Need an easier way to initialize an NdArray from a list String[] tmp = new String[varNames.size()]; Constant varNamesTensor = tf.constant(StdArrays.ndCopyOf(varNames.toArray(tmp))); Operand varSlices = tf.zerosLike(varNamesTensor); - - Placeholder saveFilename = tf.withName("filename").placeholder(TString.class); Save saveVariables = tf.train.save(saveFilename, varNamesTensor, varSlices, varOutputs); Identity id = tf.withControlDependencies(Arrays.asList(saveFilename, saveVariables)) diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperationBuilder.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperationBuilder.java index 72858ece572..53ab50db4b4 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperationBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperationBuilder.java @@ -1,18 +1,18 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import static org.tensorflow.internal.c_api.global.tensorflow.TF_AddControlInput; @@ -24,6 +24,7 @@ import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrBoolList; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrFloat; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrFloatList; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrFuncName; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrInt; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrIntList; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrShape; @@ -34,9 +35,13 @@ import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrTensorList; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrType; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrTypeList; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrValueProto; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetDevice; import java.nio.charset.Charset; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; import org.bytedeco.javacpp.BooleanPointer; import org.bytedeco.javacpp.BytePointer; import org.bytedeco.javacpp.IntPointer; @@ -45,6 +50,7 @@ import org.bytedeco.javacpp.PointerPointer; import org.bytedeco.javacpp.PointerScope; import org.bytedeco.javacpp.SizeTPointer; +import org.tensorflow.Graph.Reference; import org.tensorflow.internal.c_api.TF_Graph; import org.tensorflow.internal.c_api.TF_Operation; import org.tensorflow.internal.c_api.TF_OperationDescription; @@ -52,11 +58,12 @@ import org.tensorflow.internal.c_api.TF_Status; import org.tensorflow.internal.c_api.TF_Tensor; import org.tensorflow.ndarray.Shape; +import org.tensorflow.proto.framework.AttrValue; +import org.tensorflow.proto.framework.AttrValue.ListValue; import org.tensorflow.proto.framework.DataType; +import org.tensorflow.proto.framework.NameAttrList; -/** - * An {@link OperationBuilder} for adding {@link GraphOperation}s to a {@link Graph}. - */ +/** An {@link OperationBuilder} for adding {@link GraphOperation}s to a {@link Graph}. 
*/ public final class GraphOperationBuilder implements OperationBuilder { GraphOperationBuilder(Graph graph, String type, String name) { @@ -94,7 +101,8 @@ public GraphOperationBuilder addControlInput(Operation control) { } if (control.env() != graph) { - throw new IllegalArgumentException("Control input " + control + " was from a different graph, can't use."); + throw new IllegalArgumentException( + "Control input " + control + " was from a different graph, can't use."); } Graph.Reference r = graph.ref(); @@ -344,6 +352,30 @@ public GraphOperationBuilder setAttr(String name, String[] value) { return this; } + @Override + public OperationBuilder setAttr(String name, ConcreteFunction value) { + graph.attachFunction(value); + try (Reference r = graph.ref()) { + setAttrFunctionName(unsafeNativeHandle, name, value.getDefinedName()); + } + return this; + } + + @Override + public OperationBuilder setAttr(String name, ConcreteFunction[] value) { + for (ConcreteFunction f : value) { + graph.attachFunction(f); + } + + try (Reference r = graph.ref()) { + setAttrFunctionList( + unsafeNativeHandle, + name, + Arrays.stream(value).map(ConcreteFunction::getDefinedName).collect(Collectors.toList())); + } + return this; + } + private TF_OperationDescription unsafeNativeHandle; private Graph graph; @@ -394,11 +426,16 @@ private static void addInput(TF_OperationDescription handle, TF_Operation opHand } } - private static void addInputList(TF_OperationDescription handle, TF_Operation[] opHandles, int[] indices) { + private static void addInputList( + TF_OperationDescription handle, TF_Operation[] opHandles, int[] indices) { requireHandle(handle); if (indices.length != opHandles.length) { - throw new IllegalArgumentException("mismatch in number of Operations (" - + opHandles.length + ") and output indices (" + indices.length + ") provided"); + throw new IllegalArgumentException( + "mismatch in number of Operations (" + + opHandles.length + + ") and output indices (" + + indices.length + + ") provided"); } try (PointerScope scope = new PointerScope()) { @@ -412,8 +449,8 @@ private static void addInputList(TF_OperationDescription handle, TF_Operation[] private static void addControlInput(TF_OperationDescription handle, TF_Operation opHandle) { if (opHandle == null || opHandle.isNull()) { - throw new IllegalStateException("control input is not valid, " - + "perhaps the Graph containing it has been closed()?"); + throw new IllegalStateException( + "control input is not valid, " + "perhaps the Graph containing it has been closed()?"); } requireHandle(handle); TF_AddControlInput(handle, opHandle); @@ -459,7 +496,8 @@ private static void setAttrBool(TF_OperationDescription handle, String name, boo TF_SetAttrBool(handle, name, (byte) (value ? 
1 : 0)); } - private static void setAttrBoolList(TF_OperationDescription handle, String name, boolean[] value) { + private static void setAttrBoolList( + TF_OperationDescription handle, String name, boolean[] value) { requireHandle(handle); try (PointerScope scope = new PointerScope()) { TF_SetAttrBoolList(handle, name, new BytePointer(new BooleanPointer(value)), value.length); @@ -476,7 +514,8 @@ private static void setAttrTypeList(TF_OperationDescription handle, String name, TF_SetAttrTypeList(handle, name, type, type.length); } - private static void setAttrTensor(TF_OperationDescription handle, String name, TF_Tensor tensorHandle) { + private static void setAttrTensor( + TF_OperationDescription handle, String name, TF_Tensor tensorHandle) { requireHandle(handle); requireTensor(tensorHandle); @@ -487,7 +526,8 @@ private static void setAttrTensor(TF_OperationDescription handle, String name, T } } - private static void setAttrTensorList(TF_OperationDescription handle, String name, TF_Tensor[] tensorHandles) { + private static void setAttrTensorList( + TF_OperationDescription handle, String name, TF_Tensor[] tensorHandles) { requireHandle(handle); try (PointerScope scope = new PointerScope()) { @@ -498,12 +538,14 @@ private static void setAttrTensorList(TF_OperationDescription handle, String nam } TF_Status status = TF_Status.newStatus(); - TF_SetAttrTensorList(handle, new BytePointer(name), tensors.position(0), tensorHandles.length, status); + TF_SetAttrTensorList( + handle, new BytePointer(name), tensors.position(0), tensorHandles.length, status); status.throwExceptionIfNotOK(); } } - private static void setAttrShape(TF_OperationDescription handle, String name, long[] shape, int numDims) { + private static void setAttrShape( + TF_OperationDescription handle, String name, long[] shape, int numDims) { requireHandle(handle); // num_dims and env->GetArrayLength(shape) are assumed to be consistent. 
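The function-valued attribute setters added to this builder make it possible to wire a ConcreteFunction into ops that take a func attr. A rough usage sketch, not part of this patch: it assumes a Graph graph, an operand output x from that graph, and a single-float-input, single-float-output ConcreteFunction func; the StatefulPartitionedCall op and its Tin/Tout/f attr names come from the TensorFlow op registry, not from this change.

    import org.tensorflow.ConcreteFunction;
    import org.tensorflow.Graph;
    import org.tensorflow.Operation;
    import org.tensorflow.Output;
    import org.tensorflow.proto.framework.DataType;

    class FunctionAttrSketch {
      static Operation callFunction(Graph graph, Output<?> x, ConcreteFunction func) {
        // setAttr("f", func) attaches func (and its dependencies) to the graph before
        // recording the attribute, per the new GraphOperationBuilder overloads.
        return graph.opBuilder("StatefulPartitionedCall", "call_my_func")
            .addInputList(new Output[] {x})
            .setAttr("Tin", new DataType[] {DataType.DT_FLOAT})
            .setAttr("Tout", new DataType[] {DataType.DT_FLOAT})
            .setAttr("f", func)
            .build();
      }
    }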
@@ -511,7 +553,8 @@ private static void setAttrShape(TF_OperationDescription handle, String name, lo TF_SetAttrShape(handle, name, shape, numDims); } - private static void setAttrShapeList(TF_OperationDescription handle, String name, long[] shapes, int[] numDims) { + private static void setAttrShapeList( + TF_OperationDescription handle, String name, long[] shapes, int[] numDims) { requireHandle(handle); try (PointerScope scope = new PointerScope()) { @@ -521,11 +564,13 @@ private static void setAttrShapeList(TF_OperationDescription handle, String name shapesPointers.put(i, shapesPointer); shapesPointer.position(shapesPointer.position() + numDims[i] * 8); } - TF_SetAttrShapeList(handle, new BytePointer(name), shapesPointers, new IntPointer(numDims), numDims.length); + TF_SetAttrShapeList( + handle, new BytePointer(name), shapesPointers, new IntPointer(numDims), numDims.length); } } - private static void setAttrStringList(TF_OperationDescription handle, String name, byte[][] value) { + private static void setAttrStringList( + TF_OperationDescription handle, String name, byte[][] value) { requireHandle(handle); try (PointerScope scope = new PointerScope()) { @@ -539,4 +584,33 @@ private static void setAttrStringList(TF_OperationDescription handle, String nam TF_SetAttrStringList(handle, new BytePointer(name), valuePointers, lengths, value.length); } } + + private static void setAttrFunctionName( + TF_OperationDescription opHandle, String attrName, String functionName) { + requireHandle(opHandle); + try (PointerScope scope = new PointerScope()) { + TF_SetAttrFuncName(opHandle, attrName, functionName, functionName.length()); + } + } + + private static void setAttrFunctionList( + TF_OperationDescription opHandle, String attrName, List functionNames) { + requireHandle(opHandle); + try (PointerScope scope = new PointerScope()) { + TF_Status status = TF_Status.newStatus(); + AttrValue value = + AttrValue.newBuilder() + .setList( + ListValue.newBuilder() + .addAllFunc( + functionNames.stream() + .map(x -> NameAttrList.newBuilder().setName(x).build()) + .collect(Collectors.toList())) + .build()) + .build(); + byte[] bytes = value.toByteArray(); + TF_SetAttrValueProto(opHandle, attrName, new BytePointer(bytes), bytes.length, status); + status.throwExceptionIfNotOK(); + } + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/NativeFunction.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/NativeFunction.java new file mode 100644 index 00000000000..faab6dbca7b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/NativeFunction.java @@ -0,0 +1,155 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ======================================================================= + */ +package org.tensorflow; + +import static org.tensorflow.internal.c_api.global.tensorflow.TF_FunctionName; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_FunctionToFunctionDef; + +import com.google.protobuf.InvalidProtocolBufferException; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.stream.Collectors; +import org.bytedeco.javacpp.PointerScope; +import org.tensorflow.internal.c_api.TF_Buffer; +import org.tensorflow.internal.c_api.TF_Function; +import org.tensorflow.internal.c_api.TF_Status; +import org.tensorflow.proto.framework.FunctionDef; +import org.tensorflow.proto.framework.NodeDef; + +/** + * A class holding a native function handle and providing cached access to it's {@link FunctionDef}. + */ +class NativeFunction { + public NativeFunction(TF_Function nativeHandle) { + this.nativeHandle = nativeHandle; + } + + /** Get the native handle. No guarantees about liveness are made. */ + public TF_Function getNativeHandle() { + return nativeHandle; + } + + /** Get the function's {@link FunctionDef} */ + public synchronized FunctionDef getFunctionDef() { + if (functionDef == null) { + try (PointerScope scope = new PointerScope()) { + TF_Buffer funcDefBuffer = TF_Buffer.newBuffer(); + TF_Status status = TF_Status.newStatus(); + + TF_FunctionToFunctionDef(nativeHandle, funcDefBuffer, status); + status.throwExceptionIfNotOK(); + + try { + functionDef = FunctionDef.parseFrom(funcDefBuffer.dataAsByteBuffer()); + } catch (InvalidProtocolBufferException e) { + throw new IllegalStateException("Failed to parse FunctionDef proto", e); + } + } + } + + return functionDef; + } + + /** Get the first-level dependencies of the function. */ + public synchronized List getDependencies() { + if (dependencies == null) { + Set deps = new LinkedHashSet<>(); + + for (NodeDef node : getFunctionDef().getNodeDefList()) { + if (node.getOp().equals(ConcreteFunction.CALL_OP) + || node.getOp().equals(ConcreteFunction.STATEFUL_CALL_OP)) { + deps.add(node.getAttrMap().get("f").getFunc().getName()); + } + } + dependencies = Collections.unmodifiableList(new ArrayList<>(deps)); + } + + return dependencies; + } + + /** Get whether the function is stateful (whether it has stateful ops). */ + public synchronized boolean isStateful() { + if (stateful == null) { + stateful = + getFunctionDef().getSignature().getIsStateful() + || getFunctionDef().getNodeDefList().stream() + .anyMatch(x -> TensorFlow.isOpStateful(x.getOp())); + } + return stateful; + } + + /** Get the name of the function. 
*/ + public synchronized String getName() { + if (name == null) { + try (PointerScope scope = new PointerScope()) { + return TF_FunctionName(nativeHandle).getString(); + } + } + + return name; + } + + synchronized Set getAllDependencies(Collection availableFunctions) { + Map fnMap = + availableFunctions.stream().collect(Collectors.toMap(NativeFunction::getName, e -> e)); + Set done = new LinkedHashSet<>(1 + getDependencies().size()); + + Queue todo = new ArrayDeque<>(1 + getDependencies().size()); + todo.add(this); + + while (!todo.isEmpty()) { + NativeFunction next = todo.remove(); + + if (!done.add(next.getName())) { + continue; + } + + for (String dep : next.getDependencies()) { + if (!done.contains(dep)) { + NativeFunction fn = fnMap.get(dep); + + if (fn == null) { + throw new IllegalStateException( + "Function " + dep + " is required, but not present in graph."); + } + + todo.add(fn); + } + } + } + + done.remove(getName()); + + return done.stream() + .map(fnMap::get) + .map(NativeFunction::getNativeHandle) + .collect(Collectors.toSet()); + } + + private final TF_Function nativeHandle; + + private FunctionDef functionDef = null; + private List dependencies = null; + private Boolean stateful = null; + private String name = null; +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/OperationBuilder.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/OperationBuilder.java index a487d8b9237..569f37c8f4a 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/OperationBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/OperationBuilder.java @@ -1,18 +1,18 @@ -/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import org.tensorflow.ndarray.Shape; @@ -49,7 +49,7 @@ public interface OperationBuilder { * *

The OperationBuilder is not usable after build() returns. */ - Operation build(); + Operation build(); /** * Add the output of another operation as the next input of the operation being built. @@ -57,7 +57,7 @@ public interface OperationBuilder { * @param input {@link Output} supposed to be the input of the operation being built. * @return the OperationBuilder instance for chaining. */ - OperationBuilder addInput(Output input); + OperationBuilder addInput(Output input); /** * Add the outputs of another operation as the next inputs of the operation being built. @@ -65,7 +65,7 @@ public interface OperationBuilder { * @param inputs list of {@link Output} supposed to be the inputs of the operation being built. * @return the OperationBuilder instance for chaining. */ - OperationBuilder addInputList(Output[] inputs); + OperationBuilder addInputList(Output[] inputs); /** * Ensure that the operation does not execute before the control operation does. @@ -80,7 +80,7 @@ public interface OperationBuilder { * @param control operation that must be executed before running this operation. * @return the OperationBuilder instance for chaining. */ - OperationBuilder addControlInput(Operation control); + OperationBuilder addControlInput(Operation control); /** * Set the device requested for computing the operation being built. @@ -88,7 +88,7 @@ public interface OperationBuilder { * @param device the requested device, as a string * @return the OperationBuilder instance for chaining. */ - OperationBuilder setDevice(String device); + OperationBuilder setDevice(String device); /** * Set the string values of an attribute of the operation being built. @@ -97,7 +97,7 @@ public interface OperationBuilder { * @param value attribute values * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, String[] value); + OperationBuilder setAttr(String name, String[] value); /** * Set the string value of an attribute of the operation being built. @@ -106,7 +106,7 @@ public interface OperationBuilder { * @param value attribute value * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, String value); + OperationBuilder setAttr(String name, String value); /** * Set the byte values of an attribute of the operation being built. @@ -115,7 +115,7 @@ public interface OperationBuilder { * @param value attribute values * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, byte[] value); + OperationBuilder setAttr(String name, byte[] value); /** * Set the long value of an attribute of the operation being built. @@ -124,7 +124,7 @@ public interface OperationBuilder { * @param value attribute value * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, long value); + OperationBuilder setAttr(String name, long value); /** * Set the long values of an attribute of the operation being built. @@ -133,7 +133,7 @@ public interface OperationBuilder { * @param value attribute values * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, long[] value); + OperationBuilder setAttr(String name, long[] value); /** * Set the float value of an attribute of the operation being built. @@ -142,7 +142,7 @@ public interface OperationBuilder { * @param value attribute value * @return the OperationBuilder instance for chaining. 
*/ - OperationBuilder setAttr(String name, float value); + OperationBuilder setAttr(String name, float value); /** * Set the float values of an attribute of the operation being built. @@ -151,7 +151,7 @@ public interface OperationBuilder { * @param value attribute values * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, float[] value); + OperationBuilder setAttr(String name, float[] value); /** * Set the boolean value of an attribute of the operation being built. @@ -160,7 +160,7 @@ public interface OperationBuilder { * @param value attribute value * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, boolean value); + OperationBuilder setAttr(String name, boolean value); /** * Set the boolean values of an attribute of the operation being built. @@ -169,7 +169,7 @@ public interface OperationBuilder { * @param value attribute values * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, boolean[] value); + OperationBuilder setAttr(String name, boolean[] value); /** * Set the type value of an attribute of the operation being built. @@ -178,7 +178,7 @@ public interface OperationBuilder { * @param value attribute value * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, DataType value); + OperationBuilder setAttr(String name, DataType value); /** * Set the type values of an attribute of the operation being built. @@ -187,7 +187,7 @@ public interface OperationBuilder { * @param value attribute values * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, DataType[] value); + OperationBuilder setAttr(String name, DataType[] value); /** * Set the tensor value of an attribute of the operation being built. @@ -196,7 +196,7 @@ public interface OperationBuilder { * @param value attribute value * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, Tensor value); + OperationBuilder setAttr(String name, Tensor value); /** * Set the tensor values of an attribute of the operation being built. @@ -205,7 +205,7 @@ public interface OperationBuilder { * @param value attribute values * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, Tensor[] value); + OperationBuilder setAttr(String name, Tensor[] value); /** * Set the shape value of an attribute of the operation being built. @@ -214,7 +214,7 @@ public interface OperationBuilder { * @param value attribute value * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, Shape value); + OperationBuilder setAttr(String name, Shape value); /** * Set the shape values of an attribute of the operation being built. @@ -223,5 +223,25 @@ public interface OperationBuilder { * @param value attribute values * @return the OperationBuilder instance for chaining. */ - OperationBuilder setAttr(String name, Shape[] value); + OperationBuilder setAttr(String name, Shape[] value); + + /** + * Set the function value of an attribute of the operation being built. Also attaches the function + * and dependencies to the execution environment. + * + * @param name attribute name + * @param value attribute value + * @return the OperationBuilder instance for chaining. + */ + OperationBuilder setAttr(String name, ConcreteFunction value); + + /** + * Set the function values of an attribute of the operation being built. 
Also attaches the + * functions and dependencies to the execution environment. + * + * @param name attribute name + * @param value attribute value + * @return the OperationBuilder instance for chaining. + */ + OperationBuilder setAttr(String name, ConcreteFunction[] value); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java index 6992e5eee37..3a6433701e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java @@ -1,18 +1,18 @@ -/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import static org.tensorflow.internal.c_api.global.tensorflow.TF_LoadSessionFromSavedModel; @@ -25,11 +25,13 @@ import java.io.OutputStream; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.stream.Collectors; import org.bytedeco.javacpp.BytePointer; import org.bytedeco.javacpp.PointerPointer; @@ -50,6 +52,9 @@ /** * SavedModelBundle represents a model loaded from storage. * + *

All operations on a loaded bundle, and any functions from it, share the same underlying + * session. The session is initialized when loaded. + * *

The model consists of a description of the computation (a {@link Graph}), a {@link Session} * with tensors (e.g., parameters or variables in the graph) initialized to values saved in storage, * and a description of the model as a The concrete function carries a signature (i.e. a list of user-friendly input and outputs - * names to a graph) and a valid session to a graph to be saved in the model. + *

The function carries a signature (i.e. a list of user-friendly input and output names to + * a graph) and a valid session to a graph to be saved in the model. + *

Note:Eventually, TensorFlow for Java will support the export of functions objects like - * the Python API does but right now, only session-centric models are supported (i.e. models that - * has a single main graph and one or more signatures). These models are compatible with those - * exported by TensorFlow 1.x or by TensorFlow 2.x estimators. - * - *
Therefore, all functions exported in a model should share the same session at the moment - * or an exception will be thrown.
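The reworked javadoc that follows keeps this single-session constraint. A hedged usage sketch of the new SessionFunction-based exporter: x and y are assumed to be operands already built on the graph g, and the export path is a placeholder.

    import java.io.IOException;
    import org.tensorflow.Graph;
    import org.tensorflow.Operand;
    import org.tensorflow.SavedModelBundle;
    import org.tensorflow.Session;
    import org.tensorflow.Signature;
    import org.tensorflow.types.TFloat32;

    class ExportSketch {
      static void export(Graph g, Session s, Operand<TFloat32> x, Operand<TFloat32> y)
          throws IOException {
        Signature sig1 = Signature.builder().key("f1").input("x", x).output("y", y).build();
        Signature sig2 = Signature.builder().key("f2").input("x", x).output("y", y).build();

        // Both signatures are wrapped around the exporter's single session; a SessionFunction
        // built on a different session would make withFunction throw.
        SavedModelBundle.exporter("/tmp/model")
            .withSession(s)
            .withSignatures(sig1, sig2)
            .export();
      }
    }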
+ * the Python API does but right now, only session-centric models are supported (i.e. models + * that have a single main graph and one or more signatures). These models are compatible with + * those exported by TensorFlow 1.x or by TensorFlow 2.x estimators.
+ * Therefore, all functions exported in a model should share the same session at the moment or + * an exception will be thrown. This applies to sessions set via {@link + * #withSession(Session)} as well, the exporter can only even have one session. * * @param function a function carrying a signature and a valid session to the graph to be saved * @return this object - * @throws IllegalArgumentException if a function with the same name has already been added to the model - * @throws UnsupportedOperationException if this function does not share the same session with the other - * functions added to this model + * @throws IllegalArgumentException if a function with the same name has already been added to + * the model + * @throws UnsupportedOperationException if the session is already set to a different session */ - public Exporter withFunction(ConcreteFunction function) { + public Exporter withFunction(SessionFunction function) { Signature signature = function.signature(); if (functions.containsKey(signature.key())) { - throw new IllegalArgumentException("Function \"" + signature.key() + "\" was already added to the model"); + throw new IllegalArgumentException( + "Function \"" + signature.key() + "\" was already added to the model"); + } + if (session != null && session != function.session()) { + throw new UnsupportedOperationException( + "This exporter already has a session that differs from the passed function's session"); } + + session = function.session(); functions.put(signature.key(), function); + metaGraphDefBuilder.putSignatureDef(signature.key(), signature.asSignatureDef()); + return this; + } + + /** + * Save multiple functions. Wrapper around {@link #withFunction(SessionFunction)}. All functions + * must have the same session. + * + * @param functions the functions to export + * @return this object + * @throws IllegalArgumentException if a function with the same name has already been added to + * the model + * @throws UnsupportedOperationException if the session is already set to a different session + * @see #withFunction(SessionFunction) + */ + public Exporter withFunctions(SessionFunction... functions) { + for (SessionFunction f : functions) { + withFunction(f); + } + return this; + } + + /** + * Add a signature to the model. This wraps the signature in a {@link SessionFunction} using the + * exporter's already-set session. As such, either {@link #withSession(Session)} or {@link + * #withFunction(SessionFunction)} must be called before this method. + * + * @throws IllegalStateException if no session has been set + * @return this + */ + public Exporter withSignature(Signature signature) { if (session == null) { - session = function.session(); - } else if (session != function.session()) { - throw new UnsupportedOperationException("Saving multiple functions with different graphs/sessions is not supported yet."); + throw new IllegalStateException( + "Session has not been set yet, you must call withSession or withFunction first."); + } + return withFunction(session.function(signature)); + } + + /** + * Add multiple signatures to the model. Wraps {@link #withSignature(Signature)} + * + *

Either {@link #withSession(Session)} or {@link * #withFunction(SessionFunction)} must + * be called before this method, and the session set there will be used for these + * signatures. + * + * @throws IllegalStateException if no session has been set + * @return this + * @see #withSession(Session) + */ + public Exporter withSignatures(Signature... signatures) { + for (Signature s : signatures) { + withSignature(s); } - metaGraphDefBuilder.putSignatureDef(signature.key(), signature.asSignatureDef()); return this; } @@ -178,7 +252,7 @@ public Exporter withFunction(ConcreteFunction function) { * @throws IOException if saved model or variable state cannot be written on disk */ public void export() throws IOException { - if (functions.isEmpty() || session == null) { + if (functions.isEmpty()) { throw new IllegalStateException("Model should contain at least one valid function"); } Graph graph = session.graph(); @@ -187,10 +261,11 @@ public void export() throws IOException { // new ops to the graph for saving and restoring the variables. SaverDef saverDef = graph.saverDef(); - MetaGraphDef.Builder metaGraphDef = metaGraphDefBuilder - .setSaverDef(saverDef) - .setGraphDef(graph.toGraphDef()) - .setMetaInfoDef(MetaInfoDef.newBuilder().addAllTags(Arrays.asList(tags))); + MetaGraphDef.Builder metaGraphDef = + metaGraphDefBuilder + .setSaverDef(saverDef) + .setGraphDef(graph.toGraphDef()) + .setMetaInfoDef(MetaInfoDef.newBuilder().addAllTags(Arrays.asList(tags))); functions.forEach((k, f) -> metaGraphDef.putSignatureDef(k, f.signature().asSignatureDef())); // Make sure saved model directories exist @@ -213,10 +288,10 @@ public void export() throws IOException { } private final String exportDir; - private String[] tags = { DEFAULT_TAG }; + private String[] tags = {DEFAULT_TAG}; private final MetaGraphDef.Builder metaGraphDefBuilder = MetaGraphDef.newBuilder(); - private final Map functions = new LinkedHashMap<>(); private Session session; + private final Map functions = new LinkedHashMap<>(); } /** @@ -289,9 +364,7 @@ public Session session() { return session; } - /** - * Return the signature of all functions available in this saved model. - */ + /** Return the signature of all functions available in this saved model. */ public List signatures() { return functions.values().stream().map(f -> f.signature()).collect(Collectors.toList()); } @@ -304,13 +377,14 @@ public List signatures() { * Map outputTensorMap = myFunction.call(session, inputTensorMap); * }
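On the loading side, a minimal sketch of the Map-based call path this javadoc describes; the model path, the "serve" tag, and the "input" key are placeholders for a real export, and bundle.function("serving_default") would likewise return a TensorFunction backed by the bundle's session.

    import java.util.Collections;
    import java.util.Map;
    import org.tensorflow.SavedModelBundle;
    import org.tensorflow.Tensor;
    import org.tensorflow.types.TFloat32;

    class CallSketch {
      static void callDefault() {
        try (SavedModelBundle bundle = SavedModelBundle.load("/tmp/model", "serve");
            TFloat32 input = TFloat32.scalarOf(3f)) {
          // Calls the default ("serving_default") signature through the bundle's session.
          Map<String, Tensor> args = Collections.singletonMap("input", input);
          Map<String, Tensor> outputs = bundle.call(args);
          outputs.values().forEach(Tensor::close);
        }
      }
    }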

* + * All functions use the bundle's underlying session. + * * @param signatureKey name of the {@code SignatureDef} in the saved model. * @return object that can be used to make calls to a function - * @throws IllegalArgumentException if {@code signatureKey} is not found in this - * saved model. + * @throws IllegalArgumentException if {@code signatureKey} is not found in this saved model. */ - public ConcreteFunction function(String signatureKey) { - ConcreteFunction function = functions.get(signatureKey); + public TensorFunction function(String signatureKey) { + SessionFunction function = functions.get(signatureKey); if (function == null) { throw new IllegalArgumentException( String.format("Function with signature [%s] not found", signatureKey)); @@ -318,24 +392,37 @@ public ConcreteFunction function(String signatureKey) { return function; } + /** + * Get all functions in the bundle. + * + *

All functions use the bundle's underlying session. + */ + public List functions() { + return new ArrayList<>(functions.values()); + } + /** * Invokes the default function directly from this model. * *

The default function selection is done based on the first of the following conditions that * is true: + * *

    - *
  • The function is the only signature available attached to the main graph of this saved model
  • - *
  • The function is mapped to the default signature name, which is "serving_default"
  • + *
  • The function is the only signature available attached to the main graph of this saved + * model + *
  • The function is mapped to the default signature name, which is "serving_default" *
* *

Caller is responsible for closing all returned Tensors. * + *

This uses the model's underlying session + * * @param arguments list of input tensors, mapped by their signature name * @return list of output tensors, mapped by the signature name * @throws IllegalArgumentException if no function can be selected by default */ public Map call(Map arguments) { - ConcreteFunction function = null; + SessionFunction function = null; if (functions.size() == 1) { function = functions.values().iterator().next(); } else { @@ -360,13 +447,17 @@ public void close() { private final Graph graph; private final Session session; private final MetaGraphDef metaGraphDef; - private final Map functions; + private final Map functions; - private SavedModelBundle(Graph graph, Session session, MetaGraphDef metaGraphDef, Map functions) { + private SavedModelBundle( + Graph graph, Session session, MetaGraphDef metaGraphDef, Map signatures) { this.graph = graph; this.session = session; this.metaGraphDef = metaGraphDef; - this.functions = functions; + this.functions = + signatures.entrySet().stream() + .collect( + Collectors.toMap(Entry::getKey, e -> new SessionFunction(e.getValue(), session))); } /** @@ -385,11 +476,17 @@ private static SavedModelBundle fromHandle( // Note that the saved model will remain the owner of the graph and the session, meaning // that the functions do not need to be closed by the user and if it does, it should have // no effect. - final Map functions = new HashMap<>(metaGraphDef.getSignatureDefCount()); - metaGraphDef.getSignatureDefMap().forEach((signatureName, signatureDef) -> { - Signature signature = new Signature(signatureName, signatureDef); - functions.put(signatureName, ConcreteFunction.create(signature, session)); - }); + final Map functions = new HashMap<>(metaGraphDef.getSignatureDefCount()); + + metaGraphDef + .getSignatureDefMap() + .forEach( + (signatureName, signatureDef) -> { + if (!functions.containsKey(signatureName)) { + Signature signature = new Signature(signatureName, signatureDef); + functions.put(signatureName, signature); + } + }); return new SavedModelBundle(graph, session, metaGraphDef, functions); } @@ -412,14 +509,22 @@ private static SavedModelBundle load( // load the session TF_Graph graph = TF_NewGraph(); TF_Buffer metagraphDef = TF_Buffer.newBuffer(); - TF_Session session = TF_LoadSessionFromSavedModel( - opts, runOpts, new BytePointer(exportDir), new PointerPointer(tags), - tags.length, graph, metagraphDef, status); + TF_Session session = + TF_LoadSessionFromSavedModel( + opts, + runOpts, + new BytePointer(exportDir), + new PointerPointer(tags), + tags.length, + graph, + metagraphDef, + status); status.throwExceptionIfNotOK(); // handle the result try { - bundle = fromHandle(graph, session, MetaGraphDef.parseFrom(metagraphDef.dataAsByteBuffer())); + bundle = + fromHandle(graph, session, MetaGraphDef.parseFrom(metagraphDef.dataAsByteBuffer())); } catch (InvalidProtocolBufferException e) { throw new TensorFlowException("Cannot parse MetaGraphDef protocol buffer", e); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java index 58fb62b5fee..fd0b390bc28 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java @@ -1,18 +1,18 @@ -/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. 
-Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import static org.tensorflow.Graph.resolveOutputs; @@ -23,6 +23,7 @@ import com.google.protobuf.InvalidProtocolBufferException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import org.bytedeco.javacpp.BytePointer; import org.bytedeco.javacpp.Pointer; import org.bytedeco.javacpp.PointerPointer; @@ -89,9 +90,11 @@ public Session(Graph g) { * Construct a new session with the associated {@link Graph} and configuration options. * * @param g The {@link Graph} the created Session will operate on. - * @param config Configuration parameters for the session specified as a ConfigProto - * protocol buffer. - * @throws IllegalArgumentException if the config is not a valid serialization of the ConfigProto protocol buffer. + * @param config Configuration parameters for the session specified as a ConfigProto + * protocol buffer. + * @throws IllegalArgumentException if the config is not a valid serialization of the ConfigProto + * protocol buffer. */ public Session(Graph g, ConfigProto config) { graph = g; @@ -104,9 +107,7 @@ public Session(Graph g, ConfigProto config) { } } - /** - * Wrap an existing session with the associated {@link Graph}. - */ + /** Wrap an existing session with the associated {@link Graph}. */ Session(Graph g, TF_Session nativeHandle) { graph = g; this.nativeHandle = nativeHandle; @@ -144,20 +145,22 @@ public void close() { * Run {@link Operation}s and evaluate {@link Tensor Tensors}. * *

A Runner runs the necessary graph fragments to execute every {@link Operation} required to - * evaluate the {@link Tensor Tensors} to fetch. The {@link #feed(String, int, Tensor)} call allows callers to - * override the value of {@link Tensor Tensors} in the graph by substituting the provided {@link Tensor Tensors} for - * the outputs of the operations provided to {@link #feed(String, int, Tensor)}. + * evaluate the {@link Tensor Tensors} to fetch. The {@link #feed(String, int, Tensor)} call + * allows callers to override the value of {@link Tensor Tensors} in the graph by substituting the + * provided {@link Tensor Tensors} for the outputs of the operations provided to {@link + * #feed(String, int, Tensor)}. */ public final class Runner { /** * Avoid evaluating {@code operation} and substitute {@code t} for the value it produces. * - * @param operation Is either the string name of the operation, in which case this method is a shorthand for {@code - * feed(operation, 0)}, or it is a string of the form - * operation_name:output_index , in which case this method acts like {@code - * feed(operation_name, output_index)}. These colon-separated names are commonly used in the {@code SignatureDef} - * protocol buffer messages that are included in {@link SavedModelBundle#metaGraphDef()}. + * @param operation Is either the string name of the operation, in which case this method is a + * shorthand for {@code feed(operation, 0)}, or it is a string of the form + * operation_name:output_index , in which case this method acts like {@code + * feed(operation_name, output_index)}. These colon-separated names are commonly used in the + * {@code SignatureDef} protocol buffer messages that are included in {@link + * SavedModelBundle#metaGraphDef()}. * @param t the tensor substituting the operation * @return this session runner * @throws IllegalArgumentException if no output exists with the provided name @@ -167,8 +170,8 @@ public Runner feed(String operation, Tensor t) { } /** - * Avoid evaluating the {@code index}-th output of {@code operation} by substituting {@code t} for the value it - * produces. + * Avoid evaluating the {@code index}-th output of {@code operation} by substituting {@code t} + * for the value it produces. * *

Operations in a {@link Graph} can have multiple outputs, {@code index} identifies which * one {@code t} is being provided for. @@ -187,7 +190,8 @@ public Runner feed(String operation, int index, Tensor t) { } /** - * Use {@code t} instead of the Tensor referred to by executing the operation referred to by {@code operand}. + * Use {@code t} instead of the Tensor referred to by executing the operation referred to by + * {@code operand}. * * @param operand the node in the graph representing the operation to substitute * @param t the tensor substituting the operation @@ -195,8 +199,12 @@ public Runner feed(String operation, int index, Tensor t) { */ public Runner feed(Operand operand, Tensor t) { if (operand.env() != graph) { - throw new IllegalStateException("Can't feed value for operand " + operand + ", it is from " + - (operand.env().isEager() ? "an eager session" : "a different graph") + "."); + throw new IllegalStateException( + "Can't feed value for operand " + + operand + + ", it is from " + + (operand.env().isEager() ? "an eager session" : "a different graph") + + "."); } inputs.add(operand.asOutput()); @@ -207,13 +215,14 @@ public Runner feed(Operand operand, Tensor t) { /** * Make {@link #run()} return the output of {@code operation}. * - * If the output is a resource variable, will fetch the value. + *

If the output is a resource variable, will fetch the value. * - * @param operation Is either the string name of the operation, in which case this method is a shorthand for {@code - * fetch(operation, 0)}, or it is a string of the form - * operation_name:output_index , in which case this method acts like {@code - * fetch(operation_name, output_index)}. These colon-separated names are commonly used in the {@code SignatureDef} - * protocol buffer messages that are included in {@link SavedModelBundle#metaGraphDef()}. + * @param operation Is either the string name of the operation, in which case this method is a + * shorthand for {@code fetch(operation, 0)}, or it is a string of the form + * operation_name:output_index , in which case this method acts like {@code + * fetch(operation_name, output_index)}. These colon-separated names are commonly used in + * the {@code SignatureDef} protocol buffer messages that are included in {@link + * SavedModelBundle#metaGraphDef()}. * @return this session runner * @throws IllegalArgumentException if no output exists with the provided name */ @@ -224,7 +233,7 @@ public Runner fetch(String operation) { /** * Make {@link #run()} return the {@code index}-th output of {@code operation}. * - * If the output is a resource variable, will fetch the value. + *

If the output is a resource variable, will fetch the value. * *

Operations in a {@link Graph} can have multiple outputs, {@code index} identifies which * one to return. @@ -242,15 +251,19 @@ public Runner fetch(String operation, int index) { /** * Makes {@link #run()} return the Tensor referred to by {@code output}. * - * If {@code output} is a resource variable, will fetch the value. + *

If {@code output} is a resource variable, will fetch the value. * * @param output the node to fetch the tensor from * @return this session runner */ public Runner fetch(Output output) { if (output.env() != graph) { - throw new IllegalStateException("Can't fetch output " + output + ", it is from " + - (output.env().isEager() ? "an eager session" : "a different graph") + "."); + throw new IllegalStateException( + "Can't fetch output " + + output + + ", it is from " + + (output.env().isEager() ? "an eager session" : "a different graph") + + "."); } if (output.dataType() == DataType.DT_RESOURCE) { @@ -275,8 +288,11 @@ public Runner fetch(Output output) { } if (read == null) { - read = Ops.create(graph).withSubScope("session_reads").withName(output.op().name() + "_read") - .readVariableOp(output, TensorTypeRegistry.find(valueDt).type()); + read = + Ops.create(graph) + .withSubScope("session_reads") + .withName(output.op().name() + "_read") + .readVariableOp(output, TensorTypeRegistry.find(valueDt).type()); } outputs.add(read.asOutput()); @@ -289,7 +305,7 @@ public Runner fetch(Output output) { /** * Makes {@link #run()} return the Tensor referred to by the output of {@code operand}. * - * If {@code operand} is a resource variable, will fetch the value. + *

If {@code operand} is a resource variable, will fetch the value. * * @param operand the node to fetch the tensor from, as an operand * @return this session runner @@ -299,7 +315,8 @@ public Runner fetch(Operand operand) { } /** - * Make {@link #run()} execute {@code operation}, but not return any evaluated {@link Tensor Tensors}. + * Make {@link #run()} execute {@code operation}, but not return any evaluated {@link Tensor + * Tensors}. * * @param operation the string name of the operation to execute * @return this session runner @@ -310,7 +327,8 @@ public Runner addTarget(String operation) { } /** - * Make {@link #run()} execute {@code operation}, but not return any evaluated {@link Tensor Tensors}. + * Make {@link #run()} execute {@code operation}, but not return any evaluated {@link Tensor + * Tensors}. * * @param operation the operation to execute * @return this session runner @@ -319,8 +337,12 @@ public Runner addTarget(String operation) { */ public Runner addTarget(Operation operation) { if (operation.env() != graph) { - throw new IllegalStateException("Can't target operation " + operation + ", it is from " + - (operation.env().isEager() ? "an eager session" : "a different graph") + "."); + throw new IllegalStateException( + "Can't target operation " + + operation + + ", it is from " + + (operation.env().isEager() ? "an eager session" : "a different graph") + + "."); } targets.add((GraphOperation) operation); return this; @@ -340,7 +362,8 @@ public Runner addTarget(Op op) { * Set options (typically for debugging) for this run. * *

The options are presented as a RunOptions protocol buffer. + * href="https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto">RunOptions + * protocol buffer. * * @param options a {@code RunOptions} proto * @return this session runner @@ -354,11 +377,13 @@ public Runner setOptions(RunOptions options) { * Execute the graph fragments necessary to compute all requested fetches. * *

WARNING: The caller assumes ownership of all returned {@link Tensor Tensors}, i.e., - * the caller must call {@link Tensor#close} on all elements of the returned list to free up resources. + * the caller must call {@link Tensor#close} on all elements of the returned list to free up + * resources. * *

TODO(ashankar): Reconsider the return type here. Two things in particular: (a) Make it - * easier for the caller to cleanup (perhaps returning something like AutoCloseableList in SessionTest.java), and - * (b) Evaluate whether the return value should be a list, or maybe a {@code Map}? + * easier for the caller to cleanup (perhaps returning something like AutoCloseableList in + * SessionTest.java), and (b) Evaluate whether the return value should be a list, or maybe a + * {@code Map}? * *

TODO(andrewmyers): It would also be good if whatever is returned here made it easier to * extract output tensors in a type-safe way. @@ -373,7 +398,8 @@ public List run() { * Execute graph fragments to compute requested fetches and return metadata about the run. * *

This is exactly like {@link #run()}, but in addition to the requested Tensors, also - * returns metadata about the graph execution in the form of a RunMetadata + * returns metadata about the graph execution in the form of a RunMetadata * protocol buffer. * * @return list of resulting tensors fetched by this session runner, with execution metadata @@ -474,9 +500,7 @@ public void close() { private RunOptions runOptions = null; } - /** - * Create a Runner to execute graph operations and evaluate Tensors. - */ + /** Create a Runner to execute graph operations and evaluate Tensors. */ public Runner runner() { return new Runner(); } @@ -504,6 +528,24 @@ public void run(Op op) { runner().addTarget(op.op()).run(); } + /** + * Create a new session function, backed by this session. + * + * @param signature the signature of the function. + */ + public SessionFunction function(Signature signature) { + return new SessionFunction(signature, this); + } + + /** + * Create and call a function, returning the results. + * + * @param signature the signature of the function + * @param arguments the arguments to call with. + */ + public Map run(Signature signature, Map arguments) { + return function(signature).call(arguments); + } /** * Execute the graph's initializers. @@ -511,9 +553,12 @@ public void run(Op op) { *
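A self-contained sketch of the new function(Signature) and run(Signature, Map) entry points above; the toy graph, the "double" key, and the tensor names are assumptions made for the example:

import java.util.HashMap;
import java.util.Map;
import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.Session;
import org.tensorflow.Signature;
import org.tensorflow.Tensor;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Placeholder;
import org.tensorflow.types.TFloat32;

public class SessionSignatureExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      Placeholder<TFloat32> x = tf.placeholder(TFloat32.class);
      Operand<TFloat32> y = tf.math.mul(x, tf.constant(2.0f));
      // builder(name) sets both the signature key and the method name.
      Signature signature = Signature.builder("double").input("x", x).output("y", y).build();

      try (Session s = new Session(g);
          TFloat32 in = TFloat32.scalarOf(3.0f)) {
        Map<String, Tensor> arguments = new HashMap<>();
        arguments.put("x", in);
        // Equivalent to s.function(signature).call(arguments).
        Map<String, Tensor> outputs = s.run(signature, arguments);
        try (TFloat32 result = (TFloat32) outputs.get("y")) {
          System.out.println(result.getFloat()); // 6.0
        }
      }
    }
  }
}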

This method is equivalent to {@code session.run(Ops.create(session.graph).init())}. */ public void runInit() { - Runner runner = runner(); - graph.initializers().forEach(runner::addTarget); - runner.run(); + List initializers = graph.initializers(); + if (!initializers.isEmpty()) { + Runner runner = runner(); + initializers.forEach(runner::addTarget); + runner.run(); + } } /** @@ -524,14 +569,15 @@ public void runInit() { * mymodel/myvariables/variables, then the generated files will be located under * mymodel/myvariables and named variables.data-*-of-* * - *

Note that this method might alter the underlying graph if it is the first time that one - * of its sessions is saved, see {@link Graph#saverDef()} for more details. + *

Note that this method might alter the underlying graph if it is the first time that one of + * its sessions is saved, see {@link Graph#saverDef()} for more details. * * @param prefix prefix to the variable files to save */ public void save(String prefix) { SaverDef saverDef = graph.saverDef(); - runner().addTarget(saverDef.getSaveTensorName()) + runner() + .addTarget(saverDef.getSaveTensorName()) .feed(saverDef.getFilenameTensorName(), TString.scalarOf(prefix)) .run(); } @@ -539,19 +585,20 @@ public void save(String prefix) { /** * Restore the actual state of the variables of this session's graph. * - *

{@code prefix} is the path where the files containing the variables state live, - * followed by the filename prefix. For example, if {@code prefix} is set to - * mymodel/myvariables/variables, then the files are loaded from - * mymodel/myvariables and named variables.data-*-of-* + *

{@code prefix} is the path where the files containing the variables state live, followed by + * the filename prefix. For example, if {@code prefix} is set to + * mymodel/myvariables/variables, then the files are loaded from mymodel/myvariables + * and named variables.data-*-of-* * - *

Note that this method might alter the underlying graph if it is the first time that one - * of its sessions is saved, see {@link Graph#saverDef()} for more details. + *

Note that this method might alter the underlying graph if it is the first time that one of + * its sessions is saved, see {@link Graph#saverDef()} for more details. * * @param prefix prefix to restore from */ public void restore(String prefix) { SaverDef saverDef = graph.saverDef(); - runner().addTarget(saverDef.getRestoreOpName()) + runner() + .addTarget(saverDef.getRestoreOpName()) .feed(saverDef.getFilenameTensorName(), TString.scalarOf(prefix)) .run(); } @@ -563,16 +610,15 @@ public void restore(String prefix) { */ public static final class Run { - /** - * Tensors from requested fetches. - */ + /** Tensors from requested fetches. */ public List outputs; /** * Metadata about the run. * *

A RunMetadata protocol buffer. + * href="https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto">RunMetadata + * protocol buffer. */ public RunMetadata metadata; } @@ -639,21 +685,22 @@ private static void delete(TF_Session handle) { * * @param handle to the C API TF_Session object (Session.nativeHandle) * @param runOptions A RunOptions protocol buffer, or null - * @param inputTensorHandles together with inputOpHandles and inputOpIndices specifies the values that are being "fed" - * (do not need to be computed) during graph execution. inputTensorHandles[i] (which corresponds to a - * Tensor.nativeHandle) is considered to be the inputOpIndices[i]-th output of the Operation inputOpHandles[i]. Thus, - * it is required that inputOpHandles.length == inputOpIndices.length == inputTensorHandles.length. + * @param inputTensorHandles together with inputOpHandles and inputOpIndices specifies the values + * that are being "fed" (do not need to be computed) during graph execution. + * inputTensorHandles[i] (which corresponds to a Tensor.nativeHandle) is considered to be the + * inputOpIndices[i]-th output of the Operation inputOpHandles[i]. Thus, it is required that + * inputOpHandles.length == inputOpIndices.length == inputTensorHandles.length. * @param inputOpHandles (see inputOpIndices) * @param inputOpIndices (see inputTensorHandles) * @param outputOpHandles (see outputOpIndices) - * @param outputOpIndices together with outputOpHandles identifies the set of values that should be computed. The - * outputOpIndices[i]-th output of the Operation outputOpHandles[i], It is required that outputOpHandles.length == - * outputOpIndices.length. - * @param targetOpHandles is the set of Operations in the graph that are to be executed but whose output will not be - * returned + * @param outputOpIndices together with outputOpHandles identifies the set of values that should + * be computed. The outputOpIndices[i]-th output of the Operation outputOpHandles[i], It is + * required that outputOpHandles.length == outputOpIndices.length. + * @param targetOpHandles is the set of Operations in the graph that are to be executed but whose + * output will not be returned * @param wantRunMetadata indicates whether metadata about this execution should be returned. - * @param outputTensors will be filled in with tensors to the outputs requested. It is required that outputs.length == - * outputOpHandles.length. + * @param outputTensors will be filled in with tensors to the outputs requested. It is required + * that outputs.length == outputOpHandles.length. * @return if wantRunMetadata is true, a RunMetadata protocol buffer, false otherwise. */ private static RunMetadata run( diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SessionFunction.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SessionFunction.java new file mode 100644 index 00000000000..07bc418ac51 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SessionFunction.java @@ -0,0 +1,127 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ =======================================================================
+ */
+package org.tensorflow;
+
+import java.io.IOException;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A callable function backed by a session. All calls of this function will be run on the same
+ * session.
+ *

Does no resource management; the session and all returned tensors are the caller's
+ * responsibility.
+ *

Does not initialize the session, since it may be shared.
+ */
+public class SessionFunction implements TensorFunction {
+
+  private final Signature signature;
+  private final Session session;
+
+  public SessionFunction(Signature signature, Session session) {
+    this.signature = signature;
+    this.session = session;
+
+    signature
+        .getInputs()
+        .forEach(
+            (name, description) -> {
+              TensorFunction.validateDescription(description, session.graph(), name, "Input");
+            });
+
+    signature
+        .getOutputs()
+        .forEach(
+            (name, description) -> {
+              TensorFunction.validateDescription(description, session.graph(), name, "Output");
+            });
+  }
+
+  public static SessionFunction create(Signature signature, Session session) {
+    return new SessionFunction(signature, session);
+  }
+
+  /**
+   * Save this function using {@link SavedModelBundle}.
+   *
+   *

This is identical to calling {@code + * SavedModelBundle.exporter(exportDir).withFunction(this).export()}. + * + * @param exportDir the directory path containing a saved model. + * @throws IOException if saved model or variable state cannot be written on disk + */ + public void save(String exportDir) throws IOException { + SavedModelBundle.exporter(exportDir).withFunction(this).export(); + } + + @Override + public Signature signature() { + return signature; + } + + public Session session() { + return session; + } + + /** + * Get a new function with the same signature, but backed by a new session. + * + * @param session the new backing session. + */ + public SessionFunction withNewSession(Session session) { + return new SessionFunction(signature, session); + } + + @Override + public Map call(Map arguments) { + Session.Runner runner = session.runner(); + signature + .getInputs() + .forEach( + (argName, operand) -> { + if (!arguments.containsKey(argName)) { + throw new IllegalArgumentException( + "No argument found for parameter \"" + argName + "\""); + } + Tensor value = arguments.get(argName); + + if (value == null) { + throw new IllegalArgumentException( + "Can't pass null as an argument to a function. Argument \"" + + argName + + "\" was null."); + } + + runner.feed(operand.name, value); + }); + + signature.getOutputs().values().forEach(x -> runner.fetch(x.name)); + + List results = runner.run(); + + Map outputs = new LinkedHashMap<>(results.size()); + int i = 0; + for (String outputName : signature.outputNames()) { + outputs.put(outputName, results.get(i)); + i++; + } + + return outputs; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java index 66b4dad4132..41fab27e068 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java @@ -1,21 +1,22 @@ -/* - * Copyright 2020 The TensorFlow Authors. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. +/* Copyright 2020-2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ======================================================================= */ package org.tensorflow; -import java.util.HashMap; +import java.util.Collections; +import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; import org.tensorflow.ndarray.Shape; @@ -26,27 +27,32 @@ import org.tensorflow.proto.framework.TensorShapeProto.Dim; /** - * Describe the inputs and outputs of an executable entity, such as a {@link ConcreteFunction}, among - * other useful metadata. + * Describe the inputs and outputs of an executable entity, such as a {@link ConcreteFunction}, + * among other useful metadata. */ -public class Signature { +public class Signature { /** The default signature key, when not provided */ public static final String DEFAULT_KEY = "serving_default"; public static class TensorDescription { + + /** The name of the tensor's operand in the graph */ + public final String name; + /** The data type of the tensor */ public final DataType dataType; + + /** The shape of the tensor */ public final Shape shape; - public TensorDescription(DataType dataType, Shape shape) { + public TensorDescription(DataType dataType, Shape shape, String name) { this.dataType = dataType; this.shape = shape; + this.name = name; } } - /** - * Builds a new function signature. - */ + /** Builds a new function signature. */ public static class Builder { /** @@ -76,12 +82,30 @@ public Builder key(String key) { */ public Builder input(String inputName, Operand input) { if (signatureBuilder.containsInputs(inputName)) { - throw new IllegalArgumentException("\"" + inputName + "\" is already being mapped to another input"); + throw new IllegalArgumentException( + "\"" + inputName + "\" is already being mapped to another input"); } signatureBuilder.putInputs(inputName, toTensorInfo(input.asOutput())); return this; } + /** + * Register a tensor as an input of the function. + * + * @param inputName user-friendly name for this input tensor + * @param input input tensor info + * @return this builder + * @throws IllegalArgumentException if {@code inputName} is already mapped to another input + */ + Builder input(String inputName, TensorInfo input) { + if (signatureBuilder.containsInputs(inputName)) { + throw new IllegalArgumentException( + "\"" + inputName + "\" is already being mapped to another input"); + } + signatureBuilder.putInputs(inputName, input); + return this; + } + /** * Register a tensor as an output of the function. * @@ -92,12 +116,30 @@ public Builder input(String inputName, Operand input) { */ public Builder output(String outputName, Operand output) { if (signatureBuilder.containsOutputs(outputName)) { - throw new IllegalArgumentException("\"" + outputName + "\" is already being mapped to another output"); + throw new IllegalArgumentException( + "\"" + outputName + "\" is already being mapped to another output"); } signatureBuilder.putOutputs(outputName, toTensorInfo(output.asOutput())); return this; } + /** + * Register a tensor as an output of the function. 
+ * + * @param outputName user-friendly name for this output tensor + * @param output output tensor + * @return this builder + * @throws IllegalArgumentException if {@code outputName} is already mapped to another output + */ + Builder output(String outputName, TensorInfo output) { + if (signatureBuilder.containsOutputs(outputName)) { + throw new IllegalArgumentException( + "\"" + outputName + "\" is already being mapped to another output"); + } + signatureBuilder.putOutputs(outputName, output); + return this; + } + /** * Provide extensible name information enabling third-party users to mark a signature as * supporting a particular method @@ -110,9 +152,7 @@ public Builder methodName(String methodName) { return this; } - /** - * Returns a signature from the provided data. - */ + /** Returns a signature from the provided data. */ public Signature build() { return new Signature(key, signatureBuilder.build()); } @@ -134,44 +174,41 @@ private static TensorInfo toTensorInfo(Output operand) { private final SignatureDef.Builder signatureBuilder = SignatureDef.newBuilder(); } - /** - * Returns a new builder for creating a signature - */ + /** Returns a new builder for creating a signature */ public static Builder builder() { return new Builder(); } /** - * Return the key of this signature + * Returns a new builder for creating a signature, with the methodName and key set to {@code name} */ + public static Builder builder(String name) { + return new Builder().methodName(name).key(name); + } + + /** Return the key of this signature */ public String key() { return key; } - /** - * Returns the method name of this signature (e.g. as exposed by TF serving) or null if none - */ + /** Returns the method name of this signature (e.g. as exposed by TF serving) or null if none */ public String methodName() { return signatureDef.getMethodName().isEmpty() ? 
null : signatureDef.getMethodName(); } - /** - * Returns the names of the inputs in this signature - */ + /** Returns the names of the inputs in this signature */ public Set inputNames() { return signatureDef.getInputsMap().keySet(); } - /** - * Returns the names of the outputs in this signature - */ + /** Returns the names of the outputs in this signature */ public Set outputNames() { return signatureDef.getOutputsMap().keySet(); } @Override public String toString() { - StringBuilder strBuilder = new StringBuilder("Signature for \"" + key +"\":\n"); + StringBuilder strBuilder = new StringBuilder("Signature for \"" + key + "\":\n"); if (!methodName().isEmpty()) { strBuilder.append("\tMethod: \"").append(methodName()).append("\"\n"); } @@ -186,30 +223,40 @@ public String toString() { return strBuilder.toString(); } - private Map buildTensorDescriptionMap(Map dataMapIn) { - Map dataTypeMap = new HashMap<>(); - dataMapIn.forEach((a, b) -> { - long[] tensorDims = b.getTensorShape().getDimList().stream().mapToLong(d -> d.getSize()).toArray(); - Shape tensorShape = Shape.of(tensorDims); - dataTypeMap.put(a, new TensorDescription(b.getDtype(), - tensorShape)); - }); - return dataTypeMap; + private Map buildTensorDescriptionMap( + Map dataMapIn) { + Map dataTypeMap = new LinkedHashMap<>(); + dataMapIn.forEach( + (name, info) -> { + long[] tensorDims = + info.getTensorShape().getDimList().stream().mapToLong(d -> d.getSize()).toArray(); + Shape tensorShape = Shape.of(tensorDims); + dataTypeMap.put( + name, new TensorDescription(info.getDtype(), tensorShape, info.getName())); + }); + return Collections.unmodifiableMap(dataTypeMap); } /** - * Returns the names of the inputs in this signature mapped to their expected data type and shape - * @return + * Returns the names of the inputs in this signature mapped to their expected data type, shape, + * and operand name */ public Map getInputs() { - return buildTensorDescriptionMap(signatureDef.getInputsMap()); + if (inputMap == null) { + inputMap = buildTensorDescriptionMap(signatureDef.getInputsMap()); + } + return inputMap; } /** - * Returns the names of the outputs in this signature mapped to their expected data type and shape + * Returns the names of the outputs in this signature mapped to their expected data type, shape, + * and operand name */ public Map getOutputs() { - return buildTensorDescriptionMap(signatureDef.getOutputsMap()); + if (outputMap == null) { + outputMap = buildTensorDescriptionMap(signatureDef.getOutputsMap()); + } + return outputMap; } Signature(String key, SignatureDef signatureDef) { @@ -223,21 +270,25 @@ SignatureDef asSignatureDef() { private final String key; private final SignatureDef signatureDef; + private Map inputMap; + private Map outputMap; private static void printTensorInfo(Map tensorMap, StringBuilder strBuilder) { - tensorMap.forEach((key, tensorInfo) -> { - strBuilder.append("\t\t\"") - .append(key) - .append("\": dtype=") - .append(tensorInfo.getDtype().name()) - .append(", shape=("); - for (int i = 0; i < tensorInfo.getTensorShape().getDimCount(); ++i) { - strBuilder.append(tensorInfo.getTensorShape().getDim(i).getSize()); - if (i < tensorInfo.getTensorShape().getDimCount() - 1) { - strBuilder.append(", "); - } - } - strBuilder.append(")\n"); - }); + tensorMap.forEach( + (key, tensorInfo) -> { + strBuilder + .append("\t\t\"") + .append(key) + .append("\": dtype=") + .append(tensorInfo.getDtype().name()) + .append(", shape=("); + for (int i = 0; i < tensorInfo.getTensorShape().getDimCount(); ++i) { + 
strBuilder.append(tensorInfo.getTensorShape().getDim(i).getSize()); + if (i < tensorInfo.getTensorShape().getDimCount() - 1) { + strBuilder.append(", "); + } + } + strBuilder.append(")\n"); + }); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java index de481d256a3..23f4c62bc7f 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java @@ -1,18 +1,18 @@ -/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import static org.tensorflow.internal.c_api.global.tensorflow.TF_DeleteBuffer; @@ -23,6 +23,8 @@ import static org.tensorflow.internal.c_api.global.tensorflow.TF_Version; import com.google.protobuf.InvalidProtocolBufferException; +import java.util.Set; +import java.util.stream.Collectors; import org.bytedeco.javacpp.PointerScope; import org.tensorflow.exceptions.TensorFlowException; import org.tensorflow.internal.c_api.TF_Buffer; @@ -56,6 +58,20 @@ public static OpList registeredOpList() { } } + private static Set statefulOps; + + public static synchronized boolean isOpStateful(String opType) { + if (statefulOps == null) { + statefulOps = + registeredOpList().getOpList().stream() + .filter(x -> x.getIsStateful()) + .map(x -> x.getName()) + .collect(Collectors.toSet()); + } + + return statefulOps.contains(opType); + } + /** * Load the dynamic library in filename and register the operations and kernels present in that * library. diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFunction.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFunction.java new file mode 100644 index 00000000000..0304d786494 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFunction.java @@ -0,0 +1,129 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
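A short sketch of the reworked Signature descriptions, which now carry the underlying operand name alongside the dtype and shape, and of the new TensorFlow.isOpStateful helper added above; the op names used here are only examples:

import org.tensorflow.Signature;
import org.tensorflow.TensorFlow;

public class SignatureIntrospection {

  // Prints each declared input and output with its operand name, dtype, and shape.
  static void describe(Signature signature) {
    signature.getInputs().forEach((key, d) ->
        System.out.printf("input %s -> operand %s, dtype %s, shape %s%n",
            key, d.name, d.dataType, d.shape));
    signature.getOutputs().forEach((key, d) ->
        System.out.printf("output %s -> operand %s, dtype %s, shape %s%n",
            key, d.name, d.dataType, d.shape));
  }

  public static void main(String[] args) {
    // Statefulness is looked up once from the registered op list and cached.
    System.out.println(TensorFlow.isOpStateful("VarHandleOp")); // true
    System.out.println(TensorFlow.isOpStateful("Add")); // false
  }
}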
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ +package org.tensorflow; + +import java.util.LinkedHashMap; +import java.util.Map; +import org.tensorflow.Signature.TensorDescription; + +/** A function that can be called with tensors. */ +public interface TensorFunction { + + /** Returns the signature of this function */ + Signature signature(); + + /** + * Invokes a function using the default eager session. + * + *

Caller is responsible for closing all Tensors. + * + * @param arguments list of tensors to pass in input to the function, mapped by their signature + * name + * @return output tensors resulting from the execution of the function, mapped by their signature + * name + * @throws IllegalArgumentException if the passed arguments don't match up to the function's + * parameters. + */ + Map call(Map arguments); + + /** + * Invokes a function with a single input and output using the default eager session. + * + *

Caller is responsible for closing all Tensors. + * + * @param tensor input tensor + * @return output tensor + * @throws IllegalArgumentException if there are multiple input or output parameters defined in + * the function + */ + default Tensor call(Tensor tensor) { + if (signature().inputNames().size() > 1) { + throw new IllegalArgumentException( + "Can't use call(Tensor) on function \"" + + signature().methodName() + + "\" with more than one input."); + } + if (signature().inputNames().size() < 1) { + throw new IllegalArgumentException( + "Can't use call(Tensor) on function \"" + + signature().methodName() + + "\" with no inputs."); + } + if (signature().outputNames().size() > 1) { + throw new IllegalArgumentException( + "Can't use call(Tensor) on function \"" + + signature().methodName() + + "\" with more than one output."); + } + if (signature().outputNames().size() < 1) { + throw new IllegalArgumentException( + "Can't use call(Tensor) on function \"" + + signature().methodName() + + "\" with no outputs."); + } + + String inputName = signature().inputNames().iterator().next(); + String outputName = signature().outputNames().iterator().next(); + + Map inputMap = new LinkedHashMap<>(); + inputMap.put(inputName, tensor); + + return call(inputMap).get(outputName); + } + + static Operand validateDescription( + TensorDescription description, Graph graph, String name, String prefix) { + Output operand = graph.output(description.name); + if (operand == null) { + throw new IllegalArgumentException( + prefix + + " \"" + + name + + "\"'s operand \"" + + description.name + + "\" does not exist on the graph."); + } + + if (operand.dataType() != description.dataType) { + throw new IllegalArgumentException( + prefix + + " \"" + + name + + "\"'s operand \"" + + description.name + + "\" has data type " + + operand.dataType() + + " in the graph, but the signature requires data type " + + description.dataType + + "."); + } + + if (!operand.shape().isCompatibleWith(description.shape)) { + throw new IllegalArgumentException( + prefix + + " \"" + + name + + "\"'s operand \"" + + description.name + + "\" has shape " + + operand.shape() + + ", which is incompatible with the signature's required shape of " + + description.shape + + "."); + } + return operand; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/AbstractTF_Function.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/AbstractTF_Function.java new file mode 100644 index 00000000000..a3647b5671d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/AbstractTF_Function.java @@ -0,0 +1,53 @@ +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
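The single-tensor overload above can be exercised as in the following sketch; func is assumed to be any TensorFunction with exactly one input and one output, for example the result of Session.function(signature) from the earlier example:

import org.tensorflow.Tensor;
import org.tensorflow.TensorFunction;
import org.tensorflow.types.TFloat32;

public class SingleTensorCall {

  // Wraps a one-in/one-out function call; the caller keeps ownership of both tensors,
  // so both are closed here once the scalar value has been read out.
  static float callOnce(TensorFunction func, float value) {
    try (TFloat32 in = TFloat32.scalarOf(value);
        Tensor out = func.call(in)) {
      return ((TFloat32) out).getFloat();
    }
  }
}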
+ ======================================================================= + */ +package org.tensorflow.internal.c_api; + +import static org.tensorflow.internal.c_api.global.tensorflow.TF_DeleteFunction; + +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public abstract class AbstractTF_Function extends Pointer { + + protected static class DeleteDeallocator extends TF_Function implements Deallocator { + + DeleteDeallocator(TF_Function s) { + super(s); + } + + @Override + public void deallocate() { + if (!isNull()) { + TF_DeleteFunction(this); + } + setNull(); + } + } + + public AbstractTF_Function(Pointer p) { + super(p); + } + + public TF_Function withDeallocator() { + return this.deallocator(new DeleteDeallocator((TF_Function) this)); + } + + /** Calls the deallocator, if registered, otherwise has no effect. */ + public void delete() { + deallocate(); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java index 17bf9dbf79a..66dead59967 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java @@ -1,5 +1,4 @@ -/* - Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +13,6 @@ limitations under the License. 
======================================================================= */ - package org.tensorflow.internal.c_api.presets; import java.util.List; @@ -28,204 +26,363 @@ import org.bytedeco.javacpp.tools.InfoMap; import org.bytedeco.javacpp.tools.InfoMapper; -/** - * - * @author Samuel Audet - */ +/** @author Samuel Audet */ @Properties( value = { - @Platform( - value = {"linux", "macosx", "windows"}, - compiler = "cpp11", - include = { - "tensorflow/core/platform/ctstring_internal.h", - "tensorflow/core/platform/ctstring.h", - "tensorflow/core/util/port.h", - "tensorflow/c/tf_attrtype.h", - "tensorflow/c/c_api_macros.h", - "tensorflow/c/tf_datatype.h", - "tensorflow/c/tf_status.h", - "tensorflow/c/tf_tensor.h", - "tensorflow/c/tf_tstring.h", - "tensorflow/c/c_api.h", -// "tensorflow/c/env.h", - "tensorflow/c/kernels.h", - "tensorflow/c/ops.h", - "tensorflow/c/eager/c_api.h" - }, - link = "tensorflow_cc@.2", - preload = {"iomp5", "mklml", "mklml_intel", "tensorflow_framework@.2"}, - preloadresource = "/org/bytedeco/mkldnn/", - resource = {"LICENSE", "THIRD_PARTY_TF_JNI_LICENSES"} - ), - @Platform( - value = "windows", - preload = { - "api-ms-win-crt-locale-l1-1-0", "api-ms-win-crt-string-l1-1-0", "api-ms-win-crt-stdio-l1-1-0", "api-ms-win-crt-math-l1-1-0", - "api-ms-win-crt-heap-l1-1-0", "api-ms-win-crt-runtime-l1-1-0", "api-ms-win-crt-convert-l1-1-0", "api-ms-win-crt-environment-l1-1-0", - "api-ms-win-crt-time-l1-1-0", "api-ms-win-crt-filesystem-l1-1-0", "api-ms-win-crt-utility-l1-1-0", "api-ms-win-crt-multibyte-l1-1-0", - "api-ms-win-core-string-l1-1-0", "api-ms-win-core-errorhandling-l1-1-0", "api-ms-win-core-timezone-l1-1-0", "api-ms-win-core-file-l1-1-0", - "api-ms-win-core-namedpipe-l1-1-0", "api-ms-win-core-handle-l1-1-0", "api-ms-win-core-file-l2-1-0", "api-ms-win-core-heap-l1-1-0", - "api-ms-win-core-libraryloader-l1-1-0", "api-ms-win-core-synch-l1-1-0", "api-ms-win-core-processthreads-l1-1-0", - "api-ms-win-core-processenvironment-l1-1-0", "api-ms-win-core-datetime-l1-1-0", "api-ms-win-core-localization-l1-2-0", - "api-ms-win-core-sysinfo-l1-1-0", "api-ms-win-core-synch-l1-2-0", "api-ms-win-core-console-l1-1-0", "api-ms-win-core-debug-l1-1-0", - "api-ms-win-core-rtlsupport-l1-1-0", "api-ms-win-core-processthreads-l1-1-1", "api-ms-win-core-file-l1-2-0", "api-ms-win-core-profile-l1-1-0", - "api-ms-win-core-memory-l1-1-0", "api-ms-win-core-util-l1-1-0", "api-ms-win-core-interlocked-l1-1-0", "ucrtbase", - "vcruntime140", "vcruntime140_1", "msvcp140", "concrt140", "vcomp140", "msvcr120", "libiomp5md", "mklml", "tensorflow_framework" - } - ), - @Platform( - value = "windows-x86", - preloadpath = { - "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/redist/x86/Microsoft.VC140.CRT/", - "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/redist/x86/Microsoft.VC140.OpenMP/", - "C:/Program Files (x86)/Windows Kits/10/Redist/ucrt/DLLs/x86/" - } - ), - @Platform( - value = "windows-x86_64", - preloadpath = { - "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/redist/x64/Microsoft.VC140.CRT/", - "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/redist/x64/Microsoft.VC140.OpenMP/", - "C:/Program Files (x86)/Windows Kits/10/Redist/ucrt/DLLs/x64/" - } - ), - @Platform( - value = {"linux", "macosx", "windows"}, - extension = {"-mkl", "-gpu", "-mkl-gpu"} - ) + @Platform( + value = {"linux", "macosx", "windows"}, + compiler = "cpp11", + include = { + "tensorflow/core/platform/ctstring_internal.h", + "tensorflow/core/platform/ctstring.h", + "tensorflow/core/util/port.h", + 
"tensorflow/c/tf_attrtype.h", + "tensorflow/c/c_api_macros.h", + "tensorflow/c/tf_datatype.h", + "tensorflow/c/tf_status.h", + "tensorflow/c/tf_tensor.h", + "tensorflow/c/tf_tstring.h", + "tensorflow/c/c_api.h", + // "tensorflow/c/env.h", + "tensorflow/c/kernels.h", + "tensorflow/c/ops.h", + "tensorflow/c/eager/c_api.h" + }, + link = "tensorflow_cc@.2", + preload = {"iomp5", "mklml", "mklml_intel", "tensorflow_framework@.2"}, + preloadresource = "/org/bytedeco/mkldnn/", + resource = {"LICENSE", "THIRD_PARTY_TF_JNI_LICENSES"}), + @Platform( + value = "windows", + preload = { + "api-ms-win-crt-locale-l1-1-0", + "api-ms-win-crt-string-l1-1-0", + "api-ms-win-crt-stdio-l1-1-0", + "api-ms-win-crt-math-l1-1-0", + "api-ms-win-crt-heap-l1-1-0", + "api-ms-win-crt-runtime-l1-1-0", + "api-ms-win-crt-convert-l1-1-0", + "api-ms-win-crt-environment-l1-1-0", + "api-ms-win-crt-time-l1-1-0", + "api-ms-win-crt-filesystem-l1-1-0", + "api-ms-win-crt-utility-l1-1-0", + "api-ms-win-crt-multibyte-l1-1-0", + "api-ms-win-core-string-l1-1-0", + "api-ms-win-core-errorhandling-l1-1-0", + "api-ms-win-core-timezone-l1-1-0", + "api-ms-win-core-file-l1-1-0", + "api-ms-win-core-namedpipe-l1-1-0", + "api-ms-win-core-handle-l1-1-0", + "api-ms-win-core-file-l2-1-0", + "api-ms-win-core-heap-l1-1-0", + "api-ms-win-core-libraryloader-l1-1-0", + "api-ms-win-core-synch-l1-1-0", + "api-ms-win-core-processthreads-l1-1-0", + "api-ms-win-core-processenvironment-l1-1-0", + "api-ms-win-core-datetime-l1-1-0", + "api-ms-win-core-localization-l1-2-0", + "api-ms-win-core-sysinfo-l1-1-0", + "api-ms-win-core-synch-l1-2-0", + "api-ms-win-core-console-l1-1-0", + "api-ms-win-core-debug-l1-1-0", + "api-ms-win-core-rtlsupport-l1-1-0", + "api-ms-win-core-processthreads-l1-1-1", + "api-ms-win-core-file-l1-2-0", + "api-ms-win-core-profile-l1-1-0", + "api-ms-win-core-memory-l1-1-0", + "api-ms-win-core-util-l1-1-0", + "api-ms-win-core-interlocked-l1-1-0", + "ucrtbase", + "vcruntime140", + "vcruntime140_1", + "msvcp140", + "concrt140", + "vcomp140", + "msvcr120", + "libiomp5md", + "mklml", + "tensorflow_framework" + }), + @Platform( + value = "windows-x86", + preloadpath = { + "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/redist/x86/Microsoft.VC140.CRT/", + "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/redist/x86/Microsoft.VC140.OpenMP/", + "C:/Program Files (x86)/Windows Kits/10/Redist/ucrt/DLLs/x86/" + }), + @Platform( + value = "windows-x86_64", + preloadpath = { + "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/redist/x64/Microsoft.VC140.CRT/", + "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/redist/x64/Microsoft.VC140.OpenMP/", + "C:/Program Files (x86)/Windows Kits/10/Redist/ucrt/DLLs/x64/" + }), + @Platform( + value = {"linux", "macosx", "windows"}, + extension = {"-mkl", "-gpu", "-mkl-gpu"}) }, target = "org.tensorflow.internal.c_api", global = "org.tensorflow.internal.c_api.global.tensorflow") @NoException public class tensorflow implements LoadEnabled, InfoMapper { - @Override public void init(ClassProperties properties) { - String platform = properties.getProperty("platform"); - String extension = properties.getProperty("platform.extension"); - List preloads = properties.get("platform.preload"); - List resources = properties.get("platform.preloadresource"); - List preloadpaths = properties.get("platform.preloadpath"); - - String vcredistdir = System.getenv("VCToolsRedistDir"); - if (vcredistdir != null && vcredistdir.length() > 0) { - switch (platform) { - case "windows-x86": - preloadpaths.add(0, 
vcredistdir + "\\x86\\Microsoft.VC142.CRT"); - preloadpaths.add(1, vcredistdir + "\\x86\\Microsoft.VC142.OpenMP"); - preloadpaths.add(2, vcredistdir + "\\x86\\Microsoft.VC141.CRT"); - preloadpaths.add(3, vcredistdir + "\\x86\\Microsoft.VC141.OpenMP"); - break; - case "windows-x86_64": - preloadpaths.add(0, vcredistdir + "\\x64\\Microsoft.VC142.CRT"); - preloadpaths.add(1, vcredistdir + "\\x64\\Microsoft.VC142.OpenMP"); - preloadpaths.add(2, vcredistdir + "\\x64\\Microsoft.VC141.CRT"); - preloadpaths.add(3, vcredistdir + "\\x64\\Microsoft.VC141.OpenMP"); - break; - default: - // not Windows - } - } + @Override + public void init(ClassProperties properties) { + String platform = properties.getProperty("platform"); + String extension = properties.getProperty("platform.extension"); + List preloads = properties.get("platform.preload"); + List resources = properties.get("platform.preloadresource"); + List preloadpaths = properties.get("platform.preloadpath"); - // Only apply this at load time - if (!Loader.isLoadLibraries()) { - return; - } + String vcredistdir = System.getenv("VCToolsRedistDir"); + if (vcredistdir != null && vcredistdir.length() > 0) { + switch (platform) { + case "windows-x86": + preloadpaths.add(0, vcredistdir + "\\x86\\Microsoft.VC142.CRT"); + preloadpaths.add(1, vcredistdir + "\\x86\\Microsoft.VC142.OpenMP"); + preloadpaths.add(2, vcredistdir + "\\x86\\Microsoft.VC141.CRT"); + preloadpaths.add(3, vcredistdir + "\\x86\\Microsoft.VC141.OpenMP"); + break; + case "windows-x86_64": + preloadpaths.add(0, vcredistdir + "\\x64\\Microsoft.VC142.CRT"); + preloadpaths.add(1, vcredistdir + "\\x64\\Microsoft.VC142.OpenMP"); + preloadpaths.add(2, vcredistdir + "\\x64\\Microsoft.VC141.CRT"); + preloadpaths.add(3, vcredistdir + "\\x64\\Microsoft.VC141.OpenMP"); + break; + default: + // not Windows + } + } - // Let users enable loading of the full version of MKL - String load = System.getProperty("org.bytedeco.openblas.load", - System.getProperty("org.bytedeco.mklml.load", "")).toLowerCase(); + // Only apply this at load time + if (!Loader.isLoadLibraries()) { + return; + } - int i = 0; - if (load.equals("mkl") || load.equals("mkl_rt")) { - String[] libs = {"iomp5", "libiomp5md", "mkl_core", "mkl_avx", "mkl_avx2", "mkl_avx512", "mkl_avx512_mic", - "mkl_def", "mkl_mc", "mkl_mc3", "mkl_intel_lp64", "mkl_intel_thread", "mkl_gnu_thread", "mkl_rt"}; - for (i = 0; i < libs.length; i++) { - preloads.add(i, libs[i] + "#" + libs[i]); - } - load = "mkl_rt"; - resources.add("/org/bytedeco/mkl/"); - } + // Let users enable loading of the full version of MKL + String load = + System.getProperty( + "org.bytedeco.openblas.load", System.getProperty("org.bytedeco.mklml.load", "")) + .toLowerCase(); - if (load.length() > 0) { - if (platform.startsWith("linux")) { - preloads.add(i, load + "#mklml_intel"); - } else if (platform.startsWith("macosx")) { - preloads.add(i, load + "#mklml"); - } else if (platform.startsWith("windows")) { - preloads.add(i, load + "#mklml"); - } - } + int i = 0; + if (load.equals("mkl") || load.equals("mkl_rt")) { + String[] libs = { + "iomp5", + "libiomp5md", + "mkl_core", + "mkl_avx", + "mkl_avx2", + "mkl_avx512", + "mkl_avx512_mic", + "mkl_def", + "mkl_mc", + "mkl_mc3", + "mkl_intel_lp64", + "mkl_intel_thread", + "mkl_gnu_thread", + "mkl_rt" + }; + for (i = 0; i < libs.length; i++) { + preloads.add(i, libs[i] + "#" + libs[i]); + } + load = "mkl_rt"; + resources.add("/org/bytedeco/mkl/"); + } - // Only apply this at load time since we don't want to copy the CUDA libraries here - if 
(!Loader.isLoadLibraries() || extension == null || !extension.endsWith("-gpu")) { - return; - } - String[] libs = {"cudart", "cublasLt", "cublas", "cufft", "curand", "cusolver", "cusparse", "cudnn", "nccl", "nvrtc", "myelin", "nvinfer", - "cudnn_ops_infer", "cudnn_ops_train", "cudnn_adv_infer", "cudnn_adv_train", "cudnn_cnn_infer", "cudnn_cnn_train"}; - for (String lib : libs) { - if (platform.startsWith("linux")) { - lib += lib.startsWith("cudnn") ? "@.8" - : lib.equals("nccl") ? "@.2" - : lib.equals("myelin") ? "@.1" - : lib.equals("nvinfer") ? "@.7" - : lib.equals("cufft") || lib.equals("curand") || lib.equals("cusolver") ? "@.10" - : lib.equals("cudart") ? "@.11.0" - : lib.equals("nvrtc") ? "@.11.0" - : "@.11"; - } else if (platform.startsWith("windows")) { - lib += lib.startsWith("cudnn") ? "64_8" - : lib.equals("nccl") ? "64_2" - : lib.equals("myelin") ? "64_1" - : lib.equals("nvinfer") ? "64_7" - : lib.equals("cufft") || lib.equals("curand") || lib.equals("cusolver") ? "64_10" - : lib.equals("cudart") ? "64_110" - : lib.equals("nvrtc") ? "64_110_0" - : "64_11"; - } else { - continue; // no CUDA - } - if (!preloads.contains(lib)) { - preloads.add(i++, lib); - } - } - if (i > 0) { - resources.add("/org/bytedeco/cuda/"); - resources.add("/org/bytedeco/tensorrt/"); - } + if (load.length() > 0) { + if (platform.startsWith("linux")) { + preloads.add(i, load + "#mklml_intel"); + } else if (platform.startsWith("macosx")) { + preloads.add(i, load + "#mklml"); + } else if (platform.startsWith("windows")) { + preloads.add(i, load + "#mklml"); + } } - public void map(InfoMap infoMap) { - infoMap.put(new Info("TF_CAPI_EXPORT", "TF_Bool").cppTypes().annotations()) - .put(new Info("TF_Buffer::data").javaText("public native @Const Pointer data(); public native TF_Buffer data(Pointer data);")) - .put(new Info("TF_Status").pointerTypes("TF_Status").base("org.tensorflow.internal.c_api.AbstractTF_Status")) - .put(new Info("TF_Buffer").pointerTypes("TF_Buffer").base("org.tensorflow.internal.c_api.AbstractTF_Buffer")) - .put(new Info("TF_Tensor").pointerTypes("TF_Tensor").base("org.tensorflow.internal.c_api.AbstractTF_Tensor")) - .put(new Info("TF_Session").pointerTypes("TF_Session").base("org.tensorflow.internal.c_api.AbstractTF_Session")) - .put(new Info("TF_SessionOptions").pointerTypes("TF_SessionOptions").base("org.tensorflow.internal.c_api.AbstractTF_SessionOptions")) - .put(new Info("TF_Graph").pointerTypes("TF_Graph").base("org.tensorflow.internal.c_api.AbstractTF_Graph")) - .put(new Info("TF_Graph::graph").javaText("public native @MemberGetter @ByRef Graph graph();")) - .put(new Info("TF_Graph::refiner").javaText("public native @MemberGetter @ByRef ShapeRefiner refiner();")) - .put(new Info("TF_ImportGraphDefOptions").pointerTypes("TF_ImportGraphDefOptions").base("org.tensorflow.internal.c_api.AbstractTF_ImportGraphDefOptions")) - .put(new Info("TF_Operation", "TF_WhileParams", "TFE_MonitoringCounterCell", "TFE_MonitoringSamplerCell", - "TFE_MonitoringCounter0", "TFE_MonitoringCounter1", "TFE_MonitoringCounter2", - "TFE_MonitoringIntGaugeCell", "TFE_MonitoringStringGaugeCell", "TFE_MonitoringBoolGaugeCell", - "TFE_MonitoringIntGauge0", "TFE_MonitoringIntGauge1", "TFE_MonitoringIntGauge2", - "TFE_MonitoringStringGauge0", "TFE_MonitoringStringGauge1", "TFE_MonitoringStringGauge2", - "TFE_MonitoringBoolGauge0", "TFE_MonitoringBoolGauge1", "TFE_MonitoringBoolGauge2", - "TFE_MonitoringSampler0", "TFE_MonitoringSampler1", "TFE_MonitoringSampler2").purify()) - .put(new 
Info("TF_Operation::node").javaText("public native @MemberGetter @ByRef Node node();")) - .put(new Info("TFE_MonitoringCounterCell::cell").javaText("public native @MemberGetter @ByRef CounterCell cell();")) - .put(new Info("TFE_MonitoringSamplerCell::cell").javaText("public native @MemberGetter @ByRef SamplerCell cell();")) - .put(new Info("TFE_MonitoringIntGaugeCell::cell").javaText("public native @MemberGetter @ByRef IntGaugeCell cell();")) - .put(new Info("TFE_MonitoringStringGaugeCell::cell").javaText("public native @MemberGetter @ByRef StringGaugeCell cell();")) - .put(new Info("TFE_MonitoringBoolGaugeCell::cell").javaText("public native @MemberGetter @ByRef BoolGaugeCell cell();")) - .put(new Info("TFE_Context").pointerTypes("TFE_Context").base("org.tensorflow.internal.c_api.AbstractTFE_Context")) - .put(new Info("TFE_ContextOptions").pointerTypes("TFE_ContextOptions").base("org.tensorflow.internal.c_api.AbstractTFE_ContextOptions")) - .put(new Info("TFE_Context::context").javaText("@MemberGetter public native @ByRef EagerContext context();")) - .put(new Info("TFE_Op").pointerTypes("TFE_Op").base("org.tensorflow.internal.c_api.AbstractTFE_Op")) - .put(new Info("TFE_Op::operation").javaText("@MemberGetter public native @ByRef EagerOperation operation();")) - .put(new Info("TFE_TensorHandle").pointerTypes("TFE_TensorHandle").base("org.tensorflow.internal.c_api.AbstractTFE_TensorHandle")) - .put(new Info("TF_ShapeInferenceContextDimValueKnown", "TFE_NewTensorHandle(const tensorflow::Tensor&, TF_Status*)").skip()); + // Only apply this at load time since we don't want to copy the CUDA libraries here + if (!Loader.isLoadLibraries() || extension == null || !extension.endsWith("-gpu")) { + return; + } + String[] libs = { + "cudart", + "cublasLt", + "cublas", + "cufft", + "curand", + "cusolver", + "cusparse", + "cudnn", + "nccl", + "nvrtc", + "myelin", + "nvinfer", + "cudnn_ops_infer", + "cudnn_ops_train", + "cudnn_adv_infer", + "cudnn_adv_train", + "cudnn_cnn_infer", + "cudnn_cnn_train" + }; + for (String lib : libs) { + if (platform.startsWith("linux")) { + lib += + lib.startsWith("cudnn") + ? "@.8" + : lib.equals("nccl") + ? "@.2" + : lib.equals("myelin") + ? "@.1" + : lib.equals("nvinfer") + ? "@.7" + : lib.equals("cufft") || lib.equals("curand") || lib.equals("cusolver") + ? "@.10" + : lib.equals("cudart") + ? "@.11.0" + : lib.equals("nvrtc") ? "@.11.0" : "@.11"; + } else if (platform.startsWith("windows")) { + lib += + lib.startsWith("cudnn") + ? "64_8" + : lib.equals("nccl") + ? "64_2" + : lib.equals("myelin") + ? "64_1" + : lib.equals("nvinfer") + ? "64_7" + : lib.equals("cufft") || lib.equals("curand") || lib.equals("cusolver") + ? "64_10" + : lib.equals("cudart") + ? "64_110" + : lib.equals("nvrtc") ? 
"64_110_0" : "64_11"; + } else { + continue; // no CUDA + } + if (!preloads.contains(lib)) { + preloads.add(i++, lib); + } } + if (i > 0) { + resources.add("/org/bytedeco/cuda/"); + resources.add("/org/bytedeco/tensorrt/"); + } + } + + @Override + public void map(InfoMap infoMap) { + infoMap + .put(new Info("TF_CAPI_EXPORT", "TF_Bool").cppTypes().annotations()) + .put( + new Info("TF_Buffer::data") + .javaText( + "public native @Const Pointer data(); public native TF_Buffer data(Pointer data);")) + .put( + new Info("TF_Status") + .pointerTypes("TF_Status") + .base("org.tensorflow.internal.c_api.AbstractTF_Status")) + .put( + new Info("TF_Buffer") + .pointerTypes("TF_Buffer") + .base("org.tensorflow.internal.c_api.AbstractTF_Buffer")) + .put( + new Info("TF_Tensor") + .pointerTypes("TF_Tensor") + .base("org.tensorflow.internal.c_api.AbstractTF_Tensor")) + .put( + new Info("TF_Session") + .pointerTypes("TF_Session") + .base("org.tensorflow.internal.c_api.AbstractTF_Session")) + .put( + new Info("TF_SessionOptions") + .pointerTypes("TF_SessionOptions") + .base("org.tensorflow.internal.c_api.AbstractTF_SessionOptions")) + .put( + new Info("TF_Graph") + .pointerTypes("TF_Graph") + .base("org.tensorflow.internal.c_api.AbstractTF_Graph")) + .put( + new Info("TF_Graph::graph") + .javaText("public native @MemberGetter @ByRef Graph graph();")) + .put( + new Info("TF_Graph::refiner") + .javaText("public native @MemberGetter @ByRef ShapeRefiner refiner();")) + .put( + new Info("TF_Function") + .pointerTypes("TF_Function") + .base("org.tensorflow.internal.c_api.AbstractTF_Function")) + .put( + new Info("TF_ImportGraphDefOptions") + .pointerTypes("TF_ImportGraphDefOptions") + .base("org.tensorflow.internal.c_api.AbstractTF_ImportGraphDefOptions")) + .put( + new Info( + "TF_Operation", + "TF_WhileParams", + "TFE_MonitoringCounterCell", + "TFE_MonitoringSamplerCell", + "TFE_MonitoringCounter0", + "TFE_MonitoringCounter1", + "TFE_MonitoringCounter2", + "TFE_MonitoringIntGaugeCell", + "TFE_MonitoringStringGaugeCell", + "TFE_MonitoringBoolGaugeCell", + "TFE_MonitoringIntGauge0", + "TFE_MonitoringIntGauge1", + "TFE_MonitoringIntGauge2", + "TFE_MonitoringStringGauge0", + "TFE_MonitoringStringGauge1", + "TFE_MonitoringStringGauge2", + "TFE_MonitoringBoolGauge0", + "TFE_MonitoringBoolGauge1", + "TFE_MonitoringBoolGauge2", + "TFE_MonitoringSampler0", + "TFE_MonitoringSampler1", + "TFE_MonitoringSampler2") + .purify()) + .put( + new Info("TF_Operation::node") + .javaText("public native @MemberGetter @ByRef Node node();")) + .put( + new Info("TFE_MonitoringCounterCell::cell") + .javaText("public native @MemberGetter @ByRef CounterCell cell();")) + .put( + new Info("TFE_MonitoringSamplerCell::cell") + .javaText("public native @MemberGetter @ByRef SamplerCell cell();")) + .put( + new Info("TFE_MonitoringIntGaugeCell::cell") + .javaText("public native @MemberGetter @ByRef IntGaugeCell cell();")) + .put( + new Info("TFE_MonitoringStringGaugeCell::cell") + .javaText("public native @MemberGetter @ByRef StringGaugeCell cell();")) + .put( + new Info("TFE_MonitoringBoolGaugeCell::cell") + .javaText("public native @MemberGetter @ByRef BoolGaugeCell cell();")) + .put( + new Info("TFE_Context") + .pointerTypes("TFE_Context") + .base("org.tensorflow.internal.c_api.AbstractTFE_Context")) + .put( + new Info("TFE_ContextOptions") + .pointerTypes("TFE_ContextOptions") + .base("org.tensorflow.internal.c_api.AbstractTFE_ContextOptions")) + .put( + new Info("TFE_Context::context") + .javaText("@MemberGetter public native @ByRef 
EagerContext context();")) + .put( + new Info("TFE_Op") + .pointerTypes("TFE_Op") + .base("org.tensorflow.internal.c_api.AbstractTFE_Op")) + .put( + new Info("TFE_Op::operation") + .javaText("@MemberGetter public native @ByRef EagerOperation operation();")) + .put( + new Info("TFE_TensorHandle") + .pointerTypes("TFE_TensorHandle") + .base("org.tensorflow.internal.c_api.AbstractTFE_TensorHandle")) + .put( + new Info( + "TF_ShapeInferenceContextDimValueKnown", + "TFE_NewTensorHandle(const tensorflow::Tensor&, TF_Status*)") + .skip()); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Function.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Function.java new file mode 100644 index 00000000000..255a62e1253 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Function.java @@ -0,0 +1,58 @@ +/* Copyright 2020-2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ +package org.tensorflow.op.core; + +import java.util.Map; +import org.tensorflow.ConcreteFunction; +import org.tensorflow.Operand; +import org.tensorflow.op.Ops; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.Operator; + +/** Ops for calling {@link ConcreteFunction}. */ +@Operator(name = "call") +public abstract class Function { + + /** + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. The inputs and outputs are keyed by the names set in the {@code Signature}. + * + * @param scope the scope to call the function in + * @param arguments the arguments to the call + * @return the outputs of the function + * @see ConcreteFunction#call(Ops, Map) + */ + @Endpoint + public static Map> call( + Scope scope, ConcreteFunction function, Map> arguments) { + return function.call(scope, arguments); + } + + /** + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. Only works for functions with a single input and output. + * + * @param scope the scope to call the function in + * @param argument the argument to the call + * @return the output of the function + * @see ConcreteFunction#call(Ops, Operand) + */ + @Endpoint + public static Operand call(Scope scope, ConcreteFunction function, Operand argument) { + return function.call(scope, argument); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/ConcreteFunctionTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/ConcreteFunctionTest.java index b2b2c34e223..64c33f451fb 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/ConcreteFunctionTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/ConcreteFunctionTest.java @@ -1,28 +1,31 @@ -/* Copyright 2020 The TensorFlow Authors. 
All Rights Reserved. +/* Copyright 2020-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import java.util.Arrays; import org.junit.jupiter.api.Test; import org.tensorflow.op.Ops; import org.tensorflow.op.core.Init; import org.tensorflow.op.core.Placeholder; import org.tensorflow.op.math.Add; import org.tensorflow.op.math.Sub; +import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; public class ConcreteFunctionTest { @@ -30,7 +33,7 @@ public class ConcreteFunctionTest { private static Signature plusFive(Ops tf) { Placeholder input = tf.placeholder(TFloat32.class); Add output = tf.math.add(input, tf.constant(5.0f)); - Init init = tf.init(); // for native resource management tests + Init init = tf.init(); // for native resource management tests return Signature.builder().key("plusFive").input("x", input).output("y", output).build(); } @@ -40,11 +43,25 @@ private static Signature minusTwo(Ops tf) { return Signature.builder().key("minusTwo").input("x", input).output("y", output).build(); } + @SuppressWarnings("unchecked") + private static Signature plusFiveMinusTwo(Ops tf) { + Placeholder input = tf.placeholder(TFloat32.class); + try (ConcreteFunction plusFive = ConcreteFunction.create(ConcreteFunctionTest::plusFive); + ConcreteFunction minusTwo = ConcreteFunction.create(ConcreteFunctionTest::minusTwo)) { + Operand result = (Operand) minusTwo.call(tf, plusFive.call(tf, input)); + return Signature.builder() + .key("plusFiveMinusTwo") + .input("x", input) + .output("y", result) + .build(); + } + } + @Test public void createFunction() { try (ConcreteFunction f = ConcreteFunction.create(ConcreteFunctionTest::plusFive); TFloat32 x = TFloat32.scalarOf(3.0f)) { - assertEquals(8.0f, ((TFloat32)f.call(x)).getFloat()); + assertEquals(8.0f, ((TFloat32) f.call(x)).getFloat()); } } @@ -54,7 +71,7 @@ public void createFunctionFromGraph() { Signature signature = plusFive(Ops.create(g)); try (ConcreteFunction f = ConcreteFunction.create(signature, g); TFloat32 x = TFloat32.scalarOf(3.0f)) { - assertEquals(8.0f, ((TFloat32)f.call(x)).getFloat()); + assertEquals(8.0f, 
((TFloat32) f.call(x)).getFloat()); } } } @@ -66,7 +83,7 @@ public void createFunctionFromSession() { try (Session s = new Session(g)) { try (ConcreteFunction f = ConcreteFunction.create(signature, s); TFloat32 x = TFloat32.scalarOf(3.0f)) { - assertEquals(8.0f, ((TFloat32)f.call(x)).getFloat()); + assertEquals(8.0f, ((TFloat32) f.call(x)).getFloat()); } } } @@ -77,45 +94,109 @@ public void chainFunctions() { try (ConcreteFunction f1 = ConcreteFunction.create(ConcreteFunctionTest::plusFive); ConcreteFunction f2 = ConcreteFunction.create(ConcreteFunctionTest::minusTwo); TFloat32 x = TFloat32.scalarOf(3.0f)) { - assertEquals(6.0f, ((TFloat32)f2.call(f1.call(x))).getFloat()); + assertEquals(6.0f, ((TFloat32) f2.call(f1.call(x))).getFloat()); } } @Test - public void closingFunctionReleaseAllResourcesItOwns() { - Graph g; - Session s; - try (ConcreteFunction f = ConcreteFunction.create(ConcreteFunctionTest::plusFive)) { - g = f.graph(); - s = f.session(); + public void getGraphFunctions() { + try (ConcreteFunction function = ConcreteFunction.create(ConcreteFunctionTest::plusFive); + Graph g = new Graph()) { + Ops tf = Ops.create(g); + tf.call(function, tf.constant(3f)); + + ConcreteFunction attached = g.getFunction(function.getDefinedName()); + assertNotNull(attached); + + try (TFloat32 x = TFloat32.scalarOf(10f); + TFloat32 y = (TFloat32) attached.call(x)) { + assertEquals(15f, y.getFloat()); + } } - assertThrows(IllegalStateException.class, () -> s.run("Add")); - assertThrows(IllegalStateException.class, () -> g.toGraphDef()); } @Test - public void closingFunctionCreatedFromGraphOnlyReleaseResourcesItOwns() { - try (Graph g = new Graph()) { - Signature signature = plusFive(Ops.create(g)); - Session s; - try (ConcreteFunction f = ConcreteFunction.create(signature, g)) { - s = f.session(); + public void testNestedFunctionEager() { + try (EagerSession sess = EagerSession.create(); + ConcreteFunction function = + ConcreteFunction.create(ConcreteFunctionTest::plusFiveMinusTwo)) { + Ops tf = Ops.create(sess); + Operand a = tf.constant(10f); + Operand result = (Operand) function.call(tf, a); + try (TFloat32 t = result.asTensor()) { + assertEquals(13f, t.getFloat()); } - assertThrows(IllegalStateException.class, () -> s.run(Init.DEFAULT_NAME)); - g.toGraphDef(); // check that graph is still valid } } @Test - public void closingFunctionCreatedFromSessionDoesNotReleaseResources() { - try (Graph g = new Graph()) { - Signature signature = plusFive(Ops.create(g)); - try (Session s = new Session(g)) { - try (ConcreteFunction f = ConcreteFunction.create(signature, s)) { - } - s.run(Init.DEFAULT_NAME); // check that session is still valid + public void testNestedFunctionGraph() { + try (Graph graph = new Graph(); + ConcreteFunction function = + ConcreteFunction.create(ConcreteFunctionTest::plusFiveMinusTwo)) { + Ops tf = Ops.create(graph); + Operand a = tf.constant(10f); + Operand result = (Operand) function.call(tf, a); + try (Session sess = new Session(graph); + TFloat32 t = (TFloat32) sess.runner().fetch(result).run().get(0)) { + assertEquals(13f, t.getFloat()); + } + } + } + + private static Signature square(Ops tf) { + Placeholder input = tf.placeholder(TFloat32.class); + Operand output = tf.math.square(input); + return Signature.builder() + .methodName("square") + .key("square") + .input("x", input) + .output("y", output) + .build(); + } + + // call op gradients are not defined in c++ + // @Test + public void testGradientsGraph() { + try (Graph g = new Graph(); + ConcreteFunction square = 
ConcreteFunction.create(ConcreteFunctionTest::square); + Session s = new Session(g)) { + Ops tf = Ops.create(g); + + Output x1 = tf.placeholder(TFloat32.class).output(); + Output x2 = tf.placeholder(TFloat32.class).output(); + Output y0 = (Output) square.call(tf, x1); + Output y1 = (Output) square.call(tf, y0); + Output y2 = tf.math.addN(Arrays.asList(y0, x2)).sum(); + + Output[] grads0 = g.addGradients(y1, new Output[] {x1}); + assertNotNull(grads0); + assertEquals(1, grads0.length); + assertEquals(DataType.DT_FLOAT, grads0[0].dataType()); + + Output[] grads1 = g.addGradients(y2, new Output[] {x1, x2}); + assertNotNull(grads1); + assertEquals(2, grads1.length); + assertEquals(DataType.DT_FLOAT, grads1[0].dataType()); + assertEquals(DataType.DT_FLOAT, grads1[1].dataType()); + + try (TFloat32 c1 = TFloat32.scalarOf(3.0f); + TFloat32 c2 = TFloat32.scalarOf(2.0f); + AutoCloseableList outputs = + new AutoCloseableList<>( + s.runner() + .feed(x1, c1) + .feed(x2, c2) + .fetch(grads0[0]) + .fetch(grads1[0]) + .fetch(grads1[1]) + .run())) { + + assertEquals(3, outputs.size()); + assertEquals(108.0f, ((TFloat32) outputs.get(0)).getFloat(), 0.0f); + assertEquals(6.0f, ((TFloat32) outputs.get(1)).getFloat(), 0.0f); + assertEquals(1.0f, ((TFloat32) outputs.get(2)).getFloat(), 0.0f); } - g.toGraphDef(); // check that graph is still valid } } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationBuilderTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationBuilderTest.java index b39ecec9881..b694e0e5a39 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationBuilderTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationBuilderTest.java @@ -1,18 +1,18 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ======================================================================= + */ package org.tensorflow; import static org.junit.jupiter.api.Assertions.fail; @@ -134,7 +134,8 @@ public void setAttrs() { .addInput(tf.constant(10.00000f).asOutput()) .setAttr("tolerance", 0.1f) .build(); - // Missing tests: list(string), list(byte), list(bool), list(type) + // Missing tests: list(string), list(byte), list(bool), list(type), list(func) + // func is done via ConcreteFunction execution } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java index 33ae979ccbd..d0e79534d2c 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java @@ -1,18 +1,18 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -106,7 +106,8 @@ public void setAttr() { .setAttr("pooling_ratio", new float[] {1.0f, 1.44f, 1.73f, 1.0f}) .build(); assertTrue(hasNode(g, "FloatList")); - // Missing tests: float, list(dtype), list(tensor), list(string), list(bool) + // Missing tests: float, list(dtype), list(tensor), list(string), list(bool), list(func) + // func is done via ConcreteFunction execution } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java index 032c835c0cc..1561842a689 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java @@ -1,18 +1,18 @@ -/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -55,8 +55,12 @@ public class SavedModelBundleTest { static { try { - SAVED_MODEL_PATH = Paths.get(SavedModelBundleTest.class.getResource("/saved_model").toURI()).toString(); - SAVED_MODEL_PY_PATH = Paths.get(SavedModelBundleTest.class.getResource("/saved_model_using_python/model").toURI()).toString(); + SAVED_MODEL_PATH = + Paths.get(SavedModelBundleTest.class.getResource("/saved_model").toURI()).toString(); + SAVED_MODEL_PY_PATH = + Paths.get( + SavedModelBundleTest.class.getResource("/saved_model_using_python/model").toURI()) + .toString(); } catch (URISyntaxException e) { throw new RuntimeException(e); } @@ -85,38 +89,84 @@ public void loadNonExistentBundle() { @Test public void loader() { - try (SavedModelBundle bundle = SavedModelBundle.loader(SAVED_MODEL_PATH) - .withTags("serve") - .withConfigProto(sillyConfigProto()) - .withRunOptions(sillyRunOptions()) - .load()) { + try (SavedModelBundle bundle = + SavedModelBundle.loader(SAVED_MODEL_PATH) + .withTags("serve") + .withConfigProto(sillyConfigProto()) + .withRunOptions(sillyRunOptions()) + .load()) { assertNotNull(bundle.session()); assertNotNull(bundle.graph()); assertNotNull(bundle.metaGraphDef()); } } + @Test + public void exportMultipleFunctions() throws IOException { + Path testFolder = Files.createTempDirectory("tf-saved-model-export-test"); + float reducedSum; + try (Graph g = new Graph()) { + Ops tf = Ops.create(g); + Signature f1Signature = buildGraphWithVariables(tf, Shape.of(1, 1)); + Signature f2Signature = buildIdentityGraph(tf, "identity"); + try (Session s = new Session(g); ) { + SessionFunction f1 = SessionFunction.create(f1Signature, s); + SessionFunction f2 = SessionFunction.create(f2Signature, s); + s.runInit(); + try (TFloat32 x = TFloat32.tensorOf(StdArrays.ndCopyOf(new float[] {2, 2})); + TFloat32 t = (TFloat32) f1.call(x)) { + reducedSum = t.getFloat(); + } + SavedModelBundle.exporter(testFolder.toString()).withFunction(f1).withFunction(f2).export(); + } + } + try (SavedModelBundle model = SavedModelBundle.load(testFolder.toString())) { + assertEquals(2, model.signatures().size()); + TensorFunction f1 = model.function(Signature.DEFAULT_KEY); + assertNotNull(f1); + try (TFloat32 x = TFloat32.tensorOf(StdArrays.ndCopyOf(new float[] {2, 2})); + TFloat32 t = (TFloat32) f1.call(x)) { + assertEquals(reducedSum, t.getFloat(), EPSILON); + } + TensorFunction 
f2 = model.function("identity"); + assertNotNull(f2); + try (TFloat32 x = TFloat32.scalarOf(10.0f); + TFloat32 t = (TFloat32) f2.call(x)) { + assertEquals(10.0f, t.getFloat(), 0.0f); + } + try { + model.function("NoSuchFunction"); + fail(); + } catch (IllegalArgumentException e) { + // as expected + } + } + } + @Test public void exportFunctionWithVariables() throws IOException { Path testFolder = Files.createTempDirectory("tf-saved-model-export-test"); float reducedSum; - FloatNdArray xValue = StdArrays.ndCopyOf(new float[][]{{0, 1, 2}, {3, 4, 5}}); + FloatNdArray xValue = StdArrays.ndCopyOf(new float[][] {{0, 1, 2}, {3, 4, 5}}); Shape xyShape = Shape.of(2, 3L); - try (ConcreteFunction f = ConcreteFunction.create(tf -> buildGraphWithVariables(tf, xyShape))) { + try (Graph g = new Graph(); + Session session = new Session(g)) { + Ops tf = Ops.create(g); + SessionFunction f = session.function(buildGraphWithVariables(tf, xyShape)); // Init variable state by running the Init operation directly - f.session().run(Init.DEFAULT_NAME); + session.runInit(); // Call the graph and remember the result of computation for later try (TFloat32 xTensor = TFloat32.tensorOf(xValue); - TFloat32 zTensor = (TFloat32)f.call(xTensor)) { + TFloat32 zTensor = (TFloat32) f.call(xTensor)) { reducedSum = zTensor.getFloat(); } // Save/export the model (which is a single function in this case) f.save(testFolder.toString()); } assertTrue(Files.exists(testFolder.resolve(Paths.get("variables", "variables.index")))); - assertTrue(Files - .exists(testFolder.resolve(Paths.get("variables", "variables.data-00000-of-00001")))); + assertTrue( + Files.exists(testFolder.resolve(Paths.get("variables", "variables.data-00000-of-00001")))); assertTrue(Files.exists(testFolder.resolve("saved_model.pb"))); // Reload the model just saved and validate its data @@ -125,10 +175,11 @@ public void exportFunctionWithVariables() throws IOException { assertNotNull(savedModel.metaGraphDef()); assertNotNull(savedModel.metaGraphDef().getSaverDef()); assertEquals(1, savedModel.metaGraphDef().getSignatureDefCount()); - assertEquals(Signature.DEFAULT_KEY, + assertEquals( + Signature.DEFAULT_KEY, savedModel.metaGraphDef().getSignatureDefMap().keySet().iterator().next()); - ConcreteFunction function = savedModel.function(Signature.DEFAULT_KEY); + TensorFunction function = savedModel.function(Signature.DEFAULT_KEY); assertNotNull(function); Signature signature = function.signature(); @@ -155,12 +206,13 @@ public void exportFunctionWithVariables() throws IOException { try (TFloat32 xTensor = TFloat32.tensorOf(xValue)) { // Call the saved model function and make sure it returns the same result as before - try (TFloat32 zTensor = (TFloat32)function.call(xTensor)) { + try (TFloat32 zTensor = (TFloat32) function.call(xTensor)) { assertEquals(reducedSum, zTensor.getFloat(), EPSILON); } // Now call the same function directly from the model try (TFloat32 zTensor = - (TFloat32)savedModel.call(Collections.singletonMap("input", xTensor)).get("reducedSum")) { + (TFloat32) + savedModel.call(Collections.singletonMap("input", xTensor)).get("reducedSum")) { assertEquals(reducedSum, zTensor.getFloat(), EPSILON); } } @@ -168,73 +220,27 @@ public void exportFunctionWithVariables() throws IOException { } @Test - public void exportMultipleFunctions() throws IOException { + public void cannotExportMultipleFunctionsWithDifferentSessions() throws IOException { Path testFolder = Files.createTempDirectory("tf-saved-model-export-test"); - float reducedSum; - try (Graph g = new 
Graph()) { + try (Graph g = new Graph(); + Session s1 = new Session(g); + Session s2 = new Session(g)) { Ops tf = Ops.create(g); Signature f1Signature = buildGraphWithVariables(tf, Shape.of(1, 1)); Signature f2Signature = buildIdentityGraph(tf, "identity"); - try (Session s = new Session(g); - ConcreteFunction f1 = ConcreteFunction.create(f1Signature, s); - ConcreteFunction f2 = ConcreteFunction.create(f2Signature, s)) { - f1.session().run(Init.DEFAULT_NAME); - try (TFloat32 x = TFloat32.tensorOf(StdArrays.ndCopyOf(new float[]{2, 2})); - TFloat32 t = (TFloat32)f1.call(x)) { - reducedSum = t.getFloat(); - } - SavedModelBundle.exporter(testFolder.toString()) - .withFunction(f1) - .withFunction(f2) - .export(); - } - } - try (SavedModelBundle model = SavedModelBundle.load(testFolder.toString())) { - assertEquals(2, model.signatures().size()); - ConcreteFunction f1 = model.function(Signature.DEFAULT_KEY); - assertNotNull(f1); - try (TFloat32 x = TFloat32.tensorOf(StdArrays.ndCopyOf(new float[]{2, 2})); - TFloat32 t = (TFloat32)f1.call(x)) { - assertEquals(reducedSum, t.getFloat(), EPSILON); - } - ConcreteFunction f2 = model.function("identity"); - assertNotNull(f2); - try (TFloat32 x = TFloat32.scalarOf(10.0f); - TFloat32 t = (TFloat32)f2.call(x)) { - assertEquals(10.0f, t.getFloat(), 0.0f); - } + SessionFunction f1 = s1.function(f1Signature); + SessionFunction f2 = s2.function(f2Signature); + s1.runInit(); + s2.runInit(); try { - model.function("NoSuchFunction"); + SavedModelBundle.exporter(testFolder.toString()).withFunction(f1).withFunction(f2).export(); fail(); - } catch (IllegalArgumentException e) { + } catch (UnsupportedOperationException e) { // as expected } } } - @Test - public void cannotExportMultipleFunctionsWithDifferentSessions() throws IOException { - Path testFolder = Files.createTempDirectory("tf-saved-model-export-test"); - try (Graph g = new Graph()) { - Ops tf = Ops.create(g); - Signature f1Signature = buildGraphWithVariables(tf, Shape.of(1, 1)); - Signature f2Signature = buildIdentityGraph(tf, "identity"); - try (ConcreteFunction f1 = ConcreteFunction.create(f1Signature, g); - ConcreteFunction f2 = ConcreteFunction.create(f2Signature, g)) { - f1.session().run(Init.DEFAULT_NAME); - try { - SavedModelBundle.exporter(testFolder.toString()) - .withFunction(f1) - .withFunction(f2) - .export(); - fail(); - } catch (UnsupportedOperationException e) { - // as expected - } - } - } - } - @Test public void cannotExportMultipleFunctionsWithSameSignatureKey() throws IOException { Path testFolder = Files.createTempDirectory("tf-saved-model-export-test"); @@ -242,15 +248,12 @@ public void cannotExportMultipleFunctionsWithSameSignatureKey() throws IOExcepti Ops tf = Ops.create(g); Signature f1Signature = buildGraphWithVariables(tf, Shape.of(1, 1)); Signature f2Signature = buildIdentityGraph(tf, Signature.DEFAULT_KEY); - try (Session s = new Session(g); - ConcreteFunction f1 = ConcreteFunction.create(f1Signature, s); - ConcreteFunction f2 = ConcreteFunction.create(f2Signature, s)) { - f1.session().run(Init.DEFAULT_NAME); + try (Session s = new Session(g); ) { + SessionFunction f1 = SessionFunction.create(f1Signature, s); + SessionFunction f2 = SessionFunction.create(f2Signature, s); + s.runInit(); try { - SavedModelBundle.exporter(testFolder.toString()) - .withFunction(f1) - .withFunction(f2) - .export(); + SavedModelBundle.exporter(testFolder.toString()).withFunctions(f1, f2).export(); fail(); } catch (IllegalArgumentException e) { // as expected @@ -261,24 +264,21 @@ public void 
cannotExportMultipleFunctionsWithSameSignatureKey() throws IOExcepti @Test public void cannotExportOrImportInvalidTags() { - assertThrows(IllegalArgumentException.class, () -> - SavedModelBundle.loader("/").withTags(null) - ); - assertThrows(IllegalArgumentException.class, () -> - SavedModelBundle.loader("/").withTags(new String[]{"tag", null}) - ); - assertThrows(IllegalArgumentException.class, () -> - SavedModelBundle.loader("/").withTags(new String[]{"tag", ""}) - ); - assertThrows(IllegalArgumentException.class, () -> - SavedModelBundle.exporter("/").withTags(null) - ); - assertThrows(IllegalArgumentException.class, () -> - SavedModelBundle.exporter("/").withTags(new String[]{"tag", null}) - ); - assertThrows(IllegalArgumentException.class, () -> - SavedModelBundle.exporter("/").withTags(new String[]{"tag", ""}) - ); + assertThrows(IllegalArgumentException.class, () -> SavedModelBundle.loader("/").withTags(null)); + assertThrows( + IllegalArgumentException.class, + () -> SavedModelBundle.loader("/").withTags(new String[] {"tag", null})); + assertThrows( + IllegalArgumentException.class, + () -> SavedModelBundle.loader("/").withTags(new String[] {"tag", ""})); + assertThrows( + IllegalArgumentException.class, () -> SavedModelBundle.exporter("/").withTags(null)); + assertThrows( + IllegalArgumentException.class, + () -> SavedModelBundle.exporter("/").withTags(new String[] {"tag", null})); + assertThrows( + IllegalArgumentException.class, + () -> SavedModelBundle.exporter("/").withTags(new String[] {"tag", ""})); } @Test @@ -289,10 +289,11 @@ public void pythonTfFunction() { * Test model was created in python * Signature name used for saving 'add', argument names 'a' and 'b' */ - ConcreteFunction add = bundle.function("add"); + TensorFunction add = bundle.function("add"); Map args = new HashMap<>(); try (TFloat32 a = TFloat32.scalarOf(10.0f); TFloat32 b = TFloat32.scalarOf(15.5f)) { + System.out.println(add.signature()); args.put("a", a); args.put("b", b); Map result = add.call(args); @@ -304,12 +305,15 @@ public void pythonTfFunction() { args.clear(); // variable unwrapping happens in Session, which is used by ConcreteFunction.call - ConcreteFunction getVariable = bundle.function("get_variable"); + TensorFunction getVariable = bundle.function("get_variable"); try (TFloat32 dummy = TFloat32.scalarOf(1.0f)) { - args.put("dummy",dummy); + args.put("dummy", dummy); // TF functions always require an input, so we supply a dummy one here // This test actually checks that resource variables can be loaded correctly. 
- try (TFloat32 v = (TFloat32) getVariable.call(args) + try (TFloat32 v = + (TFloat32) + getVariable + .call(args) .get(getVariable.signature().outputNames().iterator().next())) { assertEquals(2f, v.getFloat()); } @@ -319,8 +323,9 @@ public void pythonTfFunction() { private static Signature buildGraphWithVariables(Ops tf, Shape xShape) { Placeholder x = tf.placeholder(TFloat32.class, Placeholder.shape(xShape)); - Variable y = tf.withName("variable") - .variable(tf.random.randomUniform(tf.constant(xShape), TFloat32.class)); + Variable y = + tf.withName("variable") + .variable(tf.random.randomUniform(tf.constant(xShape), TFloat32.class)); ReduceSum z = tf.reduceSum(tf.math.add(x, y), tf.array(0, 1)); Init init = tf.init(); return Signature.builder().input("input", x).output("reducedSum", z).build(); @@ -333,9 +338,7 @@ private static Signature buildIdentityGraph(Ops tf, String signatureKey) { } private static RunOptions sillyRunOptions() { - return RunOptions.newBuilder() - .setTraceLevel(RunOptions.TraceLevel.FULL_TRACE) - .build(); + return RunOptions.newBuilder().setTraceLevel(RunOptions.TraceLevel.FULL_TRACE).build(); } private static ConfigProto sillyConfigProto() { diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java index 4223a03ee23..8a3e64c3336 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java @@ -1,18 +1,18 @@ -/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ package org.tensorflow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -43,18 +43,33 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; -/** - * Unit tests for {@link org.tensorflow.Session}. - */ +/** Unit tests for {@link org.tensorflow.Session}. 
*/ public class SessionTest { + @Test + public void runUsingFunction() { + try (Graph g = new Graph(); + Session s = new Session(g)) { + Ops tf = Ops.create(g); + transpose_A_times_X(tf, new int[][] {{2}, {3}}); + Signature sig = + Signature.builder("sess").input("X", g.output("X")).output("Y", g.output("Y")).build(); + SessionFunction func = s.function(sig); + + try (TInt32 x = TInt32.tensorOf(StdArrays.ndCopyOf(new int[][] {{5}, {7}})); + TInt32 y = (TInt32) func.call(x)) { + assertEquals(31, y.getInt(0, 0)); + } + } + } + @Test public void runUsingOperationNames() { try (Graph g = new Graph(); Session s = new Session(g)) { Ops tf = Ops.create(g); - transpose_A_times_X(tf, new int[][]{{2}, {3}}); - try (TInt32 x = TInt32.tensorOf(StdArrays.ndCopyOf(new int[][]{{5}, {7}})); + transpose_A_times_X(tf, new int[][] {{2}, {3}}); + try (TInt32 x = TInt32.tensorOf(StdArrays.ndCopyOf(new int[][] {{5}, {7}})); AutoCloseableList outputs = new AutoCloseableList<>(s.runner().feed("X", x).fetch("Y").run())) { assertEquals(1, outputs.size()); @@ -68,10 +83,10 @@ public void runUsingOperationHandles() { try (Graph g = new Graph(); Session s = new Session(g)) { Ops tf = Ops.create(g); - transpose_A_times_X(tf, new int[][]{{2}, {3}}); + transpose_A_times_X(tf, new int[][] {{2}, {3}}); Output feed = g.operation("X").output(0); Output fetch = g.operation("Y").output(0); - try (TInt32 x = TInt32.tensorOf(StdArrays.ndCopyOf(new int[][]{{5}, {7}})); + try (TInt32 x = TInt32.tensorOf(StdArrays.ndCopyOf(new int[][] {{5}, {7}})); AutoCloseableList outputs = new AutoCloseableList<>(s.runner().feed(feed, x).fetch(fetch).run())) { assertEquals(1, outputs.size()); @@ -95,12 +110,9 @@ public void runUsingColonSeparatedNames() { } // Feed using colon separated names. try (TInt32 fed = TInt32.vectorOf(4, 3, 2, 1); - TInt32 fetched = (TInt32) s.runner() - .feed("Split:0", fed) - .feed("Split:1", fed) - .fetch("Add") - .run() - .get(0)) { + TInt32 fetched = + (TInt32) + s.runner().feed("Split:0", fed).feed("Split:1", fed).fetch("Add").run().get(0)) { assertEquals(NdArrays.vectorOf(8, 6, 4, 2), fetched); } } @@ -111,13 +123,14 @@ public void runWithMetadata() { try (Graph g = new Graph(); Session s = new Session(g)) { Ops tf = Ops.create(g); - transpose_A_times_X(tf, new int[][]{{2}, {3}}); - try (TInt32 x = TInt32.tensorOf(StdArrays.ndCopyOf(new int[][]{{5}, {7}}))) { - Session.Run result = s.runner() - .feed("X", x) - .fetch("Y") - .setOptions(fullTraceRunOptions()) - .runAndFetchMetadata(); + transpose_A_times_X(tf, new int[][] {{2}, {3}}); + try (TInt32 x = TInt32.tensorOf(StdArrays.ndCopyOf(new int[][] {{5}, {7}}))) { + Session.Run result = + s.runner() + .feed("X", x) + .fetch("Y") + .setOptions(fullTraceRunOptions()) + .runAndFetchMetadata(); // Sanity check on outputs. 
AutoCloseableList outputs = new AutoCloseableList<>(result.outputs); assertEquals(1, outputs.size()); @@ -163,8 +176,7 @@ public void failOnUseAfterClose() { @Test public void createWithConfigProto() { try (Graph g = new Graph(); - Session s = new Session(g, singleThreadConfigProto())) { - } + Session s = new Session(g, singleThreadConfigProto())) {} } @Test @@ -219,10 +231,12 @@ public void saveAndRestore() throws IOException { Path testFolder = Files.createTempDirectory("tf-session-save-restore-test"); try (Graph g = new Graph()) { Ops tf = Ops.create(g); - Variable x = tf.withName("x") - .variable(tf.random.randomUniform(tf.constant(Shape.of(3, 3L)), TFloat32.class)); - Variable y = tf.withName("y") - .variable(tf.random.randomUniform(tf.constant(Shape.of(3, 3L)), TFloat32.class)); + Variable x = + tf.withName("x") + .variable(tf.random.randomUniform(tf.constant(Shape.of(3, 3L)), TFloat32.class)); + Variable y = + tf.withName("y") + .variable(tf.random.randomUniform(tf.constant(Shape.of(3, 3L)), TFloat32.class)); Init init = tf.init(); try (Session s = new Session(g)) { @@ -234,9 +248,10 @@ public void saveAndRestore() throws IOException { restoredGraph.importGraphDef(graphDef); try (Session restoredSession = new Session(restoredGraph)) { restoredSession.restore(testFolder.resolve("checkpoint").toString()); - try (AutoCloseableList oldList = new AutoCloseableList<>(s.runner().fetch("x").fetch("y").run()); - AutoCloseableList newList = new AutoCloseableList<>( - restoredSession.runner().fetch("x").fetch("y").run())) { + try (AutoCloseableList oldList = + new AutoCloseableList<>(s.runner().fetch("x").fetch("y").run()); + AutoCloseableList newList = + new AutoCloseableList<>(restoredSession.runner().fetch("x").fetch("y").run())) { assertEquals(oldList.get(0), newList.get(0)); assertEquals(oldList.get(1), newList.get(1)); } @@ -265,7 +280,6 @@ public static void testFetchVariable() { try (TInt32 value = (TInt32) s.runner().addTarget(assign).fetch(variable).run().get(0)) { assertEquals(2, value.getInt()); } - } } @@ -295,14 +309,11 @@ public static void testFetchVariableReusingRead() { } assertEquals(0, numOperations(g) - ops); - } } private static RunOptions fullTraceRunOptions() { - return RunOptions.newBuilder() - .setTraceLevel(RunOptions.TraceLevel.FULL_TRACE) - .build(); + return RunOptions.newBuilder().setTraceLevel(RunOptions.TraceLevel.FULL_TRACE).build(); } private static ConfigProto singleThreadConfigProto() { @@ -313,10 +324,11 @@ private static ConfigProto singleThreadConfigProto() { } private static void transpose_A_times_X(Ops tf, int[][] a) { - tf.withName("Y").linalg.matMul( - tf.withName("A").constant(a), - tf.withName("X").placeholder(TInt32.class), - MatMul.transposeA(true).transposeB(false) - ); + tf.withName("Y") + .linalg + .matMul( + tf.withName("A").constant(a), + tf.withName("X").placeholder(TInt32.class), + MatMul.transposeA(true).transposeB(false)); } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/FunctionTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/FunctionTest.java new file mode 100644 index 00000000000..be4386698fa --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/FunctionTest.java @@ -0,0 +1,67 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ +package org.tensorflow.op.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; +import org.tensorflow.ConcreteFunction; +import org.tensorflow.EagerSession; +import org.tensorflow.Graph; +import org.tensorflow.Operand; +import org.tensorflow.Session; +import org.tensorflow.Signature; +import org.tensorflow.op.Ops; +import org.tensorflow.op.math.Add; +import org.tensorflow.types.TFloat32; + +/** Tests for GraphFunction and it's ops */ +public class FunctionTest { + + private static Signature plusFive(Ops tf) { + Placeholder input = tf.placeholder(TFloat32.class); + Add output = tf.math.add(input, tf.constant(5.0f)); + Init init = tf.init(); // for native resource management tests + return Signature.builder().key("plusFive").input("x", input).output("y", output).build(); + } + + @Test + public void testConcreteFunctionEager() { + try (EagerSession sess = EagerSession.create(); + ConcreteFunction function = ConcreteFunction.create(FunctionTest::plusFive)) { + Ops tf = Ops.create(sess); + Operand a = tf.constant(10f); + Operand result = (Operand) function.call(tf, a); + try (TFloat32 t = result.asTensor()) { + assertEquals(15f, t.getFloat()); + } + } + } + + @Test + public void testConcreteFunctionGraph() { + try (Graph graph = new Graph(); + ConcreteFunction function = ConcreteFunction.create(FunctionTest::plusFive)) { + Ops tf = Ops.create(graph); + Operand a = tf.constant(10f); + Operand result = (Operand) function.call(tf, a); + try (Session sess = new Session(graph); + TFloat32 t = (TFloat32) sess.runner().fetch(result).run().get(0)) { + assertEquals(15f, t.getFloat()); + } + } + } +} From 19e1c8d4aff3bea8b3a4d875165c89a3771c5344 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Tue, 1 Jun 2021 14:58:06 -0700 Subject: [PATCH 32/60] Spotless updates (#331) Signed-off-by: Ryan Nett --- .github/workflows/ci.yml | 26 ++++++++++++++++++++++++++ pom.xml | 29 +++++------------------------ 2 files changed, 31 insertions(+), 24 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f993a57e48e..72c045818fe 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -40,6 +40,32 @@ jobs: - name: Run lint checks run: | mvn compiler:compile -Pdev,jdk11 -B -U -e + check-format: + if: github.event_name == 'pull_request' + runs-on: ubuntu-latest + container: centos:7 + steps: + - name: Checkout repository + uses: actions/checkout@v1 + - name: Install environment + run: | + yum -y update + yum -y install centos-release-scl-rh epel-release + yum -y install java-11-openjdk-devel devtoolset-7 + echo Downloading Maven + curl -L https://archive.apache.org/dist/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz -o $HOME/apache-maven-3.6.3-bin.tar.gz + tar xzf $HOME/apache-maven-3.6.3-bin.tar.gz -C /opt/ + ln -sf /opt/apache-maven-3.6.3/bin/mvn /usr/bin/mvn + - name: Build project + run: | + source scl_source enable devtoolset-7 || true + export JAVA_HOME=$(dirname $(dirname $(readlink $(readlink $(which javac))))) + echo $JAVA_HOME + 
mvn -version + mvn clean install -Pdev,jdk11 -B -U -e -Dlint.skip=true + - name: Run format checks + run: | + mvn spotless:check -Pdev,jdk11 -B -U -e prepare: runs-on: ubuntu-latest outputs: diff --git a/pom.xml b/pom.xml index 7288f3661b3..f9b94e8164a 100644 --- a/pom.xml +++ b/pom.xml @@ -230,11 +230,12 @@ + - format - - (,16) - + check-format @@ -340,26 +341,6 @@ - - - -/* Copyright $YEAR The TensorFlow Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= - */ - - From 23d6f0b709fc3122e5fe9ff63eb052750428d913 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Wed, 2 Jun 2021 17:54:13 -0400 Subject: [PATCH 33/60] activations, constraints, initializers, losses, regularizers: move Ops param from CTOR to call method (#329) * Move Ops from CTOR to call method * Move Ops from CTOR to call method * Move Ops from CTOR to call method * JavaDoc fixes including Dataset * Results of Run mvn spotless:apply --- .../activations/AbstractActivation.java | 46 ++ .../framework/activations/Activation.java | 45 +- .../tensorflow/framework/activations/ELU.java | 34 +- .../framework/activations/Exponential.java | 23 +- .../framework/activations/HardSigmoid.java | 31 +- .../framework/activations/Linear.java | 18 +- .../framework/activations/ReLU.java | 30 +- .../framework/activations/SELU.java | 21 +- .../framework/activations/Sigmoid.java | 21 +- .../framework/activations/Softmax.java | 22 +- .../framework/activations/Softplus.java | 21 +- .../framework/activations/Softsign.java | 21 +- .../framework/activations/Swish.java | 11 +- .../framework/activations/Tanh.java | 14 +- .../constraints/AbstractConstraint.java | 89 +++ .../framework/constraints/Constraint.java | 88 +-- .../framework/constraints/MaxNorm.java | 34 +- .../framework/constraints/MinMaxNorm.java | 34 +- .../framework/constraints/NonNeg.java | 15 +- .../framework/constraints/UnitNorm.java | 35 +- .../tensorflow/framework/data/Dataset.java | 678 ++++++++++-------- .../framework/data/DatasetOptional.java | 58 +- .../initializers/BaseInitializer.java | 21 +- .../framework/initializers/Constant.java | 31 +- .../framework/initializers/Glorot.java | 12 +- .../tensorflow/framework/initializers/He.java | 16 +- .../framework/initializers/Identity.java | 30 +- .../framework/initializers/Initializer.java | 7 +- .../framework/initializers/LeCun.java | 15 +- .../framework/initializers/Ones.java | 20 +- .../framework/initializers/Orthogonal.java | 21 +- .../framework/initializers/RandomNormal.java | 26 +- .../framework/initializers/RandomUniform.java | 31 +- .../initializers/TruncatedNormal.java | 23 +- .../initializers/VarianceScaling.java | 32 +- .../framework/initializers/Zeros.java | 17 +- .../framework/losses/BinaryCrossentropy.java | 83 +-- .../losses/CategoricalCrossentropy.java | 139 ++-- .../framework/losses/CategoricalHinge.java | 40 +- .../framework/losses/CosineSimilarity.java | 115 ++- .../tensorflow/framework/losses/Hinge.java | 52 +- 
.../tensorflow/framework/losses/Huber.java | 61 +- .../framework/losses/KLDivergence.java | 50 +- .../tensorflow/framework/losses/LogCosh.java | 54 +- .../org/tensorflow/framework/losses/Loss.java | 78 +- .../framework/losses/MeanAbsoluteError.java | 44 +- .../losses/MeanAbsolutePercentageError.java | 45 +- .../framework/losses/MeanSquaredError.java | 44 +- .../losses/MeanSquaredLogarithmicError.java | 44 +- .../tensorflow/framework/losses/Poisson.java | 54 +- .../framework/losses/Reduction.java | 2 +- .../losses/SparseCategoricalCrossentropy.java | 86 ++- .../framework/losses/SquaredHinge.java | 57 +- .../framework/losses/impl/AbstractLoss.java | 89 +++ .../org/tensorflow/framework/metrics/AUC.java | 114 ++- .../framework/metrics/Accuracy.java | 12 +- .../framework/metrics/BinaryAccuracy.java | 12 +- .../metrics/CategoricalAccuracy.java | 23 +- .../metrics/CategoricalCrossentropy.java | 24 +- .../framework/metrics/FalseNegatives.java | 42 +- .../framework/metrics/FalsePositives.java | 42 +- .../tensorflow/framework/metrics/MeanIoU.java | 25 +- .../framework/metrics/MeanRelativeError.java | 18 +- .../framework/metrics/MeanTensor.java | 13 +- .../framework/metrics/Precision.java | 86 ++- .../framework/metrics/PrecisionAtRecall.java | 11 +- .../tensorflow/framework/metrics/Recall.java | 41 +- .../framework/metrics/RecallAtPrecision.java | 10 +- .../metrics/RootMeanSquaredError.java | 10 +- .../metrics/SensitivityAtSpecificity.java | 24 +- .../metrics/SparseCategoricalAccuracy.java | 13 +- .../metrics/SpecificityAtSensitivity.java | 24 +- .../org/tensorflow/framework/metrics/Sum.java | 8 +- .../metrics/TopKCategoricalAccuracy.java | 8 +- .../framework/metrics/TrueNegatives.java | 42 +- .../framework/metrics/TruePositives.java | 42 +- .../impl/ConfusionMatrixConditionCount.java | 38 +- .../framework/metrics/impl/LossMetric.java | 2 +- .../metrics/impl/MeanMetricWrapper.java | 15 +- .../framework/metrics/impl/MetricsHelper.java | 137 ++-- .../impl/SensitivitySpecificityBase.java | 21 +- .../framework/metrics/impl/SetsOps.java | 28 +- .../framework/metrics/impl/SymbolicShape.java | 50 +- .../metrics/impl/WeightsBroadcastOps.java | 45 +- .../regularizers/AbstractRegularizer.java | 63 ++ .../tensorflow/framework/regularizers/L1.java | 33 +- .../framework/regularizers/L1L2.java | 39 +- .../tensorflow/framework/regularizers/L2.java | 33 +- .../framework/regularizers/Regularizer.java | 67 +- .../regularizers/RegularizerLoss.java | 31 +- .../framework/activations/ELUTest.java | 33 +- .../activations/ExponentialTest.java | 28 +- .../activations/HardSigmoidTest.java | 28 +- .../framework/activations/LinearTest.java | 28 +- .../framework/activations/ReLUTest.java | 58 +- .../framework/activations/SELUTest.java | 39 +- .../framework/activations/SigmoidTest.java | 27 +- .../framework/activations/SoftmaxTest.java | 47 +- .../framework/activations/SoftplusTest.java | 24 +- .../framework/activations/SoftsignTest.java | 24 +- .../framework/activations/SwishTest.java | 28 +- .../framework/activations/TanhTest.java | 48 +- .../framework/constraints/MaxNormTest.java | 13 +- .../framework/constraints/MinMaxNormTest.java | 9 +- .../framework/constraints/NonNegTest.java | 8 +- .../framework/constraints/UnitNormTest.java | 8 +- .../framework/initializers/ConstantTest.java | 72 +- .../framework/initializers/GlorotTest.java | 57 +- .../framework/initializers/HeTest.java | 57 +- .../framework/initializers/IdentityTest.java | 34 +- .../framework/initializers/LeCunTest.java | 50 +- .../framework/initializers/OnesTest.java 
| 78 +- .../initializers/OrthogonalTest.java | 34 +- .../initializers/RandomNormalTest.java | 33 +- .../initializers/RandomUniformTest.java | 38 +- .../initializers/TruncatedNormalTest.java | 33 +- .../initializers/VarianceScalingTest.java | 73 +- .../framework/initializers/ZerosTest.java | 72 +- .../losses/BinaryCrossentropyTest.java | 82 +-- .../losses/CategoricalCrossentropyTest.java | 66 +- .../losses/CategoricalHingeTest.java | 32 +- .../losses/CosineSimilarityTest.java | 35 +- .../framework/losses/HingeTest.java | 34 +- .../framework/losses/HuberTest.java | 30 +- .../framework/losses/KLDivergenceTest.java | 25 +- .../framework/losses/LogCoshTest.java | 25 +- .../losses/MeanAbsoluteErrorTest.java | 45 +- .../MeanAbsolutePercentageErrorTest.java | 40 +- .../losses/MeanSquaredErrorTest.java | 45 +- .../MeanSquaredLogarithmicErrorTest.java | 45 +- .../framework/losses/PoissonTest.java | 25 +- .../SparseCategoricalCrossentropyTest.java | 54 +- .../framework/losses/SquaredHingeTest.java | 34 +- .../optimizers/GradientDescentTest.java | 57 +- .../framework/regularizers/L1L2Test.java | 42 +- .../framework/regularizers/L1Test.java | 26 +- .../framework/regularizers/L2Test.java | 26 +- .../regularizers/RegularizerLossTest.java | 8 +- 138 files changed, 2865 insertions(+), 3066 deletions(-) create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/activations/AbstractActivation.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/AbstractConstraint.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/AbstractLoss.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/AbstractRegularizer.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/AbstractActivation.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/AbstractActivation.java new file mode 100644 index 00000000000..335b8697273 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/AbstractActivation.java @@ -0,0 +1,46 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/ +package org.tensorflow.framework.activations; + +import org.tensorflow.op.Ops; +import org.tensorflow.types.family.TNumber; + +/** Abstract base class for Activations */ +public abstract class AbstractActivation implements Activation { + + /** The TensorFlow Ops */ + protected Ops tf; + + /** Creates the abstract class for an AbstractActivation */ + protected AbstractActivation() {} + + /** + * Gets the TensorFlow Ops + * + * @return the TensorFlow Ops + */ + protected Ops getTF() { + return this.tf; + } + + /** + * Sets the TensorFlow Ops + * + * @param tf the TensorFlow Ops + */ + protected void setTF(Ops tf) { + this.tf = tf; + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activation.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activation.java index e1482a51a8a..f73c6678ab3 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activation.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activation.java @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,50 +19,19 @@ import org.tensorflow.types.family.TNumber; /** - * Abstract base class for Activations + * Interface for Activations * - *

Note: The {@link #tf} attribute must be set prior to invoking the call method. See - * {@link #setTF(Ops)} and the constructor {@link #Activation(Ops)}. - * - * @param the data type of the activation + * @param the data type of the input and the result */ -public abstract class Activation { - - /** The TensorFlow Ops */ - protected Ops tf; - - /** - * Creates the abstract class for an Activation - * - * @param tf the TensorFlow Ops - */ - protected Activation(Ops tf) { - this.tf = tf; - } - - /** - * Sets the TensorFlow Ops - * - * @param tf the TensorFlow Ops - */ - protected void setTF(Ops tf) { - this.tf = tf; - } - - /** - * Gets the TensorFlow Ops - * - * @return the TensorFlow Ops - */ - protected Ops getTF() { - return this.tf; - } +@FunctionalInterface +public interface Activation { /** * Gets the calculation operation for the activation. * + * @param tf the TensorFlow Ops * @param input the input tensor * @return The operand for the activation */ - public abstract Operand call(Operand input); + Operand call(Ops tf, Operand input); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ELU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ELU.java index 2f2f16f2752..bd019a60df1 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ELU.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ELU.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.TBool; @@ -44,53 +46,41 @@ * Operand<TFloat32> result = elu.call(input); *

* - * @param the data type of the activation * @see Clevert et al, 2016, Fast and Accurate Deep * Network Learning by Exponential Linear Units (ELUs) */ -public class ELU extends Activation { +public class ELU extends AbstractActivation { private static final double ALPHA_DEFAULT = 1.0; /** A scalar, slope of negative section. */ private final double alpha; - /** - * Creates a new ELU with alpha={@link #ALPHA_DEFAULT}. - * - * @param tf the TensorFlow Ops - */ - public ELU(Ops tf) { - this(tf, ALPHA_DEFAULT); + /** Creates a new ELU with alpha={@link #ALPHA_DEFAULT}. */ + public ELU() { + this(ALPHA_DEFAULT); } /** * Creates a new ELU * - * @param tf the TensorFlow Ops * @param alpha A scalar, slope of negative section. It controls the value to which an ELU * saturates for negative net inputs. */ - public ELU(Ops tf, double alpha) { - super(tf); + public ELU(double alpha) { + super(); this.alpha = alpha; } - /** - * Gets the calculation operation for the activation. - * - * @param input the input tensor - * @return The operand for the activation - */ + /** {@inheritDoc} */ @Override - public Operand call(Operand input) { - + public Operand call(Ops tf, Operand input) { Operand result = tf.nn.elu(input); if (alpha == 1.0) return result; else { Class inputType = input.type(); - Operand y = tf.math.mul(result, tf.dtypes.cast(tf.constant(alpha), inputType)); - Operand cond = tf.math.greater(result, tf.dtypes.cast(tf.constant(0), inputType)); + Operand y = tf.math.mul(result, cast(tf, tf.constant(alpha), inputType)); + Operand cond = tf.math.greater(result, cast(tf, tf.constant(0), inputType)); return tf.select(cond, result, y); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Exponential.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Exponential.java index d5fdff36c61..8398ada6362 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Exponential.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Exponential.java @@ -30,28 +30,17 @@ * Operand<TFloat32> result = exp.call(input); * // result is [0.04978707f, 0.36787945f, 1.f, 2.7182817f, 20.085537f] *
- * - * @param the data type of the activation */ -public class Exponential extends Activation { +public class Exponential extends AbstractActivation { - /** - * Creates an Exponential activation. - * - * @param tf the TensorFlow Ops - */ - public Exponential(Ops tf) { - super(tf); + /** Creates an Exponential activation. */ + public Exponential() { + super(); } - /** - * Calculates the Exponential activation. - * - * @param input the input tensor - * @return an Operand for the exponential activation: exp(x). - */ + /** {@inheritDoc} */ @Override - public Operand call(Operand input) { + public Operand call(Ops tf, Operand input) { return tf.math.exp(input); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/HardSigmoid.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/HardSigmoid.java index 0b7cf573b8e..4365e0cd14a 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/HardSigmoid.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/HardSigmoid.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TFloating; @@ -40,34 +42,23 @@ * Operand<TFloat32> result = hardSigmoid.call(input); * // result is [0.f , 0.3f, 0.5f, 0.7f, 1.f] * - * - * @param the data type of the result */ -public class HardSigmoid extends Activation { +public class HardSigmoid extends AbstractActivation { - /** - * Creates Hard sigmoid activation. - * - * @param tf the TensorFlow Ops - */ - public HardSigmoid(Ops tf) { - super(tf); + /** Creates Hard sigmoid activation. */ + public HardSigmoid() { + super(); } - /** - * Gets the calculation operation for the activation. - * - * @param input the input tensor - * @return The operand for the activation - */ + /** {@inheritDoc} */ @Override - public Operand call(Operand input) { + public Operand call(Ops tf, Operand input) { Class inputType = input.type(); - Operand point2 = tf.dtypes.cast(tf.constant(0.2), inputType); - Operand point5 = tf.dtypes.cast(tf.constant(0.5), inputType); + Operand point2 = cast(tf, tf.constant(0.2), inputType); + Operand point5 = cast(tf, tf.constant(0.5), inputType); Operand x = tf.math.add(tf.math.mul(input, point2), point5); return tf.clipByValue( - x, tf.dtypes.cast(tf.constant(0), inputType), tf.dtypes.cast(tf.constant(1), inputType)); + x, cast(tf, tf.constant(0), inputType), cast(tf, tf.constant(1), inputType)); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Linear.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Linear.java index d907397995d..d1a5eede616 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Linear.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Linear.java @@ -19,9 +19,9 @@ import org.tensorflow.types.family.TNumber; /** - * Linear activation function (pass-through). + * Linear activation function (pass-through). * - *

The linear activation returns its input. It is also known as the Identity activation function.

+ *

The linear activation returns its input. It is also known as the Identity activation function. * *

For example: * @@ -33,20 +33,16 @@ * // result is [-3.0f,-1.0f, 0.0f,1.0f,3.0f] * */ -public class Linear extends Activation { +public class Linear extends AbstractActivation { - /** - * Creates a linear activation. - * - * @param tf the TensorFlow Ops - */ - public Linear(Ops tf) { - super(tf); + /** Creates a linear activation. */ + public Linear() { + super(); } /** {@inheritDoc} */ @Override - public Operand call(Operand input) { + public Operand call(Ops tf, Operand input) { return input; } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ReLU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ReLU.java index aef6ebf2992..44dd3bc3b46 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ReLU.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ReLU.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.op.math.Greater; @@ -58,7 +60,7 @@ * * @param the data type of the result */ -public class ReLU extends Activation { +public class ReLU extends AbstractActivation { public static final float ALPHA_DEFAULT = 0.0f; public static final float MAX_VALUE_DEFAULT = Float.NaN; @@ -71,24 +73,21 @@ public class ReLU extends Activation { /** * Creates a new ReLU with alpha={@link #ALPHA_DEFAULT}, maxValue={@link #MAX_VALUE_DEFAULT}, * threshold={@link #THRESHOLD_DEFAULT}, - * - * @param tf the TensorFlow Ops */ - public ReLU(Ops tf) { - this(tf, ALPHA_DEFAULT, MAX_VALUE_DEFAULT, THRESHOLD_DEFAULT); + public ReLU() { + this(ALPHA_DEFAULT, MAX_VALUE_DEFAULT, THRESHOLD_DEFAULT); } /** * Creates a new ReLU * - * @param tf the TensorFlow Ops * @param alpha governs the slope for values lower than the threshold. * @param maxValue sets the saturation threshold (the largest value the function will return). * @param threshold the threshold value of the activation function below which values will be * damped or set to zero. 
*/ - public ReLU(Ops tf, float alpha, float maxValue, float threshold) { - super(tf); + public ReLU(float alpha, float maxValue, float threshold) { + super(); this.alpha = alpha; this.maxValue = maxValue; this.threshold = threshold; @@ -96,7 +95,7 @@ public ReLU(Ops tf, float alpha, float maxValue, float threshold) { /** {@inheritDoc} */ @Override - public Operand call(Operand input) { + public Operand call(Ops tf, Operand input) { Class inputType = input.type(); boolean clipMax = !Float.isNaN(maxValue); @@ -108,7 +107,7 @@ public Operand call(Operand input) { if (threshold != 0) { negativePart = tf.nn.relu( - tf.math.add(tf.math.neg(input), tf.dtypes.cast(tf.constant(threshold), inputType))); + tf.math.add(tf.math.neg(input), cast(tf, tf.constant(threshold), inputType))); } else { negativePart = tf.nn.relu(tf.math.neg(input)); } @@ -117,8 +116,8 @@ public Operand call(Operand input) { Operand lInput; if (threshold != 0) { // computes input for input > threshold else 0 - Greater greater = tf.math.greater(input, tf.dtypes.cast(tf.constant(threshold), inputType)); - lInput = tf.math.mul(input, tf.dtypes.cast(greater, inputType)); + Greater greater = tf.math.greater(input, cast(tf, tf.constant(threshold), inputType)); + lInput = tf.math.mul(input, cast(tf, greater, inputType)); } else if (maxValue == 6) { // if no threshold, then can use nn.relu6 native TF op for performance lInput = tf.nn.relu6(input); @@ -127,15 +126,14 @@ public Operand call(Operand input) { lInput = tf.nn.relu(input); } if (clipMax) { - Operand lmaxValue = tf.dtypes.cast(tf.constant(maxValue), inputType); - Operand zero = tf.dtypes.cast(tf.constant(0), inputType); + Operand lmaxValue = cast(tf, tf.constant(maxValue), inputType); + Operand zero = cast(tf, tf.constant(0), inputType); lInput = tf.clipByValue(lInput, zero, lmaxValue); } if (alpha != 0.) { lInput = - tf.math.sub( - lInput, tf.math.mul(tf.dtypes.cast(tf.constant(alpha), inputType), negativePart)); + tf.math.sub(lInput, tf.math.mul(cast(tf, tf.constant(alpha), inputType), negativePart)); } return lInput; } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/SELU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/SELU.java index f24731049fb..a28052486e5 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/SELU.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/SELU.java @@ -45,25 +45,16 @@ * @param the data type of the activation * @see Klambauer et al., 2017 */ -public class SELU extends Activation { +public class SELU extends AbstractActivation { - /** - * Creates a Scaled Exponential Linear Unit (SELU) activation. - * - * @param tf the TensorFlow Ops - */ - public SELU(Ops tf) { - super(tf); + /** Creates a Scaled Exponential Linear Unit (SELU) activation. */ + public SELU() { + super(); } - /** - * Gets the calculation operation for the activation. 
- * - * @param input the input tensor - * @return The operand for the activation - */ + /** {@inheritDoc} */ @Override - public Operand call(Operand input) { + public Operand call(Ops tf, Operand input) { return tf.nn.selu(input); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Sigmoid.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Sigmoid.java index 5d507b38483..02b2daae4d6 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Sigmoid.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Sigmoid.java @@ -41,25 +41,16 @@ * * @param the data type of the activation */ -public class Sigmoid extends Activation { +public class Sigmoid extends AbstractActivation { - /** - * Creates a Sigmoid activation. - * - * @param tf the TensorFlow Ops - */ - public Sigmoid(Ops tf) { - super(tf); + /** Creates a Sigmoid activation. */ + public Sigmoid() { + super(); } - /** - * Gets the calculation operation for the activation. - * - * @param input the input tensor - * @return The operand for the activation - */ + /** {@inheritDoc} */ @Override - public Operand call(Operand input) { + public Operand call(Ops tf, Operand input) { return tf.math.sigmoid(input); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softmax.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softmax.java index 154e1ecc84a..3aa67a179ad 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softmax.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softmax.java @@ -38,7 +38,7 @@ * * @param the data type of the activation */ -public class Softmax extends Activation { +public class Softmax extends AbstractActivation { private static final int AXIS_DEFAULT = -1; @@ -47,32 +47,24 @@ public class Softmax extends Activation { /** * Creates a softmax activation where the default axis is {@link #AXIS_DEFAULT} which indicates * the last dimension. - * - * @param tf the TensorFlow Ops */ - public Softmax(Ops tf) { - this(tf, AXIS_DEFAULT); + public Softmax() { + this(AXIS_DEFAULT); } /** * Creates a Softmax activation * - * @param tf the TensorFlow Ops * @param axis The dimension softmax would be performed on. */ - public Softmax(Ops tf, int axis) { - super(tf); + public Softmax(int axis) { + super(); this.axis = axis; } - /** - * Gets the calculation operation for the activation. - * - * @param input the input tensor - * @return The operand for the activation - */ + /** {@inheritDoc} */ @Override - public Operand call(Operand input) { + public Operand call(Ops tf, Operand input) { Shape shape = input.shape(); int numDimensions = shape.numDimensions(); if (numDimensions == 2) { diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softplus.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softplus.java index 65a183ea047..8533de7852c 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softplus.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softplus.java @@ -32,25 +32,16 @@ * // 1.3132616e+00f, 2.0000000e+01f] * */ -public class Softplus extends Activation { +public class Softplus extends AbstractActivation { - /** - * Creates a Softplus activation function. 
- * - * @param tf the TensorFlow Ops - */ - public Softplus(Ops tf) { - super(tf); + /** Creates a Softplus activation function. */ + public Softplus() { + super(); } - /** - * Gets the calculation operation for the activation. - * - * @param input the input tensor - * @return The operand for the activation - */ + /** {@inheritDoc} */ @Override - public Operand call(Operand input) { + public Operand call(Ops tf, Operand input) { return tf.math.softplus(input); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softsign.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softsign.java index 1f691e71862..249fa6077cd 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softsign.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softsign.java @@ -33,25 +33,16 @@ * * @param the data type of the activation */ -public class Softsign extends Activation { +public class Softsign extends AbstractActivation { - /** - * Creates a Softsign activation. - * - * @param tf the TensorFlow Ops - */ - public Softsign(Ops tf) { - super(tf); + /** Creates a Softsign activation. */ + public Softsign() { + super(); } - /** - * Gets the calculation operation for the activation. - * - * @param input the input tensor - * @return The operand for the activation - */ + /** {@inheritDoc} */ @Override - public Operand call(Operand input) { + public Operand call(Ops tf, Operand input) { return tf.nn.softsign(input); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Swish.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Swish.java index d9f73a422d5..5007dd34555 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Swish.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Swish.java @@ -40,7 +40,7 @@ * @param the data type of the activation * @see Ramachandran et al., 2017 */ -public class Swish extends Activation { +public class Swish extends AbstractActivation { /** * Creates a Swish activation, swish(x) = x * sigmoid(x). @@ -48,17 +48,14 @@ public class Swish extends Activation { *

Swish activation function which returns x*sigmoid(x). It is a smooth, * non-monotonic function that consistently matches or outperforms ReLU on deep networks, it is * unbounded above and bounded below. - * - * @param tf the TensorFlow Ops */ - public Swish(Ops tf) { - super(tf); + public Swish() { + super(); } /** {@inheritDoc} */ @Override - public Operand call(Operand input) { - + public Operand call(Ops tf, Operand input) { // TODO Python Keras returns a "grad", which is an optimization not implemented in Java. return tf.math.mul(input, tf.math.sigmoid(input)); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Tanh.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Tanh.java index 4fe02eed048..37d4d811a0d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Tanh.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Tanh.java @@ -33,20 +33,16 @@ * * @param the data type of the activation */ -public class Tanh extends Activation { +public class Tanh extends AbstractActivation { - /** - * Creates a Hyperbolic tangent activation. - * - * @param tf the TensorFlow Ops - */ - public Tanh(Ops tf) { - super(tf); + /** Creates a Hyperbolic tangent activation. */ + public Tanh() { + super(); } /** {@inheritDoc} */ @Override - public Operand call(Operand input) { + public Operand call(Ops tf, Operand input) { return tf.math.tanh(input); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/AbstractConstraint.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/AbstractConstraint.java new file mode 100644 index 00000000000..15db0d4b1e0 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/AbstractConstraint.java @@ -0,0 +1,89 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.constraints; + +import static org.tensorflow.framework.utils.CastHelper.cast; + +import org.tensorflow.Operand; +import org.tensorflow.op.Ops; +import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.types.family.TNumber; + +/** Base class for Constraints. AbstractConstraint subclasses impose constraints on weight values */ +public abstract class AbstractConstraint implements Constraint { + + public static final float EPSILON = 1e-7f; + + /** Creates a AbstractConstraint */ + public AbstractConstraint() {} + + /** + * Gets the element-wise square root. + * + * @param tf the TensorFlow Ops + * @param x the input Operand. + * @return the element-wise square root. + * @param The data type for the operand and result. 
+ * @throws IllegalArgumentException if x is null + */ + protected Operand sqrt(Ops tf, Operand x) { + if (x == null) throw new IllegalArgumentException("Operand x must not be null"); + Class type = x.type(); + Operand zero = cast(tf, tf.constant(0), type); + Operand inf = cast(tf, tf.constant(Double.POSITIVE_INFINITY), type); + return tf.math.sqrt(tf.clipByValue(x, zero, inf)); + } + + /** + * Gets the element-wise value clipping. + * + * @param tf the TensorFlow Ops + * @param x the Operand to clip + * @param minValue the minimum value + * @param maxValue the maximum value + * @return the operand with clipped values + * @param The data type for the operand and result. + * @throws IllegalArgumentException if x is null + */ + protected Operand clip( + Ops tf, Operand x, double minValue, double maxValue) { + if (x == null) throw new IllegalArgumentException("Operand x must not be null"); + Class type = x.type(); + + double min = Math.min(minValue, maxValue); + double max = Math.max(minValue, maxValue); + + Operand minValueConstant = cast(tf, tf.constant(min), type); + Operand maxValueConstant = cast(tf, tf.constant(max), type); + return tf.clipByValue(x, minValueConstant, maxValueConstant); + } + + /** + * Calculates the norm of the weights along the axes + * + * @param tf the TensorFlow Ops + * @param weights the weights used to calculate the norms + * @param axes the axes along which to calculate weight norms. + * @param the data type for the weights and the result + * @return the norms + * @throws IllegalArgumentException if weights is null + */ + protected Operand norm(Ops tf, Operand weights, int[] axes) { + if (weights == null) throw new IllegalArgumentException("weights must not be null"); + return sqrt( + tf, + tf.reduceSum(tf.math.square(weights), tf.constant(axes), ReduceSum.keepDims(Boolean.TRUE))); + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/Constraint.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/Constraint.java index 306361959bf..97640b19cf8 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/Constraint.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/Constraint.java @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,96 +16,16 @@ import org.tensorflow.Operand; import org.tensorflow.op.Ops; -import org.tensorflow.op.core.ReduceSum; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - -/** Base class for Constraints. Constraint subclasses impose constraints on weight values */ -public abstract class Constraint { - - public static final float EPSILON = 1e-7f; - - private final Ops tf; - - /** - * Creates a Constraint - * - * @param tf the TensorFlow Ops - */ - public Constraint(Ops tf) { - this.tf = tf; - } - +public interface Constraint { /** * Applies the constraint against the provided weights * + * @param tf the TensorFlow Ops * @param weights the weights * @return the constrained weights * @param the data type for weights and results. */ - public abstract Operand call(Operand weights); - - /** - * Gets the TensorFlow Ops - * - * @return the TensorFlow Ops - */ - public Ops getTF() { - return tf; - } - - /** - * Gets the element-wise square root. 
- * - * @param x the input Operand. - * @return the element-wise square root. - * @param The data type for the operand and result. - * @throws IllegalArgumentException if x is null - */ - protected Operand sqrt(Operand x) { - if (x == null) throw new IllegalArgumentException("Operand x must not be null"); - Class type = x.type(); - Operand zero = cast(tf, tf.constant(0), type); - Operand inf = cast(tf, tf.constant(Double.POSITIVE_INFINITY), type); - return tf.math.sqrt(tf.clipByValue(x, zero, inf)); - } - - /** - * Gets the element-wise value clipping. - * - * @param x the Operand to clip - * @param minValue the minimum value - * @param maxValue the maximum value - * @return the operand with clipped values - * @param The data type for the operand and result. - * @throws IllegalArgumentException if x is null - */ - protected Operand clip(Operand x, double minValue, double maxValue) { - if (x == null) throw new IllegalArgumentException("Operand x must not be null"); - Ops tf = getTF(); - Class type = x.type(); - - double min = Math.min(minValue, maxValue); - double max = Math.max(minValue, maxValue); - - Operand minValueConstant = cast(tf, tf.constant(min), type); - Operand maxValueConstant = cast(tf, tf.constant(max), type); - return tf.clipByValue(x, minValueConstant, maxValueConstant); - } - - /** - * Calculates the norm of the weights along the axes - * - * @param weights the weights used to calculate the norms - * @param axes the axes along which to calculate weight norms. - * @param the data type for the weights and the result - * @return the norms - * @throws IllegalArgumentException if weights is null - */ - protected Operand norm(Operand weights, int[] axes) { - if (weights == null) throw new IllegalArgumentException("weights must not be null"); - return sqrt( - tf.reduceSum(tf.math.square(weights), tf.constant(axes), ReduceSum.keepDims(Boolean.TRUE))); - } + Operand call(Ops tf, Operand weights); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/MaxNorm.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/MaxNorm.java index 1dae117b113..9bb99c47d07 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/MaxNorm.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/MaxNorm.java @@ -14,17 +14,17 @@ =======================================================================*/ package org.tensorflow.framework.constraints; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Constrains the weights incident to each hidden unit to have a norm less than or equal to a * desired value. */ -public class MaxNorm extends Constraint { +public class MaxNorm extends AbstractConstraint { public static final double MAX_VALUE_DEFAULT = 2.0; public static final int AXIS_DEFAULT = 0; @@ -36,54 +36,48 @@ public class MaxNorm extends Constraint { /** * Create a MaxNorm constraint using {@link #MAX_VALUE_DEFAULT} for the max value and {@link * #AXIS_DEFAULT} for the axis. - * - * @param tf the TensorFlow Ops */ - public MaxNorm(Ops tf) { - this(tf, MAX_VALUE_DEFAULT, AXIS_DEFAULT); + public MaxNorm() { + this(MAX_VALUE_DEFAULT, AXIS_DEFAULT); } /** * Create a MaxNorm constraint using {@link #AXIS_DEFAULT} for the axis. 
* - * @param tf the TensorFlow Ops * @param maxValue the maximum norm for the incoming weights. */ - public MaxNorm(Ops tf, double maxValue) { - this(tf, maxValue, AXIS_DEFAULT); + public MaxNorm(double maxValue) { + this(maxValue, AXIS_DEFAULT); } /** * Create a MaxNorm constraint * - * @param tf the TensorFlow Ops * @param maxValue the maximum norm for the incoming weights. * @param axis axis along which to calculate weight norms. */ - public MaxNorm(Ops tf, double maxValue, int axis) { - this(tf, maxValue, new int[] {axis}); + public MaxNorm(double maxValue, int axis) { + this(maxValue, new int[] {axis}); } /** * Create a MaxNorm constraint * - * @param tf the TensorFlow Ops * @param maxValue the maximum norm for the incoming weights. * @param axes axes along which to calculate weight norms. */ - public MaxNorm(Ops tf, double maxValue, int[] axes) { - super(tf); + public MaxNorm(double maxValue, int[] axes) { + super(); this.maxValue = maxValue; this.axes = axes; } /** {@inheritDoc} */ @Override - public Operand call(Operand weights) { - Ops tf = getTF(); + public Operand call(Ops tf, Operand weights) { Class type = weights.type(); - Operand norms = norm(weights, getAxes()); - Operand desired = clip(norms, 0f, this.getMaxValue()); + Operand norms = norm(tf, weights, getAxes()); + Operand desired = clip(tf, norms, 0f, this.getMaxValue()); return tf.math.mul( weights, tf.math.div(desired, tf.math.add(cast(tf, tf.constant(EPSILON), type), norms))); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/MinMaxNorm.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/MinMaxNorm.java index 04b21572e55..49b06744253 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/MinMaxNorm.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/MinMaxNorm.java @@ -14,14 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.constraints; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** Constrains the weights to have the norm between a lower bound and an upper bound. */ -public class MinMaxNorm extends Constraint { +public class MinMaxNorm extends AbstractConstraint { public static final double MIN_VALUE_DEFAULT = 0.0; public static final double MAX_VALUE_DEFAULT = 1.0; public static final double RATE_DEFAULT = 1.0; @@ -47,48 +47,43 @@ public class MinMaxNorm extends Constraint { * Create a MinMaxNorm constraint using {@link #MIN_VALUE_DEFAULT} for the min value, {@link * #MAX_VALUE_DEFAULT} for the max value, {@link #RATE_DEFAULT} for the rate and {@link * #AXIS_DEFAULT} for the axis - * - * @param tf the TensorFlow Ops */ - public MinMaxNorm(Ops tf) { - this(tf, MIN_VALUE_DEFAULT, MAX_VALUE_DEFAULT, RATE_DEFAULT, AXIS_DEFAULT); + public MinMaxNorm() { + this(MIN_VALUE_DEFAULT, MAX_VALUE_DEFAULT, RATE_DEFAULT, AXIS_DEFAULT); } /** * Create a MinMaxNorm constraint using {@link #RATE_DEFAULT} for the rate and {@link * #AXIS_DEFAULT} for the axis * - * @param tf the TensorFlow Ops * @param minValue the minimum norm for the incoming weights. * @param maxValue the maximum norm for the incoming weights. 
*/ - public MinMaxNorm(Ops tf, double minValue, double maxValue) { - this(tf, minValue, maxValue, RATE_DEFAULT, AXIS_DEFAULT); + public MinMaxNorm(double minValue, double maxValue) { + this(minValue, maxValue, RATE_DEFAULT, AXIS_DEFAULT); } /** * Create a MinMaxNorm constraint * - * @param tf the TensorFlow Ops * @param minValue the minimum norm for the incoming weights. * @param maxValue the maximum norm for the incoming weights. * @param rate the rate for enforcing the constraint. * @param axis integer, axis along which to calculate weight norms. */ - public MinMaxNorm(Ops tf, double minValue, double maxValue, double rate, int axis) { - this(tf, minValue, maxValue, rate, new int[] {axis}); + public MinMaxNorm(double minValue, double maxValue, double rate, int axis) { + this(minValue, maxValue, rate, new int[] {axis}); } /** * Create a MinMaxNorm constraint * - * @param tf the TensorFlow Ops * @param minValue the minimum norm for the incoming weights. * @param maxValue the maximum norm for the incoming weights. * @param rate the rate for enforcing the constraint. * @param axes integer, axis along which to calculate weight norms. */ - public MinMaxNorm(Ops tf, double minValue, double maxValue, double rate, int[] axes) { - super(tf); + public MinMaxNorm(double minValue, double maxValue, double rate, int[] axes) { + super(); this.minValue = minValue; this.maxValue = maxValue; this.rate = rate; @@ -97,15 +92,14 @@ public MinMaxNorm(Ops tf, double minValue, double maxValue, double rate, int[] a /** {@inheritDoc} */ @Override - public Operand call(Operand weights) { + public Operand call(Ops tf, Operand weights) { Class type = weights.type(); - Ops tf = getTF(); - Operand norms = norm(weights, getAxes()); + Operand norms = norm(tf, weights, getAxes()); Operand desired = tf.math.add( tf.math.mul( tf.dtypes.cast(tf.constant(this.getRate()), type), - clip(norms, this.getMinValue(), this.getMaxValue())), + clip(tf, norms, this.getMinValue(), this.getMaxValue())), tf.math.mul( tf.math.sub( tf.dtypes.cast(tf.constant(1), type), diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/NonNeg.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/NonNeg.java index 0194b2fadb6..6a5677983fa 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/NonNeg.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/NonNeg.java @@ -19,21 +19,16 @@ import org.tensorflow.types.family.TNumber; /** Constrains the weights to be non-negative. 
*/ -public class NonNeg extends Constraint { +public class NonNeg extends AbstractConstraint { - /** - * Create a NonNeg constraint - * - * @param tf the TensorFlow Ops - */ - public NonNeg(Ops tf) { - super(tf); + /** Create a NonNeg constraint */ + public NonNeg() { + super(); } /** {@inheritDoc} */ @Override - public Operand call(Operand weights) { - Ops tf = getTF(); + public Operand call(Ops tf, Operand weights) { Class type = weights.type(); return tf.math.mul( weights, diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/UnitNorm.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/UnitNorm.java index 70bb1a59785..8410605fab0 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/UnitNorm.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/constraints/UnitNorm.java @@ -14,57 +14,50 @@ =======================================================================*/ package org.tensorflow.framework.constraints; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** Constrains the weights to have unit norm. */ -public class UnitNorm extends Constraint { +public class UnitNorm extends AbstractConstraint { public static final int AXIS_DEFAULT = 0; /** integer, axis along which to calculate weight norms. */ private final int[] axes; - /** - * Create a UnitNorm Constraint with the axis set to {@link #AXIS_DEFAULT} - * - * @param tf the TensorFlow Ops - */ - public UnitNorm(Ops tf) { - this(tf, AXIS_DEFAULT); + /** Create a UnitNorm AbstractConstraint with the axis set to {@link #AXIS_DEFAULT} */ + public UnitNorm() { + this(AXIS_DEFAULT); } /** - * Create a UnitNorm Constraint + * Create a UnitNorm AbstractConstraint * - * @param tf the TensorFlow Ops * @param axis axis along which to calculate weight norms. */ - public UnitNorm(Ops tf, int axis) { - this(tf, new int[] {axis}); + public UnitNorm(int axis) { + this(new int[] {axis}); } /** - * Create a UnitNorm Constraint + * Create a UnitNorm AbstractConstraint * - * @param tf the TensorFlow Ops * @param axes axes along which to calculate weight norms. */ - public UnitNorm(Ops tf, int[] axes) { - super(tf); + public UnitNorm(int[] axes) { + super(); this.axes = axes; } /** {@inheritDoc} */ @Override - public Operand call(Operand weights) { + public Operand call(Ops tf, Operand weights) { Class type = weights.type(); - Ops tf = getTF(); return tf.math.div( - weights, tf.math.add(cast(tf, tf.constant(EPSILON), type), norm(weights, getAxes()))); + weights, tf.math.add(cast(tf, tf.constant(EPSILON), type), norm(tf, weights, getAxes()))); } /** diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/data/Dataset.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/data/Dataset.java index 7ac73f616e2..8ae751823fe 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/data/Dataset.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/data/Dataset.java @@ -1,313 +1,365 @@ -/* - * Copyright 2020 The TensorFlow Authors. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.tensorflow.framework.data; - -import org.tensorflow.Operand; -import org.tensorflow.framework.data.impl.BatchDataset; -import org.tensorflow.framework.data.impl.MapDataset; -import org.tensorflow.framework.data.impl.SkipDataset; -import org.tensorflow.framework.data.impl.TFRecordDataset; -import org.tensorflow.framework.data.impl.TakeDataset; -import org.tensorflow.framework.data.impl.TensorSliceDataset; -import org.tensorflow.framework.data.impl.TextLineDataset; -import org.tensorflow.op.Op; -import org.tensorflow.op.Ops; -import org.tensorflow.ndarray.Shape; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.function.Function; -import org.tensorflow.types.family.TType; - -/** - * Represents a potentially large list of independent elements (samples), and allows iteration and - * transformations to be performed across these elements. - */ -public abstract class Dataset implements Iterable>> { - protected Ops tf; - private Operand variant; - private List> outputTypes; - private List outputShapes; - - public Dataset( - Ops tf, Operand variant, List> outputTypes, List outputShapes) { - if (tf == null) { - throw new IllegalArgumentException("Ops accessor cannot be null."); - } - - if (outputTypes.size() != outputShapes.size()) { - throw new IllegalArgumentException( - "`outputTypes` and " + "`outputShapes` must have the same size."); - } - - this.tf = tf; - this.variant = variant; - this.outputTypes = outputTypes; - this.outputShapes = outputShapes; - } - - protected Dataset(Dataset other) { - this.tf = other.tf; - this.variant = other.variant; - this.outputTypes = other.outputTypes; - this.outputShapes = other.outputShapes; - } - - /** - * Groups elements of this dataset into batches. - * - * @param batchSize The number of desired elements per batch - * @param dropLastBatch Whether to leave out the final batch if it has fewer than `batchSize` - * elements. - * @return A batched Dataset - */ - public final Dataset batch(long batchSize, boolean dropLastBatch) { - - List batchOutputShapes = new ArrayList<>(); - outputShapes.forEach(s -> batchOutputShapes.add(s.prepend(-1))); - - return new BatchDataset( - tf, - this.getVariant(), - tf.constant(batchSize), - tf.constant(dropLastBatch), - outputTypes, - batchOutputShapes); - } - - /** - * Groups elements of this dataset into batches. Includes the last batch, even if it has fewer - * than `batchSize` elements. - * - * @param batchSize The number of desired elements per batch - * @return A batched Dataset - */ - public final Dataset batch(long batchSize) { - return batch(batchSize, false); - } - - /** - * Returns a new `Dataset` which skips `count` initial elements from this dataset - * - * @param count The number of elements to `skip` to form the new dataset. - * @return A new Dataset with `count` elements removed. 
- */ - public final Dataset skip(long count) { - return new SkipDataset( - tf, this.getVariant(), tf.constant(count), this.getOutputTypes(), this.getOutputShapes()); - } - - /** - * Returns a new `Dataset` with only the first `count` elements from this dataset. - * - * @param count The number of elements to "take" from this dataset. - * @return A new Dataset containing the first `count` elements from this dataset. - */ - public final Dataset take(long count) { - return new TakeDataset( - tf, this.getVariant(), tf.constant(count), this.getOutputTypes(), this.getOutputShapes()); - } - - /** - * Returns a new Dataset which maps a function across all elements from this dataset, on a single - * component of each element. - * - *

For example, suppose each element is a {@code List>} with 2 components: (features, - * labels). - * - *

Calling {@code dataset.mapOneComponent(0, features -> tf.math.mul(features, tf.constant(2)))} will - * map the function over the `features` component of each element, multiplying each by 2. - * - * @param index The index of the component to transform. - * @param mapper The function to apply to the target component. - * @return A new Dataset applying `mapper` to the component at the chosen index. - */ - public Dataset mapOneComponent(int index, Function, Operand> mapper) { - return map( - outputs -> { - List> newComponents = new ArrayList<>(outputs); - newComponents.set(index, mapper.apply(outputs.get(index))); - return newComponents; - }); - } - - /** - * Returns a new Dataset which maps a function across all elements from this dataset, on all - * components of each element. - * - *

For example, suppose each element is a {@code List>} with 2 components: (features, - * labels). - * - *

Calling {@code dataset.mapAllComponents(component -> tf.math.mul(component, - * tf.constant(2)))} will map the function over the both the `features` and `labels` components of - * each element, multiplying them all by 2 - * - * @param mapper The function to apply to each component - * @return A new Dataset applying `mapper` to all components of each element. - */ - public Dataset mapAllComponents(Function, Operand> mapper) { - return map( - outputs -> { - List> mappedOutputs = new ArrayList<>(); - outputs.forEach(o -> mappedOutputs.add(mapper.apply(o))); - return mappedOutputs; - }); - } - - /** - * Returns a new Dataset which maps a function over all elements returned by this dataset. - * - *

For example, suppose each element is a {@code List>} with 2 components: (features, - * labels). - * - *

Calling - * - *

{@code
-   * dataset.map(components -> {
-   *      Operand features = components.get(0);
-   *      Operand labels   = components.get(1);
-   *
-   *      return Arrays.asList(
-   *        tf.math.mul(features, tf.constant(2)),
-   *        tf.math.mul(labels, tf.constant(5))
-   *      );
-   * });
-   * }
- * - * will map the function over the `features` and `labels` components, multiplying features by 2, - * and multiplying the labels by 5. - * - * @param mapper The function to apply to each element of this iterator. - * @return A new Dataset applying `mapper` to each element of this iterator. - */ - public Dataset map(Function>, List>> mapper) { - return new MapDataset(this, mapper); - } - - /** - * Creates an iterator which iterates through all batches of this Dataset in an eager fashion. - * Each batch is a list of components, returned as `Output` objects. - * - *

This method enables for-each iteration through batches when running in eager mode. For Graph - * mode batch iteration, see `makeOneShotIterator`. - * - * @return an Iterator through batches of this dataset. - */ - @Override - public Iterator>> iterator() { - return makeOneShotIterator().iterator(); - } - - /** - * Creates a `DatasetIterator` that can be used to iterate over elements of this dataset. - * - *

This iterator will have to be initialized with a call to `iterator.makeInitializer(Dataset)` - * before elements can be retreived in a loop. - * - * @return A new `DatasetIterator` based on this dataset's structure. - */ - public DatasetIterator makeInitializeableIterator() { - DatasetIterator iterator = DatasetIterator.fromStructure(tf, outputTypes, outputShapes); - iterator.makeInitializer(this); - return iterator; - } - - /** - * Creates a `DatasetIterator` that can be used to iterate over elements of this dataset. Using - * `makeOneShotIterator` ensures that the iterator is automatically initialized on this dataset. - * skips In graph mode, the initializer op will be added to the Graph's intitializer list, which - * must be run via `tf.init()`: - * - *

Ex: - * - *

-   *     try (Session session = new Session(graph) {
-   *         // Immediately run initializers
-   *         session.run(tf.init());
-   *     }
-   * 
- * - *

In eager mode, the initializer will be run automatically as a result of this call. - * - * @return A new `DatasetIterator` based on this dataset's structure. - */ - public DatasetIterator makeOneShotIterator() { - DatasetIterator iterator = makeInitializeableIterator(); - Op initializer = iterator.makeInitializer(this); - if (tf.scope().env().isGraph()) tf.initAdd(initializer); - return iterator; - } - - /** - * Creates an in-memory `Dataset` whose elements are slices of the given tensors. Each element of - * this dataset will be a {@code List>}, representing slices (e.g. batches) of the - * provided tensors. - * - * @param tf Ops Accessor - * @param tensors A list of {@code Operand} representing components of this dataset (e.g. - * features, labels) - * @param outputTypes A list of tensor type classes representing the data type of each component of - * this dataset. - * @return A new `Dataset` - */ - public static Dataset fromTensorSlices( - Ops tf, List> tensors, List> outputTypes) { - return new TensorSliceDataset(tf, tensors, outputTypes); - } - - public static Dataset tfRecordDataset( - Ops tf, String filename, String compressionType, long bufferSize) { - return new TFRecordDataset( - tf, tf.constant(filename), tf.constant(compressionType), tf.constant(bufferSize)); - } - - public static Dataset textLineDataset( - Ops tf, String filename, String compressionType, long bufferSize) { - return new TextLineDataset( - tf, tf.constant(filename), tf.constant(compressionType), tf.constant(bufferSize)); - } - - /** Get the variant tensor representing this dataset. */ - public Operand getVariant() { - return variant; - } - - /** Get a list of output types for each component of this dataset. */ - public List> getOutputTypes() { - return this.outputTypes; - } - - /** Get a list of shapes for each component of this dataset. */ - public List getOutputShapes() { - return this.outputShapes; - } - - public Ops getOpsInstance() { - return this.tf; - } - - @Override - public String toString() { - return "Dataset{" - + "outputTypes=" - + Arrays.toString(getOutputTypes().stream().map(Class::getSimpleName).toArray()) - + ", outputShapes=" - + Arrays.toString(getOutputShapes().stream().map(Shape::toString).toArray()) - + "}"; - } -} +/* + * Copyright 2020 The TensorFlow Authors. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.tensorflow.framework.data; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.function.Function; +import org.tensorflow.Operand; +import org.tensorflow.framework.data.impl.BatchDataset; +import org.tensorflow.framework.data.impl.MapDataset; +import org.tensorflow.framework.data.impl.SkipDataset; +import org.tensorflow.framework.data.impl.TFRecordDataset; +import org.tensorflow.framework.data.impl.TakeDataset; +import org.tensorflow.framework.data.impl.TensorSliceDataset; +import org.tensorflow.framework.data.impl.TextLineDataset; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Op; +import org.tensorflow.op.Ops; +import org.tensorflow.types.family.TType; + +/** + * Represents a potentially large list of independent elements (samples), and allows iteration and + * transformations to be performed across these elements. + */ +public abstract class Dataset implements Iterable>> { + protected Ops tf; + private Operand variant; + private List> outputTypes; + private List outputShapes; + + /** + * Creates a Dataset + * + * @param tf the TensorFlow Ops + * @param variant the tensor that represents the dataset. + * @param outputTypes a list of output types produced by this data set. + * @param outputShapes a list of output shapes produced by this data set. + */ + public Dataset( + Ops tf, + Operand variant, + List> outputTypes, + List outputShapes) { + if (tf == null) { + throw new IllegalArgumentException("Ops accessor cannot be null."); + } + + if (outputTypes.size() != outputShapes.size()) { + throw new IllegalArgumentException( + "`outputTypes` and " + "`outputShapes` must have the same size."); + } + + this.tf = tf; + this.variant = variant; + this.outputTypes = outputTypes; + this.outputShapes = outputShapes; + } + + /** + * Creates a Dataset that is a copy of another Dataset + * + * @param other the other Dataset + */ + protected Dataset(Dataset other) { + this.tf = other.tf; + this.variant = other.variant; + this.outputTypes = other.outputTypes; + this.outputShapes = other.outputShapes; + } + + /** + * Groups elements of this dataset into batches. + * + * @param batchSize The number of desired elements per batch + * @param dropLastBatch Whether to leave out the final batch if it has fewer than `batchSize` + * elements. + * @return A batched Dataset + */ + public final Dataset batch(long batchSize, boolean dropLastBatch) { + + List batchOutputShapes = new ArrayList<>(); + outputShapes.forEach(s -> batchOutputShapes.add(s.prepend(-1))); + + return new BatchDataset( + tf, + this.getVariant(), + tf.constant(batchSize), + tf.constant(dropLastBatch), + outputTypes, + batchOutputShapes); + } + + /** + * Groups elements of this dataset into batches. Includes the last batch, even if it has fewer + * than `batchSize` elements. + * + * @param batchSize The number of desired elements per batch + * @return A batched Dataset + */ + public final Dataset batch(long batchSize) { + return batch(batchSize, false); + } + + /** + * Returns a new `Dataset` which skips `count` initial elements from this dataset + * + * @param count The number of elements to `skip` to form the new dataset. + * @return A new Dataset with `count` elements removed. + */ + public final Dataset skip(long count) { + return new SkipDataset( + tf, this.getVariant(), tf.constant(count), this.getOutputTypes(), this.getOutputShapes()); + } + + /** + * Returns a new `Dataset` with only the first `count` elements from this dataset. 
+ * + * @param count The number of elements to "take" from this dataset. + * @return A new Dataset containing the first `count` elements from this dataset. + */ + public final Dataset take(long count) { + return new TakeDataset( + tf, this.getVariant(), tf.constant(count), this.getOutputTypes(), this.getOutputShapes()); + } + + /** + * Returns a new Dataset which maps a function across all elements from this dataset, on a single + * component of each element. + * + *

For example, suppose each element is a {@code List>} with 2 components: + * (features, labels). + * + *

Calling {@code dataset.mapOneComponent(0, features -> tf.math.mul(features, + * tf.constant(2)))} will map the function over the `features` component of each element, + * multiplying each by 2. + * + * @param index The index of the component to transform. + * @param mapper The function to apply to the target component. + * @return A new Dataset applying `mapper` to the component at the chosen index. + */ + public Dataset mapOneComponent(int index, Function, Operand> mapper) { + return map( + outputs -> { + List> newComponents = new ArrayList<>(outputs); + newComponents.set(index, mapper.apply(outputs.get(index))); + return newComponents; + }); + } + + /** + * Returns a new Dataset which maps a function across all elements from this dataset, on all + * components of each element. + * + *

For example, suppose each element is a {@code List>} with 2 components: + * (features, labels). + * + *

Calling {@code dataset.mapAllComponents(component -> tf.math.mul(component, + * tf.constant(2)))} will map the function over both the `features` and `labels` components of + * each element, multiplying them all by 2. + * + * @param mapper The function to apply to each component + * @return A new Dataset applying `mapper` to all components of each element. + */ + public Dataset mapAllComponents(Function, Operand> mapper) { + return map( + outputs -> { + List> mappedOutputs = new ArrayList<>(); + outputs.forEach(o -> mappedOutputs.add(mapper.apply(o))); + return mappedOutputs; + }); + } + + /** + * Returns a new Dataset which maps a function over all elements returned by this dataset. + * + *

For example, suppose each element is a {@code List>} with 2 components: + * (features, labels). + * + *

Calling + * + *

{@code
+   * dataset.map(components -> {
+   *      Operand features = components.get(0);
+   *      Operand labels   = components.get(1);
+   *
+   *      return Arrays.asList(
+   *        tf.math.mul(features, tf.constant(2)),
+   *        tf.math.mul(labels, tf.constant(5))
+   *      );
+   * });
+   * }
+ * + * will map the function over the `features` and `labels` components, multiplying features by 2, + * and multiplying the labels by 5. + * + * @param mapper The function to apply to each element of this iterator. + * @return A new Dataset applying `mapper` to each element of this iterator. + */ + public Dataset map(Function>, List>> mapper) { + return new MapDataset(this, mapper); + } + + /** + * Creates an iterator which iterates through all batches of this Dataset in an eager fashion. + * Each batch is a list of components, returned as `Output` objects. + * + *

This method enables for-each iteration through batches when running in eager mode. For Graph + * mode batch iteration, see `makeOneShotIterator`. + * + * @return an Iterator through batches of this dataset. + */ + @Override + public Iterator>> iterator() { + return makeOneShotIterator().iterator(); + } + + /** + * Creates a `DatasetIterator` that can be used to iterate over elements of this dataset. + * + *

This iterator will have to be initialized with a call to `iterator.makeInitializer(Dataset)` + * before elements can be retrieved in a loop. + * + * @return A new `DatasetIterator` based on this dataset's structure. + */ + public DatasetIterator makeInitializeableIterator() { + DatasetIterator iterator = DatasetIterator.fromStructure(tf, outputTypes, outputShapes); + iterator.makeInitializer(this); + return iterator; + } + + /** + * Creates a `DatasetIterator` that can be used to iterate over elements of this dataset. Using + * `makeOneShotIterator` ensures that the iterator is automatically initialized on this dataset. + * In graph mode, the initializer op will be added to the Graph's initializer list, which + * must be run via `tf.init()`: + * + *

Ex: + * + *

+   *     try (Session session = new Session(graph)) {
+   *         // Immediately run initializers
+   *         session.run(tf.init());
+   *     }
+   * 
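A corresponding graph-mode sketch (again illustrative rather than part of the patch; it assumes `DatasetIterator` exposes a `getNext()` accessor for the symbolic batch components, and imports are elided):

    try (Graph graph = new Graph()) {
      Ops tf = Ops.create(graph);
      Dataset dataset =
          Dataset.fromTensorSlices(
              tf,
              Arrays.asList(tf.constant(new float[][] {{1f, 2f}, {3f, 4f}})),
              Arrays.asList(TFloat32.class));
      DatasetIterator iterator = dataset.makeOneShotIterator();
      List<Operand<?>> batch = iterator.getNext(); // assumed accessor for one symbolic batch
      try (Session session = new Session(graph)) {
        session.run(tf.init()); // runs the initializer registered by makeOneShotIterator()
        // session.runner().fetch(batch.get(0)).run(); // repeat until the dataset is exhausted
      }
    }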
+ * + *

In eager mode, the initializer will be run automatically as a result of this call. + * + * @return A new `DatasetIterator` based on this dataset's structure. + */ + public DatasetIterator makeOneShotIterator() { + DatasetIterator iterator = makeInitializeableIterator(); + Op initializer = iterator.makeInitializer(this); + if (tf.scope().env().isGraph()) tf.initAdd(initializer); + return iterator; + } + + /** + * Creates an in-memory `Dataset` whose elements are slices of the given tensors. Each element of + * this dataset will be a {@code List>}, representing slices (e.g. batches) of the + * provided tensors. + * + * @param tf Ops Accessor + * @param tensors A list of {@code Operand} representing components of this dataset (e.g. + * features, labels) + * @param outputTypes A list of tensor type classes representing the data type of each component + * of this dataset. + * @return A new `Dataset` + */ + public static Dataset fromTensorSlices( + Ops tf, List> tensors, List> outputTypes) { + return new TensorSliceDataset(tf, tensors, outputTypes); + } + + /** + * Creates a TFRecordDataset from a file containing TFRecords + * + * @param tf the TensorFlow Ops + * @param filename the file name that holds the TFRecords + * @param compressionType the compression type for the file + * @param bufferSize the buffer size for processing the TFRecords file. + * @return a TFRecordDataset + */ + public static Dataset tfRecordDataset( + Ops tf, String filename, String compressionType, long bufferSize) { + return new TFRecordDataset( + tf, tf.constant(filename), tf.constant(compressionType), tf.constant(bufferSize)); + } + + /** + * Creates a TextLineDataset from a file containing one record per line. + * + * @param tf the TensorFlow Ops + * @param filename the file name that holds the data records + * @param compressionType the compression type for the file + * @param bufferSize the buffer size for processing the records file. + * @return a TextLineDataset + */ + public static Dataset textLineDataset( + Ops tf, String filename, String compressionType, long bufferSize) { + return new TextLineDataset( + tf, tf.constant(filename), tf.constant(compressionType), tf.constant(bufferSize)); + } + + /** + * Gets the variant tensor representing this dataset. + * + * @return the variant tensor representing this dataset. + */ + public Operand getVariant() { + return variant; + } + + /** + * Gets a list of output types for each component of this dataset. + * + * @return list of output types for each component of this dataset. + */ + public List> getOutputTypes() { + return this.outputTypes; + } + + /** + * Gets a list of shapes for each component of this dataset. + * + * @return a list of shapes for each component of this dataset. 
+ */ + public List getOutputShapes() { + return this.outputShapes; + } + + /** + * Gets the TensorFlow Ops instance for this dataset + * + * @return the TensorFlow Ops instance for this dataset + */ + public Ops getOpsInstance() { + return this.tf; + } + + /** {@inheritDoc} */ + @Override + public String toString() { + return "Dataset{" + + "outputTypes=" + + Arrays.toString(getOutputTypes().stream().map(Class::getSimpleName).toArray()) + + ", outputShapes=" + + Arrays.toString(getOutputShapes().stream().map(Shape::toString).toArray()) + + "}"; + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/data/DatasetOptional.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/data/DatasetOptional.java index 6617c33eaf7..45da020b105 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/data/DatasetOptional.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/data/DatasetOptional.java @@ -15,13 +15,12 @@ */ package org.tensorflow.framework.data; +import java.util.ArrayList; +import java.util.List; import org.tensorflow.Operand; -import org.tensorflow.op.Ops; import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Ops; import org.tensorflow.types.TBool; - -import java.util.ArrayList; -import java.util.List; import org.tensorflow.types.family.TType; /** @@ -31,6 +30,11 @@ public class DatasetOptional { protected Ops tf; + /** + * Gets the optional variant for this Dataset + * + * @return the optional variant for this Dataset + */ public Operand getOptionalVariant() { return optionalVariant; } @@ -39,14 +43,30 @@ public Operand getOptionalVariant() { private List> outputTypes; private List outputShapes; + /** + * Creates a DatasetOptional dataset + * + * @param tf the TensorFlow Ops + * @param optionalVariant the tensor that represents the dataset. + * @param outputTypes a list of output types produced by this data set. + * @param outputShapes a list of output shapes produced by this data set. + */ public DatasetOptional( - Ops tf, Operand optionalVariant, List> outputTypes, List outputShapes) { + Ops tf, + Operand optionalVariant, + List> outputTypes, + List outputShapes) { this.tf = tf; this.optionalVariant = optionalVariant; this.outputTypes = outputTypes; this.outputShapes = outputShapes; } + /** + * Creates a Dataset that is a copy of another Dataset + * + * @param other the other Dataset + */ protected DatasetOptional(DatasetOptional other) { this.tf = other.tf; this.optionalVariant = other.optionalVariant; @@ -54,14 +74,20 @@ protected DatasetOptional(DatasetOptional other) { this.outputShapes = other.outputShapes; } - - - /** Whether this optional has a value. */ + /** + * Gets the indicator of whether this optional has a value. + * + * @return the indicator of whether this optional has a value. + */ public Operand hasValue() { return tf.data.optionalHasValue(optionalVariant).hasValue(); } - /** Returns the value of the dataset element represented by this optional, if it exists. */ + /** + * Returns the value of the dataset element represented by this optional, if it exists. + * + * @return the value of the dataset element represented by this optional, if it exists. + */ public List> getValue() { List> components = new ArrayList<>(); tf.data @@ -72,6 +98,15 @@ public List> getValue() { return components; } + /** + * Creates a DatasetOptional from components. 
+ * + * @param tf the TensorFlow Ops + * @param components the components that constitute the DatasetOptional + * @param outputTypes a list of output types produced by this data set. + * @param outputShapes a list of output shapes produced by this data set. + * @return a a DatasetOptional + */ public static DatasetOptional fromComponents( Ops tf, List> components, @@ -81,6 +116,11 @@ public static DatasetOptional fromComponents( return new DatasetOptional(tf, optionalVariant, outputTypes, outputShapes); } + /** + * Gets the TensorFlow Ops instance for this dataset + * + * @return the TensorFlow Ops instance for this dataset + */ public Ops getOpsInstance() { return tf; } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/BaseInitializer.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/BaseInitializer.java index 9c1fa9ac287..56e3d310280 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/BaseInitializer.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/BaseInitializer.java @@ -14,29 +14,24 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.tensorflow.op.Ops; import org.tensorflow.types.family.TType; /** Abstract base class for all Initializers */ public abstract class BaseInitializer implements Initializer { - protected final Ops tf; + private final String name; - /** - * Creates an Initializer - * - * @param tf the TensorFlow Ops - */ - protected BaseInitializer(Ops tf) { - this.tf = tf; + /** Creates an Initializer */ + protected BaseInitializer() { + name = getClass().getSimpleName(); } /** - * Gets the TensorFlow Ops + * Gets the name for this initializer * - * @return the TensorFlow Ops + * @return the name for this initializer */ - public Ops getTF() { - return tf; + public String getName() { + return name; } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Constant.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Constant.java index 4a2df86d74b..f8be105d357 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Constant.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Constant.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.initializers; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.TBool; @@ -30,7 +32,7 @@ * Constant<TFloat32> initializer = * new org.tensorflow.framework.initializers.Constant<>(tf, 3f); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * * @param The Type for the call operation @@ -45,11 +47,10 @@ public class Constant extends BaseInitializer { /** * Creates an Initializer that generates tensors with a constant value. * - * @param tf the TensorFlow Ops * @param value a long value used for the constant. */ - public Constant(Ops tf, long value) { - super(tf); + public Constant(long value) { + super(); longValue = value; doubleValue = 0; booleanValue = false; @@ -59,11 +60,10 @@ public Constant(Ops tf, long value) { /** * Creates an Initializer that generates tensors with a constant value. 
* - * @param tf the TensorFlow Ops * @param value a double value used for the constant. */ - public Constant(Ops tf, double value) { - super(tf); + public Constant(double value) { + super(); doubleValue = value; longValue = 0; booleanValue = false; @@ -73,11 +73,10 @@ public Constant(Ops tf, double value) { /** * Creates an Initializer that generates tensors with a constant value. * - * @param tf the TensorFlow Ops * @param value a boolean value used for the constant. */ - public Constant(Ops tf, boolean value) { - super(tf); + public Constant(boolean value) { + super(); booleanValue = value; doubleValue = 0; longValue = 0; @@ -86,17 +85,19 @@ public Constant(Ops tf, boolean value) { /** {@inheritDoc} */ @Override - public Operand call(Operand dims, Class type) { + public Operand call(Ops tf, Operand dims, Class type) { + if (!TNumber.class.isAssignableFrom(type) && type != TBool.class) { - throw new IllegalArgumentException("Tensor type must be numeric or boolean: " + type.getSimpleName()); + throw new IllegalArgumentException( + "Tensor type must be numeric or boolean: " + type.getSimpleName()); } switch (valueType) { case LONG: - return tf.fill(dims, tf.dtypes.cast(tf.constant(longValue), type)); + return tf.fill(dims, cast(tf, tf.constant(longValue), type)); case DOUBLE: - return tf.fill(dims, tf.dtypes.cast(tf.constant(doubleValue), type)); + return tf.fill(dims, cast(tf, tf.constant(doubleValue), type)); default: - return tf.fill(dims, tf.dtypes.cast(tf.constant(booleanValue), type)); + return tf.fill(dims, cast(tf, tf.constant(booleanValue), type)); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Glorot.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Glorot.java index 894bd073758..4a39c3839f6 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Glorot.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Glorot.java @@ -15,7 +15,6 @@ package org.tensorflow.framework.initializers; -import org.tensorflow.op.Ops; import org.tensorflow.types.family.TFloating; /** @@ -43,7 +42,7 @@ * new org.tensorflow.framework.initializers.Glorot<>(tf, * Distribution.TRUNCATED_NORMAL, seed); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * *

Glorot Uniform: @@ -54,12 +53,14 @@ * new org.tensorflow.framework.initializers.Glorot<>(tf, * Distribution.UNIFORM, seed); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * *

NOTE: + * *

For a GlorotNormal equivalent initializer, use {@link * VarianceScaling.Distribution#TRUNCATED_NORMAL} for the distribution parameter. + * *

For a GlorotUniform equivalent initializer, use {@link VarianceScaling.Distribution#UNIFORM} * for the distribution parameter. * @@ -74,13 +75,12 @@ public class Glorot extends VarianceScaling { /** * Creates a Glorot initializer * - * @param tf the TensorFlow Ops * @param distribution The distribution type for the Glorot initializer. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. * @see VarianceScaling.Distribution */ - public Glorot(Ops tf, Distribution distribution, long seed) { - super(tf, SCALE, Mode.FAN_AVG, distribution, seed); + public Glorot(Distribution distribution, long seed) { + super(SCALE, Mode.FAN_AVG, distribution, seed); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/He.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/He.java index 3a91b72b0d0..4a9fa8a7849 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/He.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/He.java @@ -14,7 +14,6 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.tensorflow.op.Ops; import org.tensorflow.types.family.TFloating; /** @@ -38,7 +37,7 @@ * new org.tensorflow.framework.initializers.He<>(tf, * Distribution.TRUNCATED_NORMAL, seed);); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * *
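Taken together, the constructor and `call` changes above make the `Ops` instance a call-time argument instead of construction state. A minimal sketch of the new convention (illustrative only; it assumes an existing `Ops tf` and an arbitrary 784x10 weight shape):

    // Before this patch: new Glorot<>(tf, Distribution.TRUNCATED_NORMAL, seed).call(dims, type)
    // After this patch:  the Ops instance is supplied when the initializer is invoked.
    Glorot<TFloat32> glorot =
        new Glorot<>(VarianceScaling.Distribution.TRUNCATED_NORMAL, 12345L);
    Operand<TFloat32> weights =
        glorot.call(tf, tf.constant(Shape.of(784, 10)), TFloat32.class);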

He Uniform: @@ -49,14 +48,16 @@ * new org.tensorflow.framework.initializers.He<>(tf, * Distribution.UNIFORM, seed);); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * *

NOTE: + * *

For an HeNormal equivalent initializer, use {@link * VarianceScaling.Distribution#TRUNCATED_NORMAL} for the distribution parameter. - *

For an HeUniform equivalent initializer, use {@link VarianceScaling.Distribution#UNIFORM} - * for the distribution parameter. + * + *

If the distribution is UNIFORM, itraws samples from a uniform distribution within + *

If the distribution is UNIFORM, it draws samples from a uniform distribution within * [-limit, limit], where limit = Math.sqrt(3 / fanIn) (fanIn is * the number of input units in the weight tensor) * @@ -41,7 +40,7 @@ * new org.tensorflow.framework.initializers.LeCunNormal<>(tf, * Distribution.TRUNCATED_NORMAL, seed); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * *

LeCun Uniform: @@ -52,14 +51,15 @@ * new org.tensorflow.framework.initializers.LeCunNormal<>(tf, * Distribution.UNIFORM, seed); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * * * * *

NOTE: * * - *

For a LeCunNormal equivalent initializer, use {@link VarianceScaling.Distribution#TRUNCATED_NORMAL} for the distribution parameter. * + *

For a LeCunNormal equivalent initializer, use {@link + * VarianceScaling.Distribution#TRUNCATED_NORMAL} for the distribution parameter. * * *

For a LeCunUniform equivalent initializer, use {@link VarianceScaling.Distribution#UNIFORM} * * for the distribution parameter. * @@ -79,12 +79,11 @@ public class LeCun extends VarianceScaling { /** * Creates a LeCunNormal Initializer * - * @param tf the TensorFlow Ops * @param distribution The distribution type for the Glorot initializer. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. */ - public LeCun(Ops tf, Distribution distribution, long seed) { - super(tf, 1.0, Mode.FAN_IN, distribution, seed); + public LeCun(Distribution distribution, long seed) { + super(1.0, Mode.FAN_IN, distribution, seed); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Ones.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Ones.java index b8eb0c418e9..ee7e483dd69 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Ones.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Ones.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.initializers; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.TBool; @@ -30,7 +32,7 @@ * Ones<TFloat32> initializer = * new org.tensorflow.framework.initializers.Ones<>(tf); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * * @param The TType for the call operation @@ -46,21 +48,21 @@ public class Ones extends BaseInitializer { * Ones<TFloat32> initializer = * new org.tensorflow.framework.initializers.Ones<>(tf); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * - * - * @param tf the TensorFlow Ops */ - public Ones(Ops tf) { - super(tf); + public Ones() { + super(); } /** {@inheritDoc} */ @Override - public Operand call(Operand dims, Class type) { + public Operand call(Ops tf, Operand dims, Class type) { + if (!TNumber.class.isAssignableFrom(type) && type != TBool.class) { - throw new IllegalArgumentException("Tensor type must be numeric or boolean: " + type.getSimpleName()); + throw new IllegalArgumentException( + "Tensor type must be numeric or boolean: " + type.getSimpleName()); } - return tf.fill(dims, tf.dtypes.cast(tf.constant(1.0), type)); + return tf.fill(dims, cast(tf, tf.constant(1), type)); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java index a5b466e118e..240d915f97f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.initializers; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.Output; import org.tensorflow.framework.utils.ShapeUtils; @@ -42,7 +44,7 @@ * Orthogonal<TFloat32, TFloat32> initializer = * new 
org.tensorflow.framework.initializers.Orthogonal<>(tf); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * * @param The TType for the call operation @@ -57,31 +59,30 @@ public class Orthogonal extends BaseInitializer { /** * Creates an Orthogonal Initializer using {@link #GAIN_DEFAULT} for the gain. * - * @param tf the TensorFlow Ops * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. */ - public Orthogonal(Ops tf, long seed) { - this(tf, GAIN_DEFAULT, seed); + public Orthogonal(long seed) { + this(GAIN_DEFAULT, seed); } /** * Creates an Orthogonal Initializer * - * @param tf the TensorFlow Ops * @param gain the gain to be applied to the Matrix. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. */ - public Orthogonal(Ops tf, double gain, long seed) { - super(tf); + public Orthogonal(double gain, long seed) { + super(); this.gain = gain; this.seed = seed; } /** {@inheritDoc} */ @Override - public Operand call(Operand dims, Class type) { + public Operand call(Ops tf, Operand dims, Class type) { + Shape dimsShape = ShapeUtils.toShape(tf.scope(), dims); if (dimsShape.numDimensions() < 2) { throw new IllegalArgumentException( @@ -101,10 +102,10 @@ public Operand call(Operand dims, Class type) { Output qo = qrOp.q(); Output ro = qrOp.r(); Operand diagOp = - tf.linalg.matrixDiagPart(ro, tf.constant(0), tf.dtypes.cast(tf.constant(0), type)); + tf.linalg.matrixDiagPart(ro, tf.constant(0), cast(tf, tf.constant(0), type)); Operand qop = tf.math.mul(qo, tf.math.sign(diagOp)); if (numRows < numCols) qop = tf.linalg.transpose(qop, null); - return tf.math.mul(qop, tf.dtypes.cast(tf.constant(this.gain), type)); + return tf.math.mul(qop, cast(tf, tf.constant(this.gain), type)); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/RandomNormal.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/RandomNormal.java index 38ab194a56b..fd8aa3a6766 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/RandomNormal.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/RandomNormal.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.initializers; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.TInt64; @@ -29,7 +31,7 @@ * RandomNormal<TFloat32, TFloat32> initializer = * new org.tensorflow.framework.initializers.RandomNormal<>(tf, seed); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * * @param The TType for the call operation @@ -47,37 +49,34 @@ public class RandomNormal extends BaseInitializer { * Creates the RandomUniform initializer using {@link #MEAN_DEFAULT} for the mean and {@link * #STDDEV_DEFAULT} for the standard deviation. * - * @param tf the TensorFlow Ops * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. 
*/ - public RandomNormal(Ops tf, long seed) { - this(tf, MEAN_DEFAULT, STDDEV_DEFAULT, seed); + public RandomNormal(long seed) { + this(MEAN_DEFAULT, STDDEV_DEFAULT, seed); } /** * Creates the RandomUniform initializer using {@link #STDDEV_DEFAULT} for the standard deviation. * - * @param tf the TensorFlow Ops * @param mean Mean of the random values to generate. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. */ - public RandomNormal(Ops tf, double mean, long seed) { - this(tf, mean, STDDEV_DEFAULT, seed); + public RandomNormal(double mean, long seed) { + this(mean, STDDEV_DEFAULT, seed); } /** * creates the RandomUniform initializer * - * @param tf the TensorFlow Ops * @param mean Mean of the random values to generate. * @param stddev Standard deviation of the random values to generate. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. */ - public RandomNormal(Ops tf, double mean, double stddev, long seed) { - super(tf); + public RandomNormal(double mean, double stddev, long seed) { + super(); this.mean = mean; this.stddev = stddev; this.seed = seed; @@ -85,10 +84,11 @@ public RandomNormal(Ops tf, double mean, double stddev, long seed) { /** {@inheritDoc} */ @Override - public Operand call(Operand dims, Class type) { + public Operand call(Ops tf, Operand dims, Class type) { + long[] seeds = {seed, 0}; Operand distOp = tf.random.statelessRandomNormal(dims, tf.constant(seeds), type); - Operand op = tf.math.mul(distOp, tf.dtypes.cast(tf.constant(this.stddev), type)); - return tf.math.add(op, tf.dtypes.cast(tf.constant(mean), type)); + Operand op = tf.math.mul(distOp, cast(tf, tf.constant(this.stddev), type)); + return tf.math.add(op, cast(tf, tf.constant(mean), type)); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/RandomUniform.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/RandomUniform.java index 787af15f709..45ef6c4491d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/RandomUniform.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/RandomUniform.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.initializers; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.op.random.RandomUniformInt; @@ -31,7 +33,7 @@ * RandomUniform<TFloat32, TFloat32> initializer = * new org.tensorflow.framework.initializers.RandomUniform<>(tf, seed); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * * @param The TType for the call operation @@ -46,28 +48,26 @@ public class RandomUniform extends BaseInitializer { private final long seed; /** - * Creates a RandomUniform initializer using {@link #MINVAL_DEFAULT} for the minval and - * {@link #MAXVAL_DEFAULT} for the maxval + * Creates a RandomUniform initializer using {@link #MINVAL_DEFAULT} for the minval and {@link + * #MAXVAL_DEFAULT} for the maxval * - * @param tf the TensorFlow Ops * @param seed the seed for random number generation. 
An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. */ - public RandomUniform(Ops tf, long seed) { - this(tf, MINVAL_DEFAULT, MAXVAL_DEFAULT, seed); + public RandomUniform(long seed) { + this(MINVAL_DEFAULT, MAXVAL_DEFAULT, seed); } /** * Creates a RandomUniform initializer * - * @param tf the TensorFlow Ops * @param minval Lower bound of the range of random values to generate (inclusive). * @param maxval Upper bound of the range of random values to generate (exclusive). * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. */ - public RandomUniform(Ops tf, double minval, double maxval, long seed) { - super(tf); + public RandomUniform(double minval, double maxval, long seed) { + super(); this.minval = minval; this.maxval = maxval; this.seed = seed; @@ -75,26 +75,27 @@ public RandomUniform(Ops tf, double minval, double maxval, long seed) { /** {@inheritDoc} */ @Override - public Operand call(Operand dims, Class type) { + public Operand call(Ops tf, Operand dims, Class type) { + Operand distOp; if (TIntegral.class.isAssignableFrom(type)) { RandomUniformInt.Options options = RandomUniformInt.seed(this.seed); distOp = tf.random.randomUniformInt( dims, - tf.dtypes.cast(tf.constant(this.minval), type), - tf.dtypes.cast(tf.constant(this.maxval), type), + cast(tf, tf.constant(this.minval), type), + cast(tf, tf.constant(this.maxval), type), options); } else { long[] seeds = {seed, 0}; distOp = tf.random.statelessRandomUniform(dims, tf.constant(seeds), type); if (this.minval == 0) { if (this.maxval != 1.0) { - distOp = tf.math.mul(distOp, tf.dtypes.cast(tf.constant(this.maxval), type)); + distOp = tf.math.mul(distOp, cast(tf, tf.constant(this.maxval), type)); } } else { - distOp = tf.math.mul(distOp, tf.dtypes.cast(tf.constant(this.maxval - this.minval), type)); - distOp = tf.math.add(distOp, tf.dtypes.cast(tf.constant(this.minval), type)); + distOp = tf.math.mul(distOp, cast(tf, tf.constant(this.maxval - this.minval), type)); + distOp = tf.math.add(distOp, cast(tf, tf.constant(this.minval), type)); } } return distOp; diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/TruncatedNormal.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/TruncatedNormal.java index d3cfec26338..c5b23beef88 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/TruncatedNormal.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/TruncatedNormal.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.initializers; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.TInt64; @@ -29,7 +31,7 @@ * TruncatedNormal<TFloat32, TFloat32> initializer = * new org.tensorflow.framework.initializers.TruncatedNormal<>(tf, seed); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * * @param The TType for the call operation @@ -47,25 +49,23 @@ public class TruncatedNormal extends BaseInitializer { * Creates a TruncatedNormal Initializer using {@link #MEAN_DEFAULT} for the mean and {@link * #STDDEV_DEFAULT} for the standard deviation. 
* - * @param tf the TensorFlow Ops * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. */ - public TruncatedNormal(Ops tf, long seed) { - this(tf, MEAN_DEFAULT, STDDEV_DEFAULT, seed); + public TruncatedNormal(long seed) { + this(MEAN_DEFAULT, STDDEV_DEFAULT, seed); } /** * Creates a TruncatedNormal Initializer. * - * @param tf the TensorFlow Ops * @param mean Mean of the random values to generate. * @param stddev Standard deviation of the random values to generate. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. */ - public TruncatedNormal(Ops tf, double mean, double stddev, long seed) { - super(tf); + public TruncatedNormal(double mean, double stddev, long seed) { + super(); this.mean = mean; this.stddev = stddev; this.seed = seed; @@ -73,11 +73,12 @@ public TruncatedNormal(Ops tf, double mean, double stddev, long seed) { /** {@inheritDoc} */ @Override - public Operand call(Operand dims, Class type) { - long[] seeds = {seed,0}; + public Operand call(Ops tf, Operand dims, Class type) { + + long[] seeds = {seed, 0}; Operand distOp = tf.random.statelessTruncatedNormal(dims, tf.constant(seeds), type); return tf.math.add( - tf.math.mul(distOp, tf.dtypes.cast(tf.constant(stddev), type)), - tf.dtypes.cast(tf.constant(mean), type)); + tf.math.mul(distOp, cast(tf, tf.constant(stddev), type)), + cast(tf, tf.constant(mean), type)); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/VarianceScaling.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/VarianceScaling.java index 5d951450505..3ae493a8432 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/VarianceScaling.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/VarianceScaling.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.initializers; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.utils.ShapeUtils; import org.tensorflow.ndarray.Shape; @@ -24,8 +26,8 @@ /** * Initializer capable of adapting its scale to the shape of weights tensors. * - *

With distribution=TRUNCATED_NORMAL or NORMAL, samples are drawn from - * a truncated/untruncated normal distribution with a mean of zero and a standard deviation (after + *

With distribution=TRUNCATED_NORMAL or NORMAL, samples are drawn from a + * truncated/untruncated normal distribution with a mean of zero and a standard deviation (after * truncation, if used) stddev = Math.sqrt(scale / n), where n is: * *

    @@ -46,7 +48,7 @@ * new org.tensorflow.framework.initializers.VarianceScaling<>( * tf, scale, Mode.FAN_IN, Distribution.UNIFORM, seed); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * * @param The TType for the call operation @@ -64,28 +66,25 @@ public class VarianceScaling extends BaseInitializer { private final Distribution distribution; private final long seed; - /** * Creates a VarianceScaling Initializer * - * @param tf the TensorFlow Ops * @param seed sed to create random seeds. */ - public VarianceScaling(Ops tf, long seed) { - this(tf, SCALE_DEFAULT, MODE_DEFAULT, DISTRIBUTION_DEFAULT, seed); + public VarianceScaling(long seed) { + this(SCALE_DEFAULT, MODE_DEFAULT, DISTRIBUTION_DEFAULT, seed); } /** * Creates a VarianceScaling Initializer * - * @param tf the TensorFlow Ops * @param scale Scaling factor (positive float). * @param mode the mode for the variance * @param distribution Random distribution to use. * @param seed Used to create random seeds. */ - public VarianceScaling(Ops tf, double scale, Mode mode, Distribution distribution, long seed) { - super(tf); + public VarianceScaling(double scale, Mode mode, Distribution distribution, long seed) { + super(); if (scale <= 0.0) { throw new IllegalArgumentException("scale must be greater than 0, got " + scale); } @@ -97,8 +96,9 @@ public VarianceScaling(Ops tf, double scale, Mode mode, Distribution distributio /** {@inheritDoc} */ @Override - public Operand call(Operand dims, Class type) { - Shape shape = ShapeUtils.toShape(this.tf.scope(), dims); + public Operand call(Ops tf, Operand dims, Class type) { + + Shape shape = ShapeUtils.toShape(tf.scope(), dims); double lscale = this.scale; double[] fans /* fanIn, fanOut */ = computeFans(shape); switch (mode) { @@ -119,18 +119,18 @@ public Operand call(Operand dims, Class type) { switch (distribution) { case TRUNCATED_NORMAL: distOp = tf.random.statelessTruncatedNormal(dims, tf.constant(seeds), type); - stddev = Math.sqrt(lscale) / .87962566103423978; - mulOp = tf.math.mul(distOp, tf.dtypes.cast(tf.constant(stddev), type)); + stddev = Math.sqrt(lscale) / 0.87962566103423978; + mulOp = tf.math.mul(distOp, cast(tf, tf.constant(stddev), type)); break; case NORMAL: distOp = tf.random.statelessRandomNormal(dims, tf.constant(seeds), type); stddev = Math.sqrt(lscale); - mulOp = tf.math.mul(distOp, tf.dtypes.cast(tf.constant(stddev), type)); + mulOp = tf.math.mul(distOp, cast(tf, tf.constant(stddev), type)); break; case UNIFORM: distOp = tf.random.statelessRandomUniform(dims, tf.constant(seeds), type); stddev = Math.sqrt(3.0 * lscale); - mulOp = tf.math.mul(distOp, tf.dtypes.cast(tf.constant(stddev), type)); + mulOp = tf.math.mul(distOp, cast(tf, tf.constant(stddev), type)); break; } return mulOp; diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Zeros.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Zeros.java index 4298493ac44..f581d247deb 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Zeros.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Zeros.java @@ -28,24 +28,21 @@ * Zeros<TFloat32> initializer = * new org.tensorflow.framework.initializers.Zeros<>(tf); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * 
* @param The TType for the call operation */ public class Zeros extends BaseInitializer { - /** - * Creates an Initializer that sets all values to one. - * - * @param tf the TensorFlow Ops - */ - public Zeros(Ops tf) { - super(tf); + /** Creates an Initializer that sets all values to one. */ + public Zeros() { + super(); } @Override - public Operand call(Operand dims, Class dtype) { - return tf.zeros(dims, dtype); + public Operand call(Ops tf, Operand dims, Class type) { + + return tf.zeros(dims, type); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/BinaryCrossentropy.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/BinaryCrossentropy.java index 3417c07372a..690396f2c28 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/BinaryCrossentropy.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/BinaryCrossentropy.java @@ -14,13 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes the cross-entropy loss between true labels and predicted labels. * @@ -35,7 +36,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{0.6f, 0.4f}, {0.4f, 0.6f}}); * BinaryCrossentropy bce = new BinaryCrossentropy(tf); - * Operand<TFloat32> result = bce.call(labels, predictions); + * Operand<TFloat32> result = bce.call(Ops tf, labels, predictions); * // produces 0.815 * * @@ -43,7 +44,7 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {1.f, 0.f});
    - *    Operand<TFloat32> result = bce.call(labels, predictions, sampleWeight);
+ *    Operand<TFloat32> result = bce.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.458f
      * 
    * @@ -51,7 +52,7 @@ * *
      *    BinaryCrossentropy bce = new BinaryCrossentropy(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = bce.call(labels, predictions);
+ *    Operand<TFloat32> result = bce.call(tf, labels, predictions);
      *    // produces 1.630f
      * 
    * @@ -59,11 +60,11 @@ * *
      *    BinaryCrossentropy bce = new BinaryCrossentropy(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = bce.call(labels, predictions);
+ *    Operand<TFloat32> result = bce.call(tf, labels, predictions);
      *    // produces [0.916f, 0.714f]
      * 
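These examples translate to the reworked API as follows; a minimal sketch (illustrative only, assuming an eager `Ops tf` and the three-argument `call` overload shown in the examples above):

    BinaryCrossentropy bce = new BinaryCrossentropy(Reduction.SUM);
    Operand<TFloat32> labels = tf.constant(new float[][] {{0f, 1f}, {0f, 0f}});
    Operand<TFloat32> predictions = tf.constant(new float[][] {{0.6f, 0.4f}, {0.4f, 0.6f}});
    Operand<TFloat32> result = bce.call(tf, labels, predictions);
    // weighted variant: bce.call(tf, labels, predictions, sampleWeight)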
    */ -public class BinaryCrossentropy extends Loss { +public class BinaryCrossentropy extends AbstractLoss { public static final boolean FROM_LOGITS_DEFAULT = false; public static final float LABEL_SMOOTHING_DEFAULT = 0.0f; @@ -71,70 +72,63 @@ public class BinaryCrossentropy extends Loss { private final float labelSmoothing; /** - * Creates a Binary Crossentropy Loss using {@link Class#getSimpleName()} as the loss name, {@link - * #FROM_LOGITS_DEFAULT} for fromLogits, {@link #LABEL_SMOOTHING_DEFAULT} for labelSmoothing and a - * Loss Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a Binary Crossentropy AbstractLoss using {@link Class#getSimpleName()} as the loss + * name, {@link #FROM_LOGITS_DEFAULT} for fromLogits, {@link #LABEL_SMOOTHING_DEFAULT} for + * labelSmoothing and a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public BinaryCrossentropy(Ops tf) { - this(tf, null, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT); + public BinaryCrossentropy() { + this(null, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT); } /** * Creates a Binary Crossentropy loss using {@link Class#getSimpleName()} as the loss name, {@link * #FROM_LOGITS_DEFAULT} for fromLogits, and {@link #LABEL_SMOOTHING_DEFAULT} for labelSmoothing * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public BinaryCrossentropy(Ops tf, Reduction reduction) { - this(tf, null, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, reduction); + public BinaryCrossentropy(Reduction reduction) { + this(null, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, reduction); } /** * Creates a Binary Crossentropy loss using using {@link Class#getSimpleName()} as the loss name, * labelSmoothing of {@link #LABEL_SMOOTHING_DEFAULT}, a reduction of {@link - * Loss#REDUCTION_DEFAULT}, + * AbstractLoss#REDUCTION_DEFAULT}, * - * @param tf the TensorFlow Ops * @param fromLogits Whether to interpret predictions as a tensor of logit values */ - public BinaryCrossentropy(Ops tf, boolean fromLogits) { - this(tf, null, fromLogits, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT); + public BinaryCrossentropy(boolean fromLogits) { + this(null, fromLogits, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT); } /** * Creates a Binary Crossentropy loss using labelSmoothing of {@link #LABEL_SMOOTHING_DEFAULT} a - * reduction of {@link Loss#REDUCTION_DEFAULT}. + * reduction of {@link AbstractLoss#REDUCTION_DEFAULT}. * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param fromLogits Whether to interpret predictions as a tensor of logit values */ - public BinaryCrossentropy(Ops tf, String name, boolean fromLogits) { - this(tf, name, fromLogits, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT); + public BinaryCrossentropy(String name, boolean fromLogits) { + this(name, fromLogits, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT); } /** * Creates a Binary Crossentropy loss using using {@link Class#getSimpleName()} as the loss name, - * and a reduction of {@link Loss#REDUCTION_DEFAULT}. + * and a reduction of {@link AbstractLoss#REDUCTION_DEFAULT}. * - * @param tf the TensorFlow Ops * @param fromLogits Whether to interpret predictions as a tensor of logit values * @param labelSmoothing A number in the range, [0, 1]. When 0, no smoothing occurs. When > 0, * compute the loss between the predicted labels and a smoothed version of the true labels, * where the smoothing squeezes the labels towards 0.5. 
Larger values of labelSmoothing * correspond to heavier smoothing. */ - public BinaryCrossentropy(Ops tf, boolean fromLogits, float labelSmoothing) { - this(tf, null, fromLogits, labelSmoothing, REDUCTION_DEFAULT); + public BinaryCrossentropy(boolean fromLogits, float labelSmoothing) { + this(null, fromLogits, labelSmoothing, REDUCTION_DEFAULT); } /** - * Creates a Binary Crossentropy loss using a reduction of {@link Loss#REDUCTION_DEFAULT}. + * Creates a Binary Crossentropy loss using a reduction of {@link AbstractLoss#REDUCTION_DEFAULT}. * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param fromLogits Whether to interpret predictions as a tensor of logit values * @param labelSmoothing A number in the range, [0, 1]. When 0, no smoothing occurs. When > 0, @@ -142,14 +136,13 @@ public BinaryCrossentropy(Ops tf, boolean fromLogits, float labelSmoothing) { * where the smoothing squeezes the labels towards 0.5. Larger values of labelSmoothing * correspond to heavier smoothing. */ - public BinaryCrossentropy(Ops tf, String name, boolean fromLogits, float labelSmoothing) { - this(tf, name, fromLogits, labelSmoothing, REDUCTION_DEFAULT); + public BinaryCrossentropy(String name, boolean fromLogits, float labelSmoothing) { + this(name, fromLogits, labelSmoothing, REDUCTION_DEFAULT); } /** * Creates a Binary Crossentropy loss * - * @param tf the TensorFlow Ops * @param fromLogits Whether to interpret predictions as a tensor of logit values * @param labelSmoothing A number in the range, [0, 1]. When 0, no smoothing occurs. When > 0, * compute the loss between the predicted labels and a smoothed version of the true labels, @@ -157,14 +150,13 @@ public BinaryCrossentropy(Ops tf, String name, boolean fromLogits, float labelSm * correspond to heavier smoothing. * @param reduction Type of Reduction to apply to the loss. */ - public BinaryCrossentropy(Ops tf, boolean fromLogits, float labelSmoothing, Reduction reduction) { - this(tf, null, fromLogits, labelSmoothing, reduction); + public BinaryCrossentropy(boolean fromLogits, float labelSmoothing, Reduction reduction) { + this(null, fromLogits, labelSmoothing, reduction); } /** * Creates a Binary Crossentropy loss * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param fromLogits Whether to interpret predictions as a tensor of logit values * @param labelSmoothing A number in the range, [0, 1]. When 0, no smoothing occurs. When > 0, @@ -175,8 +167,8 @@ public BinaryCrossentropy(Ops tf, boolean fromLogits, float labelSmoothing, Redu * @throws IllegalArgumentException if labelSmoothing is not in the inclusive range of 0. - 1. */ public BinaryCrossentropy( - Ops tf, String name, boolean fromLogits, float labelSmoothing, Reduction reduction) { - super(tf, name, reduction); + String name, boolean fromLogits, float labelSmoothing, Reduction reduction) { + super(name, reduction); if (labelSmoothing < 0 || labelSmoothing > 1) throw new IllegalArgumentException( "labelSmoothing must be >= 0. 
and <= 1, found " + labelSmoothing); @@ -207,24 +199,25 @@ public BinaryCrossentropy( */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + Operand lPredictions; if (!fromLogits) { // add predictions range check for 0 - 1 lPredictions = LossesHelper.rangeCheck( - getTF(), + tf, "predictions range check [0-1]", predictions, - cast(getTF(), getTF().constant(0), predictions.type()), - cast(getTF(), getTF().constant(1), predictions.type())); + cast(tf, tf.constant(0), predictions.type()), + cast(tf, tf.constant(1), predictions.type())); } else { lPredictions = predictions; } Operand losses = - Losses.binaryCrossentropy(getTF(), labels, lPredictions, fromLogits, labelSmoothing); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Losses.binaryCrossentropy(tf, labels, lPredictions, fromLogits, labelSmoothing); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CategoricalCrossentropy.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CategoricalCrossentropy.java index 5aac163c1e4..9b3ed8eb19d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CategoricalCrossentropy.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CategoricalCrossentropy.java @@ -14,13 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes the crossentropy loss between the labels and predictions. * @@ -37,7 +38,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{0.05f, 0.95f, 0f}, {0.1f, 0.8f, 0.1f}}); * CategoricalCrossentropy cce = new CategoricalCrossentropy(tf); - * Operand<TFloat32> result = cce.call(labels, predictions); + * Operand<TFloat32> result = cce.call(Ops tf, labels, predictions); * // produces 1.177 * * @@ -45,15 +46,15 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.3f, 0.7f});
    - *    Operand<TFloat32> result = cce.call(labels, predictions, sampleWeight);
+ *    Operand<TFloat32> result = cce.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.814f
      * 
    * *

    Using SUM reduction type: * *

    - *    CategoricalCrossentropy cce = new CategoricalCrossentropy(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = cce.call(labels, predictions);
    + *    CategoricalCrossentropy cce = new CategoricalCrossentropy(Reduction.SUM);
+ *    Operand<TFloat32> result = cce.call(tf, labels, predictions);
      *    // produces 2.354f
      * 
    * @@ -61,12 +62,12 @@ * *
      *    CategoricalCrossentropy cce =
    - *        new CategoricalCrossentropy(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = cce.call(labels, predictions);
    + *        new CategoricalCrossentropy(Reduction.NONE);
+ *    Operand<TFloat32> result = cce.call(tf, labels, predictions);
      *    // produces [0.0513f, 2.303f]
      * 
    */ -public class CategoricalCrossentropy extends Loss { +public class CategoricalCrossentropy extends AbstractLoss { public static final boolean FROM_LOGITS_DEFAULT = false; public static final float LABEL_SMOOTHING_DEFAULT = 0.0f; public static final int DEFAULT_AXIS = Losses.CHANNELS_LAST; @@ -76,98 +77,90 @@ public class CategoricalCrossentropy extends Loss { private final int axis; /** - * Creates a categorical cross entropy Loss using {@link Class#getSimpleName()} as the loss name, - * {@link #FROM_LOGITS_DEFAULT} for fromLogits, {@link #LABEL_SMOOTHING_DEFAULT} for - * labelSmoothing, a Loss Reduction of {@link Loss#REDUCTION_DEFAULT}, and an axis of {@link - * #DEFAULT_AXIS} - * - * @param tf the TensorFlow Ops + * Creates a categorical cross entropy AbstractLoss using {@link Class#getSimpleName()} as the + * loss name, {@link #FROM_LOGITS_DEFAULT} for fromLogits, {@link #LABEL_SMOOTHING_DEFAULT} for + * labelSmoothing, a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT}, and an axis + * of {@link #DEFAULT_AXIS} */ - public CategoricalCrossentropy(Ops tf) { - this(tf, null, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT, DEFAULT_AXIS); + public CategoricalCrossentropy() { + this(null, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT, DEFAULT_AXIS); } /** - * Creates a categorical cross entropy Loss using {@link #FROM_LOGITS_DEFAULT} for fromLogits, - * {@link #LABEL_SMOOTHING_DEFAULT} for labelSmoothing, a Loss Reduction of {@link - * Loss#REDUCTION_DEFAULT}, and an axis of {@link #DEFAULT_AXIS} + * Creates a categorical cross entropy AbstractLoss using {@link #FROM_LOGITS_DEFAULT} for + * fromLogits, {@link #LABEL_SMOOTHING_DEFAULT} for labelSmoothing, a AbstractLoss Reduction of + * {@link AbstractLoss#REDUCTION_DEFAULT}, and an axis of {@link #DEFAULT_AXIS} * - * @param tf the TensorFlow Ops * @param name the name of this loss */ - public CategoricalCrossentropy(Ops tf, String name) { - this(tf, name, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT, DEFAULT_AXIS); + public CategoricalCrossentropy(String name) { + this(name, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT, DEFAULT_AXIS); } /** - * Creates a categorical cross entropy Loss using {@link Class#getSimpleName()} as the loss name, - * {@link #FROM_LOGITS_DEFAULT} for fromLogits, {@link #LABEL_SMOOTHING_DEFAULT} for + * Creates a categorical cross entropy AbstractLoss using {@link Class#getSimpleName()} as the + * loss name, {@link #FROM_LOGITS_DEFAULT} for fromLogits, {@link #LABEL_SMOOTHING_DEFAULT} for * labelSmoothing and an axis of {@link #DEFAULT_AXIS} * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to loss. */ - public CategoricalCrossentropy(Ops tf, Reduction reduction) { - this(tf, null, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, reduction, DEFAULT_AXIS); + public CategoricalCrossentropy(Reduction reduction) { + this(null, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, reduction, DEFAULT_AXIS); } /** - * Creates a categorical cross entropy Loss {@link #FROM_LOGITS_DEFAULT} for fromLogits, {@link - * #LABEL_SMOOTHING_DEFAULT} for labelSmoothing, and an axis of {@link #DEFAULT_AXIS} + * Creates a categorical cross entropy AbstractLoss {@link #FROM_LOGITS_DEFAULT} for fromLogits, + * {@link #LABEL_SMOOTHING_DEFAULT} for labelSmoothing, and an axis of {@link #DEFAULT_AXIS} * - * @param tf the TensorFlow Ops * @param name the name of this loss * @param reduction Type of Reduction to apply to loss. 
*/ - public CategoricalCrossentropy(Ops tf, String name, Reduction reduction) { - this(tf, name, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, reduction, DEFAULT_AXIS); + public CategoricalCrossentropy(String name, Reduction reduction) { + this(name, FROM_LOGITS_DEFAULT, LABEL_SMOOTHING_DEFAULT, reduction, DEFAULT_AXIS); } /** - * Creates a categorical cross entropy Loss using {@link Class#getSimpleName()} as the loss name, - * {@link #LABEL_SMOOTHING_DEFAULT} for labelSmoothing, a Loss Reduction of {@link - * Loss#REDUCTION_DEFAULT}, and an axis of {@link #DEFAULT_AXIS} + * Creates a categorical cross entropy AbstractLoss using {@link Class#getSimpleName()} as the + * loss name, {@link #LABEL_SMOOTHING_DEFAULT} for labelSmoothing, a AbstractLoss Reduction of + * {@link AbstractLoss#REDUCTION_DEFAULT}, and an axis of {@link #DEFAULT_AXIS} * - * @param tf the TensorFlow Ops * @param fromLogits Whether to interpret predictions as a tensor of logit values */ - public CategoricalCrossentropy(Ops tf, boolean fromLogits) { - this(tf, null, fromLogits, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT, DEFAULT_AXIS); + public CategoricalCrossentropy(boolean fromLogits) { + this(null, fromLogits, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT, DEFAULT_AXIS); } /** - * Creates a categorical cross entropy Loss using {@link #LABEL_SMOOTHING_DEFAULT} for - * labelSmoothing, a Loss Reduction of {@link Loss#REDUCTION_DEFAULT}, and a channel axis of - * {@link #DEFAULT_AXIS} + * Creates a categorical cross entropy AbstractLoss using {@link #LABEL_SMOOTHING_DEFAULT} for + * labelSmoothing, a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT}, and a + * channel axis of {@link #DEFAULT_AXIS} * - * @param tf the TensorFlow Ops * @param name the name of this loss * @param fromLogits Whether to interpret predictions as a tensor of logit values */ - public CategoricalCrossentropy(Ops tf, String name, boolean fromLogits) { - this(tf, name, fromLogits, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT, DEFAULT_AXIS); + public CategoricalCrossentropy(String name, boolean fromLogits) { + this(name, fromLogits, LABEL_SMOOTHING_DEFAULT, REDUCTION_DEFAULT, DEFAULT_AXIS); } /** - * Creates a categorical cross entropy Loss using {@link Class#getSimpleName()} as the loss name, - * a Loss Reduction of {@link Loss#REDUCTION_DEFAULT}, and a channel axis of {@link #DEFAULT_AXIS} + * Creates a categorical cross entropy AbstractLoss using {@link Class#getSimpleName()} as the + * loss name, a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT}, and a channel + * axis of {@link #DEFAULT_AXIS} * - * @param tf the TensorFlow Ops * @param fromLogits Whether to interpret predictions as a tensor of logit values * @param labelSmoothing Float in [0, 1]. When > 0, label values are * smoothed, meaning the confidence on label values are relaxed. e.g. 
labelSmoothing=0.2 * means that we will use a value of 0.1 for label 0 and * 0.9 for label 1 */ - public CategoricalCrossentropy(Ops tf, boolean fromLogits, float labelSmoothing) { - this(tf, null, fromLogits, labelSmoothing, REDUCTION_DEFAULT, DEFAULT_AXIS); + public CategoricalCrossentropy(boolean fromLogits, float labelSmoothing) { + this(null, fromLogits, labelSmoothing, REDUCTION_DEFAULT, DEFAULT_AXIS); } /** - * Creates a categorical cross entropy Loss using a Loss Reduction of {@link - * Loss#REDUCTION_DEFAULT}, and a channel axis of {@link #DEFAULT_AXIS} + * Creates a categorical cross entropy AbstractLoss using a AbstractLoss Reduction of {@link + * AbstractLoss#REDUCTION_DEFAULT}, and a channel axis of {@link #DEFAULT_AXIS} * - * @param tf the TensorFlow Ops * @param name the name of this loss * @param fromLogits Whether to interpret predictions as a tensor of logit values * @param labelSmoothing Float in [0, 1]. When > 0, label values are @@ -175,15 +168,14 @@ public CategoricalCrossentropy(Ops tf, boolean fromLogits, float labelSmoothing) *
    means that we will use a value of 0.1 for label 0 and * 0.9 for label 1 */ - public CategoricalCrossentropy(Ops tf, String name, boolean fromLogits, float labelSmoothing) { - this(tf, name, fromLogits, labelSmoothing, REDUCTION_DEFAULT, DEFAULT_AXIS); + public CategoricalCrossentropy(String name, boolean fromLogits, float labelSmoothing) { + this(name, fromLogits, labelSmoothing, REDUCTION_DEFAULT, DEFAULT_AXIS); } /** - * Creates a categorical cross entropy Loss using {@link Class#getSimpleName()} as the loss name - * and a channel axis of {@link #DEFAULT_AXIS} + * Creates a categorical cross entropy AbstractLoss using {@link Class#getSimpleName()} as the + * loss name and a channel axis of {@link #DEFAULT_AXIS} * - * @param tf the TensorFlow Ops * @param fromLogits Whether to interpret predictions as a tensor of logit values * @param labelSmoothing Float in [0, 1]. When > 0, label values are * smoothed, meaning the confidence on label values are relaxed. e.g. x=0.2 means @@ -191,15 +183,13 @@ public CategoricalCrossentropy(Ops tf, String name, boolean fromLogits, float la * for label 1 * @param reduction Type of Reduction to apply to loss. */ - public CategoricalCrossentropy( - Ops tf, boolean fromLogits, float labelSmoothing, Reduction reduction) { - this(tf, null, fromLogits, labelSmoothing, reduction, DEFAULT_AXIS); + public CategoricalCrossentropy(boolean fromLogits, float labelSmoothing, Reduction reduction) { + this(null, fromLogits, labelSmoothing, reduction, DEFAULT_AXIS); } /** - * Creates a categorical cross entropy Loss + * Creates a categorical cross entropy AbstractLoss * - * @param tf the TensorFlow Ops * @param name the name of this loss * @param fromLogits Whether to interpret predictions as a tensor of logit values * @param labelSmoothing Float in [0, 1]. When > 0, label values are @@ -213,13 +203,8 @@ public CategoricalCrossentropy( * @throws IllegalArgumentException if labelSmoothing is not in the inclusive range of 0. - 1. */ public CategoricalCrossentropy( - Ops tf, - String name, - boolean fromLogits, - float labelSmoothing, - Reduction reduction, - int axis) { - super(tf, name, reduction); + String name, boolean fromLogits, float labelSmoothing, Reduction reduction, int axis) { + super(name, reduction); if (labelSmoothing < 0 || labelSmoothing > 1) throw new IllegalArgumentException( "labelSmoothing must be >= 0. 
and <= 1, found " + labelSmoothing); @@ -251,24 +236,24 @@ public CategoricalCrossentropy( */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + Operand lPredictions; if (!fromLogits) { // add predictions range check for 0 - 1 lPredictions = LossesHelper.rangeCheck( - getTF(), + tf, "predictions range check [0-1]", predictions, - cast(getTF(), getTF().constant(0), predictions.type()), - cast(getTF(), getTF().constant(1), predictions.type())); + cast(tf, tf.constant(0), predictions.type()), + cast(tf, tf.constant(1), predictions.type())); } else { lPredictions = predictions; } Operand losses = - Losses.categoricalCrossentropy( - getTF(), labels, lPredictions, fromLogits, labelSmoothing, axis); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Losses.categoricalCrossentropy(tf, labels, lPredictions, fromLogits, labelSmoothing, axis); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CategoricalHinge.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CategoricalHinge.java index 73837ed1756..c9987fb0884 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CategoricalHinge.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CategoricalHinge.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.losses; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -35,7 +36,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{0.6f, 0.4f}, {0.4f, 0.6f}}); * CategoricalHinge categoricalHinge = new CategoricalHinge(tf); - * Operand<TFloat32> result = categoricalHinge.call(labels, predictions); + * Operand<TFloat32> result = categoricalHinge.call(Ops tf, labels, predictions); * // produces 1.4 * * @@ -43,7 +44,7 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {1f, 0.f});
    - *    Operand<TFloat32> result = categoricalHinge.call(labels, predictions, sampleWeight);
    + *    Operand<TFloat32> result = categoricalHinge.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.6f
      * 
    * @@ -51,7 +52,7 @@ * *
      *    CategoricalHinge categoricalHinge = new CategoricalHinge(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = categoricalHinge.call(labels, predictions);
    + *    Operand<TFloat32> result = categoricalHinge.call(tf, labels, predictions);
      *    // produces 2.8f
      * 
    * @@ -60,48 +61,45 @@ *
      *    CategoricalHinge categoricalHinge =
      *        new CategoricalHinge(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = categoricalHinge.call(labels, predictions);
    + *    Operand<TFloat32> result = categoricalHinge.call(tf, labels, predictions);
      *    // produces [1.2f, 1.6f]
      * 
    */ -public class CategoricalHinge extends Loss { +public class CategoricalHinge extends AbstractLoss { /** - * Creates a Categorical Hinge Loss using {@link Class#getSimpleName()} as the loss name and a - * Loss Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a Categorical Hinge AbstractLoss using {@link Class#getSimpleName()} as the loss name + * and a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public CategoricalHinge(Ops tf) { - super(tf); + public CategoricalHinge() { + super(); } /** - * Creates a Categorical Hinge Loss using {@link Class#getSimpleName()} as the loss name + * Creates a Categorical Hinge AbstractLoss using {@link Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public CategoricalHinge(Ops tf, Reduction reduction) { - super(tf, null, reduction); + public CategoricalHinge(Reduction reduction) { + super(null, reduction); } /** * Creates a Categorical Hinge * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param reduction Type of Reduction to apply to the loss. */ - public CategoricalHinge(Ops tf, String name, Reduction reduction) { - super(tf, name, reduction); + public CategoricalHinge(String name, Reduction reduction) { + super(name, reduction); } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { - Operand losses = Losses.categoricalHinge(getTF(), labels, predictions); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + + Operand losses = Losses.categoricalHinge(tf, labels, predictions); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CosineSimilarity.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CosineSimilarity.java index 0a18d93caf3..ac810139d71 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CosineSimilarity.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/CosineSimilarity.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.losses; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -40,7 +41,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{1.f, 0.f}, {1.f, 1.f}}); * CosineSimilarity cosineLoss = new CosineSimilarity(tf); - * Operand<TFloat32> result = cosineLoss.call(labels, predictions); + * Operand<TFloat32> result = cosineLoss.call(Ops tf, labels, predictions); * // produces -0.5 * * @@ -48,7 +49,7 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.8f, 0.2f});
    - *    Operand<TFloat32> result = cosineLoss.call(labels, predictions, sampleWeight);
    + *    Operand<TFloat32> result = cosineLoss.call(tf, labels, predictions, sampleWeight);
      *    // produces -0.0999f
      * 
    * @@ -56,7 +57,7 @@ * *
      *    CosineSimilarity cosineLoss = new CosineSimilarity(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = cosineLoss.call(labels, predictions);
    + *    Operand<TFloat32> result = cosineLoss.call(tf, labels, predictions);
      *    // produces -0.999f
      * 
    * @@ -64,165 +65,155 @@ * *
      *    CosineSimilarity cosineLoss = new CosineSimilarity(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = cosineLoss.call(labels, predictions);
    + *    Operand<TFloat32> result = cosineLoss.call(tf, labels, predictions);
      *    // produces [-0.f, -0.999f]
      * 
    */ -public class CosineSimilarity extends Loss { +public class CosineSimilarity extends AbstractLoss { public static final int DEFAULT_AXIS = -1; public static final Reduction DEFAULT_REDUCTION = Reduction.AUTO; private final int[] axis; /** - * Creates a Cosine Similarity Loss using {@link Class#getSimpleName()} as the loss name, an axis - * of {@link #DEFAULT_AXIS}, and a Loss Reduction of {@link #DEFAULT_REDUCTION} - * - * @param tf the TensorFlow Ops + * Creates a Cosine Similarity AbstractLoss using {@link Class#getSimpleName()} as the loss name, + * an axis of {@link #DEFAULT_AXIS}, and a AbstractLoss Reduction of {@link #DEFAULT_REDUCTION} */ - public CosineSimilarity(Ops tf) { + public CosineSimilarity() { - this(tf, null, DEFAULT_AXIS, DEFAULT_REDUCTION); + this(null, DEFAULT_AXIS, DEFAULT_REDUCTION); } /** - * Creates a Cosine Similarity Loss using an axis of {@link #DEFAULT_AXIS}, and a Loss Reduction - * of {@link #DEFAULT_REDUCTION} + * Creates a Cosine Similarity AbstractLoss using an axis of {@link #DEFAULT_AXIS}, and a + * AbstractLoss Reduction of {@link #DEFAULT_REDUCTION} * - * @param tf the TensorFlow Ops * @param name the name of the loss */ - public CosineSimilarity(Ops tf, String name) { + public CosineSimilarity(String name) { - this(tf, name, DEFAULT_AXIS, DEFAULT_REDUCTION); + this(name, DEFAULT_AXIS, DEFAULT_REDUCTION); } /** - * Creates a Cosine Similarity Loss using {@link Class#getSimpleName()} as the loss name, and a - * Loss Reduction of {@link #DEFAULT_REDUCTION} + * Creates a Cosine Similarity AbstractLoss using {@link Class#getSimpleName()} as the loss name, + * and a AbstractLoss Reduction of {@link #DEFAULT_REDUCTION} * - * @param tf the TensorFlow Ops * @param axis The dimension along which the cosine similarity is computed. */ - public CosineSimilarity(Ops tf, int axis) { + public CosineSimilarity(int axis) { - this(tf, null, axis, DEFAULT_REDUCTION); + this(null, axis, DEFAULT_REDUCTION); } /** - * Creates a Cosine Similarity Loss using {@link Class#getSimpleName()} as the loss name, and a - * Loss Reduction of {@link #DEFAULT_REDUCTION} + * Creates a Cosine Similarity AbstractLoss using {@link Class#getSimpleName()} as the loss name, + * and a AbstractLoss Reduction of {@link #DEFAULT_REDUCTION} * - * @param tf the TensorFlow Ops * @param axis The dimension along which the cosine similarity is computed. */ - public CosineSimilarity(Ops tf, int[] axis) { + public CosineSimilarity(int[] axis) { - this(tf, null, axis, DEFAULT_REDUCTION); + this(null, axis, DEFAULT_REDUCTION); } /** - * Creates a Cosine Similarity Loss using a Loss Reduction of {@link #DEFAULT_REDUCTION} + * Creates a Cosine Similarity AbstractLoss using a AbstractLoss Reduction of {@link + * #DEFAULT_REDUCTION} * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param axis The dimension along which the cosine similarity is computed. */ - public CosineSimilarity(Ops tf, String name, int axis) { + public CosineSimilarity(String name, int axis) { - this(tf, name, axis, DEFAULT_REDUCTION); + this(name, axis, DEFAULT_REDUCTION); } /** - * Creates a Cosine Similarity Loss using a Loss Reduction of {@link #DEFAULT_REDUCTION} + * Creates a Cosine Similarity AbstractLoss using a AbstractLoss Reduction of {@link + * #DEFAULT_REDUCTION} * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param axis The dimension along which the cosine similarity is computed. 
*/ - public CosineSimilarity(Ops tf, String name, int[] axis) { + public CosineSimilarity(String name, int[] axis) { - this(tf, name, axis, DEFAULT_REDUCTION); + this(name, axis, DEFAULT_REDUCTION); } /** - * Creates a Cosine Similarity Loss using {@link Class#getSimpleName()} as the loss name and an - * axis of {@link #DEFAULT_AXIS} + * Creates a Cosine Similarity AbstractLoss using {@link Class#getSimpleName()} as the loss name + * and an axis of {@link #DEFAULT_AXIS} * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public CosineSimilarity(Ops tf, Reduction reduction) { + public CosineSimilarity(Reduction reduction) { - this(tf, null, DEFAULT_AXIS, reduction); + this(null, DEFAULT_AXIS, reduction); } /** - * Creates a Cosine Similarity Loss using an axis of {@link #DEFAULT_AXIS} + * Creates a Cosine Similarity AbstractLoss using an axis of {@link #DEFAULT_AXIS} * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param reduction Type of Reduction to apply to the loss. */ - public CosineSimilarity(Ops tf, String name, Reduction reduction) { + public CosineSimilarity(String name, Reduction reduction) { - this(tf, name, DEFAULT_AXIS, reduction); + this(name, DEFAULT_AXIS, reduction); } /** - * Creates a Cosine Similarity Loss using {@link Class#getSimpleName()} as the loss name + * Creates a Cosine Similarity AbstractLoss using {@link Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param axis The dimension along which the cosine similarity is computed. * @param reduction Type of Reduction to apply to the loss. */ - public CosineSimilarity(Ops tf, int axis, Reduction reduction) { + public CosineSimilarity(int axis, Reduction reduction) { - this(tf, null, new int[] {axis}, reduction); + this(null, new int[] {axis}, reduction); } /** - * Creates a Cosine Similarity Loss using {@link Class#getSimpleName()} as the loss name + * Creates a Cosine Similarity AbstractLoss using {@link Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param axis The dimension along which the cosine similarity is computed. * @param reduction Type of Reduction to apply to the loss. */ - public CosineSimilarity(Ops tf, int[] axis, Reduction reduction) { + public CosineSimilarity(int[] axis, Reduction reduction) { - this(tf, null, axis, reduction); + this(null, axis, reduction); } /** - * Creates a Cosine Similarity Loss + * Creates a Cosine Similarity AbstractLoss * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param axis The dimension along which the cosine similarity is computed. * @param reduction Type of Reduction to apply to the loss. */ - public CosineSimilarity(Ops tf, String name, int axis, Reduction reduction) { - this(tf, name, new int[] {axis}, reduction); + public CosineSimilarity(String name, int axis, Reduction reduction) { + this(name, new int[] {axis}, reduction); } /** - * Creates a Cosine Similarity Loss + * Creates a Cosine Similarity AbstractLoss * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param axis The dimension along which the cosine similarity is computed. * @param reduction Type of Reduction to apply to the loss. 
*/ - public CosineSimilarity(Ops tf, String name, int[] axis, Reduction reduction) { - super(tf, name, reduction); + public CosineSimilarity(String name, int[] axis, Reduction reduction) { + super(name, reduction); this.axis = axis; } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { - Operand losses = Losses.cosineSimilarity(getTF(), labels, predictions, axis); + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + + Operand losses = Losses.cosineSimilarity(tf, labels, predictions, axis); losses = tf.math.neg(losses); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Hinge.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Hinge.java index d4c350ef06c..9a443247996 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Hinge.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Hinge.java @@ -14,13 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes the hinge loss between labels and predictions. * @@ -37,7 +38,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{0.6f, 0.4f}, {0.4f, 0.6f}}); * Hinge hingeLoss = new Hinge(tf); - * Operand<TFloat32> result = hingeLoss.call(labels, predictions); + * Operand<TFloat32> result = hingeLoss.call(Ops tf, labels, predictions); * // produces 1.3f * * @@ -45,57 +46,53 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {1.f, 0.f});
    - *    Operand<TFloat32> result = hingeLoss.call(labels, predictions, sampleWeight);
    + *    Operand<TFloat32> result = hingeLoss.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.55f
      * 
      *
      *    Using SUM reduction type:
      *
    - *    Hinge hingeLoss = new Hinge(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = hingeLoss.call(labels, predictions);
    + *    Hinge hingeLoss = new Hinge(Reduction.SUM);
    + *    Operand<TFloat32> result = hingeLoss.call(tf, labels, predictions);
      *    // produces 2.6f
      * 
      *
      *    Using NONE reduction type:
      *
    - *    Hinge hingeLoss = new Hinge(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = hingeLoss.call(labels, predictions);
    + *    Hinge hingeLoss = new Hinge(Reduction.NONE);
    + *    Operand<TFloat32> result = hingeLoss.call(tf, labels, predictions);
      *    // produces [1.1f, 1.5f]
      * 
    */ -public class Hinge extends Loss { +public class Hinge extends AbstractLoss { /** - * Creates a Hinge Loss using {@link Class#getSimpleName()} as the loss name and a Loss Reduction - * of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a Hinge AbstractLoss using {@link Class#getSimpleName()} as the loss name and a + * AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public Hinge(Ops tf) { - this(tf, null, Reduction.AUTO); + public Hinge() { + this(null, Reduction.AUTO); } /** - * Creates a Hinge Loss using {@link Class#getSimpleName()} as the loss name + * Creates a Hinge AbstractLoss using {@link Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public Hinge(Ops tf, Reduction reduction) { - super(tf, null, reduction); + public Hinge(Reduction reduction) { + super(null, reduction); } /** * Creates a Hinge * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param reduction Type of Reduction to apply to the loss. */ - public Hinge(Ops tf, String name, Reduction reduction) { - super(tf, name, reduction); + public Hinge(String name, Reduction reduction) { + super(name, reduction); } /** @@ -122,15 +119,16 @@ public Hinge(Ops tf, String name, Reduction reduction) { */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + Operand tLabels = cast(tf, labels, predictions.type()); tLabels = LossesHelper.valueCheck( - getTF(), + tf, "labels value check [-1, 0, 1]", tLabels, - cast(getTF(), getTF().constant(new int[] {-1, 0, 1}), predictions.type())); - Operand losses = Losses.hinge(getTF(), tLabels, predictions); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + cast(tf, tf.constant(new int[] {-1, 0, 1}), predictions.type())); + Operand losses = Losses.hinge(tf, tLabels, predictions); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Huber.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Huber.java index b1aee1b0656..c9a7d7edcb8 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Huber.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Huber.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.losses; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -39,7 +40,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{0.6f, 0.4f}, {0.4f, 0.6f}}); * Huber huberLoss = new Huber(tf); - * Operand<TFloat32> result = huberLoss.call(labels, predictions); + * Operand<TFloat32> result = huberLoss.call(Ops tf, labels, predictions); * // produces 0.155 * * @@ -47,7 +48,7 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {1.f, 0.f});
    - *    Operand<TFloat32> result = huberLoss.call(labels, predictions, sampleWeight);
    + *    Operand<TFloat32> result = huberLoss.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.09f
      * 
    * @@ -55,7 +56,7 @@ * *
      *    Huber huberLoss = new Huber(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = huberLoss.call(labels, predictions);
    + *    Operand<TFloat32> result = huberLoss.call(tf, labels, predictions);
      *    // produces 0.32f
      * 
    * @@ -63,78 +64,74 @@ * *
      *    Huber huberLoss = new Huber(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = huberLoss.call(labels, predictions);
    + *    Operand<TFloat32> result = huberLoss.call(tf, labels, predictions);
      *    // produces [0.18f, 0.13f]
      * 
    * * @see
    Huber loss */ -public class Huber extends Loss { +public class Huber extends AbstractLoss { public static final float DELTA_DEFAULT = 1.0f; private final float delta; /** - * Creates a Huber Loss using {@link Class#getSimpleName()} as the loss name, {@link - * #DELTA_DEFAULT} as the delta and a Loss Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a Huber AbstractLoss using {@link Class#getSimpleName()} as the loss name, {@link + * #DELTA_DEFAULT} as the delta and a AbstractLoss Reduction of {@link + * AbstractLoss#REDUCTION_DEFAULT} */ - public Huber(Ops tf) { - this(tf, null, DELTA_DEFAULT, Reduction.AUTO); + public Huber() { + this(null, DELTA_DEFAULT, Reduction.AUTO); } /** - * Creates a Huber Loss using {@link #DELTA_DEFAULT} as the delta and a Loss Reduction of {@link - * Loss#REDUCTION_DEFAULT} + * Creates a Huber AbstractLoss using {@link #DELTA_DEFAULT} as the delta and a AbstractLoss + * Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} * - * @param tf the TensorFlow Ops * @param name the name of the loss, if null then {@link Class#getSimpleName()} is used. */ - public Huber(Ops tf, String name) { - this(tf, name, DELTA_DEFAULT, Reduction.AUTO); + public Huber(String name) { + this(name, DELTA_DEFAULT, Reduction.AUTO); } /** - * Creates a Huber Loss using {@link Class#getSimpleName()} as the loss name and and {@link - * #DELTA_DEFAULT} as the delta + * Creates a Huber AbstractLoss using {@link Class#getSimpleName()} as the loss name and and + * {@link #DELTA_DEFAULT} as the delta * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public Huber(Ops tf, Reduction reduction) { - this(tf, null, DELTA_DEFAULT, reduction); + public Huber(Reduction reduction) { + this(null, DELTA_DEFAULT, reduction); } /** - * Creates a Huber Loss using {@link #DELTA_DEFAULT} as the delta + * Creates a Huber AbstractLoss using {@link #DELTA_DEFAULT} as the delta * - * @param tf the TensorFlow Ops * @param name the name of the loss, if null then {@link Class#getSimpleName()} is used. * @param reduction Type of Reduction to apply to the loss. */ - public Huber(Ops tf, String name, Reduction reduction) { - this(tf, name, DELTA_DEFAULT, reduction); + public Huber(String name, Reduction reduction) { + this(name, DELTA_DEFAULT, reduction); } /** - * Creates a Huber Loss + * Creates a Huber AbstractLoss * - * @param tf the TensorFlow Ops * @param name the name of the loss, if null then {@link Class#getSimpleName()} is used. * @param delta the point where the Huber loss function changes from quadratic to linear. * @param reduction Type of Reduction to apply to the loss. 
*/ - public Huber(Ops tf, String name, float delta, Reduction reduction) { - super(tf, name, reduction); + public Huber(String name, float delta, Reduction reduction) { + super(name, reduction); this.delta = delta; } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { - Operand losses = Losses.huber(getTF(), labels, predictions, delta); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + + Operand losses = Losses.huber(tf, labels, predictions, delta); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/KLDivergence.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/KLDivergence.java index 2aa1f72092b..ef5d88539db 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/KLDivergence.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/KLDivergence.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.losses; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -31,8 +32,8 @@ * tf.constant(new float[][] {{0.f, 1.f}, {0.f, 0.f}}); * Operand<TFloat32> predictions = * tf.constant(new float[][] {{0.6f, 0.4f}, {0.4f, 0.6f}}); - * KLDivergence kld = new KLDivergence(tf); - * Operand<TFloat32> result = kld.call(labels, predictions); + * KLDivergence kld = new KLDivergence(); + * Operand<TFloat32> result = kld.call(Ops tf, labels, predictions); * // produces 0.458 * * @@ -40,68 +41,65 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.8f, 0.2f});
    - *    Operand<TFloat32> result = kld.call(labels, predictions, sampleWeight);
    + *    Operand<TFloat32> result = kld.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.366f
      * 
      *
      *    Using SUM reduction type:
      *
    - *    KLDivergence kld = new KLDivergence(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = kld.call(labels, predictions);
    + *    KLDivergence kld = new KLDivergence(Reduction.SUM);
    + *    Operand<TFloat32> result = kld.call(tf, labels, predictions);
      *    // produces 0.916f
      * 
      *
      *    Using NONE reduction type:
      *
    - *    KLDivergence kld = new KLDivergence(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = kld.call(labels, predictions);
    + *    KLDivergence kld = new KLDivergence(Reduction.NONE);
    + *    Operand<TFloat32> result = kld.call(tf, labels, predictions);
      *    // produces [0.916f, -3.08e-06f]
      * 
    * * @see Kullback?Leibler * divergence */ -public class KLDivergence extends Loss { +public class KLDivergence extends AbstractLoss { /** - * Creates a Kullback Leibler Divergence Loss using {@link Class#getSimpleName()} as the loss name - * and a Loss Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a Kullback Leibler Divergence AbstractLoss using {@link Class#getSimpleName()} as the + * loss name and a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public KLDivergence(Ops tf) { - super(tf); + public KLDivergence() { + super(); } /** - * Creates a Kullback Leibler Divergence Loss Loss using {@link Class#getSimpleName()} as the loss - * name + * Creates a Kullback Leibler Divergence AbstractLoss AbstractLoss using {@link + * Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public KLDivergence(Ops tf, Reduction reduction) { - super(tf, null, reduction); + public KLDivergence(Reduction reduction) { + super(null, reduction); } /** - * Creates a Kullback Leibler Divergence Loss + * Creates a Kullback Leibler Divergence AbstractLoss * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param reduction Type of Reduction to apply to the loss. */ - public KLDivergence(Ops tf, String name, Reduction reduction) { - super(tf, name, reduction); + public KLDivergence(String name, Reduction reduction) { + super(name, reduction); } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { - Operand losses = Losses.kullbackLeiblerDivergence(getTF(), labels, predictions); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + + Operand losses = Losses.kullbackLeiblerDivergence(tf, labels, predictions); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/LogCosh.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/LogCosh.java index a11d582e527..02200c3a9e0 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/LogCosh.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/LogCosh.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.losses; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -33,7 +34,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{1.f, 1.f}, {0.f, 0.f}}); * LogCosh logcosh = new LogCosh(tf); - * Operand<TFloat32> result = logcosh.call(labels, predictions); + * Operand<TFloat32> result = logcosh.call(Ops tf, labels, predictions); * // produces 0.108 * * @@ -41,74 +42,71 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.8f, 0.2f});
    - *    Operand<TFloat32> result = logcosh.call(labels, predictions, sampleWeight);
    + *    Operand<TFloat32> result = logcosh.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.087f
      * 
      *
      *    Using SUM reduction type:
      *
    - *    LogCosh logcosh = new LogCosh(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = logcosh.call(labels, predictions);
    + *    LogCosh logcosh = new LogCosh(Reduction.SUM);
    + *    Operand<TFloat32> result = logcosh.call(tf, labels, predictions);
      *    // produces 0.217f
      * 
      *
      *    Using NONE reduction type:
      *
    - *    LogCosh logcosh = new LogCosh(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = logcosh.call(labels, predictions);
    + *    LogCosh logcosh = new LogCosh(Reduction.NONE);
    + *    Operand<TFloat32> result = logcosh.call(tf, labels, predictions);
      *    // produces [0.217f, 0f]
      * 
    */ -public class LogCosh extends Loss { +public class LogCosh extends AbstractLoss { /** - * Creates a LogCosh Loss using {@link Class#getSimpleName()} as the loss name and a Loss - * Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a LogCosh AbstractLoss using {@link Class#getSimpleName()} as the loss name and a + * AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public LogCosh(Ops tf) { - this(tf, null, Reduction.AUTO); + public LogCosh() { + this(null, Reduction.AUTO); } /** - * Creates a LogCosh Loss using a Loss Reduction of {@link Loss#REDUCTION_DEFAULT} + * Creates a LogCosh AbstractLoss using a AbstractLoss Reduction of {@link + * AbstractLoss#REDUCTION_DEFAULT} * - * @param tf the TensorFlow Ops * @param name the name of the loss, if null then {@link Class#getSimpleName()} is used. */ - public LogCosh(Ops tf, String name) { - this(tf, name, Reduction.AUTO); + public LogCosh(String name) { + this(name, Reduction.AUTO); } /** - * Creates a LogCosh Loss using {@link Class#getSimpleName()} as the loss name + * Creates a LogCosh AbstractLoss using {@link Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public LogCosh(Ops tf, Reduction reduction) { - this(tf, null, reduction); + public LogCosh(Reduction reduction) { + this(null, reduction); } /** - * Creates a LogCosh Loss + * Creates a LogCosh AbstractLoss * - * @param tf the TensorFlow Ops * @param name the name of the loss, if null then {@link Class#getSimpleName()} is used. * @param reduction Type of Reduction to apply to the loss. */ - public LogCosh(Ops tf, String name, Reduction reduction) { - super(tf, name, reduction); + public LogCosh(String name, Reduction reduction) { + super(name, reduction); } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { - Operand losses = Losses.logCosh(getTF(), labels, predictions); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + + Operand losses = Losses.logCosh(tf, labels, predictions); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Loss.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Loss.java index cdd35d28aba..4dd5bce6cde 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Loss.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Loss.java @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,60 +18,14 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -public abstract class Loss { - public static final Reduction REDUCTION_DEFAULT = Reduction.AUTO; - - protected final Ops tf; - protected final Reduction reduction; - - /** - * Creates a Loss using {@link Class#getSimpleName()} as the name and a Loss Reduction of {@link - * Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops - */ - protected Loss(Ops tf) { - this(tf, null, Reduction.AUTO); - } - - /** - * Creates a Loss using a Loss Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops - * @param name the name of this Loss, if null the name will be {@link Class#getSimpleName()}. - */ - protected Loss(Ops tf, String name) { - this(tf, name, Reduction.AUTO); - } - - /** - * Creates a Loss - * - * @param tf the TensorFlow Ops - * @param name the name of this loss, if null the name will be {@link Class#getSimpleName()}. - * @param reduction Type of Reduction to apply to the loss. - */ - protected Loss(Ops tf, String name, Reduction reduction) { - this.tf = name != null ? tf.withSubScope(name) : tf.withSubScope(getClass().getSimpleName()); - this.reduction = reduction; - } - - /** - * Calculates the loss - * - * @param labels the truth values or labels - * @param predictions the predictions - * @param The data type of the predictions and loss. - * @return the loss - */ - public Operand call( - Operand labels, Operand predictions) { - return call(labels, predictions, null); - } +/** Interface for loss calc ulation */ +@FunctionalInterface +public interface Loss { /** * Generates an Operand that calculates the loss. * + * @param tf the TensorFlow Ops * @param labels the truth values or labels * @param predictions the predictions * @param sampleWeights Optional sampleWeights acts as a coefficient for the loss. If a scalar is @@ -84,24 +38,6 @@ public Operand call( * @param The data type of the predictions, sampleWeights and loss. * @return the loss */ - public abstract Operand call( - Operand labels, Operand predictions, Operand sampleWeights); - - /** - * Gets the TensorFlow Ops - * - * @return the TensorFlow Ops - */ - public Ops getTF() { - return tf; - } - - /** - * Gets the loss reduction - * - * @return the loss reduction - */ - public Reduction getReduction() { - return reduction; - } + Operand call( + Ops tf, Operand labels, Operand predictions, Operand sampleWeights); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanAbsoluteError.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanAbsoluteError.java index 03a3cf70110..d85bdf3561a 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanAbsoluteError.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanAbsoluteError.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.losses; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -32,7 +33,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{1.f, 1.f}, {1.f, 0.f}}); * MeanAbsoluteError mae = new MeanAbsoluteError(tf); - * Operand<TFloat32> result = mae.call(labels, predictions); + * Operand<TFloat32> result = mae.call(Ops tf, labels, predictions); * // produces 0.5f * * @@ -40,64 +41,61 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.7f, 0.3f});
    - *    Operand<TFloat32> result = mae.call(labels, predictions, sampleWeight);
    + *    Operand<TFloat32> result = mae.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.25f
      * 
      *
      *    Using SUM reduction type:
      *
    - *    MeanAbsoluteError mae = new MeanAbsoluteError(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = mae.call(labels, predictions);
    + *    MeanAbsoluteError mae = new MeanAbsoluteError(Reduction.SUM);
    + *    Operand<TFloat32> result = mae.call(tf, labels, predictions);
      *    // produces 1.0f
      * 
      *
      *    Using NONE reduction type:
      *
    - *    MeanAbsoluteError mae = new MeanAbsoluteError(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = mae.call(labels, predictions);
    + *    MeanAbsoluteError mae = new MeanAbsoluteError(Reduction.NONE);
    + *    Operand<TFloat32> result = mae.call(tf, labels, predictions);
      *    // produces [0.5f, 0.5f]
      * 
    */ -public class MeanAbsoluteError extends Loss { +public class MeanAbsoluteError extends AbstractLoss { /** - * Creates a MeanAbsoluteError Loss using {@link Class#getSimpleName()} as the loss name and a - * Loss Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a MeanAbsoluteError AbstractLoss using {@link Class#getSimpleName()} as the loss name + * and a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public MeanAbsoluteError(Ops tf) { - super(tf); + public MeanAbsoluteError() { + super(); } /** - * Creates a MeanAbsoluteError Loss using {@link Class#getSimpleName()} as the loss name + * Creates a MeanAbsoluteError AbstractLoss using {@link Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public MeanAbsoluteError(Ops tf, Reduction reduction) { - super(tf, null, reduction); + public MeanAbsoluteError(Reduction reduction) { + super(null, reduction); } /** * Creates a MeanAbsoluteError * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param reduction Type of Reduction to apply to the loss. */ - public MeanAbsoluteError(Ops tf, String name, Reduction reduction) { - super(tf, name, reduction); + public MeanAbsoluteError(String name, Reduction reduction) { + super(name, reduction); } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { - Operand losses = Losses.meanAbsoluteError(getTF(), labels, predictions); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + + Operand losses = Losses.meanAbsoluteError(tf, labels, predictions); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanAbsolutePercentageError.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanAbsolutePercentageError.java index 6c5242df4f2..ed5c7d73e2f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanAbsolutePercentageError.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanAbsolutePercentageError.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.losses; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -32,7 +33,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{1.f, 1.f}, {1.f, 0.f}}); * MeanAbsolutePercentageError mape = new MeanAbsolutePercentageError(tf); - * Operand<TFloat32> result = mape.call(labels, predictions); + * Operand<TFloat32> result = mape.call(Ops tf, labels, predictions); * // produces 50f * * @@ -40,64 +41,62 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.7f, 0.3f});
    - *    Operand<TFloat32> result = mape.call(labels, predictions, sampleWeight);
    + *    Operand<TFloat32> result = mape.call(tf, labels, predictions, sampleWeight);
      *    // produces 20f
      * 
      *
      *    Using SUM reduction type:
      *
    - *    MeanAbsolutePercentageError mape = new MeanAbsolutePercentageError(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = mape.call(labels, predictions);
    + *    MeanAbsolutePercentageError mape = new MeanAbsolutePercentageError(Reduction.SUM);
    + *    Operand<TFloat32> result = mape.call(tf, labels, predictions);
      *    // produces 100.0f
      * 
      *
      *    Using NONE reduction type:
      *
    - *    MeanAbsolutePercentageError mape = new MeanAbsolutePercentageError(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = mape.call(labels, predictions);
    + *    MeanAbsolutePercentageError mape = new MeanAbsolutePercentageError(Reduction.NONE);
    + *    Operand<TFloat32> result = mape.call(tf, labels, predictions);
      *    // produces [25f, 75f]
      * 
    */ -public class MeanAbsolutePercentageError extends Loss { +public class MeanAbsolutePercentageError extends AbstractLoss { /** - * Creates a MeanAbsolutePercentageError Loss using {@link Class#getSimpleName()} as the loss name - * and a Loss Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a MeanAbsolutePercentageError AbstractLoss using {@link Class#getSimpleName()} as the + * loss name and a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public MeanAbsolutePercentageError(Ops tf) { - super(tf); + public MeanAbsolutePercentageError() { + super(); } /** - * Creates a MeanAbsolutePercentageError Loss using {@link Class#getSimpleName()} as the loss name + * Creates a MeanAbsolutePercentageError AbstractLoss using {@link Class#getSimpleName()} as the + * loss name * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public MeanAbsolutePercentageError(Ops tf, Reduction reduction) { - super(tf, null, reduction); + public MeanAbsolutePercentageError(Reduction reduction) { + super(null, reduction); } /** * Creates a MeanAbsolutePercentageError * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param reduction Type of Reduction to apply to the loss. */ - public MeanAbsolutePercentageError(Ops tf, String name, Reduction reduction) { - super(tf, name, reduction); + public MeanAbsolutePercentageError(String name, Reduction reduction) { + super(name, reduction); } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { - Operand losses = Losses.meanAbsolutePercentageError(getTF(), labels, predictions); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + + Operand losses = Losses.meanAbsolutePercentageError(tf, labels, predictions); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanSquaredError.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanSquaredError.java index f975db55c44..c6898e20f20 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanSquaredError.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanSquaredError.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.losses; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -32,7 +33,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{1.f, 1.f}, {1.f, 0.f}}); * MeanSquaredError mse = new MeanSquaredError(tf); - * Operand<TFloat32> result = mse.call(labels, predictions); + * Operand<TFloat32> result = mse.call(Ops tf, labels, predictions); * // produces 0.5f * * @@ -40,64 +41,61 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.7f, 0.3f});
    - *    Operand<TFloat32> result = mse.call(labels, predictions, sampleWeight);
    + *    Operand<TFloat32> result = mse.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.25f
      * 
      *
      *    Using SUM reduction type:
      *
    - *    MeanSquaredError mse = new MeanSquaredError(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = mse.call(labels, predictions);
    + *    MeanSquaredError mse = new MeanSquaredError(Reduction.SUM);
    + *    Operand<TFloat32> result = mse.call(tf, labels, predictions);
      *    // produces 1.0f
      * 
      *
      *    Using NONE reduction type:
      *
    - *    MeanSquaredError mse = new MeanSquaredError(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = mse.call(labels, predictions);
    + *    MeanSquaredError mse = new MeanSquaredError(Reduction.NONE);
    + *    Operand<TFloat32> result = mse.call(tf, labels, predictions);
      *    // produces [0.5f, 0.5f]
      * 
    */ -public class MeanSquaredError extends Loss { +public class MeanSquaredError extends AbstractLoss { /** - * Creates a MeanSquaredError Loss using {@link Class#getSimpleName()} as the loss name and a Loss - * Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a MeanSquaredError AbstractLoss using {@link Class#getSimpleName()} as the loss name + * and a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public MeanSquaredError(Ops tf) { - super(tf); + public MeanSquaredError() { + super(); } /** - * Creates a MeanSquaredError Loss using {@link Class#getSimpleName()} as the loss name + * Creates a MeanSquaredError AbstractLoss using {@link Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public MeanSquaredError(Ops tf, Reduction reduction) { - super(tf, null, reduction); + public MeanSquaredError(Reduction reduction) { + super(null, reduction); } /** * Creates a MeanSquaredError * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param reduction Type of Reduction to apply to the loss. */ - public MeanSquaredError(Ops tf, String name, Reduction reduction) { - super(tf, name, reduction); + public MeanSquaredError(String name, Reduction reduction) { + super(name, reduction); } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { - Operand losses = Losses.meanSquaredError(getTF(), labels, predictions); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + + Operand losses = Losses.meanSquaredError(tf, labels, predictions); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanSquaredLogarithmicError.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanSquaredLogarithmicError.java index 11b8e157e90..3d325a98a6a 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanSquaredLogarithmicError.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/MeanSquaredLogarithmicError.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.losses; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -32,7 +33,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{1.f, 1.f}, {1.f, 0.f}}); * MeanSquaredLogarithmicError msle = new MeanSquaredLogarithmicError(tf); - * Operand<TFloat32> result = msle.call(labels, predictions); + * Operand<TFloat32> result = msle.call(Ops tf, labels, predictions); * // produces 0.240f * * @@ -40,64 +41,61 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.7f, 0.3f});
    - *    Operand<TFloat32> result = msle.call(labels, predictions, sampleWeight);
    + *    Operand<TFloat32> result = msle.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.120f
      * 
      *
      *    Using SUM reduction type:
      *
    - *    MeanSquaredLogarithmicError msle = new MeanSquaredLogarithmicError(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = msle.call(labels, predictions);
    + *    MeanSquaredLogarithmicError msle = new MeanSquaredLogarithmicError(Reduction.SUM);
    + *    Operand<TFloat32> result = msle.call(tf, labels, predictions);
      *    // produces 0.480f
      * 
      *
      *    Using NONE reduction type:
      *
    - *    MeanSquaredLogarithmicError msle = new MeanSquaredLogarithmicError(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = msle.call(labels, predictions);
    + *    MeanSquaredLogarithmicError msle = new MeanSquaredLogarithmicError(Reduction.NONE);
    + *    Operand<TFloat32> result = msle.call(tf, labels, predictions);
      *    // produces [0.240f, 0.240f]
      * 
    */ -public class MeanSquaredLogarithmicError extends Loss { +public class MeanSquaredLogarithmicError extends AbstractLoss { /** - * Creates a MeanSquaredError Loss using {@link Class#getSimpleName()} as the loss name and a Loss - * Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a MeanSquaredError AbstractLoss using {@link Class#getSimpleName()} as the loss name + * and a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public MeanSquaredLogarithmicError(Ops tf) { - super(tf); + public MeanSquaredLogarithmicError() { + super(); } /** - * Creates a MeanSquaredError Loss using {@link Class#getSimpleName()} as the loss name + * Creates a MeanSquaredError AbstractLoss using {@link Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public MeanSquaredLogarithmicError(Ops tf, Reduction reduction) { - super(tf, null, reduction); + public MeanSquaredLogarithmicError(Reduction reduction) { + super(null, reduction); } /** * Creates a MeanSquaredError * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param reduction Type of Reduction to apply to the loss. */ - public MeanSquaredLogarithmicError(Ops tf, String name, Reduction reduction) { - super(tf, name, reduction); + public MeanSquaredLogarithmicError(String name, Reduction reduction) { + super(name, reduction); } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { - Operand losses = Losses.meanSquaredLogarithmicError(getTF(), labels, predictions); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + + Operand losses = Losses.meanSquaredLogarithmicError(tf, labels, predictions); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Poisson.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Poisson.java index 78324acf8a5..a6eb29b7109 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Poisson.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Poisson.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.losses; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -32,7 +33,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{1.f, 1.f}, {0.f, 0.f}}); * Poisson poissonLoss = new Poisson(tf); - * Operand<TFloat32> result = poissonLoss.call(labels, predictions); + * Operand<TFloat32> result = poissonLoss.call(Ops tf, labels, predictions); * // produces 0.5f * * @@ -40,74 +41,71 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.8f, 0.2f});
    - *    Operand<TFloat32> result = poissonLoss.call(labels, predictions, sampleWeight);
     + *    Operand&lt;TFloat32&gt; result = poissonLoss.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.4f
      * </pre>
      *
      * <p>Using SUM reduction type:
      *
      * <pre>
    - *    Poisson poissonLoss = new Poisson(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = poissonLoss.call(labels, predictions);
    + *    Poisson poissonLoss = new Poisson(Reduction.SUM);
     + *    Operand&lt;TFloat32&gt; result = poissonLoss.call(tf, labels, predictions);
      *    // produces 0.999f
      * </pre>
      *
      * <p>Using NONE reduction type:
      *
      * <pre>
    - *    Poisson poissonLoss = new Poisson(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = poissonLoss.call(labels, predictions);
    + *    Poisson poissonLoss = new Poisson(Reduction.NONE);
     + *    Operand&lt;TFloat32&gt; result = poissonLoss.call(tf, labels, predictions);
      *    // produces [0.999f, 0f]
      * </pre>
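The Poisson changes in this file follow the same relocation. Assuming the tf/labels/predictions setup from the sketch above, the weighted and unweighted forms look like this (the three-argument overload simply forwards a null sampleWeights, as the AbstractLoss base class added later in this patch shows):

    Poisson poissonLoss = new Poisson(Reduction.SUM);
    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.8f, 0.2f});

    // Unweighted: sampleWeights defaults to null.
    Operand<TFloat32> unweighted = poissonLoss.call(tf, labels, predictions);
    // Weighted: pass the weights explicitly.
    Operand<TFloat32> weighted = poissonLoss.call(tf, labels, predictions, sampleWeight);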
    */ -public class Poisson extends Loss { +public class Poisson extends AbstractLoss { /** - * Creates a Poisson Loss using {@link Class#getSimpleName()} as the loss name and a Loss - * Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a Poisson AbstractLoss using {@link Class#getSimpleName()} as the loss name and a + * AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public Poisson(Ops tf) { - this(tf, null, Reduction.AUTO); + public Poisson() { + this(null, Reduction.AUTO); } /** - * Creates a Poisson Loss using a Loss Reduction of {@link Loss#REDUCTION_DEFAULT} + * Creates a Poisson AbstractLoss using a AbstractLoss Reduction of {@link + * AbstractLoss#REDUCTION_DEFAULT} * - * @param tf the TensorFlow Ops * @param name the name of the loss, if null then {@link Class#getSimpleName()} is used. */ - public Poisson(Ops tf, String name) { - this(tf, name, Reduction.AUTO); + public Poisson(String name) { + this(name, Reduction.AUTO); } /** - * Creates a Poisson Loss using {@link Class#getSimpleName()} as the loss name + * Creates a Poisson AbstractLoss using {@link Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public Poisson(Ops tf, Reduction reduction) { - this(tf, null, reduction); + public Poisson(Reduction reduction) { + this(null, reduction); } /** - * Creates a Poisson Loss + * Creates a Poisson AbstractLoss * - * @param tf the TensorFlow Ops * @param name the name of the loss, if null then {@link Class#getSimpleName()} is used. * @param reduction Type of Reduction to apply to the loss. */ - public Poisson(Ops tf, String name, Reduction reduction) { - super(tf, name, reduction); + public Poisson(String name, Reduction reduction) { + super(name, reduction); } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { - Operand losses = Losses.poisson(getTF(), labels, predictions); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + + Operand losses = Losses.poisson(tf, labels, predictions); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Reduction.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Reduction.java index 87ea43c6c3a..e40ec6d6ebb 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Reduction.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Reduction.java @@ -15,7 +15,7 @@ package org.tensorflow.framework.losses; /** - * Type of Loss Reduction + * Type of AbstractLoss Reduction * *

    {@link #AUTO} indicates that the reduction option will be determined by the usage context. For * almost all cases this defaults to {@link #SUM_OVER_BATCH_SIZE}. diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/SparseCategoricalCrossentropy.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/SparseCategoricalCrossentropy.java index d04cc67d5d9..dff77bfc75b 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/SparseCategoricalCrossentropy.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/SparseCategoricalCrossentropy.java @@ -14,13 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes the crossentropy loss between labels and predictions. * @@ -43,7 +44,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{0.05f, 0.95f, 0f}, {0.1f, 0.8f, 0.1f}}); * SparseCategoricalCrossentropy sparseCCE = new SparseCategoricalCrossentropy(tf); - * Operand<TFloat32> result = sparseCCE.call(labels, predictions); + * Operand<TFloat32> result = sparseCCE.call(Ops tf, labels, predictions); * // produces 1.177f * * @@ -51,27 +52,27 @@ * *

      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {0.3f, 0.7f});
    - *    Operand<TFloat32> result = sparseCCE.call(labels, predictions, sampleWeight);
     + *    Operand&lt;TFloat32&gt; result = sparseCCE.call(tf, labels, predictions, sampleWeight);
      *    // produces 0.814f
      * </pre>
      *
      * <p>Using SUM reduction type:
      *
      * <pre>
    - *    SparseCategoricalCrossentropy sparseCCE = new SparseCategoricalCrossentropy(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = sparseCCE.call(labels, predictions);
    + *    SparseCategoricalCrossentropy sparseCCE = new SparseCategoricalCrossentropy(Reduction.SUM);
     + *    Operand&lt;TFloat32&gt; result = sparseCCE.call(tf, labels, predictions);
      *    // produces 2.354f
      * </pre>
      *
      * <p>Using NONE reduction type:
      *
      * <pre>
    - *    SparseCategoricalCrossentropy sparseCCE = new SparseCategoricalCrossentropy(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = sparseCCE.call(labels, predictions);
    + *    SparseCategoricalCrossentropy sparseCCE = new SparseCategoricalCrossentropy(Reduction.NONE);
     + *    Operand&lt;TFloat32&gt; result = sparseCCE.call(tf, labels, predictions);
      *    // produces [0.0513f, 2.303f]
      * </pre>
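One behavioral detail is easy to miss in the hunks below: when fromLogits is false (the default), call() range-checks the predictions into [0, 1] before computing the loss, so raw logits have to be flagged at construction time. A sketch, again assuming the tf/labels setup above and treating the probability and logit tensors as placeholders:

    // Probabilities (default): values outside [0, 1] fail the range check when running eagerly.
    SparseCategoricalCrossentropy fromProbs = new SparseCategoricalCrossentropy();
    Operand<TFloat32> probLoss = fromProbs.call(tf, labels, predictions);

    // Raw logits: construct with fromLogits = true so the range check is skipped.
    SparseCategoricalCrossentropy fromLogits = new SparseCategoricalCrossentropy(true);
    Operand<TFloat32> logitLoss = fromLogits.call(tf, labels, logits);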
    */ -public class SparseCategoricalCrossentropy extends Loss { +public class SparseCategoricalCrossentropy extends AbstractLoss { public static final boolean FROM_LOGITS_DEFAULT = false; public static final int AXIS_DEFAULT = -1; @@ -80,89 +81,80 @@ public class SparseCategoricalCrossentropy extends Loss { /** * Creates a SparseCategoricalCrossentropy loss using {@link Class#getSimpleName()} as the loss - * name, a Loss Reduction of {@link Loss#REDUCTION_DEFAULT}, and fromLogits={@link + * name, a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT}, and fromLogits={@link * #FROM_LOGITS_DEFAULT}. - * - * @param tf the TensorFlow Ops */ - public SparseCategoricalCrossentropy(Ops tf) { - this(tf, null, FROM_LOGITS_DEFAULT, REDUCTION_DEFAULT, AXIS_DEFAULT); + public SparseCategoricalCrossentropy() { + this(null, FROM_LOGITS_DEFAULT, REDUCTION_DEFAULT, AXIS_DEFAULT); } /** - * Creates a SparseCategoricalCrossentropy loss using a Loss Reduction of {@link - * Loss#REDUCTION_DEFAULT}, and fromLogits={@link #FROM_LOGITS_DEFAULT}. + * Creates a SparseCategoricalCrossentropy loss using a AbstractLoss Reduction of {@link + * AbstractLoss#REDUCTION_DEFAULT}, and fromLogits={@link #FROM_LOGITS_DEFAULT}. * - * @param tf the TensorFlow Ops * @param name the name of this loss function */ - public SparseCategoricalCrossentropy(Ops tf, String name) { - this(tf, name, FROM_LOGITS_DEFAULT, REDUCTION_DEFAULT, AXIS_DEFAULT); + public SparseCategoricalCrossentropy(String name) { + this(name, FROM_LOGITS_DEFAULT, REDUCTION_DEFAULT, AXIS_DEFAULT); } /** * Creates a SparseCategoricalCrossentropy loss using {@link Class#getSimpleName()} as the loss * name, with Reduction.AUTO and fromLogits={@link #FROM_LOGITS_DEFAULT}. * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to loss. */ - public SparseCategoricalCrossentropy(Ops tf, Reduction reduction) { - this(tf, null, FROM_LOGITS_DEFAULT, reduction, AXIS_DEFAULT); + public SparseCategoricalCrossentropy(Reduction reduction) { + this(null, FROM_LOGITS_DEFAULT, reduction, AXIS_DEFAULT); } /** * Creates a SparseCategoricalCrossentropy loss with Reduction.AUTO and fromLogits={@link * #FROM_LOGITS_DEFAULT}. * - * @param tf the TensorFlow Ops * @param name the name of this loss function * @param reduction Type of Reduction to apply to loss. */ - public SparseCategoricalCrossentropy(Ops tf, String name, Reduction reduction) { - this(tf, name, FROM_LOGITS_DEFAULT, reduction, AXIS_DEFAULT); + public SparseCategoricalCrossentropy(String name, Reduction reduction) { + this(name, FROM_LOGITS_DEFAULT, reduction, AXIS_DEFAULT); } /** - * Creates a SparseCategoricalCrossentropy using a Loss Reduction of {@link - * Loss#REDUCTION_DEFAULT}, and fromLogits={@link #FROM_LOGITS_DEFAULT}. + * Creates a SparseCategoricalCrossentropy using a AbstractLoss Reduction of {@link + * AbstractLoss#REDUCTION_DEFAULT}, and fromLogits={@link #FROM_LOGITS_DEFAULT}. 
* - * @param tf the TensorFlow Ops * @param name the name of this loss function * @param fromLogits Whether to interpret predictions as a tensor of logit values */ - public SparseCategoricalCrossentropy(Ops tf, String name, boolean fromLogits) { - this(tf, name, fromLogits, REDUCTION_DEFAULT, AXIS_DEFAULT); + public SparseCategoricalCrossentropy(String name, boolean fromLogits) { + this(name, fromLogits, REDUCTION_DEFAULT, AXIS_DEFAULT); } /** * Creates a SparseCategoricalCrossentropy loss using {@link Class#getSimpleName()} as the loss - * name, a Loss Reduction of {@link Loss#REDUCTION_DEFAULT} and fromLogits={@link + * name, a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} and fromLogits={@link * #FROM_LOGITS_DEFAULT}. * - * @param tf the TensorFlow Ops * @param fromLogits Whether to interpret predictions as a tensor of logit values */ - public SparseCategoricalCrossentropy(Ops tf, boolean fromLogits) { - this(tf, null, fromLogits, REDUCTION_DEFAULT, AXIS_DEFAULT); + public SparseCategoricalCrossentropy(boolean fromLogits) { + this(null, fromLogits, REDUCTION_DEFAULT, AXIS_DEFAULT); } /** * Creates a SparseCategoricalCrossentropy loss using {@link Class#getSimpleName()} as the loss * name, * - * @param tf the TensorFlow Ops * @param fromLogits Whether to interpret predictions as a tensor of logit values * @param reduction Type of Reduction to apply to loss. */ - public SparseCategoricalCrossentropy(Ops tf, boolean fromLogits, Reduction reduction) { - this(tf, null, fromLogits, reduction, AXIS_DEFAULT); + public SparseCategoricalCrossentropy(boolean fromLogits, Reduction reduction) { + this(null, fromLogits, reduction, AXIS_DEFAULT); } /** * Creates a SparseCategoricalCrossentropy * - * @param tf the TensorFlow Ops * @param name the name of this loss function * @param fromLogits Whether to interpret predictions as a tensor of logit values * @param reduction Type of Reduction to apply to loss. @@ -170,8 +162,8 @@ public SparseCategoricalCrossentropy(Ops tf, boolean fromLogits, Reduction reduc * and axis=1 corresponds to data format 'Channels First'. */ public SparseCategoricalCrossentropy( - Ops tf, String name, boolean fromLogits, Reduction reduction, int axis) { - super(tf, name, reduction); + String name, boolean fromLogits, Reduction reduction, int axis) { + super(name, reduction); this.fromLogits = fromLogits; this.axis = axis; } @@ -184,6 +176,7 @@ public SparseCategoricalCrossentropy( * range o [0. to 1.]. In Eager Mode, this call will throw {@link IllegalArgumentException}, if * the predictions values are outside the range o [0. to 1.] * + * @param tf the TensorFlow Ops * @param labels the truth values or labels * @param predictions the predictions, values must be in the range [0. to 1.] inclusive. * @param sampleWeights Optional SampleWeights acts as a coefficient for the loss. 
If a scalar is @@ -199,23 +192,24 @@ public SparseCategoricalCrossentropy( */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + Operand lPredictions; if (!fromLogits) { // add predictions range check for 0 - 1 lPredictions = LossesHelper.rangeCheck( - getTF(), + tf, "predictions range check [0-1]", predictions, - cast(getTF(), getTF().constant(0), predictions.type()), - cast(getTF(), getTF().constant(1), predictions.type())); + cast(tf, tf.constant(0), predictions.type()), + cast(tf, tf.constant(1), predictions.type())); } else { lPredictions = predictions; } Operand losses = - Losses.sparseCategoricalCrossentropy(getTF(), labels, lPredictions, fromLogits, axis); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + Losses.sparseCategoricalCrossentropy(tf, labels, lPredictions, fromLogits, axis); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/SquaredHinge.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/SquaredHinge.java index dadbdb3b95e..2959e541892 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/SquaredHinge.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/SquaredHinge.java @@ -14,13 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes the squared hinge loss between labels and predictions. * @@ -37,7 +38,7 @@ * Operand<TFloat32> predictions = * tf.constant(new float[][] {{0.6f, 0.4f}, {0.4f, 0.6f}}); * SquaredHinge squaredHinge = new SquaredHinge(tf); - * Operand<TFloat32> result = squaredHinge.call(labels, predictions); + * Operand<TFloat32> result = squaredHinge.call(Ops tf, labels, predictions); * // produces 1.86f * * @@ -45,7 +46,7 @@ * *
      *    Operand<TFloat32> sampleWeight = tf.constant(new float[] {1.f, 0.f});
    - *    Operand<TFloat32> result = squaredHinge.call(labels, predictions,
     + *    Operand&lt;TFloat32&gt; result = squaredHinge.call(tf, labels, predictions,
      *                                                  sampleWeight);
      *    // produces 0.73f
      * </pre>
@@ -53,50 +54,46 @@
      *
      * <p>Using SUM reduction type:
      *
      * <pre>
    - *    SquaredHinge squaredHinge = new SquaredHinge(tf, Reduction.SUM);
    - *    Operand<TFloat32> result = squaredHinge.call(labels, predictions);
    + *    SquaredHinge squaredHinge = new SquaredHinge(Reduction.SUM);
     + *    Operand&lt;TFloat32&gt; result = squaredHinge.call(tf, labels, predictions);
      *    // produces 3.72f
      * </pre>
      *
      * <p>Using NONE reduction type:
      *
      * <pre>
    - *    SquaredHinge squaredHinge = new SquaredHinge(tf, Reduction.NONE);
    - *    Operand<TFloat32> result = squaredHinge.call(labels, predictions);
    + *    SquaredHinge squaredHinge = new SquaredHinge(Reduction.NONE);
     + *    Operand&lt;TFloat32&gt; result = squaredHinge.call(tf, labels, predictions);
      *    // produces [1.46f, 2.26f]
      * </pre>
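Because every built-in loss now receives the Ops handle through call(), a user-defined loss written against the new AbstractLoss base class (added below) follows the same shape. A minimal sketch; the generic bounds on call() are spelled out here because this rendering of the diff drops them, so treat the exact signature as an assumption:

    import org.tensorflow.Operand;
    import org.tensorflow.framework.losses.Reduction;
    import org.tensorflow.framework.losses.impl.AbstractLoss;
    import org.tensorflow.framework.losses.impl.LossesHelper;
    import org.tensorflow.framework.utils.CastHelper;
    import org.tensorflow.op.Ops;
    import org.tensorflow.types.family.TNumber;

    /** Element-wise absolute-difference loss, stateless with respect to the Ops handle. */
    public class MyAbsoluteLoss extends AbstractLoss {

      public MyAbsoluteLoss(Reduction reduction) {
        super(null, reduction); // no Ops argument in the constructor any more
      }

      /** {@inheritDoc} */
      @Override
      public <T extends TNumber> Operand<T> call(
          Ops tf,
          Operand<? extends TNumber> labels,
          Operand<T> predictions,
          Operand<T> sampleWeights) {
        // Cast labels to the prediction type, compute |predictions - labels| ...
        Operand<T> tLabels = CastHelper.cast(tf, labels, predictions.type());
        Operand<T> losses = tf.math.abs(tf.math.sub(predictions, tLabels));
        // ... then apply the configured reduction and optional sample weights.
        return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights);
      }
    }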
    */ -public class SquaredHinge extends Loss { +public class SquaredHinge extends AbstractLoss { /** - * Creates a Squared Hinge Loss using {@link Class#getSimpleName()} as the loss name and a Loss - * Reduction of {@link Loss#REDUCTION_DEFAULT} - * - * @param tf the TensorFlow Ops + * Creates a Squared Hinge AbstractLoss using {@link Class#getSimpleName()} as the loss name and a + * AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} */ - public SquaredHinge(Ops tf) { - super(tf); + public SquaredHinge() { + super(); } /** - * Creates a Squared Hinge Loss using {@link Class#getSimpleName()} as the loss name + * Creates a Squared Hinge AbstractLoss using {@link Class#getSimpleName()} as the loss name * - * @param tf the TensorFlow Ops * @param reduction Type of Reduction to apply to the loss. */ - public SquaredHinge(Ops tf, Reduction reduction) { - super(tf, null, reduction); + public SquaredHinge(Reduction reduction) { + super(null, reduction); } /** * Creates a Squared Hinge * - * @param tf the TensorFlow Ops * @param name the name of the loss * @param reduction Type of Reduction to apply to the loss. */ - public SquaredHinge(Ops tf, String name, Reduction reduction) { - super(tf, name, reduction); + public SquaredHinge(String name, Reduction reduction) { + super(name, reduction); } /** @@ -123,19 +120,17 @@ public SquaredHinge(Ops tf, String name, Reduction reduction) { */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { + @SuppressWarnings("unchecked") - Operand tLabels = - predictions.type() == labels.type() - ? (Operand) labels - : cast(tf, labels, predictions.type()); + Operand tLabels = cast(tf, labels, predictions.type()); tLabels = LossesHelper.valueCheck( - getTF(), + tf, "labels value check [-1, 0, 1]", tLabels, - cast(getTF(), getTF().constant(new int[] {-1, 0, 1}), predictions.type())); - Operand losses = Losses.squaredHinge(getTF(), tLabels, predictions); - return LossesHelper.computeWeightedLoss(getTF(), losses, getReduction(), sampleWeights); + cast(tf, tf.constant(new int[] {-1, 0, 1}), predictions.type())); + Operand losses = Losses.squaredHinge(tf, tLabels, predictions); + return LossesHelper.computeWeightedLoss(tf, losses, getReduction(), sampleWeights); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/AbstractLoss.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/AbstractLoss.java new file mode 100644 index 00000000000..9534f6fe3ad --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/AbstractLoss.java @@ -0,0 +1,89 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/ +package org.tensorflow.framework.losses.impl; + +import org.tensorflow.Operand; +import org.tensorflow.framework.losses.Loss; +import org.tensorflow.framework.losses.Reduction; +import org.tensorflow.op.Ops; +import org.tensorflow.types.family.TNumber; + +public abstract class AbstractLoss implements Loss { + public static final Reduction REDUCTION_DEFAULT = Reduction.AUTO; + + protected final Reduction reduction; + private final String name; + + /** + * Creates a AbstractLoss using {@link Class#getSimpleName()} as the name and a AbstractLoss + * Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} + */ + protected AbstractLoss() { + this(null, Reduction.AUTO); + } + + /** + * Creates a AbstractLoss using a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} + * + * @param name the name of this AbstractLoss, if null the name will be {@link + * Class#getSimpleName()}. + */ + protected AbstractLoss(String name) { + this(name, Reduction.AUTO); + } + + /** + * Creates a AbstractLoss + * + * @param name the name of this loss, if null the name will be {@link Class#getSimpleName()}. + * @param reduction Type of Reduction to apply to the loss. + */ + protected AbstractLoss(String name, Reduction reduction) { + this.name = name == null ? getClass().getSimpleName() : name; + this.reduction = reduction; + } + + /** + * Calculates the loss + * + * @param tf the TensorFlow Ops + * @param labels the truth values or labels + * @param predictions the predictions + * @param The data type of the predictions and loss. + * @return the loss + */ + public Operand call( + Ops tf, Operand labels, Operand predictions) { + return call(tf, labels, predictions, null); + } + + /** + * Gets the loss reduction + * + * @return the loss reduction + */ + public Reduction getReduction() { + return reduction; + } + + /** + * Gets the name for this loss + * + * @return the name for this loss + */ + public String getName() { + return name; + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/AUC.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/AUC.java index bc5047d5855..0ba94798c19 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/AUC.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/AUC.java @@ -14,6 +14,15 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.Zeros; import org.tensorflow.framework.metrics.impl.ConfusionMatrixEnum; @@ -27,39 +36,29 @@ import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Metric that computes the approximate AUC (Area under the curve) via a Riemann sum. * - *

    This metric creates four local variables, {@code truePositives}, {@code trueNegatives - * }, {@code falsePositives} and {@code falseNegatives} that are used to compute the - * AUC. To discretize the AUC curve, a linearly spaced set of thresholds is used to compute pairs of - * recall and precision values. The area under the ROC-curve is therefore computed using the height - * of the recall values by the false positive rate, while the area under the PR-curve is the - * computed using the height of the precision values by the recall. + *

    This metric creates four local variables, {@code truePositives}, {@code trueNegatives }, + * {@code falsePositives} and {@code falseNegatives} that are used to compute the AUC. To discretize + * the AUC curve, a linearly spaced set of thresholds is used to compute pairs of recall and + * precision values. The area under the ROC-curve is therefore computed using the height of the + * recall values by the false positive rate, while the area under the PR-curve is the computed using + * the height of the precision values by the recall. * - *

    This value is ultimately returned as {@code auc}, an idempotent operation that computes - * the area under a discretized curve of precision versus recall values (computed using the + *

    This value is ultimately returned as {@code auc}, an idempotent operation that computes the + * area under a discretized curve of precision versus recall values (computed using the * aforementioned variables). The {@code numThresholds} variable controls the degree of * discretization with larger numbers of thresholds more closely approximating the true AUC. The - * quality of the approximation may vary dramatically depending on {@code numThresholds}. The - * {@code thresholds} parameter can be used to manually specify thresholds which split the - * predictions more evenly. + * quality of the approximation may vary dramatically depending on {@code numThresholds}. The {@code + * thresholds} parameter can be used to manually specify thresholds which split the predictions more + * evenly. * - *

    For best results, {@code predictions} should be distributed approximately uniformly in - * the range [0, 1] and not peaked around 0 or 1. The quality of the AUC approximation may be poor - * if this is not the case. Setting {@code summationMethod} to {@code minoring} or {@code - * majoring} can help quantify the error in the approximation by providing lower or upper - * bound estimate of the AUC. + *

    For best results, {@code predictions} should be distributed approximately uniformly in the + * range [0, 1] and not peaked around 0 or 1. The quality of the AUC approximation may be poor if + * this is not the case. Setting {@code summationMethod} to {@code minoring} or {@code majoring} can + * help quantify the error in the approximation by providing lower or upper bound estimate of the + * AUC. * *
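The reflowed paragraphs above only change line wrapping, but the approximation they describe is easy to picture in plain Java. The sketch below is illustrative only: it is not the TensorFlow implementation, and the linear threshold spacing and trapezoidal (interpolation-style) summation are assumptions made for the example.

    /** Approximate ROC AUC over numThresholds (>= 2) linearly spaced thresholds; illustration only. */
    static double approximateRocAuc(boolean[] labels, double[] predictions, int numThresholds) {
      double[] tpr = new double[numThresholds];
      double[] fpr = new double[numThresholds];
      for (int i = 0; i < numThresholds; i++) {
        double threshold = (double) i / (numThresholds - 1);
        int tp = 0, fp = 0, tn = 0, fn = 0;
        for (int j = 0; j < labels.length; j++) {
          boolean predictedPositive = predictions[j] > threshold;
          if (predictedPositive && labels[j]) tp++;
          else if (predictedPositive) fp++;
          else if (labels[j]) fn++;
          else tn++;
        }
        tpr[i] = tp + fn == 0 ? 0.0 : (double) tp / (tp + fn);
        fpr[i] = fp + tn == 0 ? 0.0 : (double) fp / (fp + tn);
      }
      // Trapezoidal Riemann sum over the discretized curve; thresholds increase,
      // so both rates decrease and each strip has non-negative width.
      double auc = 0.0;
      for (int i = 0; i < numThresholds - 1; i++) {
        auc += (fpr[i] - fpr[i + 1]) * (tpr[i] + tpr[i + 1]) / 2.0;
      }
      return auc;
    }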

    Usage:
    * @@ -155,8 +154,8 @@ public class AUC extends Metric { /** * Creates an AUC (Area under the curve) metric using {@link #DEFAULT_NAME} for the metric name, * {@link #DEFAULT_NUM_THRESHOLDS} for the numThresholds, {@link AUCCurve#ROC} for the curve type, - * {@link AUCSummationMethod#INTERPOLATION} for the summation method, {@code null} for - * thresholds, {@code false} for multiLabel, and {@code null} for labelWeights. + * {@link AUCSummationMethod#INTERPOLATION} for the summation method, {@code null} for thresholds, + * {@code false} for multiLabel, and {@code null} for labelWeights. * * @param tf The TensorFlow Ops * @param seed the seed for random number generation. An initializer created with a given seed @@ -180,8 +179,8 @@ public AUC(Ops tf, long seed, Class type) { /** * Creates an AUC (Area under the curve) metric using {@link #DEFAULT_NUM_THRESHOLDS} for the * numThresholds, {@link AUCCurve#ROC} for the curve type, {@link - * AUCSummationMethod#INTERPOLATION} for the summation method, {@code null} for thresholds, - * {@code false} for multiLabel, and {@code null} for labelWeights. + * AUCSummationMethod#INTERPOLATION} for the summation method, {@code null} for thresholds, {@code + * false} for multiLabel, and {@code null} for labelWeights. * * @param tf The TensorFlow Ops * @param name the name of the metric, if {@code null} defaults to {@link #DEFAULT_NAME} @@ -206,8 +205,8 @@ public AUC(Ops tf, String name, long seed, Class type) { /** * Creates an AUC (Area under the curve) metric using {@link #DEFAULT_NAME} for the metric name, * {@link AUCCurve#ROC} for the curve type, {@link AUCSummationMethod#INTERPOLATION} for the - * summation method, {@code null} for thresholds, {@code false} for multiLabel, and - * {@code null} for labelWeights. + * summation method, {@code null} for thresholds, {@code false} for multiLabel, and {@code null} + * for labelWeights. * * @param tf The TensorFlow Ops * @param numThresholds the number of thresholds to use when discretizing the roc curve. Values @@ -233,8 +232,8 @@ public AUC(Ops tf, int numThresholds, long seed, Class type) { /** * Creates an AUC (Area under the curve) metric using {@link #DEFAULT_NAME} for the metric name, * {@link AUCCurve#ROC} for the curve type, {@link AUCSummationMethod#INTERPOLATION} for the - * summation method, {@code null} for numThresholds, {@code false} for multiLabel, and - * {@code null} for labelWeights. + * summation method, {@code null} for numThresholds, {@code false} for multiLabel, and {@code + * null} for labelWeights. * * @param tf The TensorFlow Ops * @param thresholds Optional values to use as the thresholds for discretizing the curve. If set, @@ -259,8 +258,8 @@ public AUC(Ops tf, float[] thresholds, long seed, Class type) { /** * Creates an AUC (Area under the curve) metric. using {@link AUCCurve#ROC} for the curve type, - * {@link AUCSummationMethod#INTERPOLATION} for the summation method, {@code null} for - * thresholds, {@code false} for multiLabel, and {@code null} for labelWeights. + * {@link AUCSummationMethod#INTERPOLATION} for the summation method, {@code null} for thresholds, + * {@code false} for multiLabel, and {@code null} for labelWeights. 
* * @param tf The TensorFlow Ops * @param name the name of the metric, if {@code null} defaults to {@link #DEFAULT_NAME} @@ -314,8 +313,8 @@ public AUC(Ops tf, String name, float[] thresholds, long seed, Class type) { /** * Creates an AUC (Area under the curve) metric using {@link AUCSummationMethod#INTERPOLATION} for - * the summation method, {@code null} for thresholds, {@code false} for multiLabel, and - * {@code null} for labelWeights. + * the summation method, {@code null} for thresholds, {@code false} for multiLabel, and {@code + * null} for labelWeights. * * @param tf The TensorFlow Ops * @param name the name of the metric, if {@code null} defaults to {@link #DEFAULT_NAME} @@ -372,8 +371,8 @@ public AUC(Ops tf, String name, float[] thresholds, AUCCurve curve, long seed, C /** * Creates an AUC (Area under the curve) metric using {@link #DEFAULT_NAME} for the metric name, - * {@link AUCSummationMethod#INTERPOLATION} for the summation method, {@code null} for - * thresholds, {@code false} for multiLabel, and {@code null} for labelWeights. + * {@link AUCSummationMethod#INTERPOLATION} for the summation method, {@code null} for thresholds, + * {@code false} for multiLabel, and {@code null} for labelWeights. * * @param tf The TensorFlow Ops * @param numThresholds the number of thresholds to use when discretizing the roc curve. Values @@ -400,8 +399,8 @@ public AUC(Ops tf, int numThresholds, AUCCurve curve, long seed, Class type) /** * Creates an AUC (Area under the curve) metric using {@code null} for numThresholds, {@link - * AUCSummationMethod#INTERPOLATION} for the summation method, {@code false} for multiLabel, - * and {@code null} for labelWeights. + * AUCSummationMethod#INTERPOLATION} for the summation method, {@code false} for multiLabel, and + * {@code null} for labelWeights. * * @param tf The TensorFlow Ops * @param thresholds Optional values to use as the thresholds for discretizing the curve. If set, @@ -428,8 +427,7 @@ public AUC(Ops tf, float[] thresholds, AUCCurve curve, long seed, Class type) /** * Creates an AUC (Area under the curve) metric. using {@link #DEFAULT_NAME} for the metric name,, - * {@code null} for thresholds, {@code false} for multiLabel, and {@code null} for - * labelWeights. + * {@code null} for thresholds, {@code false} for multiLabel, and {@code null} for labelWeights. * * @param tf The TensorFlow Ops * @param numThresholds the number of thresholds to use when discretizing the roc curve. Values @@ -453,8 +451,8 @@ public AUC( /** * Creates an AUC (Area under the curve) metric using {@link #DEFAULT_NAME} for the metric name, - * {@code null} for numThresholds, {@code false} for multiLabel, and {@code null} - * for labelWeights. + * {@code null} for numThresholds, {@code false} for multiLabel, and {@code null} for + * labelWeights. * * @param tf The TensorFlow Ops * @param thresholds Optional values to use as the thresholds for discretizing the curve. If set, @@ -487,8 +485,8 @@ public AUC( } /** - * Creates an AUC (Area under the curve) metric. using {@code null} for thresholds, {@code - * false} for multiLabel, and {@code null} for labelWeights. + * Creates an AUC (Area under the curve) metric. using {@code null} for thresholds, {@code false} + * for multiLabel, and {@code null} for labelWeights. * * @param tf The TensorFlow Ops * @param name the name of the metric, if {@code null} defaults to {@link #DEFAULT_NAME} @@ -513,8 +511,8 @@ public AUC( } /** - * Creates an AUC (Area under the curve) metric. 
using {@code null} for the numThresholds, - * {@code false} for multiLabel, and {@code null} for labelWeights. + * Creates an AUC (Area under the curve) metric. using {@code null} for the numThresholds, {@code + * false} for multiLabel, and {@code null} for labelWeights. * * @param tf The TensorFlow Ops * @param name the name of the metric, if {@code null} defaults to {@link #DEFAULT_NAME} @@ -560,16 +558,16 @@ public AUC( * @param summationMethod Specifies the Riemann summation method used * @param thresholds Optional values to use as the thresholds for discretizing the curve. If set, * the numThresholds parameter is ignored. Values should be in [0, 1]. This method - * automatically brackets the provided {@code thresholds} with a (-{@link #EPSILON}) - * below and a (1 + {@link #EPSILON}) above. + * automatically brackets the provided {@code thresholds} with a (-{@link #EPSILON}) below and + * a (1 + {@link #EPSILON}) above. * @param multiLabel boolean indicating whether multilabel data should be treated as such, wherein * AUC is computed separately for each label and then averaged across labels, or (when false) * if the data should be flattened into a single label before AUC computation. In the latter * case, when multilabel data is passed to AUC, each label-prediction pair is treated as an * individual data point. Should be set to {@code false} for multi-class data. * @param labelWeights non-negative weights used to compute AUCs for multilabel data. When {@code - * multiLabel} is true, the weights are applied to the individual label AUCs when they - * are averaged to produce the multi-label AUC. When it's false, they are used to weight the + * multiLabel} is true, the weights are applied to the individual label AUCs when they are + * averaged to produce the multi-label AUC. When it's false, they are used to weight the * individual label predictions in computing the confusion matrix on the flattened data. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. @@ -684,8 +682,8 @@ private Map> build(Shape shape) { } // Create metric variables - Zeros zeros = new Zeros<>(tf); - Operand zero = zeros.call(tf.constant(variableShape), type); + Zeros zeros = new Zeros<>(); + Operand zero = zeros.call(tf, tf.constant(variableShape), type); if (truePositives == null) { truePositives = tf.withName(getTruePositivesName()).variable(zero); initializers.put(ConfusionMatrixEnum.TRUE_POSITIVES, tf.assign(truePositives, zero)); @@ -715,8 +713,8 @@ private Map> build(Shape shape) { * * @param labels shape (N, Cx, L1?) where N is the number of examples, Cx is zero or more class * dimensions, and L1 is a potential extra dimension of size 1 that would be squeezed. Will be - * cast to {@code }. If {@link #multiLabel} or if {@link #labelWeights} {@code != null - * }, then Cx must be a single dimension. + * cast to {@code }. If {@link #multiLabel} or if {@link #labelWeights} {@code != null }, + * then Cx must be a single dimension. * @param predictions the predictions shape (N, Cx, P1?). Will be cast to {@code T}. * @param sampleWeights sample weights to be applied to values, may be null. Will be cast to * {@code }. 
diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Accuracy.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Accuracy.java index 516d6c91ba6..14f45020739 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Accuracy.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Accuracy.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.losses.impl.LossTuple; import org.tensorflow.framework.metrics.impl.LossMetric; @@ -23,18 +25,14 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Metric that calculates how often predictions equals labels. * *

    This metric creates two local variables, total and count that are used to compute the - * frequency with which {@code predictions} matches {@code labels}. This frequency is - * ultimately returned as binary accuracy: an idempotent operation that simply divides total by - * count. + * frequency with which {@code predictions} matches {@code labels}. This frequency is ultimately + * returned as binary accuracy: an idempotent operation that simply divides total by count. * - *

    If sampleWeights is {@code null}, weights default to 1. Use sampleWeights of 0 to mask - * values. + *

    If sampleWeights is {@code null}, weights default to 1. Use sampleWeights of 0 to mask values. * * @param The data type for the metric result */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/BinaryAccuracy.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/BinaryAccuracy.java index 0e41699e165..c27bf1b2acf 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/BinaryAccuracy.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/BinaryAccuracy.java @@ -14,24 +14,22 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.metrics.impl.LossMetric; import org.tensorflow.framework.metrics.impl.MeanMetricWrapper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Metric that calculates how often predictions matches binary labels. * *

    This metric creates two local variables, total and count that are used to compute the - * frequency with which {@code predictions} matches {@code labels}. This frequency is - * ultimately returned as binary accuracy: an idempotent operation that simply divides total by - * count. + * frequency with which {@code predictions} matches {@code labels}. This frequency is ultimately + * returned as binary accuracy: an idempotent operation that simply divides total by count. * - *

    If sampleWeights is {@code null}, weights default to 1. Use sampleWeights of 0 to mask - * values. + *

    If sampleWeights is {@code null}, weights default to 1. Use sampleWeights of 0 to mask values. * * @param The data type for the metric result */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/CategoricalAccuracy.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/CategoricalAccuracy.java index dece2d1cd50..70dfebc508d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/CategoricalAccuracy.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/CategoricalAccuracy.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.metrics.impl.LossMetric; import org.tensorflow.framework.metrics.impl.MeanMetricWrapper; @@ -22,23 +24,20 @@ import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Metric that calculates how often predictions matches one-hot labels. * - *

    You can provide {@code logits} of classes as {@code predictions}, since argmax of - * {@code logits} and probabilities are same. + *

    You can provide {@code logits} of classes as {@code predictions}, since argmax of {@code + * logits} and probabilities are same. * - *

    This metric creates two local variables, {@code total} and {@code count} that are - * used to compute the frequency with which {@code predictions} matches {@code labels}. - * This frequency is ultimately returned as categorical accuracy: an idempotent operation that - * simply divides total by count. + *

    This metric creates two local variables, {@code total} and {@code count} that are used to + * compute the frequency with which {@code predictions} matches {@code labels}. This frequency is + * ultimately returned as categorical accuracy: an idempotent operation that simply divides total by + * count. * - *

    {@code predictions} and {@code labels} should be passed in as vectors of - * probabilities, rather than as labels. If necessary, use {@link - * org.tensorflow.op.Ops#oneHot(Operand, Operand, Operand, Operand, OneHot.Options...)} to expand - * {@code labels} as a vector. + *

    {@code predictions} and {@code labels} should be passed in as vectors of probabilities, rather + * than as labels. If necessary, use {@link org.tensorflow.op.Ops#oneHot(Operand, Operand, Operand, + * Operand, OneHot.Options...)} to expand {@code labels} as a vector. * *

    If sample_weight is None, weights default to 1. Use sample_weight of 0 to mask values. * diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/CategoricalCrossentropy.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/CategoricalCrossentropy.java index 58aa51f664c..fa7c1a1a626 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/CategoricalCrossentropy.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/CategoricalCrossentropy.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.losses.Losses; import org.tensorflow.framework.metrics.impl.LossMetric; @@ -21,16 +23,13 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * A Metric that computes the categorical cross-entropy loss between true labels and predicted * labels. * *

    This is the crossentropy metric class to be used when there are multiple label classes (2 or * more). The labels should be given as a one_hot representation. eg., When labels values are {@code - * [2, 0, 1]}, the labels Operand contains = {@code [[0, 0, 1], [1, 0, 0], [0, 1, 0]] - * }. + * [2, 0, 1]}, the labels Operand contains = {@code [[0, 0, 1], [1, 0, 0], [0, 1, 0]] }. * * @param The data type for the metric result */ @@ -52,9 +51,9 @@ public class CategoricalCrossentropy extends MeanMetricWrappe * @param fromLogits Whether to interpret predictions as a tensor of logit values oras opposed to * a probability distribution. * @param labelSmoothing value used to smooth labels, When > 0, label values are smoothed, - * meaning the confidence on label values are relaxed. e.g. {@code labelSmoothing=0.2} - * means that we will use a value of {@code 0.1} for label {@code 0} and {@code 0.9 - * } for label {@code 1} + * meaning the confidence on label values are relaxed. e.g. {@code labelSmoothing=0.2} means + * that we will use a value of {@code 0.1} for label {@code 0} and {@code 0.9 } for label + * {@code 1} * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the type for the variables and result @@ -73,13 +72,12 @@ public CategoricalCrossentropy( * @param fromLogits Whether to interpret predictions as a tensor of logit values as opposed to a * probability distribution. * @param labelSmoothing value used to smooth labels, When > 0, label values are smoothed, - * meaning the confidence on label values are relaxed. e.g. {@code labelSmoothing=0.2} - * means that we will use a value of {@code 0.1} for label {@code 0} and {@code 0.9 - * } for label {@code 1} + * meaning the confidence on label values are relaxed. e.g. {@code labelSmoothing=0.2} means + * that we will use a value of {@code 0.1} for label {@code 0} and {@code 0.9 } for label + * {@code 1} * @param axis Int specifying the channels axis. {@code axis={@link Losses#CHANNELS_LAST}} - * corresponds to data format {@code channels_last}, and {@code - * axis={@link Losses#CHANNELS_FIRST}} corresponds to data format {@code - * channels_first}. + * corresponds to data format {@code channels_last}, and {@code axis={@link + * Losses#CHANNELS_FIRST}} corresponds to data format {@code channels_first}. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the type for the variables and result diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/FalseNegatives.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/FalseNegatives.java index 3db7fffc2e9..9f957ee6c17 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/FalseNegatives.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/FalseNegatives.java @@ -22,12 +22,12 @@ /** * Metric that calculates the number of false negatives. * - *

    If {@code sampleWeights} is given, calculates the sum of the weights of false negatives. - * This metric creates one local variable, {@code accumulator} that is used to keep track of - * the number of false negatives. + *

    If {@code sampleWeights} is given, calculates the sum of the weights of false negatives. This + * metric creates one local variable, {@code accumulator} that is used to keep track of the number + * of false negatives. * - *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use {@code - * sampleWeights} of 0 to mask values. + *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use {@code sampleWeights} of 0 + * to mask values. * * @param The data type for the metric result */ @@ -50,10 +50,10 @@ public FalseNegatives(Ops tf, long seed, Class type) { * Creates a FalseNegatives metric, using {@link Class#getSimpleName()} for the metric name * * @param tf the TensorFlow Ops - * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -66,10 +66,10 @@ public FalseNegatives(Ops tf, float threshold, long seed, Class type) { * Creates a FalseNegatives metric, using {@link Class#getSimpleName()} for the metric name * * @param tf the TensorFlow Ops - * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -96,10 +96,10 @@ public FalseNegatives(Ops tf, String name, long seed, Class type) { * * @param tf the TensorFlow Ops * @param name the name of the metric, if null then {@link Class#getSimpleName()} is used - * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -113,10 +113,10 @@ public FalseNegatives(Ops tf, String name, float threshold, long seed, Class * * @param tf the TensorFlow Ops * @param name the name of the metric, if null then {@link Class#getSimpleName()} is used - * @param thresholds threshold values in the range {@code [0, 1]}. 
A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/FalsePositives.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/FalsePositives.java index 551529b6179..a3d585dea0f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/FalsePositives.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/FalsePositives.java @@ -22,12 +22,12 @@ /** * Metric that calculates the number of false positives. * - *

    If {@code sampleWeights} is given, calculates the sum of the weights of false positives. - * This metric creates one local variable, {@code accumulator} that is used to keep track of - * the number of false positives. + *

    If {@code sampleWeights} is given, calculates the sum of the weights of false positives. This + * metric creates one local variable, {@code accumulator} that is used to keep track of the number + * of false positives. * - *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use {@code - * sampleWeights} of 0 to mask values. + *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use {@code sampleWeights} of 0 + * to mask values. * * @param The data type for the metric result */ @@ -50,10 +50,10 @@ public FalsePositives(Ops tf, long seed, Class type) { * Creates a FalsePositives metric, using {@link Class#getSimpleName()} for the metric name * * @param tf the TensorFlow Ops - * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -66,10 +66,10 @@ public FalsePositives(Ops tf, float threshold, long seed, Class type) { * Creates a FalsePositives metric, using {@link Class#getSimpleName()} for the metric name * * @param tf the TensorFlow Ops - * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -96,10 +96,10 @@ public FalsePositives(Ops tf, String name, long seed, Class type) { * * @param tf the TensorFlow Ops * @param name the name of the metric, if null then {@link Class#getSimpleName()} is used - * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -113,10 +113,10 @@ public FalsePositives(Ops tf, String name, float threshold, long seed, Class * * @param tf the TensorFlow Ops * @param name the name of the metric, if null then {@link Class#getSimpleName()} is used - * @param thresholds threshold values in the range {@code [0, 1]}. 
A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java index 22baab3d6cb..00ae3727249 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java @@ -14,6 +14,11 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.Collections; +import java.util.List; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.Zeros; import org.tensorflow.framework.metrics.impl.MetricsHelper; @@ -24,12 +29,6 @@ import org.tensorflow.op.core.Variable; import org.tensorflow.types.family.TNumber; -import java.util.Collections; -import java.util.List; - -import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes the mean Intersection-Over-Union metric. * @@ -93,11 +92,15 @@ private void init() { Shape variableShape = Shape.of(numClasses, numClasses); if (totalConfusionMatrix == null) { - Zeros zeros = new Zeros<>(getTF()); + Zeros zeros = new Zeros<>(); totalConfusionMatrix = - getTF().withName(totalCMName).variable(zeros.call(getTF().constant(variableShape), type)); + getTF() + .withName(totalCMName) + .variable(zeros.call(getTF(), getTF().constant(variableShape), type)); initializer = - getTF().assign(totalConfusionMatrix, zeros.call(getTF().constant(variableShape), type)); + getTF() + .assign( + totalConfusionMatrix, zeros.call(getTF(), getTF().constant(variableShape), type)); } } @@ -124,8 +127,8 @@ public Assign getInitializer() { * @param sampleWeights Optional weighting of each example. Defaults to 1, if null. Rank is either * 0, or the same rank as labels, and must be broadcastable to labels. 
* @return the Operands that updates totalConfusionMatrix variable - * @throws IllegalArgumentException if the weights rank is not 0, and weights rank @{code !=} labels rank, - * and if the predictions size is not equal to the labels size + * @throws IllegalArgumentException if the weights rank is not 0, and weights rank @{code !=} + * labels rank, and if the predictions size is not equal to the labels size */ @Override public List updateStateList( diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanRelativeError.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanRelativeError.java index acf28f5b2cc..915d281e44b 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanRelativeError.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanRelativeError.java @@ -14,6 +14,9 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.List; import org.tensorflow.Operand; import org.tensorflow.framework.losses.impl.LossTuple; import org.tensorflow.framework.losses.impl.LossesHelper; @@ -21,20 +24,15 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import java.util.List; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes the mean relative error by normalizing with the given values. * - *

    This metric creates two local variables, {@code total} and {@code count} that are - * used to compute the mean relative error. This is weighted by {@code sampleWeight}, and it is - * ultimately returned as mean relative error: an idempotent operation that simply divides total by - * count. + *

    This metric creates two local variables, {@code total} and {@code count} that are used to + * compute the mean relative error. This is weighted by {@code sampleWeight}, and it is ultimately + * returned as mean relative error: an idempotent operation that simply divides total by count. * - *

    If {@code sampleWeight} is {@code null}, weights default to 1. Use {@code sampleWeight} - * of 0 to mask values. + *

    If {@code sampleWeight} is {@code null}, weights default to 1. Use {@code sampleWeight} of 0 + * to mask values. * * @param The data type for the metric result */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanTensor.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanTensor.java index d88d7a4c1b4..be09e7dd3f6 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanTensor.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanTensor.java @@ -14,6 +14,10 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.ArrayList; +import java.util.List; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.Zeros; import org.tensorflow.framework.losses.impl.LossTuple; @@ -26,11 +30,6 @@ import org.tensorflow.op.core.Variable; import org.tensorflow.types.family.TNumber; -import java.util.ArrayList; -import java.util.List; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Metric that computes the element-wise (weighted) mean of the given tensors. * @@ -85,8 +84,8 @@ public MeanTensor(Ops tf, String name, long seed, Class type) { private boolean init(Shape shape) { if (!initialized) { this.shape = shape; - Zeros zeros = new Zeros<>(getTF()); - Operand zero = zeros.call(getTF().constant(shape), type); + Zeros zeros = new Zeros<>(); + Operand zero = zeros.call(getTF(), getTF().constant(shape), type); if (total == null) { total = getTF().withName(totalName).variable(zero); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Precision.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Precision.java index 3812e799b75..f978c0e20da 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Precision.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Precision.java @@ -14,6 +14,13 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.Zeros; import org.tensorflow.framework.metrics.impl.ConfusionMatrixEnum; @@ -25,33 +32,25 @@ import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes the precision of the predictions with respect to the labels. * - *

    The metric creates two local variables, {@code truePositives} and {@code falsePositives - * } that are used to compute the precision. This value is ultimately returned as precision, - * an idempotent operation that simply divides {@code truePositives} by the sum of {@code - * truePositives} and {@code falsePositives}. + *

    The metric creates two local variables, {@code truePositives} and {@code falsePositives } that + * are used to compute the precision. This value is ultimately returned as precision, an idempotent + * operation that simply divides {@code truePositives} by the sum of {@code truePositives} and + * {@code falsePositives}. * - *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use sampleWeights of - * 0 to mask values. + *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use sampleWeights of 0 to mask + * values. * - *

    If {@code topK} is set, the metric calculates precision as how often on average a class - * among the top-k classes with the highest predicted values of a batch entry is correct and can be - * found in the label for that entry. + *

    If {@code topK} is set, the metric calculates precision as how often on average a class among + * the top-k classes with the highest predicted values of a batch entry is correct and can be found + * in the label for that entry. * *

    If {@code classId} is specified, the metric calculates precision by considering only the - * entries in the batch for which {@code classId} is above the {@code thresholds} and/or - * in the top-k highest predictions, and computing the fraction of them for which {@code classId - * } is indeed a correct label. + * entries in the batch for which {@code classId} is above the {@code thresholds} and/or in the + * top-k highest predictions, and computing the fraction of them for which {@code classId } is + * indeed a correct label. * * @param The data type for the metric result */ @@ -103,10 +102,9 @@ public Precision(Ops tf, String name, long seed, Class type) { * values. * * @param tf the TensorFlow Ops - * @param threshold Optional threshold value in the range {@code [0, 1]}. A threshold is - * compared with prediction values to determine the truth value of predictions (i.e., above - * the threshold is true, below is false). One metric value is generated for each threshold - * value. + * @param threshold Optional threshold value in the range {@code [0, 1]}. A threshold is compared + * with prediction values to determine the truth value of predictions (i.e., above the + * threshold is true, below is false). One metric value is generated for each threshold value. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -138,10 +136,9 @@ public Precision(Ops tf, float[] thresholds, long seed, Class type) { * @param tf the TensorFlow Ops * @param name name of the metric instance. If null, name defaults to {@link * Class#getSimpleName()}. - * @param threshold Optional threshold value in the range {@code [0, 1]}. A threshold is - * compared with prediction values to determine the truth value of predictions (i.e., above - * the threshold is true, below is false). One metric value is generated for each threshold - * value. + * @param threshold Optional threshold value in the range {@code [0, 1]}. A threshold is compared + * with prediction values to determine the truth value of predictions (i.e., above the + * threshold is true, below is false). One metric value is generated for each threshold value. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -172,10 +169,9 @@ public Precision(Ops tf, String name, float[] thresholds, long seed, Class ty * Creates a Precision Metric with a name of {@link Class#getSimpleName()} * * @param tf the TensorFlow Ops - * @param threshold Optional threshold value in the range {@code [0, 1]}. A threshold is - * compared with prediction values to determine the truth value of predictions (i.e., above - * the threshold is true, below is false). One metric value is generated for each threshold - * value. + * @param threshold Optional threshold value in the range {@code [0, 1]}. A threshold is compared + * with prediction values to determine the truth value of predictions (i.e., above the + * threshold is true, below is false). One metric value is generated for each threshold value. * @param topK An optional value specifying the top-k predictions to consider when calculating * precision. * @param classId Optional Integer class ID for which we want binary metrics. 
This must be in the @@ -216,10 +212,9 @@ public Precision( * @param tf the TensorFlow Ops * @param name name of the metric instance. If null, name defaults to {@link * Class#getSimpleName()}. - * @param threshold Optional threshold value in the range {@code [0, 1]}. A threshold is - * compared with prediction values to determine the truth value of predictions (i.e., above - * the threshold is true, below is false). One metric value is generated for each threshold - * value. + * @param threshold Optional threshold value in the range {@code [0, 1]}. A threshold is compared + * with prediction values to determine the truth value of predictions (i.e., above the + * threshold is true, below is false). One metric value is generated for each threshold value. * @param topK An optional value specifying the top-k predictions to consider when calculating * precision. * @param classId Optional Integer class ID for which we want binary metrics. This must be in the @@ -280,17 +275,15 @@ public Precision( /** Initializes the variables */ private void init() { Ops tf = getTF(); - Zeros zeros = new Zeros<>(tf); - Operand zero = zeros.call(tf.constant(Shape.of(thresholds.length)), type); + Zeros zeros = new Zeros<>(); + Operand zero = zeros.call(tf, tf.constant(Shape.of(thresholds.length)), type); if (this.truePositives == null) { this.truePositives = tf.withName(truePositivesName).variable(zero); initializers.add(tf.assign(truePositives, zero)); } if (this.falsePositives == null) { - this.falsePositives = - tf.withName(falsePositivesName) - .variable(zero); + this.falsePositives = tf.withName(falsePositivesName).variable(zero); initializers.add(tf.assign(falsePositives, zero)); } } @@ -340,11 +333,12 @@ public List updateStateList( public Operand result() { Ops tf = getTF(); Operand result = tf.math.divNoNan(truePositives, tf.math.add(truePositives, falsePositives)); - return thresholds.length == 1 - ? tf.reshape(tf.slice( - result, - tf.expandDims(tf.constant(0), tf.constant(0)), - tf.expandDims(tf.constant(1), tf.constant(0))), + return thresholds.length == 1 + ? tf.reshape( + tf.slice( + result, + tf.expandDims(tf.constant(0), tf.constant(0)), + tf.expandDims(tf.constant(1), tf.constant(0))), tf.constant(Shape.scalar())) : result; } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/PrecisionAtRecall.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/PrecisionAtRecall.java index 5f5f9b47a10..a5285ff6b2d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/PrecisionAtRecall.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/PrecisionAtRecall.java @@ -14,14 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.metrics.impl.SensitivitySpecificityBase; import org.tensorflow.op.Ops; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes best precision where recall is >= specified value. * @@ -29,8 +29,8 @@ * falseNegatives that are used to compute the precision at the given recall. The threshold for the * given recall value is computed and used to evaluate the corresponding precision. * - *

    If {@code sampleWeights} is null, weights default to 1. Use {@code sampleWeights} of - * 0 to mask values. + *

    If {@code sampleWeights} is null, weights default to 1. Use {@code sampleWeights} of 0 to mask + * values. * * @param The data type for the metric result */ @@ -115,8 +115,7 @@ public PrecisionAtRecall( public Operand result() { Ops tf = getTF(); - Operand div = - tf.math.divNoNan(truePositives, tf.math.add(truePositives, falseNegatives)); + Operand div = tf.math.divNoNan(truePositives, tf.math.add(truePositives, falseNegatives)); Operand sub = tf.math.sub(div, cast(tf, tf.constant(recall), getType())); Operand minIndex = tf.math.argMin(tf.math.abs(sub), tf.constant(0), TInt32.class); minIndex = tf.expandDims(minIndex, tf.constant(0)); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Recall.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Recall.java index 3886ec050b0..6cb87f5be9e 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Recall.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Recall.java @@ -14,6 +14,13 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.Zeros; import org.tensorflow.framework.metrics.impl.ConfusionMatrixEnum; @@ -25,31 +32,23 @@ import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes the recall of the predictions with respect to the labels. * - *

    This metric creates two local variables, {@code truePositives} and {@code falseNegatives - * }, that are used to compute the recall. This value is ultimately returned as recall, an - * idempotent operation that simply divides {@code truePositives} by the sum of {@code - * truePositives} and {@code falseNegatives}. + *

    This metric creates two local variables, {@code truePositives} and {@code falseNegatives }, + * that are used to compute the recall. This value is ultimately returned as recall, an idempotent + * operation that simply divides {@code truePositives} by the sum of {@code truePositives} and + * {@code falseNegatives}. * - *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use sampleWeights of - * 0 to mask values. + *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use sampleWeights of 0 to mask + * values. * - *

    If {@code topK} is set, the metric calculates recall as how often on average a class - * among the labels of a batch entry is in the top-k predictions. + *

    If {@code topK} is set, the metric calculates recall as how often on average a class among the + * labels of a batch entry is in the top-k predictions. * - *

    If {@code classId} is specified, the metric calculates recall by considering only the - * entries in the batch for which {@code classId} is in the label, and computing the fraction - * of them for which {@code classId} is above the threshold and/or in the top-k predictions. + *

    If {@code classId} is specified, the metric calculates recall by considering only the entries + * in the batch for which {@code classId} is in the label, and computing the fraction of them for + * which {@code classId} is above the threshold and/or in the top-k predictions. * * @param The data type for the metric result */ @@ -305,8 +304,8 @@ public Recall( /** Initializes the Variables */ private void init() { Ops tf = getTF(); - Zeros zeros = new Zeros<>(tf); - Operand zero = zeros.call(tf.constant(Shape.of(this.thresholds.length)), type); + Zeros zeros = new Zeros<>(); + Operand zero = zeros.call(tf, tf.constant(Shape.of(this.thresholds.length)), type); if (truePositives == null) { truePositives = tf.withName(truePositivesName).variable(zero); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/RecallAtPrecision.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/RecallAtPrecision.java index a3fc2f77b7f..2386087e8a2 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/RecallAtPrecision.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/RecallAtPrecision.java @@ -14,6 +14,9 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.metrics.impl.SensitivitySpecificityBase; import org.tensorflow.op.Ops; @@ -21,9 +24,6 @@ import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes best recall where precision is >= specified value. * @@ -34,8 +34,8 @@ * falseNegatives that are used to compute the recall at the given precision. The threshold for the * given precision value is computed and used to evaluate the corresponding recall. * - *

    If {@code sampleWeights} is null, weights default to 1. Use {@code sampleWeights} of - * 0 to mask values. + *

    If {@code sampleWeights} is null, weights default to 1. Use {@code sampleWeights} of 0 to mask + * values. * * @param The data type for the metric result */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/RootMeanSquaredError.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/RootMeanSquaredError.java index 3886428425b..8b0b06e788d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/RootMeanSquaredError.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/RootMeanSquaredError.java @@ -15,6 +15,9 @@ */ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.List; import org.tensorflow.Operand; import org.tensorflow.framework.losses.impl.LossTuple; import org.tensorflow.framework.losses.impl.LossesHelper; @@ -22,13 +25,8 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import java.util.List; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** - * Computes root mean squared error metric between {@code labels} and {@code predictions} - * . + * Computes root mean squared error metric between {@code labels} and {@code predictions} . * * @param The data type for the metric result */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SensitivityAtSpecificity.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SensitivityAtSpecificity.java index 29c0504b823..3892af920e9 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SensitivityAtSpecificity.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SensitivityAtSpecificity.java @@ -14,30 +14,30 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.metrics.impl.SensitivitySpecificityBase; import org.tensorflow.op.Ops; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes best sensitivity where sensitivity is >= specified value. * - *

    {@code Sensitivity} measures the proportion of actual positives that are correctly - * identified as such {@code (tp / (tp + fn))}. + *

    {@code Sensitivity} measures the proportion of actual positives that are correctly identified + * as such {@code (tp / (tp + fn))}. * - *

    {@code Specificity} measures the proportion of actual negatives that are correctly - * identified as such {@code (tn / (tn + fp))}. + *

    {@code Specificity} measures the proportion of actual negatives that are correctly identified + * as such {@code (tn / (tn + fp))}. * - *

    This metric creates four local variables, {@code truePositives}, {@code trueNegatives - * }, {@code falsePositives} and {@code falseNegatives} that are used to compute the - * sensitivity at the given specificity. The threshold for the given specificity value is computed - * and used to evaluate the corresponding sensitivity. + *

    This metric creates four local variables, {@code truePositives}, {@code trueNegatives }, + * {@code falsePositives} and {@code falseNegatives} that are used to compute the sensitivity at the + * given specificity. The threshold for the given specificity value is computed and used to evaluate + * the corresponding sensitivity. * - *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use sample_weight of - * 0 to mask values. + *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use sample_weight of 0 to mask + * values. * * @see Additional information * about specificity and sensitivity diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SparseCategoricalAccuracy.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SparseCategoricalAccuracy.java index 5294f798044..10d33c31508 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SparseCategoricalAccuracy.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SparseCategoricalAccuracy.java @@ -14,6 +14,9 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.Collections; import org.tensorflow.Operand; import org.tensorflow.framework.metrics.impl.LossMetric; import org.tensorflow.framework.metrics.impl.MeanMetricWrapper; @@ -24,10 +27,6 @@ import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; -import java.util.Collections; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Calculates how often predictions matches integer labels. * @@ -35,9 +34,9 @@ * probabilities are same. * *

    This metric creates two local variables, `total` and `count` that are used to compute the - * frequency with which {@code predictions} matches {@code labels}. This frequency is - * ultimately returned as `sparse categorical accuracy`: an idempotent operation that simply divides - * `total` by `count`. + * frequency with which {@code predictions} matches {@code labels}. This frequency is ultimately + * returned as `sparse categorical accuracy`: an idempotent operation that simply divides `total` by + * `count`. * *

    If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values.' * diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SpecificityAtSensitivity.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SpecificityAtSensitivity.java index 2cb7e54eba0..aa8eeb062b3 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SpecificityAtSensitivity.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/SpecificityAtSensitivity.java @@ -14,29 +14,29 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.metrics.impl.SensitivitySpecificityBase; import org.tensorflow.op.Ops; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes best specificity where sensitivity is >= specified value. {@code Sensitivity} - * measures the proportion of actual positives that are correctly identified as such {@code - * (tp / (tp + fn))}. + * measures the proportion of actual positives that are correctly identified as such {@code (tp / + * (tp + fn))}. * - *

    {@code Specificity} measures the proportion of actual negatives that are correctly - * identified as such {@code (tn / (tn + fp))}. + *

    {@code Specificity} measures the proportion of actual negatives that are correctly identified + * as such {@code (tn / (tn + fp))}. * - *

    This metric creates four local variables, {@code truePositives}, {@code trueNegatives - * }, {@code falsePositives} and {@code falseNegatives} that are used to compute the - * specificity at the given sensitivity. The threshold for the given sensitivity value is computed - * and used to evaluate the corresponding specificity. + *

    This metric creates four local variables, {@code truePositives}, {@code trueNegatives }, + * {@code falsePositives} and {@code falseNegatives} that are used to compute the specificity at the + * given sensitivity. The threshold for the given sensitivity value is computed and used to evaluate + * the corresponding specificity. * - *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use sample_weight of - * 0 to mask values. + *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use sample_weight of 0 to mask + * values. * * @see Additional information * about specificity and sensitivity diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Sum.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Sum.java index 637ca6cdd05..bcb1d7b9a36 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Sum.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/Sum.java @@ -21,11 +21,11 @@ /** * Computes the (weighted) sum of the given values. * - *

    For example, if values is {@code [1, 3, 5, 7]} then the sum is {@code 16}. If the - * weights were specified as {@code [1, 1, 0, 0]}, then the sum would be {@code 4.} + *

    For example, if values is {@code [1, 3, 5, 7]} then the sum is {@code 16}. If the weights were + * specified as {@code [1, 1, 0, 0]}, then the sum would be {@code 4.} * - *

    This metric creates one variable, {@code total}, that is used to compute the sum of - * values. This is ultimately returned as sum. + *

    This metric creates one variable, {@code total}, that is used to compute the sum of values. + * This is ultimately returned as sum. * *

    If sample_weight is None, weights default to 1. Use sample_weight of 0 to mask values. */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TopKCategoricalAccuracy.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TopKCategoricalAccuracy.java index 0146552433f..b630be5bcc2 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TopKCategoricalAccuracy.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TopKCategoricalAccuracy.java @@ -14,14 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.metrics; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.metrics.impl.LossMetric; import org.tensorflow.framework.metrics.impl.MeanMetricWrapper; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Computes the poisson loss metric between labels and predictions. * @@ -34,8 +34,8 @@ public class TopKCategoricalAccuracy extends MeanMetricWrappe private final int k; /** - * Creates a TopKCategoricalAccuracy metric using {@link #DEFAULT_K} for {@code k}, Number of - * top elements to look at for computing accuracy. + * Creates a TopKCategoricalAccuracy metric using {@link #DEFAULT_K} for {@code k}, Number of top + * elements to look at for computing accuracy. * * @param tf the TensorFlow Ops * @param name the name of this metric, if null then metric name is {@link Class#getSimpleName()}. diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TrueNegatives.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TrueNegatives.java index 5c65f8c469f..fd6b95df6d2 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TrueNegatives.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TrueNegatives.java @@ -22,12 +22,12 @@ /** * Metric that calculates the number of true negatives. * - *

    If {@code sampleWeights} is given, calculates the sum of the weights of true negatives. - * This metric creates one local variable, {@code accumulator} that is used to keep track of - * the number of true negatives. + *

    If {@code sampleWeights} is given, calculates the sum of the weights of true negatives. This + * metric creates one local variable, {@code accumulator} that is used to keep track of the number + * of true negatives. * - *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use {@code - * sampleWeights} of 0 to mask values. + *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use {@code sampleWeights} of 0 + * to mask values. * * @param The data type for the metric result */ @@ -50,10 +50,10 @@ public TrueNegatives(Ops tf, long seed, Class type) { * Creates a TrueNegatives metric, using {@link Class#getSimpleName()} for the metric name * * @param tf the TensorFlow Ops - * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -66,10 +66,10 @@ public TrueNegatives(Ops tf, float threshold, long seed, Class type) { * Creates a TrueNegatives metric, using {@link Class#getSimpleName()} for the metric name * * @param tf the TensorFlow Ops - * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -96,10 +96,10 @@ public TrueNegatives(Ops tf, String name, long seed, Class type) { * * @param tf the TensorFlow Ops * @param name the name of the metric, if null then {@link Class#getSimpleName()} is used - * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -113,10 +113,10 @@ public TrueNegatives(Ops tf, String name, float threshold, long seed, Class t * * @param tf the TensorFlow Ops * @param name the name of the metric, if null then {@link Class#getSimpleName()} is used - * @param thresholds threshold values in the range {@code [0, 1]}. 
A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TruePositives.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TruePositives.java index f0dd8c42de5..90fe9142014 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TruePositives.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/TruePositives.java @@ -22,12 +22,12 @@ /** * Metric that calculates the number of true positives. * - *

    If {@code sampleWeights} is given, calculates the sum of the weights of true positives. - * This metric creates one local variable, {@code accumulator} that is used to keep track of - * the number of true positives. + *

    If {@code sampleWeights} is given, calculates the sum of the weights of true positives. This + * metric creates one local variable, {@code accumulator} that is used to keep track of the number + * of true positives. * - *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use {@code - * sampleWeights} of 0 to mask values. + *

    If {@code sampleWeights} is {@code null}, weights default to 1. Use {@code sampleWeights} of 0 + * to mask values. * * @param The data type for the metric result */ @@ -50,10 +50,10 @@ public TruePositives(Ops tf, long seed, Class type) { * Creates a TruePositives metric, using {@link Class#getSimpleName()} for the metric name * * @param tf the TensorFlow Ops - * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -66,10 +66,10 @@ public TruePositives(Ops tf, float threshold, long seed, Class type) { * Creates a TruePositives metric, using {@link Class#getSimpleName()} for the metric name * * @param tf the TensorFlow Ops - * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -96,10 +96,10 @@ public TruePositives(Ops tf, String name, long seed, Class type) { * * @param tf the TensorFlow Ops * @param name the name of the metric, if null then {@link Class#getSimpleName()} is used - * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param threshold a threshold value in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -113,10 +113,10 @@ public TruePositives(Ops tf, String name, float threshold, long seed, Class t * * @param tf the TensorFlow Ops * @param name the name of the metric, if null then {@link Class#getSimpleName()} is used - * @param thresholds threshold values in the range {@code [0, 1]}. 
A threshold is compared - * with prediction values to determine the truth value of predictions (i.e., above the - * threshold is {@code true}, below is {@code false}). One metric value is generated - * for each threshold value + * @param thresholds threshold values in the range {@code [0, 1]}. A threshold is compared with + * prediction values to determine the truth value of predictions (i.e., above the threshold is + * {@code true}, below is {@code false}). One metric value is generated for each threshold + * value * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/ConfusionMatrixConditionCount.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/ConfusionMatrixConditionCount.java index 88597cf85ec..4463e1f8213 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/ConfusionMatrixConditionCount.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/ConfusionMatrixConditionCount.java @@ -14,6 +14,11 @@ =======================================================================*/ package org.tensorflow.framework.metrics.impl; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.Zeros; import org.tensorflow.framework.metrics.Metric; @@ -24,12 +29,6 @@ import org.tensorflow.op.core.Variable; import org.tensorflow.types.family.TNumber; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Abstract base class that calculates the value of the given confusion matrix condition based on * labels and predictions. @@ -67,10 +66,9 @@ public ConfusionMatrixConditionCount( * @param tf the TensorFlow Ops * @param name the name of the metric, if null then {@link Class#getSimpleName()} is used * @param confusionMatrixCond the confusion matrix condition to calculate - * @param threshold a threshold value in {@code [0, 1]}. A threshold is compared with - * prediction values to determine the truth value of predictions (i.e., above the threshold is - * {@code true}, below is {@code false}). One metric value is generated for each - * threshold value. + * @param threshold a threshold value in {@code [0, 1]}. A threshold is compared with prediction + * values to determine the truth value of predictions (i.e., above the threshold is {@code + * true}, below is {@code false}). One metric value is generated for each threshold value. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -91,10 +89,9 @@ public ConfusionMatrixConditionCount( * @param tf the TensorFlow Ops * @param name the name of the metric, if null then {@link Class#getSimpleName()} is used * @param confusionMatrixCond the confusion matrix condition to calculate - * @param thresholds threshold values in {@code [0, 1]}. A threshold is compared with - * prediction values to determine the truth value of predictions (i.e., above the threshold is - * {@code true}, below is {@code false}). 
One metric value is generated for each - * threshold value. + * @param thresholds threshold values in {@code [0, 1]}. A threshold is compared with prediction + * values to determine the truth value of predictions (i.e., above the threshold is {@code + * true}, below is {@code false}). One metric value is generated for each threshold value. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and data type. * @param type the data type for the variables @@ -118,12 +115,13 @@ public ConfusionMatrixConditionCount( private void init() { Shape variableShape = Shape.of(this.thresholds.length); - Zeros zeros = new Zeros<>(getTF()); + Zeros zeros = new Zeros<>(); accumulator = getTF() .withName(getAccumulatorName()) - .variable(zeros.call(getTF().constant(variableShape), type)); - initializer = getTF().assign(accumulator, zeros.call(getTF().constant(variableShape), type)); + .variable(zeros.call(getTF(), getTF().constant(variableShape), type)); + initializer = + getTF().assign(accumulator, zeros.call(getTF(), getTF().constant(variableShape), type)); } /** @@ -189,7 +187,11 @@ public float[] getThresholds() { return this.thresholds; } - /** @return the accumulatorName */ + /** + * Gets the accumulatorName + * + * @return the accumulatorName + */ public String getAccumulatorName() { return accumulatorName; } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/LossMetric.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/LossMetric.java index f89047e457d..76c21aebefc 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/LossMetric.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/LossMetric.java @@ -18,7 +18,7 @@ import org.tensorflow.types.family.TNumber; /** - * Interface for Metrics that wrap Loss functions. + * Interface for Metrics that wrap AbstractLoss functions. * * @param The data type of the predictions. */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MeanMetricWrapper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MeanMetricWrapper.java index 37bdd5849ae..d9f4bb60cba 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MeanMetricWrapper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MeanMetricWrapper.java @@ -14,6 +14,9 @@ =======================================================================*/ package org.tensorflow.framework.metrics.impl; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.List; import org.tensorflow.Operand; import org.tensorflow.framework.metrics.Mean; import org.tensorflow.framework.metrics.MetricReduction; @@ -21,17 +24,13 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -import java.util.List; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * A class that bridges a stateless loss function with the {@link Mean} metric using a reduction of * {@link MetricReduction#WEIGHTED_MEAN}. * - *

    The loss function calculates the loss between the {@code labels} and {@code predictions - * } then passes this loss to the {@link Mean} metric to calculate the weighted mean of the - * loss over many iterations or epochs + *

    The loss function calculates the loss between the {@code labels} and {@code predictions } then + * passes this loss to the {@link Mean} metric to calculate the weighted mean of the loss over many + * iterations or epochs * * @param The data type for the metric result */ @@ -63,7 +62,7 @@ public LossMetric getLoss() { } /** - * Sets the Loss function for this wrapper. + * Sets the AbstractLoss function for this wrapper. * * @param loss the loss function. */ diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java index 40336233d21..7d265ef7651 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java @@ -14,6 +14,16 @@ =======================================================================*/ package org.tensorflow.framework.metrics.impl; +import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import org.tensorflow.Operand; import org.tensorflow.framework.losses.impl.LossTuple; import org.tensorflow.framework.losses.impl.LossesHelper; @@ -38,17 +48,6 @@ import org.tensorflow.types.family.TIntegral; import org.tensorflow.types.family.TNumber; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; - -import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * These are helper methods for Metrics and will be module private when Java modularity is applied * to TensorFlow Java. These methods should not be used outside of the metrics packages. @@ -59,8 +58,7 @@ public class MetricsHelper { "weights can not be broadcast to values."; /** - * Asserts that the {@code sampleWeights} can be broadcast to the same shape as {@code values - * } + * Asserts that the {@code sampleWeights} can be broadcast to the same shape as {@code values } * *

    In losses and metrics, limited weight broadcasting is supported. Weights must be either * scalar, or the same rank as the target values, with each dimension either 1, or the same as the @@ -69,8 +67,8 @@ public class MetricsHelper { * @param tf the TensorFlow Ops * @param sampleWeights the sample weights. * @param values the values to which weights are applied. - * @return {@code Operation} with control dependencies to ensure {@code sampleWeight} - * can be broadcast to {@code values} + * @return {@code Operation} with control dependencies to ensure {@code sampleWeight} can be + * broadcast to {@code values} * @param the type of Operand * @throws NotBroadcastableException If static checks determine {@code sampleWeights} has an * incorrect shape that prohibit broadcasting to {@code values} @@ -114,10 +112,7 @@ public static Op assertBroadcastable( throw new NotBroadcastableException( String.format( "%s Mismatch at dim %d. values.shape=%s weights.shape=%s.", - ASSERT_BROADCAST_ERROR_PREFIX, - i, - valuesShapeStatic, - weightsShapeStatic)); + ASSERT_BROADCAST_ERROR_PREFIX, i, valuesShapeStatic, weightsShapeStatic)); } } return tf.withSubScope("staticDimsCheckSuccess") @@ -307,24 +302,24 @@ public static List assertShapes( *

    For estimation of these metrics over a stream of data, the function creates an `update_op` * operation that updates the given variables. * - *

    {@code labels}, {@code predictions}, and {@code sampleWeight} tensors are - * aligned by {@link LossesHelper#removeSqueezableDimensions(Ops, Operand, Operand)}. {@code - * sampleWeight} is then broadcast to the shape of {@code predictions}. + *

    {@code labels}, {@code predictions}, and {@code sampleWeight} tensors are aligned by {@link + * LossesHelper#removeSqueezableDimensions(Ops, Operand, Operand)}. {@code sampleWeight} is then + * broadcast to the shape of {@code predictions}. * * @param tf the TensorFlow Ops * @param variablesToUpdate map with {@link ConfusionMatrixEnum} values as valid keys and * corresponding variables to update as values. If {@code multiLabel}, then the variable * shapes are (T, D), where T is the number of thresholds and D is the number of classes - * (after slicing by {@code classIndex}, if provided). If {@code multiLabels}, then - * the variable shapes are (T). + * (after slicing by {@code classIndex}, if provided). If {@code multiLabels}, then the + * variable shapes are (T). * @param varInitializers map with {@link ConfusionMatrixEnum} values as valid keys and * corresponding initializer Operands to for {@code variablesToUpdate}. * @param labels the labels. Will be cast to {@link TBool}. Shape (N, Cx, L1?), where N is the * number of examples, Cx is zero or more class dimensions, and L1 is a potential extra * dimension of size 1 that would be squeezed. * @param predictions the predictions shape (N, Cx, P1?) - * @param thresholds thresholds in the range {@code [0, 1]}, or {@link #NEG_INF} is used when - * topK is set + * @param thresholds thresholds in the range {@code [0, 1]}, or {@link #NEG_INF} is used when topK + * is set * @param topK optional, indicates that only the top k predictions should be considered. Applied * before possibly slicing by {@code classIndex}. * @param classIndex optional, limits the prediction and labels to the specified class. This is an @@ -338,14 +333,14 @@ public static List assertShapes( * @param labelWeights tensor of non-negative weights for multilabel data. The weights are applied * when calculating TRUE_POSITIVES, FALSE_POSITIVES, TRUE_NEGATIVES, and FALSE_NEGATIVES * without explicit multilabel handling (i.e. when the data is to be flattened). Must have - * shape (Dx), which is the same as (Cx) referenced above, except that if {@code classIndex - * } is provided, then the final dimension of Dx is 1. These weights will be broadcast - * across the 0th dimension (the examples dimension) of {@code predictions}. May be null. - * Must be null if {@code multiLabel}. + * shape (Dx), which is the same as (Cx) referenced above, except that if {@code classIndex } + * is provided, then the final dimension of Dx is 1. These weights will be broadcast across + * the 0th dimension (the examples dimension) of {@code predictions}. May be null. Must be + * null if {@code multiLabel}. * @param the data type for the variables - * @throws IllegalArgumentException If {@code predictions} and {@code labels} have - * mismatched shapes, or if {@code sampleWeight} is not null and its shape - * doesn't match {@code predictions}, or if {@code multiLabel && labelWeights != null}.. + * @throws IllegalArgumentException If {@code predictions} and {@code labels} have mismatched + * shapes, or if {@code sampleWeight} is not null and its shape doesn't match {@code + * predictions}, or if {@code multiLabel && labelWeights != null}.. * @return an op to update the given confusion matrix variables. 
*/ @SuppressWarnings({"unchecked", "rawtypes"}) @@ -439,11 +434,13 @@ tPredictions, cast(tf, tf.constant(0), tPredictions.type())), if (classIndex != null) { // Slice to new shapes (N, Dx) - tLabels = tf.squeeze(tf.gather(tLabels, - tf.constant(new int[] {classIndex}), tf.constant(-1)), + tLabels = + tf.squeeze( + tf.gather(tLabels, tf.constant(new int[] {classIndex}), tf.constant(-1)), Squeeze.axis(Collections.singletonList(1L))); - tPredictions = tf.squeeze(tf.gather(tPredictions, - tf.constant(new int[] {classIndex}), tf.constant(-1)), + tPredictions = + tf.squeeze( + tf.gather(tPredictions, tf.constant(new int[] {classIndex}), tf.constant(-1)), Squeeze.axis(Collections.singletonList(1L))); } org.tensorflow.op.core.Shape predShape = tf.shape(tPredictions); @@ -693,8 +690,7 @@ private static Operand filterTopK(Ops tf, Operand x, i // alias for mean /** - * Calculate the mean of the operand, along all axes and {@code keepDims} is {@code false - * } + * Calculate the mean of the operand, along all axes and {@code keepDims} is {@code false } * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -706,8 +702,8 @@ public static Operand mean(Ops tf, Operand x) { } /** - * Calculate the mean of the operand, alongside the specified axis with {@code keepDims} is - * {@code false} + * Calculate the mean of the operand, alongside the specified axis with {@code keepDims} is {@code + * false} * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -725,10 +721,9 @@ public static Operand mean( * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean - * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is - * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes - * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained - * with length 1. + * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is {@code + * false}, the rank of the tensor is reduced by 1 for each entry in {@code axes }. If {@code + * keepdims} is {@code true}, the reduced dimensions are retained with length 1. * @param the type of the operand * @return the mean of elements of {@code x}. */ @@ -742,10 +737,9 @@ public static Operand mean(Ops tf, Operand x, boolean * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean * @param axes Axes to compute the mean. - * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is - * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes - * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained - * with length 1. + * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is {@code + * false}, the rank of the tensor is reduced by 1 for each entry in {@code axes }. If {@code + * keepdims} is {@code true}, the reduced dimensions are retained with length 1. * @param the data type of the Operand * @return the mean of elements of {@code x}. */ @@ -783,12 +777,12 @@ LossTuple raggedAssertCompatibleAndGetFlatValues( *

    For example: * *

    {@code
    -   *     confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
    -   *          [[0 0 0 0 0]
    -   *           [0 0 1 0 0]
    -   *           [0 0 1 0 0]
    -   *           [0 0 0 0 0]
    -   *           [0 0 0 0 1]]
    +   * confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
    +   *      [[0 0 0 0 0]
    +   *       [0 0 1 0 0]
    +   *       [0 0 1 0 0]
    +   *       [0 0 0 0 0]
    +   *       [0 0 0 0 1]]
        * }
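The 5x5 result above can also be reproduced with core ops by one-hot encoding both vectors and accumulating one count per (label, prediction) pair; this sketch only illustrates the arithmetic and is not the implementation used in this file:

    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.linalg.MatMul;
    import org.tensorflow.types.TFloat32;
    import org.tensorflow.types.TInt32;

    Ops tf = Ops.create();
    Operand<TInt32> labels = tf.constant(new int[] {1, 2, 4});
    Operand<TInt32> predictions = tf.constant(new int[] {2, 2, 4});
    // one-hot encode against 5 classes: shape (3, 5) each
    Operand<TFloat32> labelsOneHot =
        tf.oneHot(labels, tf.constant(5), tf.constant(1f), tf.constant(0f));
    Operand<TFloat32> predictionsOneHot =
        tf.oneHot(predictions, tf.constant(5), tf.constant(1f), tf.constant(0f));
    // (5, 3) x (3, 5): each example adds one count at (label, prediction)
    Operand<TFloat32> confusion =
        tf.linalg.matMul(labelsOneHot, predictionsOneHot, MatMul.transposeA(true));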
    * * Note that the possible labels are assumed to be {@code [0, 1, 2, 3,4]}, resulting in a 5x5 @@ -802,12 +796,12 @@ LossTuple raggedAssertCompatibleAndGetFlatValues( * @param weights optional weights to be applied to the confusion matrix * @param type Data type of the confusion matrix. * @param the type of Operands - * @return A {@code Operand} of type {@code type} with shape {@code [n, n]} - * representing the confusion matrix, where {@code n} is the number of possible labels in - * the classification task. - * @throws IllegalArgumentException If both {@code predictions} and {@code labels} do - * not have compatible shapes, or if {@code weights} is not{@code null} and its - * shape is not compatible with {@code predictions}. + * @return A {@code Operand} of type {@code type} with shape {@code [n, n]} representing the + * confusion matrix, where {@code n} is the number of possible labels in the classification + * task. + * @throws IllegalArgumentException If both {@code predictions} and {@code labels} do not have + * compatible shapes, or if {@code weights} is not{@code null} and its shape is not compatible + * with {@code predictions}. */ // TODO should this be moved to FramnworkOps under math. public static Operand confusionMatrix( @@ -883,8 +877,7 @@ public static Operand confusionMatrix( } /** - * Calculate the mean of the operand, along all axes and {@code keepDims} is {@code false - * } + * Calculate the mean of the operand, along all axes and {@code keepDims} is {@code false } * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -895,8 +888,8 @@ public static Operand booleanMean(Ops tf, Operand x) { } /** - * Calculate the mean of the operand, alongside the specified axis with {@code keepDims} is - * {@code false} + * Calculate the mean of the operand, alongside the specified axis with {@code keepDims} is {@code + * false} * * @param tf the TensorFlow Ops * @param x the Operand used to calculate the mean @@ -913,10 +906,9 @@ public static Operand booleanMean( * * @param tf the TensorFlow Ops * @param x the boolean Operand used to calculate the mean - * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is - * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes - * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained - * with length 1. + * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is {@code + * false}, the rank of the tensor is reduced by 1 for each entry in {@code axes }. If {@code + * keepdims} is {@code true}, the reduced dimensions are retained with length 1. * @return the mean of elements of {@code x} containing floating point numbers */ public static Operand booleanMean(Ops tf, Operand x, boolean keepDims) { @@ -929,10 +921,9 @@ public static Operand booleanMean(Ops tf, Operand x, boolean ke * @param tf the TensorFlow Ops * @param x the boolean Operand used to calculate the mean * @param axes Axes to compute the mean. - * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is - * {@code false}, the rank of the tensor is reduced by 1 for each entry in {@code axes - * }. If {@code keepdims} is {@code true}, the reduced dimensions are retained - * with length 1. + * @param keepDims Indicates whether to keep the dimensions or not. If {@code keepdims} is {@code + * false}, the rank of the tensor is reduced by 1 for each entry in {@code axes }. 
If {@code + * keepdims} is {@code true}, the reduced dimensions are retained with length 1. * @return the mean of elements of {@code x} containing floating point numbers */ public static Operand booleanMean( diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SensitivitySpecificityBase.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SensitivitySpecificityBase.java index 60a6c1ea3df..6779b6b1f5a 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SensitivitySpecificityBase.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SensitivitySpecificityBase.java @@ -1,5 +1,12 @@ package org.tensorflow.framework.metrics.impl; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.Zeros; import org.tensorflow.framework.metrics.Metric; @@ -10,14 +17,6 @@ import org.tensorflow.op.core.Variable; import org.tensorflow.types.family.TNumber; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Abstract base class for computing sensitivity and specificity. * @@ -87,9 +86,9 @@ protected SensitivitySpecificityBase( /** Initializes the Variables */ private void init() { Ops tf = getTF(); - Zeros zeros = new Zeros<>(tf); + Zeros zeros = new Zeros<>(); Shape varShape = Shape.of(numThresholds); - Operand zero = zeros.call(tf.constant(varShape), type); + Operand zero = zeros.call(tf, tf.constant(varShape), type); if (this.getTruePositives() == null) { @@ -228,8 +227,6 @@ public int getNumThresholds() { return numThresholds; } - - /** * Gets the thresholds * diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java index 68157632557..dd77a1be4aa 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java @@ -14,20 +14,20 @@ =======================================================================*/ package org.tensorflow.framework.metrics.impl; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.op.SparseOps; import org.tensorflow.op.sparse.DenseToDenseSetOperation; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** Implementation of set operations */ public class SetsOps { /** - * Computes set difference of elements in last dimension of {@code a} and {@code b} with - * {@code aMinusB} set to true. + * Computes set difference of elements in last dimension of {@code a} and {@code b} with {@code + * aMinusB} set to true. * *

    All but the last dimension of {@code a} and {@code b} must match * @@ -35,8 +35,8 @@ public class SetsOps { * @param a The first operand representing set {@code a} * @param b The other operand representing set {@code b} * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the - * last dimension the * same. Elements along the last dimension contain the results of the set + * @return An Operand with the same rank as {@code a} and {@code b}, and all but the last + * dimension the * same. Elements along the last dimension contain the results of the set * operation. */ public static Operand difference(Ops tf, Operand a, Operand b) { @@ -53,8 +53,8 @@ public static Operand difference(Ops tf, Operand a, Op * @param b The other operand representing set {@code b} * @param aMinusB whether to subtract b from a, vs vice versa. * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the - * last dimension the * same. Elements along the last dimension contain the results of the set + * @return An Operand with the same rank as {@code a} and {@code b}, and all but the last + * dimension the * same. Elements along the last dimension contain the results of the set * operation. */ public static Operand difference( @@ -69,8 +69,8 @@ public static Operand difference( * @param a The first operand representing set {@code a} * @param b The other operand representing set {@code b} * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the - * last dimension the * same. Elements along the last dimension contain the results of the set + * @return An Operand with the same rank as {@code a} and {@code b}, and all but the last + * dimension the * same. Elements along the last dimension contain the results of the set * operation. */ public static Operand union(Ops tf, Operand a, Operand b) { @@ -84,8 +84,8 @@ public static Operand union(Ops tf, Operand a, Operand * @param a The first operand representing set {@code a} * @param b The other operand representing set {@code b} * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the - * last dimension the * same. Elements along the last dimension contain the results of the set + * @return An Operand with the same rank as {@code a} and {@code b}, and all but the last + * dimension the * same. Elements along the last dimension contain the results of the set * operation. */ public static Operand intersection(Ops tf, Operand a, Operand b) { @@ -100,8 +100,8 @@ public static Operand intersection(Ops tf, Operand a, * @param b The other et operation operand * @param setOperation The set operation to perform, {@link Operation}. * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the - * last dimension the same. Elements along the last dimension contain the results of the set + * @return An Operand with the same rank as {@code a} and {@code b}, and all but the last + * dimension the same. Elements along the last dimension contain the results of the set * operation. 
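A usage sketch for these set helpers, treating each row as a set along the last dimension (the exact padding of the dense result is an assumption based on the DenseToDenseSetOperation wrapper used below):

    import org.tensorflow.Operand;
    import org.tensorflow.framework.metrics.impl.SetsOps;
    import org.tensorflow.op.Ops;
    import org.tensorflow.types.TInt32;

    Ops tf = Ops.create();
    Operand<TInt32> a = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}});
    Operand<TInt32> b = tf.constant(new int[][] {{1, 9, 9}, {4, 5, 9}});
    // per row: elements of a that are not in b   -> {2, 3} and {6}
    Operand<TInt32> aMinusB = SetsOps.difference(tf, a, b);
    // per row: elements present in both          -> {1} and {4, 5}
    Operand<TInt32> common = SetsOps.intersection(tf, a, b);
    // per row: all distinct elements of either operand
    Operand<TInt32> all = SetsOps.union(tf, a, b);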
*/ public static Operand setOperation( diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SymbolicShape.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SymbolicShape.java index d28185ae041..b8698ab197d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SymbolicShape.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SymbolicShape.java @@ -14,42 +14,78 @@ =======================================================================*/ package org.tensorflow.framework.metrics.impl; -import org.tensorflow.Operand; -import org.tensorflow.types.family.TNumber; - import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import org.tensorflow.Operand; +import org.tensorflow.types.family.TNumber; +/** + * A class that represents a Symbolic shape. + * + *

A Symbolic shape uses symbols to identify the relationship of the shape of an operand to + * underlying values that are not known until compute time. For example, "N" represents the number of + * examples, while "L" represents the number of labels. When the values later become known, the + * shape of the operand must conform to these symbolic values. + * + * @param The data type for the Operand. + */ public class SymbolicShape { private Operand operand; private List symbols = new ArrayList<>(); + /** + * Creates a SymbolicShape + * + * @param operand the Operand that needs to conform to the shape + * @param symbols the symbolic value for each dimension of the shape. + */ public SymbolicShape(Operand operand, String... symbols) { this.operand = operand; this.symbols.addAll(Arrays.asList(symbols)); } - /** @return the operand */ + /** + * Gets the operand + * + * @return the operand + */ public Operand getOperand() { return operand; } - /** @param operand the operand to set */ + /** + * Sets the operand + * + * @param operand the operand to set + */ public void setOperand(Operand operand) { this.operand = operand; } - /** @return the symbols */ + /** + * Gets the symbols associated with each dimension of the shape + * + * @return the symbols associated with each dimension of the shape + */ public List getSymbols() { return symbols; } - /** @param symbols the symbols to set */ + /** + * Sets the symbols associated with each dimension of the shape + * + * @param symbols the symbols associated with each dimension of the shape + */ public void setSymbols(List symbols) { this.symbols = symbols; } + /** + * Gets the rank associated with this Symbolic Shape + * + * @return the rank associated with this Symbolic Shape + */ public int rank() { return this.symbols.size(); } }
diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java index 6583465da2e..2df90a841ee 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java @@ -14,6 +14,11 @@ =======================================================================*/ package org.tensorflow.framework.metrics.impl; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; @@ -23,17 +28,11 @@ import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * Weight broadcasting operations. * - *
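Before moving on to the weight-broadcasting helpers, a short sketch of how the SymbolicShape class above is meant to be used together with the assertShapes helper changed earlier in this patch (the enclosing MetricsHelper class name and the exact argument order are assumptions):

    import java.util.Arrays;
    import java.util.List;
    import org.tensorflow.Operand;
    import org.tensorflow.op.Op;
    import org.tensorflow.op.Ops;
    import org.tensorflow.types.TFloat32;

    Ops tf = Ops.create();
    Operand<TFloat32> labels = tf.constant(new float[][] {{1f, 0f}, {0f, 1f}});              // (N=2, L=2)
    Operand<TFloat32> predictions = tf.constant(new float[][] {{0.9f, 0.1f}, {0.2f, 0.8f}}); // (N=2, L=2)
    // every "N" must resolve to the same runtime value, and likewise every "L"
    List<SymbolicShape<TFloat32>> symbols =
        Arrays.asList(
            new SymbolicShape<>(labels, "N", "L"),
            new SymbolicShape<>(predictions, "N", "L"));
    List<Op> checks = MetricsHelper.assertShapes(tf, symbols, "labels/predictions shape mismatch");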

    In {@link org.tensorflow.framework.losses} and `{@link org.tensorflow.framework.metrics}, we support limited weight broadcasting. This file includes - * operations for those broadcasting rules. + *

    In {@link org.tensorflow.framework.losses} and `{@link org.tensorflow.framework.metrics}, we + * support limited weight broadcasting. This file includes operations for those broadcasting rules. */ public class WeightsBroadcastOps { @@ -46,10 +45,11 @@ public class WeightsBroadcastOps { * @param tf the TensorFlow Ops * @param weights the weights Operand * @param values Operand of values to which weights are applied. - * @return {@code Operation} raising a tensorflow InvalidArgumentError if {@code weights} has incorrect shape. {@link NoOp} if - * static checks determine {@code weights} has correct shape. + * @return {@code Operation} raising a tensorflow InvalidArgumentError if {@code weights} has + * incorrect shape. {@link NoOp} if static checks determine {@code weights} has correct shape. * @param the type of weights and values - * @throws IllegalArgumentException If static checks determine {@code weights} has incorrect shape. + * @throws IllegalArgumentException If static checks determine {@code weights} has incorrect + * shape. */ public static Op assertBroadcastable( Ops tf, Operand weights, Operand values) { @@ -81,14 +81,12 @@ public static Op assertBroadcastable( } for (int i = 0; i < valuesRankStatic; i++) { - if (weightsShapeStatic.size(i) != 1 && valuesShapeStatic.size(i) != weightsShapeStatic.size(i)) { + if (weightsShapeStatic.size(i) != 1 + && valuesShapeStatic.size(i) != weightsShapeStatic.size(i)) { throw new IllegalArgumentException( String.format( "%s Mismatch at dim %s. values.shape=%s weights.shape=%s.", - ASSERT_BROADCASTABLE_ERROR_PREFIX, - i, - valuesShapeStatic, - weightsShapeStatic)); + ASSERT_BROADCASTABLE_ERROR_PREFIX, i, valuesShapeStatic, weightsShapeStatic)); } } return tf.withSubScope("staticDimsCheckSuccess") @@ -105,12 +103,12 @@ public static Op assertBroadcastable( tf.constant("values.shape="), valuesShape, tf.constant("isScalar="), - isScalar); + isScalar); Operand isValidShape = tf.select( - isScalar, - isScalar, + isScalar, + isScalar, hasValidNonscalarShape(tf, weightsRank, weightsShape, valuesRank, valuesShape)); return tf.assertThat(isValidShape, data); @@ -140,7 +138,8 @@ private static Operand hasValidNonscalarShape( } /** - * Checks that each dimension of the two shapes are the same size, or that the weight dimension size is 1. + * Checks that each dimension of the two shapes are the same size, or that the weight dimension + * size is 1. * * @param tf the TensorFlow Ops * @param weightsShape the shape of the weights @@ -152,7 +151,8 @@ private static Operand hasValidDims( tf = tf.withSubScope("hasInvalidDims"); Operand valuesShape2d = tf.expandDims(valuesShape, tf.constant(-1)); - Operand validDims = tf.concat(Arrays.asList(valuesShape2d, tf.onesLike(valuesShape2d)), tf.constant(1)); + Operand validDims = + tf.concat(Arrays.asList(valuesShape2d, tf.onesLike(valuesShape2d)), tf.constant(1)); Operand weightsShape2d = tf.expandDims(weightsShape, tf.constant(-1)); Operand invalidDims = SetsOps.difference(tf, weightsShape2d, validDims); @@ -164,8 +164,7 @@ private static Operand hasValidDims( * Broadcast {@code weights} to the same shape as {@code values}. * *
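A sketch of how the two entry points of this class fit together; the broadcastWeights name for the method documented below is inferred from its javadoc, everything else follows the signatures in this hunk:

    import org.tensorflow.Operand;
    import org.tensorflow.framework.metrics.impl.WeightsBroadcastOps;
    import org.tensorflow.op.Op;
    import org.tensorflow.op.Ops;
    import org.tensorflow.types.TFloat32;

    Ops tf = Ops.create();
    Operand<TFloat32> values = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});   // shape (2, 2)
    Operand<TFloat32> weights = tf.constant(new float[][] {{0.5f}, {2f}});        // shape (2, 1)
    // a NoOp when the static shapes already line up, otherwise a runtime assertion
    Op check = WeightsBroadcastOps.assertBroadcastable(tf, weights, values);
    // weights stretched to (2, 2) so they can be multiplied into values element-wise
    Operand<TFloat32> broadcast = WeightsBroadcastOps.broadcastWeights(tf, weights, values);
    Operand<TFloat32> weighted = tf.math.mul(broadcast, values);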

    This returns a version of {@code weights} following the same broadcast rules as {@code - * mul(weights, - * values)}, but limited to the weights shapes allowed by {@code assertBroadcastable} + * mul(weights, values)}, but limited to the weights shapes allowed by {@code assertBroadcastable} * When computing a weighted average, use this function to broadcast {@code weights} before * summing them; e.g., {@code reduceSum(w * v) / reduceSum(_broadcast_weights(w, v))}. * diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/AbstractRegularizer.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/AbstractRegularizer.java new file mode 100644 index 00000000000..25535292db3 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/AbstractRegularizer.java @@ -0,0 +1,63 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.regularizers; + +import org.tensorflow.framework.losses.impl.AbstractLoss; + +/** + * Base class for Regularizers + * + *

    Regularizers allow you to apply penalties on layer parameters or layer activity during + * optimization. These penalties are summed into the loss function that the network optimizes. + */ +public abstract class AbstractRegularizer implements Regularizer { + + public static final float DEFAULT_REGULARIZATION_PENALTY = 0.01f; + + private final String name; + + /** Creates a AbstractRegularizer, using {@link Class#getSimpleName()} for the name */ + protected AbstractRegularizer() { + this(null); + } + /** + * Creates a AbstractRegularizer + * + * @param name the name of this regularizer, if null use {@link Class#getSimpleName()} for the + * name. + */ + protected AbstractRegularizer(String name) { + this.name = name == null ? this.getClass().getSimpleName() : name; + } + + /** + * Returns this AbstractRegularizer as a AbstractLoss This is a convenience to use regularize a + * loss. Only sampleWeights are applied to the regularizer. + * + * @return this AbstractRegularizer as a AbstractLoss + */ + public AbstractLoss asLoss() { + return new RegularizerLoss(this); + } + + /** + * Gets the name for this regularizer + * + * @return the name for this regularizer + */ + public String getName() { + return name; + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L1.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L1.java index 7c8c2a1360a..4b7aa1af620 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L1.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L1.java @@ -14,8 +14,6 @@ =======================================================================*/ package org.tensorflow.framework.regularizers; -import org.tensorflow.op.Ops; - /** * A regularizer that applies an L1 or Lasso(least absolute shrinkage and selection operator) * Regression, regularization penalty. @@ -24,24 +22,43 @@ */ public class L1 extends L1L2 { + /** + * Create a regularizer that applies an L1 regularization penalty of {@link + * #DEFAULT_REGULARIZATION_PENALTY} and a name based on the class name. + */ + public L1() { + this(null, DEFAULT_REGULARIZATION_PENALTY); + } + /** * Create a regularizer that applies an L1 regularization penalty of {@link * #DEFAULT_REGULARIZATION_PENALTY} * - * @param tf the TensorFlow Ops + * @param name the name for this AbstractRegularizer + */ + public L1(String name) { + this(name, DEFAULT_REGULARIZATION_PENALTY); + } + + /** + * Create a regularizer that applies an L1 regularization penalty and a name based on the class + * name. + * + * @param l1 the L1 regularization penalty + * @throws IllegalArgumentException if the l1 regularization factor is NaN or is infinite. */ - public L1(Ops tf) { - this(tf, DEFAULT_REGULARIZATION_PENALTY); + public L1(float l1) { + this(null, l1); } /** * Create a regularizer that applies an L1 regularization penalty * - * @param tf the TensorFlow Ops + * @param name the name for this AbstractRegularizer * @param l1 the L1 regularization penalty * @throws IllegalArgumentException if the l1 regularization factor is NaN or is infinite. 
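With the Ops handle removed from the constructors, a regularizer can be created once and evaluated against any eager or graph session; a minimal sketch of the new pattern (the weight values are made up):

    import org.tensorflow.Operand;
    import org.tensorflow.framework.regularizers.L1;
    import org.tensorflow.op.Ops;
    import org.tensorflow.types.TFloat32;

    Ops tf = Ops.create();
    Operand<TFloat32> weights = tf.constant(new float[][] {{1f, -2f}, {3f, -4f}});
    L1 l1 = new L1(0.01f);                            // penalty factor only, no Ops
    Operand<TFloat32> penalty = l1.call(tf, weights); // 0.01 * (1 + 2 + 3 + 4) = 0.1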
*/ - public L1(Ops tf, float l1) { - super(tf, l1, 0f); + public L1(String name, float l1) { + super(name, l1, 0f); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L1L2.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L1L2.java index 29e411f9897..87db69f2a77 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L1L2.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L1L2.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.regularizers; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.op.Ops; @@ -29,33 +31,40 @@ *

    The L2 regularization penalty is computed as * *

    loss = l2 * reduceSum(square(x))
    - * */ -public class L1L2 extends Regularizer { +public class L1L2 extends AbstractRegularizer { private final float l1; private final float l2; + /** Creates an L1L2 regularizer with no l1 or l2 penalty with zero penalty */ + public L1L2() { + this(DEFAULT_REGULARIZATION_PENALTY, DEFAULT_REGULARIZATION_PENALTY); + } + /** - * Creates an L1L2 regularizer with no l1 or l2 penalty with zero penalty + * Creates an L1L2 regularizer * - * @param tf the TensorFlow Ops + * @param l1 L1 regularization factor, if null it is set to 0. + * @param l2 L2 regularization factor, if null it is set to 0. + * @throws IllegalArgumentException if the l1 or l2 regularization factor is {@link Float#isNaN} + * of {@link Float#isInfinite} */ - public L1L2(Ops tf) { - this(tf, DEFAULT_REGULARIZATION_PENALTY, DEFAULT_REGULARIZATION_PENALTY); + public L1L2(float l1, float l2) { + this(null, l1, l2); } /** * Creates an L1L2 regularizer * - * @param tf the TensorFlow Ops + * @param name the name for this regularizer, if null the class name will be used. * @param l1 L1 regularization factor, if null it is set to 0. * @param l2 L2 regularization factor, if null it is set to 0. * @throws IllegalArgumentException if the l1 or l2 regularization factor is {@link Float#isNaN} * of {@link Float#isInfinite} */ - public L1L2(Ops tf, float l1, float l2) { - super(tf); + public L1L2(String name, float l1, float l2) { + super(name); if (Float.isNaN(l1) || Float.isInfinite(l1)) { throw new IllegalArgumentException( String.format( @@ -73,25 +82,23 @@ public L1L2(Ops tf, float l1, float l2) { this.l2 = l2; } - /** {@inheritDoc} */ @Override - public Operand call(Operand input) { - Ops tf = getTF(); + public Operand call(Ops tf, Operand input) { if (this.getL1() == 0f && this.getL2() == 0f) { - return tf.dtypes.cast(tf.constant(0), input.type()); + return cast(tf, tf.constant(0), input.type()); } - Operand regularization = tf.dtypes.cast(tf.constant(0), input.type()); + Operand regularization = cast(tf, tf.constant(0), input.type()); if (this.getL1() != 0.f) { - Operand l1Op = tf.dtypes.cast(tf.constant(this.getL1()), input.type()); + Operand l1Op = cast(tf, tf.constant(this.getL1()), input.type()); Operand abs = tf.math.abs(input); Operand reduceSum = tf.reduceSum(abs, LossesHelper.allAxes(tf, input)); regularization = tf.math.add(regularization, tf.math.mul(l1Op, reduceSum)); } if (this.getL2() != 0.f) { - Operand l2Op = tf.dtypes.cast(tf.constant(this.getL2()), input.type()); + Operand l2Op = cast(tf, tf.constant(this.getL2()), input.type()); Operand sqr = tf.math.square(input); Operand reduceSum = tf.reduceSum(sqr, LossesHelper.allAxes(tf, input)); regularization = tf.math.add(regularization, tf.math.mul(l2Op, reduceSum)); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L2.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L2.java index 7b8f5b28a70..9092b80b08f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L2.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/L2.java @@ -14,8 +14,6 @@ =======================================================================*/ package org.tensorflow.framework.regularizers; -import org.tensorflow.op.Ops; - /** * A regularizer that applies a L2 (Ridge Regression) regularization penalty. 
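Since RegularizerLoss now receives the Ops handle in call() as well, folding a weight penalty into a training loss looks like the sketch below (it assumes the four-argument AbstractLoss.call shown later in this patch; labels and predictions are ignored, only sampleWeights is regularized):

    import org.tensorflow.Operand;
    import org.tensorflow.framework.losses.impl.AbstractLoss;
    import org.tensorflow.framework.regularizers.L2;
    import org.tensorflow.op.Ops;
    import org.tensorflow.types.TFloat32;

    Ops tf = Ops.create();
    Operand<TFloat32> modelWeights = tf.constant(new float[] {0.5f, -1.5f, 2f});
    AbstractLoss weightPenalty = new L2(0.01f).asLoss();
    // the weights ride in through the sampleWeights argument
    Operand<TFloat32> penalty = weightPenalty.call(tf, null, null, modelWeights);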
* @@ -23,24 +21,43 @@ */ public class L2 extends L1L2 { + /** + * Create a regularizer that applies an L2 regularization penalty of {@link + * #DEFAULT_REGULARIZATION_PENALTY} and a name based on the class name. + */ + public L2() { + this(null, DEFAULT_REGULARIZATION_PENALTY); + } + /** * Create a regularizer that applies an L2 regularization penalty of {@link * #DEFAULT_REGULARIZATION_PENALTY} * - * @param tf the TensorFlow Ops + * @param name the name for this AbstractRegularizer + */ + public L2(String name) { + this(name, DEFAULT_REGULARIZATION_PENALTY); + } + + /** + * Create a regularizer that applies an L1 regularization penalty and a name based on the class + * name. + * + * @param l2 the L2 regularization penalty + * @throws IllegalArgumentException if the l2 regularization factor is NaN or is infinite. */ - public L2(Ops tf) { - this(tf, DEFAULT_REGULARIZATION_PENALTY); + public L2(float l2) { + this(null, l2); } /** * Create a regularizer that applies an L1 regularization penalty * - * @param tf the TensorFlow Ops + * @param name the name for this AbstractRegularizer * @param l2 the L2 regularization penalty * @throws IllegalArgumentException if the l2 regularization factor is NaN or is infinite. */ - public L2(Ops tf, float l2) { - super(tf, 0f, l2); + public L2(String name, float l2) { + super(name, 0f, l2); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/Regularizer.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/Regularizer.java index 5d9ff0e3e10..085f06e115c 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/Regularizer.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/Regularizer.java @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,77 +15,18 @@ package org.tensorflow.framework.regularizers; import org.tensorflow.Operand; -import org.tensorflow.framework.losses.Loss; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -/** - * Base class for Regularizers - * - *

    Regularizers allow you to apply penalties on layer parameters or layer activity during - * optimization. These penalties are summed into the loss function that the network optimizes. - */ -public abstract class Regularizer { - - public static final float DEFAULT_REGULARIZATION_PENALTY = 0.01f; - - private final Ops tf; - private final String name; - - /** - * Creates a Regularizer, using {@link Class#getSimpleName()} for the name - * - * @param tf the TensorFlow ops. - */ - protected Regularizer(Ops tf) { - this(tf, null); - } - /** - * Creates a Regularizer - * - * @param tf the TensorFlow ops. - * @param name the name of this regularizer, if null use {@link Class#getSimpleName()} for the - * name. - */ - protected Regularizer(Ops tf, String name) { - this.tf = tf; - this.name = name == null ? this.getClass().getSimpleName() : name; - } - - /** - * Returns this Regularizer as a Loss This is a convenience to use regularize a loss. Only - * sampleWeights are applied to the regularizer. - * - * @return this Regularizer as a Loss - */ - public Loss asLoss() { - return new RegularizerLoss(this.tf, this); - } +public interface Regularizer { /** * Computes a regularization penalty from an input. * + * @param tf the TensorFlow Ops * @param input the weighted input * @return the result of computing the regularization penalty * @param the data type of the input and result */ - public abstract Operand call(Operand input); - - /** - * Gets the TensorFlow Ops - * - * @return the TensorFlow Ops - */ - public Ops getTF() { - return tf; - } - - /** - * Gets the name for this regularizer - * - * @return the name for this regularizer - */ - public String getName() { - return name; - } + Operand call(Ops tf, Operand input); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/RegularizerLoss.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/RegularizerLoss.java index 582cd038f8f..11c7ee492e9 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/RegularizerLoss.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/regularizers/RegularizerLoss.java @@ -15,50 +15,49 @@ package org.tensorflow.framework.regularizers; import org.tensorflow.Operand; -import org.tensorflow.framework.losses.Loss; +import org.tensorflow.framework.losses.impl.AbstractLoss; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; /** - * A Regularizer call wrapped as a Loss instance + * A AbstractRegularizer call wrapped as a AbstractLoss instance * *

    This class facilitates using a regularizer as a loss, only sampleWeights are * regularized. */ -class RegularizerLoss extends Loss { +class RegularizerLoss extends AbstractLoss { - private final Regularizer regularizer; + private final AbstractRegularizer regularizer; /** - * Creates a Loss using {@link Class#getSimpleName()} as the name and a Loss Reduction of {@link - * Loss#REDUCTION_DEFAULT} + * Creates a AbstractLoss using {@link Class#getSimpleName()} as the name and a AbstractLoss + * Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} * - * @param tf the TensorFlow Ops * @param regularizer the regularizer used to calculate the loss */ - public RegularizerLoss(Ops tf, Regularizer regularizer) { - this(tf, null, regularizer); + public RegularizerLoss(AbstractRegularizer regularizer) { + this(null, regularizer); } /** - * Creates a Loss using a Loss Reduction of {@link Loss#REDUCTION_DEFAULT} + * Creates a AbstractLoss using a AbstractLoss Reduction of {@link AbstractLoss#REDUCTION_DEFAULT} * - * @param tf the TensorFlow Ops - * @param name the name of this Loss, if null the name will be {@link Class#getSimpleName()}. + * @param name the name of this AbstractLoss, if null the name will be {@link + * Class#getSimpleName()}. * @param regularizer the regularizer used to calculate the loss */ - public RegularizerLoss(Ops tf, String name, Regularizer regularizer) { - super(tf, name); + public RegularizerLoss(String name, AbstractRegularizer regularizer) { + super(name); this.regularizer = regularizer; } /** {@inheritDoc} */ @Override public Operand call( - Operand labels, Operand predictions, Operand sampleWeights) { + Ops tf, Operand labels, Operand predictions, Operand sampleWeights) { if (sampleWeights == null) { throw new IllegalArgumentException("sampleWeights cannot be null"); } - return regularizer.call(sampleWeights); + return regularizer.call(tf, sampleWeights); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ELUTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ELUTest.java index 914b94dfada..9f3fa75e95d 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ELUTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ELUTest.java @@ -14,36 +14,17 @@ =======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import static org.junit.jupiter.api.Assertions.assertThrows; - -/** @author Jim Clarke */ public class ELUTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public ELUTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - - - /** Test of ELU call method */ @Test public void testCallFloat() { @@ -52,8 +33,8 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ELU instance = new ELU<>(tf); - Operand result = instance.call(tf.constant(input)); + ELU instance = new ELU<>(); + Operand result = instance.call(tf, 
tf.constant(input)); session.evaluate(expected, result); } } @@ -66,8 +47,8 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ELU instance = new ELU<>(tf); - Operand result = instance.call(tf.constant(input)); + ELU instance = new ELU<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -80,8 +61,8 @@ public void testAlpha() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ELU instance = new ELU<>(tf, 2.0f); - Operand result = instance.call(tf.constant(input)); + ELU instance = new ELU<>(2.0f); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ExponentialTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ExponentialTest.java index 1157c582168..f82c19987d1 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ExponentialTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ExponentialTest.java @@ -14,35 +14,17 @@ =======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import static org.junit.jupiter.api.Assertions.assertThrows; - /** @author Jim Clarke */ public class ExponentialTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public ExponentialTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - - - /** Test of Exponential call method. 
*/ @Test public void testCallFloat() { @@ -60,8 +42,8 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Exponential instance = new Exponential<>(tf); - Operand result = instance.call(tf.constant(input)); + Exponential instance = new Exponential<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -78,8 +60,8 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Exponential instance = new Exponential<>(tf); - Operand result = instance.call(tf.constant(input)); + Exponential instance = new Exponential<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/HardSigmoidTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/HardSigmoidTest.java index 35f57c47f66..0e32201c3e6 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/HardSigmoidTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/HardSigmoidTest.java @@ -14,35 +14,17 @@ =======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import static org.junit.jupiter.api.Assertions.assertThrows; - /** @author Jim Clarke */ public class HardSigmoidTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public HardSigmoidTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - - - /** Test of HardSigmoid call method. 
*/ @Test public void testCallFloat() { @@ -51,8 +33,8 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - HardSigmoid instance = new HardSigmoid<>(tf); - Operand result = instance.call(tf.constant(input)); + HardSigmoid instance = new HardSigmoid<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -65,8 +47,8 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - HardSigmoid instance = new HardSigmoid<>(tf); - Operand result = instance.call(tf.constant(input)); + HardSigmoid instance = new HardSigmoid<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/LinearTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/LinearTest.java index 7974035c680..817940688e8 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/LinearTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/LinearTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; @@ -26,20 +26,6 @@ public class LinearTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public LinearTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of Linear call method. 
*/ @Test public void testCallInt() { @@ -48,8 +34,8 @@ public void testCallInt() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Linear instance = new Linear<>(tf); - Operand result = instance.call(tf.constant(input)); + Linear instance = new Linear<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -62,8 +48,8 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Linear instance = new Linear<>(tf); - Operand result = instance.call(tf.constant(input)); + Linear instance = new Linear<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -76,8 +62,8 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Linear instance = new Linear<>(tf); - Operand result = instance.call(tf.constant(input)); + Linear instance = new Linear<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ReLUTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ReLUTest.java index a0aa2c4b453..94f803d6b1c 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ReLUTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ReLUTest.java @@ -14,30 +14,20 @@ =======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; -import org.tensorflow.types.*; +import org.tensorflow.types.TFloat16; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; /** @author Jim Clarke */ public class ReLUTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public ReLUTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of ReLU call method */ @Test public void testCallFloat() { @@ -46,8 +36,8 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(tf); - Operand result = instance.call(tf.constant(input)); + ReLU instance = new ReLU<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } @@ -60,8 +50,8 @@ public void testCallInt() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(tf); - Operand result = instance.call(tf.constant(input)); + ReLU instance = new ReLU<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } @@ -74,8 +64,8 @@ public void testCallLong() { for (TestSession.Mode tfMode : tfModes) try 
(TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(tf); - Operand result = instance.call(tf.constant(input)); + ReLU instance = new ReLU<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } @@ -88,9 +78,9 @@ public void testCallFloat16() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(tf); + ReLU instance = new ReLU<>(); Operand result = - instance.call(tf.dtypes.cast(tf.constant(input), TFloat16.class)); + instance.call(tf, tf.dtypes.cast(tf.constant(input), TFloat16.class)); session.evaluate(tf.dtypes.cast(tf.constant(expected), TFloat16.class), result); } } @@ -103,8 +93,8 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(tf); - Operand result = instance.call(tf.constant(input)); + ReLU instance = new ReLU<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } @@ -112,12 +102,12 @@ public void testCallDouble() { @Test public void testAlpha() { double[] input = {-10., -5., 0.0, 5., 10.}; - double[] expected = {-5. , -2.5, 0., 5., 10.}; + double[] expected = {-5., -2.5, 0., 5., 10.}; for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(tf, 0.5f, ReLU.MAX_VALUE_DEFAULT, ReLU.THRESHOLD_DEFAULT); - Operand result = instance.call(tf.constant(input)); + ReLU instance = new ReLU<>(0.5f, ReLU.MAX_VALUE_DEFAULT, ReLU.THRESHOLD_DEFAULT); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } @@ -129,8 +119,8 @@ public void testMaxValue() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(tf, ReLU.ALPHA_DEFAULT, 5, ReLU.THRESHOLD_DEFAULT); - Operand result = instance.call(tf.constant(input)); + ReLU instance = new ReLU<>(ReLU.ALPHA_DEFAULT, 5, ReLU.THRESHOLD_DEFAULT); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } @@ -138,12 +128,12 @@ public void testMaxValue() { @Test public void testThreshold() { double[] input = {-10., -5., 0.0, 5., 10.}; - double[] expected = {-0., -0., 0., 0., 10.}; + double[] expected = {-0., -0., 0., 0., 10.}; for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(tf, ReLU.ALPHA_DEFAULT, ReLU.MAX_VALUE_DEFAULT, 5.0f); - Operand result = instance.call(tf.constant(input)); + ReLU instance = new ReLU<>(ReLU.ALPHA_DEFAULT, ReLU.MAX_VALUE_DEFAULT, 5.0f); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SELUTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SELUTest.java index 8bad6f1f066..df1cfb9bd05 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SELUTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SELUTest.java @@ -14,35 +14,17 @@ 
=======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import static org.junit.jupiter.api.Assertions.assertThrows; - /** @author Jim Clarke */ public class SELUTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public SELUTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - - - /** Test of SELU call method */ @Test public void testCallFloat() { @@ -53,8 +35,8 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - SELU instance = new SELU<>(tf); - Operand result = instance.call(tf.constant(input)); + SELU instance = new SELU<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -64,15 +46,20 @@ public void testCallFloat() { public void testCallDouble() { double[] input = {1, -2, 3, -4, -1, 2, -3, 4}; double[] expected = { - 1.0507009873554805, -1.520166468595695, 3.1521029620664414, - -1.7258986281898947, -1.1113307378125628, 2.101401974710961, - -1.670568728767112, 4.202803949421922, + 1.0507009873554805, + -1.520166468595695, + 3.1521029620664414, + -1.7258986281898947, + -1.1113307378125628, + 2.101401974710961, + -1.670568728767112, + 4.202803949421922, }; for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - SELU instance = new SELU<>(tf); - Operand result = instance.call(tf.constant(input)); + SELU instance = new SELU<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SigmoidTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SigmoidTest.java index 9dca622c3ec..0c59eeaba6e 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SigmoidTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SigmoidTest.java @@ -14,34 +14,17 @@ =======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import static org.junit.jupiter.api.Assertions.assertThrows; - /** @author Jim Clarke */ public class SigmoidTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public SigmoidTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - - /** Test of Sigmoid call method */ @Test public void testCallFloat() { @@ -59,8 +42,8 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = 
TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Sigmoid instance = new Sigmoid<>(tf); - Operand result = instance.call(tf.constant(input)); + Sigmoid instance = new Sigmoid<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -77,8 +60,8 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Sigmoid instance = new Sigmoid<>(tf); - Operand result = instance.call(tf.constant(input)); + Sigmoid instance = new Sigmoid<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftmaxTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftmaxTest.java index 05ec3a4f716..aeb971905a2 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftmaxTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftmaxTest.java @@ -14,35 +14,18 @@ =======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import static org.junit.jupiter.api.Assertions.assertThrows; - /** @author Jim Clarke */ public class SoftmaxTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public SoftmaxTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - - /** Test of Softmax method, of class Activations. 
*/ @Test public void testSoftmaxOpsOperandFloat() { @@ -54,8 +37,8 @@ public void testSoftmaxOpsOperandFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softmax instance = new Softmax<>(tf); - Operand result = instance.call(tf.constant(input)); + Softmax instance = new Softmax<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } @@ -71,8 +54,8 @@ public void testSoftmaxOpsOperandDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softmax instance = new Softmax<>(tf); - Operand result = instance.call(tf.constant(input)); + Softmax instance = new Softmax<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } @@ -88,8 +71,8 @@ public void testSoftmaxOpsOperandDoubleNegative() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softmax instance = new Softmax<>(tf); - Operand result = instance.call(tf.constant(input)); + Softmax instance = new Softmax<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } @@ -99,14 +82,14 @@ public void testSoftmaxOpsOperandDoubleNegative() { public void testSoftmax1D() { double[] input = {1, -2, 3, -4, -5, 6, 7, 8}; double[] expected = { - 6.0352829e-04, 3.0047902e-05, 4.4595040e-03, 4.0665414e-06, - 1.4959969e-06, 8.9571528e-02, 2.4348068e-01, 6.6184908e-01 + 6.0352829e-04, 3.0047902e-05, 4.4595040e-03, 4.0665414e-06, + 1.4959969e-06, 8.9571528e-02, 2.4348068e-01, 6.6184908e-01 }; for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softmax instance = new Softmax<>(tf); - Operand result = instance.call(tf.constant(input)); + Softmax instance = new Softmax<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } @@ -116,14 +99,14 @@ public void testSoftmax1D() { public void testSoftmax3D() { double[][][] input = {{{1, -2}, {3, -4}}, {{-5, 6}, {-7, 8}}}; double[][][] expected = { - {{9.5257413e-01, 4.7425874e-02}, {9.9908900e-01, 9.1105123e-04}}, - {{1.6701422e-05, 9.9998331e-01}, {3.0590220e-07, 9.9999964e-01}} + {{9.5257413e-01, 4.7425874e-02}, {9.9908900e-01, 9.1105123e-04}}, + {{1.6701422e-05, 9.9998331e-01}, {3.0590220e-07, 9.9999964e-01}} }; for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softmax instance = new Softmax<>(tf); - Operand result = instance.call(tf.constant(input)); + Softmax instance = new Softmax<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftplusTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftplusTest.java index a17f2650d62..e896807d9f7 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftplusTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftplusTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.activations; 
-import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; @@ -26,20 +26,6 @@ public class SoftplusTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public SoftplusTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of Softplus call method */ @Test public void testCallFloat() { @@ -50,8 +36,8 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softplus instance = new Softplus<>(tf); - Operand result = instance.call(tf.constant(input)); + Softplus instance = new Softplus<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -68,8 +54,8 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softplus instance = new Softplus<>(tf); - Operand result = instance.call(tf.constant(input)); + Softplus instance = new Softplus<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftsignTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftsignTest.java index 43591ab4761..2f9a17caf59 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftsignTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftsignTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; @@ -26,20 +26,6 @@ public class SoftsignTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public SoftsignTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of Softsign call method */ @Test public void testCallFloat() { @@ -48,8 +34,8 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softsign instance = new Softsign<>(tf); - Operand result = instance.call(tf.constant(input)); + Softsign instance = new Softsign<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -71,8 +57,8 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softsign instance = new Softsign<>(tf); - Operand result = instance.call(tf.constant(input)); + Softsign instance = new Softsign<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SwishTest.java 
b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SwishTest.java index 7576789320b..8dabfaf379a 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SwishTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SwishTest.java @@ -14,35 +14,17 @@ =======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import static org.junit.jupiter.api.Assertions.assertThrows; - /** @author Jim Clarke */ public class SwishTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public SwishTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - - - /** Test of Swish call method */ @Test public void testCallFloat() { @@ -60,8 +42,8 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Swish instance = new Swish<>(tf); - Operand result = instance.call(tf.constant(input)); + Swish instance = new Swish<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -83,8 +65,8 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Swish instance = new Swish<>(tf); - Operand result = instance.call(tf.constant(input)); + Swish instance = new Swish<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/TanhTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/TanhTest.java index 5162e141c44..696f96a367e 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/TanhTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/TanhTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.activations; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; @@ -25,35 +25,25 @@ public class TanhTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public TanhTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of Tanh call method. 
*/ @Test public void testCallFloat() { float[] input = {1, -2, 3, -4, -5, 6, -7, 8}; float[] expected = { - 0.76159416F, -0.96402758F, - 0.99505475F, -0.9993293F, - -0.9999092F, 0.99998771F, - -0.99999834F, 0.99999977F + 0.76159416F, + -0.96402758F, + 0.99505475F, + -0.9993293F, + -0.9999092F, + 0.99998771F, + -0.99999834F, + 0.99999977F }; for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Tanh instance = new Tanh<>(tf); - Operand result = instance.call(tf.constant(input)); + Tanh instance = new Tanh<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } @@ -63,16 +53,20 @@ public void testCallFloat() { public void testCallDouble() { double[] input = {1, -2, 3, -4, -5, 6, -7, 8}; double[] expected = { - 0.76159416, -0.96402758, - 0.99505475, -0.9993293, - -0.9999092, 0.99998771, - -0.99999834, 0.99999977 + 0.76159416, + -0.96402758, + 0.99505475, + -0.9993293, + -0.9999092, + 0.99998771, + -0.99999834, + 0.99999977 }; for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Tanh instance = new Tanh<>(tf); - Operand result = instance.call(tf.constant(input)); + Tanh instance = new Tanh<>(); + Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/MaxNormTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/MaxNormTest.java index 1f80388e88f..c4f8f0ee89e 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/MaxNormTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/MaxNormTest.java @@ -1,5 +1,7 @@ package org.tensorflow.framework.constraints; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -7,9 +9,6 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; -import java.util.Random; -import java.util.concurrent.atomic.AtomicInteger; - class MaxNormTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -35,8 +34,8 @@ public void testCall() { for (AtomicInteger i = new AtomicInteger(); i.get() < testValues.length; i.getAndIncrement()) { - MaxNorm instance = new MaxNorm(tf, testValues[i.get()]); - Operand result = instance.call(weights); + MaxNorm instance = new MaxNorm(testValues[i.get()]); + Operand result = instance.call(tf, weights); session.evaluate(result, v -> v.floatValue() <= testValues[i.get()]); } } @@ -47,13 +46,13 @@ public void testCall1() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - MaxNorm instance = new MaxNorm(tf, 2.0); + MaxNorm instance = new MaxNorm(2.0); Operand weights = tf.constant( new float[][] { {0, 1, 3, 3}, {0, 0, 0, 3}, {0, 0, 0, 3}, }); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); float[] expected = { 0, 1, 2, 1.1547005f, 0, 0, 0, 1.1547005f, diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/MinMaxNormTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/MinMaxNormTest.java index 8c2c3a54ff9..0d127b35b01 100644 --- 
a/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/MinMaxNormTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/MinMaxNormTest.java @@ -1,5 +1,7 @@ package org.tensorflow.framework.constraints; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.ND; @@ -10,9 +12,6 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; -import java.util.Random; -import java.util.concurrent.atomic.AtomicInteger; - class MinMaxNormTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -39,8 +38,8 @@ public void testCall() { for (AtomicInteger i = new AtomicInteger(); i.get() < testValues.length; i.getAndIncrement()) { - MinMaxNorm instance = new MinMaxNorm(tf, testValues[i.get()], testValues[i.get()] * 2); - Operand result = instance.call(weights); + MinMaxNorm instance = new MinMaxNorm(testValues[i.get()], testValues[i.get()] * 2); + Operand result = instance.call(tf, weights); if (tfMode == TestSession.Mode.EAGER) evaluate(session, result.asTensor(), testValues[i.get()]); else diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/NonNegTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/NonNegTest.java index 6a6fdc13536..1a24c188860 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/NonNegTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/NonNegTest.java @@ -17,8 +17,8 @@ public void testTFloat32() { Ops tf = session.getTF(); float[][] array = {{-1, 2, -3, 4}, {-10, 11, 12, -13}}; Operand weights = tf.constant(array); - NonNeg instance = new NonNeg(tf); - Operand result = instance.call(weights); + NonNeg instance = new NonNeg(); + Operand result = instance.call(tf, weights); float[] expected = {0, 2, 0, 4, 0, 11, 12, 0}; session.evaluate(expected, result); } @@ -31,8 +31,8 @@ public void testTFloat64() { Ops tf = session.getTF(); final double[][] array = {{-1, 2, -3, 4}, {-10, 11, 12, -13}}; Operand weights = tf.constant(array); - NonNeg instance = new NonNeg(tf); - Operand result = instance.call(weights); + NonNeg instance = new NonNeg(); + Operand result = instance.call(tf, weights); double[] expected = {0, 2, 0, 4, 0, 11, 12, 0}; session.evaluate(expected, result); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/UnitNormTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/UnitNormTest.java index 6437ebcd760..9c784b7f31e 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/UnitNormTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/constraints/UnitNormTest.java @@ -28,8 +28,8 @@ public void testTFloat32() { }; Operand weights = tf.constant(array); - UnitNorm instance = new UnitNorm(tf, 1); - Operand result = instance.call(weights); + UnitNorm instance = new UnitNorm(1); + Operand result = instance.call(tf, weights); Operand expected = tf.constant(expectedArray); session.evaluate(expected, result); } @@ -50,9 +50,9 @@ public void testCallTFloat64() { {{0.72920675, 0.40984813, 0.55712338}, {0.68429305, 0.91215323, 0.83042956}}, {{0.97694125, 0.99972269, 0.13576831}, {0.21350717, 0.02353181, 0.99074035}} }; - UnitNorm instance = new UnitNorm(tf, 1); + UnitNorm instance = new UnitNorm(1); Operand weights = 
tf.constant(array); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); Operand expected = tf.constant(expectedArray); session.evaluate(expected, result); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/ConstantTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/ConstantTest.java index 4e81e0620e6..5907deae547 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/ConstantTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/ConstantTest.java @@ -14,35 +14,27 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; + +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; -import org.tensorflow.types.*; - -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; +import org.tensorflow.types.TBool; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.TString; +import org.tensorflow.types.TUint8; /** Test the Constant initializer */ public class ConstantTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public ConstantTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class Constant. 
*/ @Test public void testCallUInt() { @@ -51,8 +43,9 @@ public void testCallUInt() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Constant instance = new Constant<>(tf, 0xf); - Operand operand = instance.call(tf.constant(shape), TUint8.class); + Constant instance = new Constant<>(0xf); + + Operand operand = instance.call(tf, tf.constant(shape), TUint8.class); session.evaluate(expected, operand); } } @@ -67,8 +60,9 @@ public void testCallInt() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Constant instance = new Constant<>(tf, 0xf); - Operand operand = instance.call(tf.constant(shape), TInt32.class); + Constant instance = new Constant<>(0xf); + + Operand operand = instance.call(tf, tf.constant(shape), TInt32.class); session.evaluate(expected, operand); } } @@ -83,8 +77,9 @@ public void testCallLong() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Constant instance = new Constant<>(tf, 0xffL); - Operand operand = instance.call(tf.constant(shape), TInt64.class); + Constant instance = new Constant<>(0xffL); + + Operand operand = instance.call(tf, tf.constant(shape), TInt64.class); session.evaluate(expected, operand); } } @@ -97,8 +92,9 @@ public void testCallFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Constant instance = new Constant<>(tf, 12.F); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + Constant instance = new Constant<>(12.F); + + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -112,8 +108,9 @@ public void testCallDouble() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Constant instance = new Constant<>(tf, 11.); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + Constant instance = new Constant<>(11.); + + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -129,8 +126,9 @@ public void testCallString() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Constant instance = new Constant<>(tf, 22); - instance.call(tf.constant(shape), TString.class); + Constant instance = new Constant<>(22); + + instance.call(tf, tf.constant(shape), TString.class); fail("IllegalArgumentException should have been thrown for TString"); } }); @@ -145,8 +143,9 @@ public void testCallBool() { Shape shape = Shape.of(2, 2); Boolean[] expected = {true, true, true, true}; - Constant instance = new Constant<>(tf, true); - Operand operand = instance.call(tf.constant(shape), TBool.class); + Constant instance = new Constant<>(true); + + Operand operand = instance.call(tf, tf.constant(shape), TBool.class); session.evaluate(expected, operand); } } @@ -158,9 +157,10 @@ public void testReproducible() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Constant instance = new Constant<>(tf, 11.); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + Constant instance = new Constant<>(11.); + + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git 
a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/GlorotTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/GlorotTest.java index e9769806928..166011c3b64 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/GlorotTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/GlorotTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.VarianceScaling.Distribution; import org.tensorflow.framework.utils.TestSession; @@ -29,20 +29,6 @@ public class GlorotTest { private static final long SEED = 1000L; private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public GlorotTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class Glorot. */ @Test public void testCallNormalFloat() { @@ -51,9 +37,9 @@ public void testCallNormalFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Glorot instance = new Glorot<>(tf, Distribution.TRUNCATED_NORMAL, SEED); + Glorot instance = new Glorot<>(Distribution.TRUNCATED_NORMAL, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -68,8 +54,9 @@ public void testCallNormalDouble() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Glorot instance = new Glorot<>(tf, Distribution.TRUNCATED_NORMAL, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + Glorot instance = new Glorot<>(Distribution.TRUNCATED_NORMAL, SEED); + + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -82,8 +69,9 @@ public void testCallUniformFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Glorot instance = new Glorot<>(tf, Distribution.UNIFORM, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + Glorot instance = new Glorot<>(Distribution.UNIFORM, SEED); + + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -97,8 +85,9 @@ public void testCallUniformDouble() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Glorot instance = new Glorot<>(tf, Distribution.UNIFORM, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + Glorot instance = new Glorot<>(Distribution.UNIFORM, SEED); + + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -109,9 +98,10 @@ public void testCallNormalReproducible() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Glorot instance = new Glorot<>(tf, Distribution.TRUNCATED_NORMAL, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand 
operand2 = instance.call(tf.constant(shape), TFloat64.class); + Glorot instance = new Glorot<>(Distribution.TRUNCATED_NORMAL, SEED); + + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } @@ -122,9 +112,10 @@ public void testCallUniformReproducible() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Glorot instance = new Glorot<>(tf, Distribution.UNIFORM, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + Glorot instance = new Glorot<>(Distribution.UNIFORM, SEED); + + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } @@ -135,10 +126,10 @@ public void testCallNORMALReproducible() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Glorot instance = - new Glorot<>(tf, Distribution.NORMAL, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + Glorot instance = new Glorot<>(Distribution.NORMAL, SEED); + + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/HeTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/HeTest.java index 8953fa3005e..7b183358f85 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/HeTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/HeTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.VarianceScaling.Distribution; import org.tensorflow.framework.utils.TestSession; @@ -29,20 +29,6 @@ public class HeTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; int counter; - public HeTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class He. 
*/ @Test public void testCallNormalFloat() { @@ -51,8 +37,9 @@ public void testCallNormalFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - He instance = new He<>(tf, Distribution.TRUNCATED_NORMAL, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + He instance = new He<>(Distribution.TRUNCATED_NORMAL, SEED); + + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -66,8 +53,9 @@ public void testCallNormalDouble() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - He instance = new He<>(tf, Distribution.TRUNCATED_NORMAL, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + He instance = new He<>(Distribution.TRUNCATED_NORMAL, SEED); + + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -80,8 +68,9 @@ public void testCallFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - He instance = new He<>(tf, Distribution.UNIFORM, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + He instance = new He<>(Distribution.UNIFORM, SEED); + + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -95,8 +84,9 @@ public void testCallDouble() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - He instance = new He<>(tf, Distribution.UNIFORM, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + He instance = new He<>(Distribution.UNIFORM, SEED); + + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -107,9 +97,10 @@ public void testCallNormalReproducible() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - He instance = new He<>(tf, Distribution.TRUNCATED_NORMAL, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + He instance = new He<>(Distribution.TRUNCATED_NORMAL, SEED); + + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } @@ -120,9 +111,10 @@ public void testCallUniformReproducible() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - He instance = new He<>(tf, Distribution.UNIFORM, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + He instance = new He<>(Distribution.UNIFORM, SEED); + + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } @@ -133,9 +125,10 @@ public void testCallNORMALReproducible() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - He instance = new He<>(tf, Distribution.NORMAL, SEED); - Operand operand1 = 
instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + He instance = new He<>(Distribution.NORMAL, SEED); + + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/IdentityTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/IdentityTest.java index 6eee5473937..3f5c6cdb363 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/IdentityTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/IdentityTest.java @@ -14,37 +14,19 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import org.tensorflow.types.TInt32; - -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; /** Test the Identity initializer */ public class IdentityTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public IdentityTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class Constant. 
*/ @Test public void testCallFloat() { @@ -64,8 +46,8 @@ public void testCallFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(10, 10); - Identity instance = new Identity<>(tf, 2.); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + Identity instance = new Identity<>(2.); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -90,8 +72,8 @@ public void testCallDouble() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(10, 10); - Identity instance = new Identity<>(tf, 2.); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + Identity instance = new Identity<>(2.); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -103,9 +85,9 @@ public void testReproducible() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Identity instance = new Identity<>(tf, 2.); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + Identity instance = new Identity<>(2.); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/LeCunTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/LeCunTest.java index 336850a5549..8858bac13dd 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/LeCunTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/LeCunTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.VarianceScaling.Distribution; import org.tensorflow.framework.utils.TestSession; @@ -29,20 +29,6 @@ public class LeCunTest { private static final long SEED = 1000L; private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public LeCunTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class LeCun. 
*/ @Test public void testCallNormalFloat() { @@ -51,8 +37,8 @@ public void testCallNormalFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - LeCun instance = new LeCun<>(tf, Distribution.TRUNCATED_NORMAL, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + LeCun instance = new LeCun<>(Distribution.TRUNCATED_NORMAL, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -66,8 +52,8 @@ public void testCallNormalDouble() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - LeCun instance = new LeCun<>(tf, Distribution.TRUNCATED_NORMAL, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + LeCun instance = new LeCun<>(Distribution.TRUNCATED_NORMAL, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -80,8 +66,8 @@ public void testCallUniformFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - LeCun instance = new LeCun<>(tf, Distribution.UNIFORM, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + LeCun instance = new LeCun<>(Distribution.UNIFORM, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -95,8 +81,8 @@ public void testCallUniformDouble() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - LeCun instance = new LeCun<>(tf, Distribution.UNIFORM, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + LeCun instance = new LeCun<>(Distribution.UNIFORM, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -107,9 +93,9 @@ public void testCallNormalReproducible() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - LeCun instance = new LeCun<>(tf, Distribution.TRUNCATED_NORMAL, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + LeCun instance = new LeCun<>(Distribution.TRUNCATED_NORMAL, SEED); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } @@ -120,9 +106,9 @@ public void testCallUniformReproducible() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - LeCun instance = new LeCun<>(tf, Distribution.UNIFORM, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + LeCun instance = new LeCun<>(Distribution.UNIFORM, SEED); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } @@ -133,9 +119,9 @@ public void testCallNORMALReproducible() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - LeCun 
instance = new LeCun<>(tf, Distribution.NORMAL, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + LeCun instance = new LeCun<>(Distribution.NORMAL, SEED); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/OnesTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/OnesTest.java index 053ba5dd7ff..0bb0498e0cb 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/OnesTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/OnesTest.java @@ -14,35 +14,27 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; + +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; -import org.tensorflow.types.*; - -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; +import org.tensorflow.types.TBool; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.TString; +import org.tensorflow.types.TUint8; /** Test the Ones initializer */ public class OnesTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public OnesTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class Ones. 
*/ @Test public void testCallUInt() { @@ -51,8 +43,8 @@ public void testCallUInt() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Ones instance = new Ones<>(tf); - Operand operand = instance.call(tf.constant(shape), TUint8.class); + Ones instance = new Ones<>(); + Operand operand = instance.call(tf, tf.constant(shape), TUint8.class); session.evaluate(expected, operand); } } @@ -65,8 +57,8 @@ public void testCallInt() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Ones instance = new Ones<>(tf); - Operand operand = instance.call(tf.constant(shape), TInt32.class); + Ones instance = new Ones<>(); + Operand operand = instance.call(tf, tf.constant(shape), TInt32.class); session.evaluate(expected, operand); } } @@ -79,8 +71,8 @@ public void testCallLong() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Ones instance = new Ones<>(tf); - Operand operand = instance.call(tf.constant(shape), TInt64.class); + Ones instance = new Ones<>(); + Operand operand = instance.call(tf, tf.constant(shape), TInt64.class); session.evaluate(expected, operand); } } @@ -93,8 +85,8 @@ public void testCallFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Ones instance = new Ones<>(tf); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + Ones instance = new Ones<>(); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -108,8 +100,8 @@ public void testCallDouble() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Ones instance = new Ones<>(tf); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + Ones instance = new Ones<>(); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -125,8 +117,8 @@ public void testCallString() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Ones instance = new Ones<>(tf); - instance.call(tf.constant(shape), TString.class); + Ones instance = new Ones<>(); + instance.call(tf, tf.constant(shape), TString.class); fail("IllegalArgumentException should have been thrown for TString"); } }); @@ -140,8 +132,8 @@ public void testCallBool() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Ones instance = new Ones<>(tf); - Operand operand = instance.call(tf.constant(shape), TBool.class); + Ones instance = new Ones<>(); + Operand operand = instance.call(tf, tf.constant(shape), TBool.class); session.evaluate(expected, operand); } } @@ -153,9 +145,23 @@ public void testReproducible() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Ones instance = new Ones<>(tf); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + Ones instance = new Ones<>(); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); + session.evaluate(operand1, operand2); + } + } + + @Test + public void testFunctional() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + Shape shape = Shape.of(2, 2); + + Initializer instance = 
(ltf, dims, type) -> ltf.ones(dims, type); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/OrthogonalTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/OrthogonalTest.java index 22b89d9177c..c933e669dfd 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/OrthogonalTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/OrthogonalTest.java @@ -14,17 +14,13 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import org.tensorflow.types.TInt32; - -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; /** Test the Orthogonal initializer */ public class OrthogonalTest { @@ -33,20 +29,6 @@ public class OrthogonalTest { private static final double GAIN_VALUE = 1.0; private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public OrthogonalTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class Orthogonal. 
*/ @Test public void testCallFloat() { @@ -156,8 +138,8 @@ public void testCallFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(10, 10); - Orthogonal instance = new Orthogonal<>(tf, GAIN_VALUE, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + Orthogonal instance = new Orthogonal<>(GAIN_VALUE, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -271,8 +253,8 @@ public void testCallDouble() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(10, 10); - Orthogonal instance = new Orthogonal<>(tf, GAIN_VALUE, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + Orthogonal instance = new Orthogonal<>(GAIN_VALUE, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -284,9 +266,9 @@ public void testReproducible() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Orthogonal instance = new Orthogonal<>(tf, GAIN_VALUE, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + Orthogonal instance = new Orthogonal<>(GAIN_VALUE, SEED); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/RandomNormalTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/RandomNormalTest.java index 3b2b3bdb243..dada058af42 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/RandomNormalTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/RandomNormalTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; @@ -30,20 +30,6 @@ public class RandomNormalTest { private static final double STDDEV_VALUE = 3.0; private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public RandomNormalTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class RandomNormal. 
*/ @Test public void testCalltestSoftmaxFloat() { @@ -52,9 +38,8 @@ public void testCalltestSoftmaxFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - RandomNormal instance = - new RandomNormal<>(tf, MEAN_VALUE, STDDEV_VALUE, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + RandomNormal instance = new RandomNormal<>(MEAN_VALUE, STDDEV_VALUE, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -68,9 +53,8 @@ public void testCalltestSoftmaxDouble() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - RandomNormal instance = - new RandomNormal<>(tf, MEAN_VALUE, STDDEV_VALUE, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + RandomNormal instance = new RandomNormal<>(MEAN_VALUE, STDDEV_VALUE, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -82,10 +66,9 @@ public void testReproducible() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - RandomNormal instance = - new RandomNormal<>(tf, MEAN_VALUE, STDDEV_VALUE, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + RandomNormal instance = new RandomNormal<>(MEAN_VALUE, STDDEV_VALUE, SEED); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/RandomUniformTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/RandomUniformTest.java index 23e26083a9b..1a1b3f755b7 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/RandomUniformTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/RandomUniformTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; @@ -31,20 +31,6 @@ public class RandomUniformTest { private static final double MAX_VALUE = 10.0; private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public RandomUniformTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class RandomUniform. 
*/ @Test public void testCallInt() { @@ -53,9 +39,8 @@ public void testCallInt() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - RandomUniform instance = - new RandomUniform<>(tf, MIN_VALUE, MAX_VALUE, SEED); - Operand operand = instance.call(tf.constant(shape), TInt32.class); + RandomUniform instance = new RandomUniform<>(MIN_VALUE, MAX_VALUE, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TInt32.class); session.evaluate(expected, operand); } } @@ -68,9 +53,8 @@ public void testCallFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - RandomUniform instance = - new RandomUniform<>(tf, MIN_VALUE, MAX_VALUE, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + RandomUniform instance = new RandomUniform<>(MIN_VALUE, MAX_VALUE, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -84,9 +68,8 @@ public void testCallDouble() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - RandomUniform instance = - new RandomUniform<>(tf, MIN_VALUE, MAX_VALUE, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + RandomUniform instance = new RandomUniform<>(MIN_VALUE, MAX_VALUE, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -98,10 +81,9 @@ public void testReproducible() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - RandomUniform instance = - new RandomUniform<>(tf, MIN_VALUE, MAX_VALUE, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + RandomUniform instance = new RandomUniform<>(MIN_VALUE, MAX_VALUE, SEED); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/TruncatedNormalTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/TruncatedNormalTest.java index 96bf915e199..6ea19fde349 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/TruncatedNormalTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/TruncatedNormalTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; @@ -30,20 +30,6 @@ public class TruncatedNormalTest { private static final double STDDEV_VALUE = 3.0; private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public TruncatedNormalTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class TruncatedNormal. 
*/ @Test public void testCallFloat() { @@ -52,9 +38,8 @@ public void testCallFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - TruncatedNormal instance = - new TruncatedNormal<>(tf, MEAN_VALUE, STDDEV_VALUE, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + TruncatedNormal instance = new TruncatedNormal<>(MEAN_VALUE, STDDEV_VALUE, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -68,9 +53,8 @@ public void testCallDouble() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - TruncatedNormal instance = - new TruncatedNormal<>(tf, MEAN_VALUE, STDDEV_VALUE, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + TruncatedNormal instance = new TruncatedNormal<>(MEAN_VALUE, STDDEV_VALUE, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -82,10 +66,9 @@ public void testReproducible() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - TruncatedNormal instance = - new TruncatedNormal<>(tf, MEAN_VALUE, STDDEV_VALUE, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + TruncatedNormal instance = new TruncatedNormal<>(MEAN_VALUE, STDDEV_VALUE, SEED); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/VarianceScalingTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/VarianceScalingTest.java index 159affb07e2..56aa95ecf73 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/VarianceScalingTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/VarianceScalingTest.java @@ -14,7 +14,7 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; @@ -28,20 +28,6 @@ public class VarianceScalingTest { private static final long SEED = 1000L; private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public VarianceScalingTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class VarianceScaling. 
*/ @Test public void testCallFloat1FanInTruncatedNormal() { @@ -52,12 +38,11 @@ public void testCallFloat1FanInTruncatedNormal() { Shape shape = Shape.of(2, 2); VarianceScaling instance = new VarianceScaling<>( - tf, 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.TRUNCATED_NORMAL, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -73,12 +58,11 @@ public void testCallDouble1FanInTruncatedNormal() { Shape shape = Shape.of(2, 2); VarianceScaling instance = new VarianceScaling<>( - tf, 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.TRUNCATED_NORMAL, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -93,12 +77,8 @@ public void testCallFloat1FanInNormal() { Shape shape = Shape.of(2, 2); VarianceScaling instance = new VarianceScaling<>( - tf, - 1.0, - VarianceScaling.Mode.FAN_IN, - VarianceScaling.Distribution.NORMAL, - SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.NORMAL, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -114,12 +94,8 @@ public void testCalltestSoftmaxDouble1FanInNormal() { Shape shape = Shape.of(2, 2); VarianceScaling instance = new VarianceScaling<>( - tf, - 1.0, - VarianceScaling.Mode.FAN_IN, - VarianceScaling.Distribution.NORMAL, - SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.NORMAL, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -134,8 +110,8 @@ public void testCalltestSoftmaxFloat1FanInUNIFORM() { Shape shape = Shape.of(2, 2); VarianceScaling instance = new VarianceScaling<>( - tf, 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.UNIFORM, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.UNIFORM, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -151,8 +127,8 @@ public void testCalltestSoftmaxDouble1FanInUNIFORM() { Shape shape = Shape.of(2, 2); VarianceScaling instance = new VarianceScaling<>( - tf, 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.UNIFORM, SEED); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.UNIFORM, SEED); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -166,9 +142,9 @@ public void testReproducible1() { VarianceScaling instance = new VarianceScaling<>( - tf, 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.UNIFORM, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.UNIFORM, SEED); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, 
operand2); } } @@ -182,13 +158,9 @@ public void testReproducible2() { VarianceScaling instance = new VarianceScaling<>( - tf, - 1.0, - VarianceScaling.Mode.FAN_IN, - VarianceScaling.Distribution.NORMAL, - SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + 1.0, VarianceScaling.Mode.FAN_IN, VarianceScaling.Distribution.NORMAL, SEED); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } @@ -202,13 +174,12 @@ public void testReproducible3() { VarianceScaling instance = new VarianceScaling<>( - tf, 1.0, VarianceScaling.Mode.FAN_OUT, VarianceScaling.Distribution.TRUNCATED_NORMAL, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } @@ -222,9 +193,9 @@ public void testReproducible4() { VarianceScaling instance = new VarianceScaling<>( - tf, 1.0, VarianceScaling.Mode.FAN_AVG, VarianceScaling.Distribution.UNIFORM, SEED); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + 1.0, VarianceScaling.Mode.FAN_AVG, VarianceScaling.Distribution.UNIFORM, SEED); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/ZerosTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/ZerosTest.java index 21bad6ff360..772baee1b61 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/ZerosTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/initializers/ZerosTest.java @@ -14,32 +14,24 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; -import org.tensorflow.types.*; +import org.tensorflow.types.TBool; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.TString; +import org.tensorflow.types.TUint8; /** Test the Zeros initializer */ public class ZerosTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; - public ZerosTest() {} - - @BeforeAll - public static void setUpClass() {} - - @AfterAll - public static void tearDownClass() {} - - @BeforeEach - public void setUp() {} - - @AfterEach - public void tearDown() {} - /** Test of call method, of class Zeros. 
*/ @Test public void testCallUInt() { @@ -48,8 +40,8 @@ public void testCallUInt() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Zeros instance = new Zeros<>(tf); - Operand operand = instance.call(tf.constant(shape), TUint8.class); + Zeros instance = new Zeros<>(); + Operand operand = instance.call(tf, tf.constant(shape), TUint8.class); session.evaluate(expected, operand); } } @@ -62,8 +54,8 @@ public void testCallInt() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Zeros instance = new Zeros<>(tf); - Operand operand = instance.call(tf.constant(shape), TInt32.class); + Zeros instance = new Zeros<>(); + Operand operand = instance.call(tf, tf.constant(shape), TInt32.class); session.evaluate(expected, operand); } } @@ -76,8 +68,8 @@ public void testCallLong() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Zeros instance = new Zeros<>(tf); - Operand operand = instance.call(tf.constant(shape), TInt64.class); + Zeros instance = new Zeros<>(); + Operand operand = instance.call(tf, tf.constant(shape), TInt64.class); session.evaluate(expected, operand); } } @@ -90,8 +82,8 @@ public void testCallFloat() { try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Zeros instance = new Zeros<>(tf); - Operand operand = instance.call(tf.constant(shape), TFloat32.class); + Zeros instance = new Zeros<>(); + Operand operand = instance.call(tf, tf.constant(shape), TFloat32.class); session.evaluate(expected, operand); } } @@ -105,8 +97,8 @@ public void testCallDouble() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Zeros instance = new Zeros<>(tf); - Operand operand = instance.call(tf.constant(shape), TFloat64.class); + Zeros instance = new Zeros<>(); + Operand operand = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(expected, operand); } } @@ -119,8 +111,8 @@ public void testCallString() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Zeros instance = new Zeros<>(tf); - Operand operand = instance.call(tf.constant(shape), TString.class); + Zeros instance = new Zeros<>(); + Operand operand = instance.call(tf, tf.constant(shape), TString.class); session.evaluateString(operand, String::isEmpty); } } @@ -134,8 +126,8 @@ public void testCallBool() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Zeros instance = new Zeros<>(tf); - Operand operand = instance.call(tf.constant(shape), TBool.class); + Zeros instance = new Zeros<>(); + Operand operand = instance.call(tf, tf.constant(shape), TBool.class); session.evaluate(expected, operand); } } @@ -147,9 +139,23 @@ public void testReproducible() { Ops tf = session.getTF(); Shape shape = Shape.of(2, 2); - Zeros instance = new Zeros<>(tf); - Operand operand1 = instance.call(tf.constant(shape), TFloat64.class); - Operand operand2 = instance.call(tf.constant(shape), TFloat64.class); + Zeros instance = new Zeros<>(); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); + session.evaluate(operand1, operand2); + } + } + + @Test + public void testFunctional() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + Shape shape = 
Shape.of(2, 2); + + Initializer instance = (ltf, dims, type) -> ltf.zeros(dims, type); + Operand operand1 = instance.call(tf, tf.constant(shape), TFloat64.class); + Operand operand2 = instance.call(tf, tf.constant(shape), TFloat64.class); session.evaluate(operand1, operand2); } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/BinaryCrossentropyTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/BinaryCrossentropyTest.java index d2128b80839..d5afdfb0da4 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/BinaryCrossentropyTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/BinaryCrossentropyTest.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.junit.jupiter.api.Assertions.assertThrows; + import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,8 +23,6 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; -import static org.junit.jupiter.api.Assertions.assertThrows; - public class BinaryCrossentropyTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -32,25 +32,24 @@ public void testAllCorrectUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - BinaryCrossentropy instance = new BinaryCrossentropy(tf); + BinaryCrossentropy instance = new BinaryCrossentropy(); + float[] trueArray = {1f, 0f, 0f, 0f, 1f, 0f, 0f, 0f, 1f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(3, 3))); - Operand loss = instance.call(yTrue, yTrue); + Operand loss = instance.call(tf, yTrue, yTrue); float expected = 0.0f; testSession.evaluate(expected, loss); // Test with logits. 
float[] logitsArray = { - 100.0f, -100.0f, -100.0f, - -100.0f, 100.0f, -100.0f, - -100.0f, -100.0f, 100.0f + 100.0f, -100.0f, -100.0f, -100.0f, 100.0f, -100.0f, -100.0f, -100.0f, 100.0f }; Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); - instance = new BinaryCrossentropy(tf, true); + instance = new BinaryCrossentropy(true); - loss = instance.call(yTrue, logits); + loss = instance.call(tf, yTrue, logits); testSession.evaluate(expected, loss); } } @@ -67,7 +66,8 @@ public void testInvalidPredictionsRange() { catchClass, () -> { Ops tf = testSession.getTF(); - BinaryCrossentropy instance = new BinaryCrossentropy(tf); + BinaryCrossentropy instance = new BinaryCrossentropy(); + float[] trueArray = {1f, 0f, 0f, 0f, 1f, 0f, 0f, 0f, 1f}; float[] predArray = {2f, 1f, -1f, 0f}; Operand yTrue = @@ -75,7 +75,7 @@ public void testInvalidPredictionsRange() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 2))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); testSession.run(loss); }); } @@ -87,26 +87,25 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - BinaryCrossentropy instance = new BinaryCrossentropy(tf); + BinaryCrossentropy instance = new BinaryCrossentropy(); + float[] trueArray = {1f, 0f, 1f, 0f}; float[] predArray = {1f, 1f, 1f, 0f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 2))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 2))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 3.83331f; testSession.evaluate(expected, loss); // Test with logits. float[] trueArray1 = {1f, 0f, 1f, 0f, 1f, 1f}; - float[] logitsArray = { - 100.0f, -100.0f, 100.0f, - 100.0f, 100.0f, -100.0f - }; + float[] logitsArray = {100.0f, -100.0f, 100.0f, 100.0f, 100.0f, -100.0f}; Operand yTrue1 = tf.reshape(tf.constant(trueArray1), tf.constant(Shape.of(2, 3))); Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(2, 3))); - instance = new BinaryCrossentropy(tf, true); - loss = instance.call(yTrue1, logits); + instance = new BinaryCrossentropy(true); + + loss = instance.call(tf, yTrue1, logits); expected = 33.33333f; testSession.evaluate(expected, loss); } @@ -118,27 +117,26 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - BinaryCrossentropy instance = new BinaryCrossentropy(tf); + BinaryCrossentropy instance = new BinaryCrossentropy(); + float[] trueArray = {1f, 0f, 1f, 0f}; float[] predArray = {1f, 1f, 1f, 0f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 2))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 2))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 8.816612f; testSession.evaluate(expected, loss); // Test with logits. 
float[] trueArray1 = {1f, 0f, 1f, 0f, 1f, 1f}; - float[] logitsArray = { - 100.0f, -100.0f, 100.0f, - 100.0f, 100.0f, -100.0f - }; + float[] logitsArray = {100.0f, -100.0f, 100.0f, 100.0f, 100.0f, -100.0f}; Operand yTrue1 = tf.reshape(tf.constant(trueArray1), tf.constant(Shape.of(2, 3))); Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(2, 3))); - instance = new BinaryCrossentropy(tf, true); - loss = instance.call(yTrue1, logits, sampleWeight); + instance = new BinaryCrossentropy(true); + + loss = instance.call(tf, yTrue1, logits, sampleWeight); expected = 76.66667f; testSession.evaluate(expected, loss); } @@ -149,7 +147,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - BinaryCrossentropy instance = new BinaryCrossentropy(tf); + BinaryCrossentropy instance = new BinaryCrossentropy(); + float[] trueArray = {1f, 0f, 1f, 0f}; float[] predArray = {1f, 1f, 1f, 0f}; float[] sampleWeightArray = {1.2f, 3.4f}; @@ -157,23 +156,21 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 2))); Operand sampleWeight = tf.reshape(tf.constant(sampleWeightArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 4.59997f; testSession.evaluate(expected, loss); // Test with logits. float[] trueArray1 = {1f, 0f, 1f, 0f, 1f, 1f}; - float[] logitsArray = { - 100.0f, -100.0f, 100.0f, - 100.0f, 100.0f, -100.0f - }; + float[] logitsArray = {100.0f, -100.0f, 100.0f, 100.0f, 100.0f, -100.0f}; float[] sampleWeightArray1 = {4f, 3f}; Operand yTrue1 = tf.reshape(tf.constant(trueArray1), tf.constant(Shape.of(2, 3))); Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight1 = tf.constant(sampleWeightArray1); - instance = new BinaryCrossentropy(tf, true); - loss = instance.call(yTrue1, logits, sampleWeight1); + instance = new BinaryCrossentropy(true); + + loss = instance.call(tf, yTrue1, logits, sampleWeight1); expected = 100f; testSession.evaluate(expected, loss); } @@ -187,17 +184,15 @@ public void testNoReduction() { // Test with logits. 
float[] trueArray = {1f, 0f, 1f, 0f, 1f, 1f}; - float[] logitsArray = { - 100.0f, -100.0f, 100.0f, - 100.0f, 100.0f, -100.0f - }; + float[] logitsArray = {100.0f, -100.0f, 100.0f, 100.0f, 100.0f, -100.0f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(2, 3))); BinaryCrossentropy instance = new BinaryCrossentropy( - tf, true, BinaryCrossentropy.LABEL_SMOOTHING_DEFAULT, Reduction.NONE); - Operand loss = instance.call(yTrue, logits); + true, BinaryCrossentropy.LABEL_SMOOTHING_DEFAULT, Reduction.NONE); + + Operand loss = instance.call(tf, yTrue, logits); Float[] expected = {0.f, 66.666664f}; testSession.evaluate(expected, loss); } @@ -215,8 +210,9 @@ public void testLabelSmoothing() { Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(1, 3))); - BinaryCrossentropy instance = new BinaryCrossentropy(tf, true, labelSmoothing); - Operand loss = instance.call(yTrue, logits); + BinaryCrossentropy instance = new BinaryCrossentropy(true, labelSmoothing); + + Operand loss = instance.call(tf, yTrue, logits); float expected = (100.0f + 50.0f * labelSmoothing) / 3.0f; testSession.evaluate(expected, loss); } catch (Exception expected) { diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CategoricalCrossentropyTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CategoricalCrossentropyTest.java index 13b287de3cd..25f5e5a54f1 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CategoricalCrossentropyTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CategoricalCrossentropyTest.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.junit.jupiter.api.Assertions.assertThrows; + import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -23,8 +25,6 @@ import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; -import static org.junit.jupiter.api.Assertions.assertThrows; - public class CategoricalCrossentropyTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -48,8 +48,9 @@ public void testAllCorrectUnweighted() { }; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(3, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(3, 3))); - CategoricalCrossentropy instance = new CategoricalCrossentropy(tf); - Operand loss = instance.call(yTrue, yPred); + CategoricalCrossentropy instance = new CategoricalCrossentropy(); + + Operand loss = instance.call(tf, yTrue, yPred); float expected = 0F; testSession.evaluate(expected, loss); @@ -62,8 +63,9 @@ public void testAllCorrectUnweighted() { yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(3, 3))); Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); - instance = new CategoricalCrossentropy(tf, true); - loss = instance.call(yTrue, logits); + instance = new CategoricalCrossentropy(true); + + loss = instance.call(tf, yTrue, logits); testSession.setEpsilon(1e-3F); testSession.evaluate(0.0F, loss); } @@ -81,23 +83,20 @@ public void testInvalidPredictionsRange() { catchClass, () -> { Ops tf = testSession.getTF(); - CategoricalCrossentropy instance = new CategoricalCrossentropy(tf); + CategoricalCrossentropy instance = new 
CategoricalCrossentropy(); + float[] trueArray = { 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L }; - float[] predArray = { - -1.F, 0.F, 0.F, - 0.F, 1.F, 0.F, - 0.F, 0.F, 1.F - }; + float[] predArray = {-1.F, 0.F, 0.F, 0.F, 1.F, 0.F, 0.F, 0.F, 1.F}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(3, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 2))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); testSession.run(loss); }); } @@ -109,7 +108,8 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - CategoricalCrossentropy instance = new CategoricalCrossentropy(tf); + CategoricalCrossentropy instance = new CategoricalCrossentropy(); + int[] trueArray = {1, 0, 0, 0, 1, 0, 0, 0, 1}; float[] predArray = { .9F, .05F, .05F, @@ -118,7 +118,7 @@ public void testUnweighted() { }; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(3, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(3, 3))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 0.32396814F; testSession.evaluate(expected, loss); @@ -130,8 +130,9 @@ public void testUnweighted() { }; Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); - instance = new CategoricalCrossentropy(tf, true); - loss = instance.call(yTrue, logits); + instance = new CategoricalCrossentropy(true); + + loss = instance.call(tf, yTrue, logits); expected = 0.0573755F; testSession.evaluate(expected, loss); } @@ -158,8 +159,9 @@ public void testScalarWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(3, 3))); Operand sampleWeight = tf.constant(2.3F); - CategoricalCrossentropy instance = new CategoricalCrossentropy(tf); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + CategoricalCrossentropy instance = new CategoricalCrossentropy(); + + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = .7451267F; testSession.evaluate(expected, loss); @@ -171,8 +173,9 @@ public void testScalarWeighted() { }; Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); - instance = new CategoricalCrossentropy(tf, true); - loss = instance.call(yTrue, logits, sampleWeight); + instance = new CategoricalCrossentropy(true); + + loss = instance.call(tf, yTrue, logits, sampleWeight); expected = 0.13196386F; testSession.evaluate(expected, loss); } @@ -183,7 +186,8 @@ public void testSsampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - CategoricalCrossentropy instance = new CategoricalCrossentropy(tf); + CategoricalCrossentropy instance = new CategoricalCrossentropy(); + float[] sampeWeightArray = {1.2F, 3.4F, 5.6F}; int[] trueArray = { 1, 0, 0, @@ -199,7 +203,7 @@ public void testSsampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(3, 3))); Operand sampleWeight = tf.reshape(tf.constant(sampeWeightArray), tf.constant(Shape.of(3, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 1.0696F; testSession.evaluate(expected, loss); @@ -211,8 +215,9 @@ public void testSsampleWeighted() { }; Operand logits = 
tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); - instance = new CategoricalCrossentropy(tf, true); - loss = instance.call(yTrue, logits, sampleWeight); + instance = new CategoricalCrossentropy(true); + + loss = instance.call(tf, yTrue, logits, sampleWeight); expected = 0.31829F; testSession.evaluate(expected, loss); } @@ -234,9 +239,9 @@ public void testNoReduction() { Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(3, 3))); Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); - CategoricalCrossentropy instance = - new CategoricalCrossentropy(tf, true, 0.0F, Reduction.NONE); - Operand loss = instance.call(yTrue, logits); + CategoricalCrossentropy instance = new CategoricalCrossentropy(true, 0.0F, Reduction.NONE); + + Operand loss = instance.call(tf, yTrue, logits); Float[] expected = {0.001822F, 0.000459F, 0.169846F}; testSession.evaluate(expected, loss); } @@ -254,8 +259,9 @@ public void testLabelSmoothing() { Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(1, 3))); - CategoricalCrossentropy instance = new CategoricalCrossentropy(tf, true, labelSmoothing); - Operand loss = instance.call(yTrue, logits); + CategoricalCrossentropy instance = new CategoricalCrossentropy(true, labelSmoothing); + + Operand loss = instance.call(tf, yTrue, logits); float expected = 400.0F * labelSmoothing / 3.0F; testSession.evaluate(expected, loss); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CategoricalHingeTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CategoricalHingeTest.java index b0d0442b3c7..d00f5374d61 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CategoricalHingeTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CategoricalHingeTest.java @@ -31,12 +31,13 @@ public void testReductionNone() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - CategoricalHinge instance = new CategoricalHinge(tf, Reduction.NONE); + CategoricalHinge instance = new CategoricalHinge(Reduction.NONE); + int[] trueArray = {1, 9, 2, -5}; float[] predArray = {4f, 8f, 12f, 8f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 2))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 2))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); Float[] expected = {0.0f, 65.0f}; testSession.evaluate(expected, loss); } @@ -48,12 +49,13 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - CategoricalHinge instance = new CategoricalHinge(tf); + CategoricalHinge instance = new CategoricalHinge(); + int[] trueArray = {1, 9, 2, -5}; float[] predArray = {4f, 8f, 12f, 8f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 2))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 2))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 32.5f; testSession.evaluate(expected, loss); } @@ -65,17 +67,18 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - CategoricalHinge 
instance = new CategoricalHinge(tf); + CategoricalHinge instance = new CategoricalHinge(); + int[] trueArray = {1, 9, 2, -5, -2, 6}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 83.95f; testSession.evaluate(expected, loss); - Operand loss2 = instance.call(yTrue, yPred, sampleWeight); + Operand loss2 = instance.call(tf, yTrue, yPred, sampleWeight); testSession.evaluate(loss, loss2); } } @@ -85,7 +88,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - CategoricalHinge instance = new CategoricalHinge(tf); + CategoricalHinge instance = new CategoricalHinge(); + int[] trueArray = {1, 9, 2, -5, -2, 6}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] weightsNp = {1.2f, 3.4f}; @@ -93,7 +97,7 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.reshape(tf.constant(weightsNp), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 124.1f; testSession.evaluate(expected, loss); } @@ -104,13 +108,14 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - CategoricalHinge instance = new CategoricalHinge(tf); + CategoricalHinge instance = new CategoricalHinge(); + int[] trueArray = {1, 9, 2, -5, -2, 6}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(0f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -121,7 +126,8 @@ public void testTimestepWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - CategoricalHinge instance = new CategoricalHinge(tf); + CategoricalHinge instance = new CategoricalHinge(); + int[] trueArray = {1, 9, 2, -5, -2, 6}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] weightsNp = {3, 6, 5, 0, 4, 2}; @@ -130,7 +136,7 @@ public void testTimestepWeighted() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(weightsNp), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 4.0f; testSession.evaluate(expected, loss); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CosineSimilarityTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CosineSimilarityTest.java index 8350d1403ed..2f21929a969 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CosineSimilarityTest.java +++ 
b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/CosineSimilarityTest.java @@ -33,11 +33,12 @@ public void testReductionNone() { float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; - CosineSimilarity instance = new CosineSimilarity(tf, Reduction.NONE); + CosineSimilarity instance = new CosineSimilarity(Reduction.NONE); + Shape shape = Shape.of(2, 3); Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(shape)); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(shape)); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); Float[] expected = {-0.720488f, 0.3460499f}; testSession.evaluate(expected, loss); } @@ -52,11 +53,12 @@ public void testUnweighted() { float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] expectedLoss = {0.720488f, -0.3460499f}; - CosineSimilarity instance = new CosineSimilarity(tf); + CosineSimilarity instance = new CosineSimilarity(); + Shape shape = Shape.of(2, 3); Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(shape)); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(shape)); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = -mean(expectedLoss); testSession.evaluate(expected, loss); } @@ -71,12 +73,13 @@ public void testScalarWeighted() { float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] expectedLoss = {0.720488f, -0.3460499f}; - CosineSimilarity instance = new CosineSimilarity(tf); + CosineSimilarity instance = new CosineSimilarity(); + Shape shape = Shape.of(2, 3); Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(shape)); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(shape)); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = -mean(mul(expectedLoss, 2.3f)); testSession.evaluate(expected, loss); } @@ -90,14 +93,15 @@ public void testSampleWeighted() { float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] expectedLoss = {0.720488f, -0.3460499f}; - CosineSimilarity instance = new CosineSimilarity(tf); + CosineSimilarity instance = new CosineSimilarity(); + float[] weightsArray = {1.2f, 3.4f}; Shape shape = Shape.of(2, 3); Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(shape)); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(shape)); Operand sampleWeight = tf.reshape(tf.constant(weightsArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = -mean(mul(expectedLoss, weightsArray)); testSession.evaluate(expected, loss); } @@ -108,14 +112,15 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - CosineSimilarity instance = new CosineSimilarity(tf); + CosineSimilarity instance = new CosineSimilarity(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Shape shape = Shape.of(2, 3); Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(shape)); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(shape)); 
Operand sampleWeight = tf.constant(0f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -128,14 +133,15 @@ public void testTimestepWeighted() { Ops tf = testSession.getTF(); float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; - CosineSimilarity instance = new CosineSimilarity(tf); + CosineSimilarity instance = new CosineSimilarity(); + Shape shape = Shape.of(2, 3, 1); Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(shape)); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(shape)); float[] weightsArray = {3, 6, 5, 0, 4, 2}; Operand sampleWeight = tf.reshape(tf.constant(weightsArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = -2.0f; testSession.evaluate(expected, loss); } @@ -149,11 +155,12 @@ public void testAxis() { float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] expectedLoss = {0.720488f, -0.3460499f}; - CosineSimilarity instance = new CosineSimilarity(tf, 1); + CosineSimilarity instance = new CosineSimilarity(1); + Shape shape = Shape.of(2, 3); Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(shape)); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(shape)); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = -mean(expectedLoss); testSession.evaluate(expected, loss); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/HingeTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/HingeTest.java index 4770511207e..9ad9f35491c 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/HingeTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/HingeTest.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.junit.jupiter.api.Assertions.assertThrows; + import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,8 +23,6 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; -import static org.junit.jupiter.api.Assertions.assertThrows; - public class HingeTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -33,12 +33,13 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - Hinge instance = new Hinge(tf); + Hinge instance = new Hinge(); + float[] trueArray = {0f, 1f, 0f, 1f, 0f, 0f, 1f, 1f}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 4))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 0.50625f; testSession.evaluate(expected, loss); } @@ -56,14 +57,15 @@ public void testInvalidLabelValue() { catchClass, () -> { Ops tf = testSession.getTF(); - Hinge instance = new Hinge(tf); + Hinge instance = new Hinge(); + float[] 
trueArray = {2f, 1f, 0f, 1f, 0f, 0f, 1f, 1f}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 4))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); testSession.run(loss); }); } @@ -75,13 +77,14 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - Hinge instance = new Hinge(tf); + Hinge instance = new Hinge(); + float[] trueArray = {0f, 1f, 0f, 1f, 0f, 0f, 1f, 1f}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 4))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 1.164375f; testSession.evaluate(expected, loss); @@ -94,7 +97,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - Hinge instance = new Hinge(tf); + Hinge instance = new Hinge(); + float[] sampleArray = {1.2f, 3.4f}; float[] trueArray = {0f, 1f, 0f, 1f, 0f, 0f, 1f, 1f}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; @@ -102,7 +106,7 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 1.06125f; testSession.evaluate(expected, loss); } @@ -113,13 +117,14 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - Hinge instance = new Hinge(tf); + Hinge instance = new Hinge(); + float[] trueArray = {0f, 1f, 0f, 1f, 0f, 0f, 1f, 1f}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 4))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4))); Operand sampleWeight = tf.constant(0.F); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -130,7 +135,8 @@ public void testTimestepWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - Hinge instance = new Hinge(tf, Reduction.AUTO); + Hinge instance = new Hinge(Reduction.AUTO); + float[] sampleArray = {3f, 6f, 5f, 0f, 4f, 2f, 1f, 3f}; float[] trueArray = {0f, 1f, 0f, 1f, 0f, 0f, 1f, 1f}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; @@ -140,7 +146,7 @@ public void testTimestepWeighted() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 4))); - Operand loss = instance.call(yTrue, 
yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 2.0125f; testSession.evaluate(expected, loss); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/HuberTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/HuberTest.java index d1751f223a1..86a71e5ecbb 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/HuberTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/HuberTest.java @@ -32,8 +32,9 @@ public void testAllCorrect() { float[] trueArray = {.9f, .2f, .2f, .8f, .4f, .6f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); - Huber instance = new Huber(tf); - Operand loss = instance.call(yTrue, yTrue); + Huber instance = new Huber(); + + Operand loss = instance.call(tf, yTrue, yTrue); float expected = 0.0f; testSession.evaluate(expected, loss); } @@ -50,8 +51,9 @@ public void testUnweighted() { float[] predArray = {1.f, 0.f, 1.f, 1.f, 0.f, 0.f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Huber instance = new Huber(tf); - Operand loss = instance.call(yTrue, yPred); + Huber instance = new Huber(); + + Operand loss = instance.call(tf, yTrue, yPred); float expected = 0.10416666666666669f; testSession.evaluate(expected, loss); } @@ -67,9 +69,10 @@ public void testScalarWeighted() { float[] predArray = {1.f, 0.f, 1.f, 1.f, 0.f, 0.f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Huber instance = new Huber(tf); + Huber instance = new Huber(); + Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0.23958333333333337f; testSession.evaluate(expected, loss); @@ -87,10 +90,11 @@ public void testSampleWeighted() { float[] predArray = {1.f, 0.f, 1.f, 1.f, 0.f, 0.f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Huber instance = new Huber(tf); + Huber instance = new Huber(); + Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0.22766666666666668f; testSession.evaluate(expected, loss); } @@ -105,9 +109,10 @@ public void testZeroWeighted() { float[] predArray = {1.f, 0.f, 1.f, 1.f, 0.f, 0.f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Huber instance = new Huber(tf); + Huber instance = new Huber(); + Operand sampleWeight = tf.constant(0.F); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -125,10 +130,11 @@ public void testTimestepWeighted() { tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3, 1))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); - Huber instance = new Huber(tf); + Huber instance = new Huber(); + Operand sampleWeight = 
tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = .4025f; testSession.evaluate(expected, loss); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/KLDivergenceTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/KLDivergenceTest.java index d57b61b18dd..1d7ee87b920 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/KLDivergenceTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/KLDivergenceTest.java @@ -30,12 +30,13 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - KLDivergence instance = new KLDivergence(tf); + KLDivergence instance = new KLDivergence(); + float[] predArray = {.4f, .9f, .12f, .36f, .3f, .4f}; float[] trueArray = {.5f, .8f, .12f, .7f, .43f, .8f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 0.5960738398643668f; testSession.evaluate(expected, loss); } @@ -47,13 +48,14 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - KLDivergence instance = new KLDivergence(tf); + KLDivergence instance = new KLDivergence(); + float[] predArray = {.4f, .9f, .12f, .36f, .3f, .4f}; float[] trueArray = {.5f, .8f, .12f, .7f, .43f, .8f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 1.3709698316880434f; testSession.evaluate(expected, loss); } @@ -64,7 +66,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - KLDivergence instance = new KLDivergence(tf); + KLDivergence instance = new KLDivergence(); + float[] predArray = {.4f, .9f, .12f, .36f, .3f, .4f}; float[] trueArray = {.5f, .8f, .12f, .7f, .43f, .8f}; float[] sampleArray = {1.2f, 3.4f}; @@ -72,7 +75,7 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 2.0075711736936492f; testSession.evaluate(expected, loss); } @@ -83,13 +86,14 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - KLDivergence instance = new KLDivergence(tf); + KLDivergence instance = new KLDivergence(); + float[] predArray = {.4f, .9f, .12f, .36f, .3f, .4f}; float[] trueArray = {.5f, .8f, .12f, .7f, .43f, .8f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 
3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(0.F); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -100,7 +104,8 @@ public void testTimestepWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - KLDivergence instance = new KLDivergence(tf, Reduction.AUTO); + KLDivergence instance = new KLDivergence(Reduction.AUTO); + float[] predArray = {.4f, .9f, .12f, .36f, .3f, .4f}; float[] trueArray = {.5f, .8f, .12f, .7f, .43f, .8f}; float[] sampleArray = {3f, 6f, 5f, 0f, 4f, 2f}; @@ -110,7 +115,7 @@ public void testTimestepWeighted() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0.2495994912084345f; testSession.evaluate(expected, loss); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/LogCoshTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/LogCoshTest.java index c4347b3fccb..ce6782cee3b 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/LogCoshTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/LogCoshTest.java @@ -30,12 +30,13 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - LogCosh instance = new LogCosh(tf); + LogCosh instance = new LogCosh(); + float[] predArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 4.829245330860459f; testSession.evaluate(expected, loss); } @@ -47,13 +48,14 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - LogCosh instance = new LogCosh(tf); + LogCosh instance = new LogCosh(); + float[] predArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 11.107264260979056f; testSession.evaluate(expected, loss); } @@ -64,7 +66,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - LogCosh instance = new LogCosh(tf); + LogCosh instance = new LogCosh(); + float[] predArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {1.2f, 3.4f}; @@ -72,7 +75,7 @@ public void testSampleWeighted() { Operand yPred = 
tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 12.001114667519486f; testSession.evaluate(expected, loss); } @@ -83,13 +86,14 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - LogCosh instance = new LogCosh(tf); + LogCosh instance = new LogCosh(); + float[] predArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(0.F); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -100,7 +104,8 @@ public void testTimestepWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - LogCosh instance = new LogCosh(tf, Reduction.AUTO); + LogCosh instance = new LogCosh(Reduction.AUTO); + float[] predArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {3f, 6f, 5f, 0f, 4f, 2f}; @@ -110,7 +115,7 @@ public void testTimestepWeighted() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 11.653484271934046f; testSession.evaluate(expected, loss); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanAbsoluteErrorTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanAbsoluteErrorTest.java index 3498c6d53aa..cbcb2c37391 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanAbsoluteErrorTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanAbsoluteErrorTest.java @@ -31,10 +31,11 @@ public void testAllCorrectUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsoluteError instance = new MeanAbsoluteError(tf); + MeanAbsoluteError instance = new MeanAbsoluteError(); + float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yTrue); + Operand loss = instance.call(tf, yTrue, yTrue); float expected = 0.0f; testSession.evaluate(expected, loss); } @@ -46,12 +47,13 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsoluteError instance = new MeanAbsoluteError(tf); + MeanAbsoluteError instance = new MeanAbsoluteError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = 
tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 5.5f; testSession.evaluate(expected, loss); } @@ -63,13 +65,14 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsoluteError instance = new MeanAbsoluteError(tf); + MeanAbsoluteError instance = new MeanAbsoluteError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 12.65f; testSession.evaluate(expected, loss); } @@ -80,7 +83,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsoluteError instance = new MeanAbsoluteError(tf); + MeanAbsoluteError instance = new MeanAbsoluteError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {1.2f, 3.4f}; @@ -88,7 +92,7 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 81.4f / 6f; testSession.evaluate(expected, loss); } @@ -99,13 +103,14 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsoluteError instance = new MeanAbsoluteError(tf); + MeanAbsoluteError instance = new MeanAbsoluteError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(0.F); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -116,7 +121,8 @@ public void testTimestepWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsoluteError instance = new MeanAbsoluteError(tf, Reduction.AUTO); + MeanAbsoluteError instance = new MeanAbsoluteError(Reduction.AUTO); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {3f, 6f, 5f, 0f, 4f, 2f}; @@ -126,7 +132,7 @@ public void testTimestepWeighted() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 83f / 6f; 
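A note on the pattern these loss-test hunks exercise: the framework loss classes no longer take an Ops in their constructor; only configuration such as the Reduction is passed there, and the execution environment is supplied to call() instead. A minimal sketch of the new usage — the Operand<TFloat32> type parameters and variable names are assumptions for illustration, not part of the patch:

    Ops tf = testSession.getTF();
    // The loss object is environment-free; only the reduction (if any) is configured here.
    MeanAbsoluteError instance = new MeanAbsoluteError();
    Operand<TFloat32> yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3)));
    Operand<TFloat32> yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3)));
    // The Ops is passed at call time, matching the instance.call(tf, ...) lines in these hunks.
    Operand<TFloat32> loss = instance.call(tf, yTrue, yPred);

This corresponds to the constructor changes above, e.g. new MeanAbsoluteError(Reduction.AUTO) replacing new MeanAbsoluteError(tf, Reduction.AUTO).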
testSession.evaluate(expected, loss); @@ -141,7 +147,8 @@ public void testInvalidSampleWeight() { () -> { try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsoluteError instance = new MeanAbsoluteError(tf); + MeanAbsoluteError instance = new MeanAbsoluteError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {3f, 6f, 5f, 0f}; @@ -151,7 +158,7 @@ public void testInvalidSampleWeight() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 2))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 83f / 6f; testSession.evaluate(expected, loss); } @@ -163,13 +170,14 @@ public void testNoReduction() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsoluteError instance = new MeanAbsoluteError(tf, Reduction.NONE); + MeanAbsoluteError instance = new MeanAbsoluteError(Reduction.NONE); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); Float[] expected = {10.733333f, 14.566667f}; testSession.evaluate(expected, loss); } @@ -180,13 +188,14 @@ public void testSumReduction() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsoluteError instance = new MeanAbsoluteError(tf, Reduction.SUM); + MeanAbsoluteError instance = new MeanAbsoluteError(Reduction.SUM); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); Float[] expected = {25.29999f}; testSession.evaluate(expected, loss); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanAbsolutePercentageErrorTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanAbsolutePercentageErrorTest.java index 7816a8a288a..b521f2f5644 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanAbsolutePercentageErrorTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanAbsolutePercentageErrorTest.java @@ -30,10 +30,11 @@ public void testAllCorrectUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(tf); + MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(); + float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); - Operand loss = 
instance.call(yTrue, yTrue); + Operand loss = instance.call(tf, yTrue, yTrue); float expected = 0.0f; testSession.evaluate(expected, loss); } @@ -45,12 +46,13 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(tf); + MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 211.85184f; testSession.evaluate(expected, loss); } @@ -62,13 +64,14 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(tf); + MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 487.25922f; testSession.evaluate(expected, loss); } @@ -79,7 +82,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(tf); + MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {1.2f, 3.4f}; @@ -87,7 +91,7 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 422.8889f; testSession.evaluate(expected, loss); } @@ -98,13 +102,14 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(tf); + MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(0.F); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -115,7 +120,8 @@ public void testTimestepWeighted() { for 
(TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(tf, Reduction.AUTO); + MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(Reduction.AUTO); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {3f, 6f, 5f, 0f, 4f, 2f}; @@ -125,7 +131,7 @@ public void testTimestepWeighted() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 694.4445f; testSession.evaluate(expected, loss); } @@ -136,13 +142,14 @@ public void testNoReduction() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(tf, Reduction.NONE); + MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(Reduction.NONE); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); Float[] expected = {621.8518f, 352.66666f}; testSession.evaluate(expected, loss); } @@ -153,13 +160,14 @@ public void testSumReduction() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(tf, Reduction.SUM); + MeanAbsolutePercentageError instance = new MeanAbsolutePercentageError(Reduction.SUM); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 974.51843f; testSession.evaluate(expected, loss); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanSquaredErrorTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanSquaredErrorTest.java index 1a971f0492b..e9fd0d7e349 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanSquaredErrorTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanSquaredErrorTest.java @@ -31,10 +31,11 @@ public void testAllCorrectUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredError instance = new MeanSquaredError(tf); + MeanSquaredError instance = new MeanSquaredError(); + float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, 
yTrue); + Operand loss = instance.call(tf, yTrue, yTrue); float expected = 0.0f; testSession.evaluate(expected, loss); } @@ -46,12 +47,13 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredError instance = new MeanSquaredError(tf); + MeanSquaredError instance = new MeanSquaredError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 49.5f; testSession.evaluate(expected, loss); } @@ -63,13 +65,14 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredError instance = new MeanSquaredError(tf); + MeanSquaredError instance = new MeanSquaredError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 113.85f; testSession.evaluate(expected, loss); } @@ -80,7 +83,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredError instance = new MeanSquaredError(tf); + MeanSquaredError instance = new MeanSquaredError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {1.2f, 3.4f}; @@ -88,7 +92,7 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 127.96667f; testSession.evaluate(expected, loss); } @@ -99,13 +103,14 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredError instance = new MeanSquaredError(tf); + MeanSquaredError instance = new MeanSquaredError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(0.F); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -116,7 +121,8 @@ public void testTimestepWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredError instance = new MeanSquaredError(tf, 
Reduction.AUTO); + MeanSquaredError instance = new MeanSquaredError(Reduction.AUTO); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {3f, 6f, 5f, 0f, 4f, 2f}; @@ -126,7 +132,7 @@ public void testTimestepWeighted() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 97.833336f; testSession.evaluate(expected, loss); @@ -141,7 +147,8 @@ public void testInvalidSampleWeight() { () -> { try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredError instance = new MeanSquaredError(tf); + MeanSquaredError instance = new MeanSquaredError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {3f, 6f, 5f, 0f}; @@ -151,7 +158,7 @@ public void testInvalidSampleWeight() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 2))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 173.25f; testSession.evaluate(expected, loss); } @@ -163,13 +170,14 @@ public void testNoReduction() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredError instance = new MeanSquaredError(tf, Reduction.NONE); + MeanSquaredError instance = new MeanSquaredError(Reduction.NONE); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); Float[] expected = {84.333336f, 143.36665f}; testSession.evaluate(expected, loss); } @@ -180,13 +188,14 @@ public void testSumReduction() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredError instance = new MeanSquaredError(tf, Reduction.SUM); + MeanSquaredError instance = new MeanSquaredError(Reduction.SUM); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); Float[] expected = {227.69998f}; testSession.evaluate(expected, loss); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanSquaredLogarithmicErrorTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanSquaredLogarithmicErrorTest.java index 558f9c84659..0c6d411c53f 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanSquaredLogarithmicErrorTest.java +++ 
b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/MeanSquaredLogarithmicErrorTest.java @@ -31,10 +31,11 @@ public void testAllCorrectUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(tf); + MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(); + float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yTrue); + Operand loss = instance.call(tf, yTrue, yTrue); float expected = 0.0f; testSession.evaluate(expected, loss); } @@ -46,12 +47,13 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(tf); + MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 1.4370421f; testSession.evaluate(expected, loss); } @@ -63,13 +65,14 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(tf); + MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 3.3051968f; testSession.evaluate(expected, loss); } @@ -80,7 +83,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(tf); + MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {1.2f, 3.4f}; @@ -88,7 +92,7 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 3.7856376f; testSession.evaluate(expected, loss); } @@ -99,13 +103,14 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(tf); + 
MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(0.F); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -116,7 +121,8 @@ public void testTimestepWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(tf, Reduction.AUTO); + MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(Reduction.AUTO); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {3f, 6f, 5f, 0f, 4f, 2f}; @@ -126,7 +132,7 @@ public void testTimestepWeighted() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 2.647374f; testSession.evaluate(expected, loss); @@ -141,7 +147,8 @@ public void testInvalidSampleWeight() { () -> { try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(tf); + MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {3f, 6f, 5f, 0f}; @@ -151,7 +158,7 @@ public void testInvalidSampleWeight() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 2))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 83f / 6f; testSession.evaluate(expected, loss); } @@ -163,13 +170,14 @@ public void testNoReduction() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(tf, Reduction.NONE); + MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(Reduction.NONE); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); Float[] expected = {2.3006392f, 4.3097544f}; testSession.evaluate(expected, loss); } @@ -180,13 +188,14 @@ public void testSumReduction() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(tf, Reduction.SUM); + 
MeanSquaredLogarithmicError instance = new MeanSquaredLogarithmicError(Reduction.SUM); + float[] trueArray = {1f, 9f, 2f, -5f, -2f, 6f}; float[] predArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); Float[] expected = {6.6103935f}; testSession.evaluate(expected, loss); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/PoissonTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/PoissonTest.java index 55c59ca5ac6..c354c83bfe2 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/PoissonTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/PoissonTest.java @@ -30,12 +30,13 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - Poisson instance = new Poisson(tf); + Poisson instance = new Poisson(); + float[] predArray = {1f, 9f, 2f, 5f, 2f, 6f}; float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = -3.306581945521002f; testSession.evaluate(expected, loss); } @@ -47,13 +48,14 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - Poisson instance = new Poisson(tf); + Poisson instance = new Poisson(); + float[] predArray = {1f, 9f, 2f, 5f, 2f, 6f}; float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = -7.605138474698304f; testSession.evaluate(expected, loss); } @@ -64,7 +66,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - Poisson instance = new Poisson(tf); + Poisson instance = new Poisson(); + float[] predArray = {1f, 9f, 2f, 5f, 2f, 6f}; float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {1.2f, 3.4f}; @@ -72,7 +75,7 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = -6.147338926788071f; testSession.evaluate(expected, loss); } @@ -83,13 +86,14 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - Poisson instance = new Poisson(tf); + Poisson instance = new Poisson(); + 
float[] predArray = {1f, 9f, 2f, 5f, 2f, 6f}; float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 3))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3))); Operand sampleWeight = tf.constant(0.F); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -100,7 +104,8 @@ public void testTimestepWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - Poisson instance = new Poisson(tf, Reduction.AUTO); + Poisson instance = new Poisson(Reduction.AUTO); + float[] predArray = {1f, 9f, 2f, 5f, 2f, 6f}; float[] trueArray = {4f, 8f, 12f, 8f, 1f, 3f}; float[] sampleArray = {3f, 6f, 5f, 0f, 4f, 2f}; @@ -110,7 +115,7 @@ public void testTimestepWeighted() { tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 3, 1))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 3))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = -12.263126013890561f; testSession.evaluate(expected, loss); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/SparseCategoricalCrossentropyTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/SparseCategoricalCrossentropyTest.java index a6a0ff35c78..d3fdcff03b7 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/SparseCategoricalCrossentropyTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/SparseCategoricalCrossentropyTest.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.junit.jupiter.api.Assertions.assertThrows; + import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -23,8 +25,6 @@ import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; -import static org.junit.jupiter.api.Assertions.assertThrows; - public class SparseCategoricalCrossentropyTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -44,8 +44,9 @@ public void testAllCorrectUnweighted() { }; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(3, 1))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(3, 3))); - SparseCategoricalCrossentropy instance = new SparseCategoricalCrossentropy(tf); - Operand loss = instance.call(yTrue, yPred); + SparseCategoricalCrossentropy instance = new SparseCategoricalCrossentropy(); + + Operand loss = instance.call(tf, yTrue, yPred); float expected = 0.0f; testSession.evaluate(expected, loss); @@ -57,8 +58,9 @@ public void testAllCorrectUnweighted() { }; Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); - instance = new SparseCategoricalCrossentropy(tf, true); - loss = instance.call(yTrue, logits); + instance = new SparseCategoricalCrossentropy(true); + + loss = instance.call(tf, yTrue, logits); testSession.evaluate(0.0f, loss); } } @@ -75,7 +77,8 @@ public void testInvalidPredictionsRange() { catchClass, () -> { Ops tf = testSession.getTF(); - SparseCategoricalCrossentropy instance = new 
SparseCategoricalCrossentropy(tf); + SparseCategoricalCrossentropy instance = new SparseCategoricalCrossentropy(); + int[] trueArray = {0, 1, 2}; float[] predArray = { 1.9f, .05f, .05f, @@ -86,7 +89,7 @@ public void testInvalidPredictionsRange() { tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(3, 1))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(3, 3))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); testSession.run(loss); }); } @@ -98,7 +101,8 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - SparseCategoricalCrossentropy instance = new SparseCategoricalCrossentropy(tf); + SparseCategoricalCrossentropy instance = new SparseCategoricalCrossentropy(); + int[] trueArray = {0, 1, 2}; float[] predArray = { .9f, .05f, .05f, @@ -107,7 +111,7 @@ public void testUnweighted() { }; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(3, 1))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(3, 3))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 0.32396814f; testSession.evaluate(expected, loss); @@ -119,8 +123,9 @@ public void testUnweighted() { }; Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); - instance = new SparseCategoricalCrossentropy(tf, true); - loss = instance.call(yTrue, logits); + instance = new SparseCategoricalCrossentropy(true); + + loss = instance.call(tf, yTrue, logits); expected = 0.05737559f; testSession.evaluate(expected, loss); } @@ -143,8 +148,9 @@ public void testScalarWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(3, 3))); Operand sampleWeight = tf.constant(2.3f); - SparseCategoricalCrossentropy instance = new SparseCategoricalCrossentropy(tf); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + SparseCategoricalCrossentropy instance = new SparseCategoricalCrossentropy(); + + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = .7451267f; testSession.evaluate(expected, loss); @@ -156,8 +162,9 @@ public void testScalarWeighted() { }; Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); - instance = new SparseCategoricalCrossentropy(tf, true); - loss = instance.call(yTrue, logits, sampleWeight); + instance = new SparseCategoricalCrossentropy(true); + + loss = instance.call(tf, yTrue, logits, sampleWeight); expected = 0.13196386f; testSession.evaluate(expected, loss); } @@ -168,7 +175,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - SparseCategoricalCrossentropy instance = new SparseCategoricalCrossentropy(tf); + SparseCategoricalCrossentropy instance = new SparseCategoricalCrossentropy(); + float[] sampleWeightArray = {1.2f, 3.4f, 5.6f}; int[] trueArray = {0, 1, 2}; float[] predArray = { @@ -180,7 +188,7 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(3, 3))); Operand sampleWeight = tf.reshape(tf.constant(sampleWeightArray), tf.constant(Shape.of(3, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 1.0696f; 
testSession.evaluate(expected, loss); @@ -192,8 +200,9 @@ public void testSampleWeighted() { }; Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); - instance = new SparseCategoricalCrossentropy(tf, true); - loss = instance.call(yTrue, logits, sampleWeight); + instance = new SparseCategoricalCrossentropy(true); + + loss = instance.call(tf, yTrue, logits, sampleWeight); expected = 0.31829f; testSession.evaluate(expected, loss); } @@ -216,8 +225,9 @@ public void testNoReduction() { Operand logits = tf.reshape(tf.constant(logitsArray), tf.constant(Shape.of(3, 3))); SparseCategoricalCrossentropy instance = - new SparseCategoricalCrossentropy(tf, true, Reduction.NONE); - Operand loss = instance.call(yTrue, logits); + new SparseCategoricalCrossentropy(true, Reduction.NONE); + + Operand loss = instance.call(tf, yTrue, logits); Float[] expected = {0.001822f, 0.000459f, 0.169846f}; testSession.evaluate(expected, loss); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/SquaredHingeTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/SquaredHingeTest.java index 57a012bbe9d..533e1179f7d 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/SquaredHingeTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/losses/SquaredHingeTest.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.junit.jupiter.api.Assertions.assertThrows; + import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,8 +23,6 @@ import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; -import static org.junit.jupiter.api.Assertions.assertThrows; - public class SquaredHingeTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -32,12 +32,13 @@ public void testUnweighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - SquaredHinge instance = new SquaredHinge(tf); + SquaredHinge instance = new SquaredHinge(); + float[] trueArray = {0, 1, 0, 1, 0, 0, 1, 1}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 4))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); float expected = 0.364062f; testSession.evaluate(expected, loss); } @@ -55,14 +56,15 @@ public void testInvalidLabelValue() { catchClass, () -> { Ops tf = testSession.getTF(); - SquaredHinge instance = new SquaredHinge(tf); + SquaredHinge instance = new SquaredHinge(); + float[] trueArray = {0, 2, 0, 1, 0, 0, 1, 1}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 4))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4))); - Operand loss = instance.call(yTrue, yPred); + Operand loss = instance.call(tf, yTrue, yPred); testSession.run(loss); }); } @@ -74,13 +76,14 @@ public void testScalarWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - SquaredHinge instance = new 
SquaredHinge(tf); + SquaredHinge instance = new SquaredHinge(); + float[] trueArray = {0, 1, 0, 1, 0, 0, 1, 1}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 4))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4))); Operand sampleWeight = tf.constant(2.3f); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0.8373437f; testSession.evaluate(expected, loss); } @@ -91,7 +94,8 @@ public void testSampleWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - SquaredHinge instance = new SquaredHinge(tf); + SquaredHinge instance = new SquaredHinge(); + float[] sampleArray = {1.2f, 3.4f}; float[] trueArray = {0, 1, 0, 1, 0, 0, 1, 1}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; @@ -99,7 +103,7 @@ public void testSampleWeighted() { Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4))); Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 1))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0.7043125f; testSession.evaluate(expected, loss); } @@ -110,13 +114,14 @@ public void testZeroWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - SquaredHinge instance = new SquaredHinge(tf); + SquaredHinge instance = new SquaredHinge(); + float[] trueArray = {0, 1, 0, 1, 0, 0, 1, 1}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; Operand yTrue = tf.reshape(tf.constant(trueArray), tf.constant(Shape.of(2, 4))); Operand yPred = tf.reshape(tf.constant(predArray), tf.constant(Shape.of(2, 4))); Operand sampleWeight = tf.constant(0.F); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 0f; testSession.evaluate(expected, loss); } @@ -127,7 +132,8 @@ public void testTimestepWeighted() { for (TestSession.Mode tfMode : tfModes) try (TestSession testSession = TestSession.createTestSession(tfMode)) { Ops tf = testSession.getTF(); - SquaredHinge instance = new SquaredHinge(tf, Reduction.AUTO); + SquaredHinge instance = new SquaredHinge(Reduction.AUTO); + float[] trueArray = {0, 1, 0, 1, 0, 0, 1, 1}; float[] predArray = {-0.3f, 0.2f, -0.1f, 1.6f, -0.25f, -1.f, 0.5f, 0.6f}; Operand yTrue = @@ -137,7 +143,7 @@ public void testTimestepWeighted() { float[] sampleArray = {3f, 6f, 5f, 0f, 4f, 2f, 1f, 3f}; Operand sampleWeight = tf.reshape(tf.constant(sampleArray), tf.constant(Shape.of(2, 4))); - Operand loss = instance.call(yTrue, yPred, sampleWeight); + Operand loss = instance.call(tf, yTrue, yPred, sampleWeight); float expected = 1.54250000f; testSession.evaluate(expected, loss); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java index d6786b71972..17188499ee7 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java 
@@ -1,13 +1,21 @@ package org.tensorflow.framework.optimizers; -import org.junit.jupiter.api.*; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.util.ArrayList; +import java.util.List; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import org.tensorflow.Graph; import org.tensorflow.Session; import org.tensorflow.Tensor; import org.tensorflow.framework.initializers.Glorot; import org.tensorflow.framework.initializers.VarianceScaling; import org.tensorflow.framework.utils.TestSession; -import org.tensorflow.ndarray.FloatNdArray; import org.tensorflow.ndarray.Shape; import org.tensorflow.ndarray.buffer.DataBuffers; import org.tensorflow.op.Op; @@ -25,13 +33,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TType; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; - /** Test cases for GradientDescent Optimizer */ public class GradientDescentTest { private final TestSession.Mode tfMode = TestSession.Mode.GRAPH; @@ -136,14 +137,14 @@ public void testDeterminism() { Ops tf = Ops.create(g); Glorot initializer = - new Glorot<>(tf, VarianceScaling.Distribution.TRUNCATED_NORMAL, 1L); + new Glorot<>(VarianceScaling.Distribution.TRUNCATED_NORMAL, 1L); // Inputs Placeholder input = tf.withName("input").placeholder(TFloat32.class, Placeholder.shape(Shape.of(-1, 20))); // Fully connected layer Variable fcWeights = - tf.variable(initializer.call(tf.array(20L, 200L), TFloat32.class)); + tf.variable(initializer.call(tf, tf.array(20L, 200L), TFloat32.class)); fcWeightName = fcWeights.op().name(); Variable fcBiases = tf.variable(tf.fill(tf.array(200), tf.constant(0.1f))); fcBiasName = fcBiases.op().name(); @@ -151,13 +152,13 @@ public void testDeterminism() { // Output layer Variable outputWeights = - tf.variable(initializer.call(tf.array(200L, 2L), TFloat32.class)); + tf.variable(initializer.call(tf, tf.array(200L, 2L), TFloat32.class)); outputWeightName = outputWeights.op().name(); Variable outputBiases = tf.variable(tf.fill(tf.array(2L), tf.constant(0.1f))); outputBiasName = outputBiases.op().name(); Add output = tf.math.add(tf.linalg.matMul(relu, outputWeights), outputBiases); - // Loss + // AbstractLoss Placeholder placeholder = tf.withName("output").placeholder(TFloat32.class, Placeholder.shape(Shape.of(-1, 2))); Mean loss = @@ -205,12 +206,15 @@ public void testDeterminism() { .fetch(outputBiasName) .run()); - TFloat32 lossVal = (TFloat32) s.runner() - .addTarget(trainName) - .feed("input", dataTensor) - .feed("output", targetTensor) - .fetch(lossName) - .run().get(0); + TFloat32 lossVal = + (TFloat32) + s.runner() + .addTarget(trainName) + .feed("input", dataTensor) + .feed("output", targetTensor) + .fetch(lossName) + .run() + .get(0); initialLoss[i] = lossVal.getFloat(); lossVal.close(); @@ -222,12 +226,15 @@ public void testDeterminism() { .fetch(outputBiasName) .run()); - lossVal = (TFloat32) s.runner() - .addTarget(trainName) - .feed("input", dataTensor) - .feed("output", targetTensor) - .fetch(lossName) - .run().get(0); + lossVal = + (TFloat32) + s.runner() + .addTarget(trainName) + .feed("input", dataTensor) + .feed("output", targetTensor) + .fetch(lossName) + .run() + .get(0); 
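The GradientDescentTest hunks above show the same migration for initializers: Glorot is constructed without an Ops and receives it in call(). A short sketch mirroring those hunks, with the TFloat32 type parameters assumed for illustration:

    // Configuration only; no Ops is captured by the initializer.
    Glorot<TFloat32> initializer =
        new Glorot<>(VarianceScaling.Distribution.TRUNCATED_NORMAL, 1L);
    // The Ops, the dimensions operand, and the data type are passed when the initializer is invoked.
    Variable<TFloat32> fcWeights =
        tf.variable(initializer.call(tf, tf.array(20L, 200L), TFloat32.class));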
postTrainingLoss[i] = lossVal.getFloat(); lossVal.close(); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L1L2Test.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L1L2Test.java index 181ae367f07..00da1f7e789 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L1L2Test.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L1L2Test.java @@ -1,5 +1,7 @@ package org.tensorflow.framework.regularizers; +import static org.junit.jupiter.api.Assertions.assertEquals; + import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -7,8 +9,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import static org.junit.jupiter.api.Assertions.assertEquals; - class L1L2Test extends CommonTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -17,25 +17,25 @@ public void testCreate() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1L2 instance = new L1L2(tf, 0.2f, 0.3f); + L1L2 instance = new L1L2(0.2f, 0.3f); assertEquals(0.2f, instance.getL1()); assertEquals(0.3f, instance.getL2()); - instance = new L1L2(tf, 0, 0); + instance = new L1L2(0, 0); assertEquals(0.f, instance.getL1()); assertEquals(0.f, instance.getL2()); - instance = new L1L2(tf, 0.5f, 0); + instance = new L1L2(0.5f, 0); assertEquals(0.5f, instance.getL1()); assertEquals(0.f, instance.getL2()); - instance = new L1L2(tf, 0, 0.5f); + instance = new L1L2(0, 0.5f); assertEquals(0.f, instance.getL1()); assertEquals(0.5f, instance.getL2()); - instance = new L1L2(tf); - assertEquals(Regularizer.DEFAULT_REGULARIZATION_PENALTY, instance.getL1()); - assertEquals(Regularizer.DEFAULT_REGULARIZATION_PENALTY, instance.getL2()); + instance = new L1L2(); + assertEquals(AbstractRegularizer.DEFAULT_REGULARIZATION_PENALTY, instance.getL1()); + assertEquals(AbstractRegularizer.DEFAULT_REGULARIZATION_PENALTY, instance.getL2()); } } @@ -44,8 +44,8 @@ public void testCallDefaultsConstant() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1L2 instance = new L1L2(tf); - Operand result = instance.call(tf.constant(555f)); + L1L2 instance = new L1L2(); + Operand result = instance.call(tf, tf.constant(555f)); session.evaluate(3085.8f, result); } } @@ -55,10 +55,10 @@ public void testCallL1L2_0() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1L2 instance = new L1L2(tf, 0, 0); + L1L2 instance = new L1L2(0, 0); Operand weights = tf.constant(new float[][] {{1.0f, 0.9f, 0.8f}, {1.2f, 0.7f, 1.1f}}); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); session.evaluate(0, result); } } @@ -68,10 +68,10 @@ public void testCallL1L2TFloat32() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1L2 instance = new L1L2(tf, 0.01f, 0.02f); + L1L2 instance = new L1L2(0.01f, 0.02f); float[][] w = {{1.0f, 0.9f, 0.8f}, {1.2f, 0.7f, 1.1f}}; Operand weights = tf.constant(w); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); float expected = regularizeL1L2(w, 0.01f, 0.02f); 
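The regularizer tests follow the same shape: penalty factors are fixed at construction, and call() takes the Ops together with the weights. A minimal sketch, assuming TFloat32 weights as in the L1L2Test hunks around this point:

    Ops tf = session.getTF();
    L1L2 instance = new L1L2(0.01f, 0.02f);  // L1 and L2 penalty factors only; no Ops held
    Operand<TFloat32> weights =
        tf.constant(new float[][] {{1.0f, 0.9f, 0.8f}, {1.2f, 0.7f, 1.1f}});
    // Returns the scalar regularization penalty computed for the given weights.
    Operand<TFloat32> result = instance.call(tf, weights);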
session.setEpsilon(.09f); session.evaluate(expected, result); @@ -83,10 +83,10 @@ public void testCallL1L2TFloat64() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1L2 instance = new L1L2(tf, 0.01f, 0.02f); + L1L2 instance = new L1L2(0.01f, 0.02f); double[][] w = {{1.0, 0.9, 0.8}, {1.2, 0.7, 1.1}}; Operand weights = tf.constant(w); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); double expected = regularizeL1L2(w, 0.01f, 0.02f); session.setEpsilon(.09f); session.evaluate(expected, result); @@ -98,10 +98,10 @@ public void testCallL2_0() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1L2 instance = new L1L2(tf, 0.01f, 0); + L1L2 instance = new L1L2(0.01f, 0); float[][] w = {{1.0f, 0.9f, 0.8f}, {1.2f, 0.7f, 1.1f}}; Operand weights = tf.constant(w); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); float expected = regularizeL1(w, 0.01f); session.evaluate(expected, result); } @@ -112,10 +112,10 @@ public void testCallL1_0() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1L2 instance = new L1L2(tf, 0, 0.02f); + L1L2 instance = new L1L2(0, 0.02f); double[][] w = {{1.0, 0.9, 0.8}, {1.2, 0.7, 1.1}}; Operand weights = tf.constant(w); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); double expected = regularizeL2(w, 0.02f); session.setEpsilon(.01f); session.evaluate(expected, result); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L1Test.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L1Test.java index 0e42a257816..9a5efe2437e 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L1Test.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L1Test.java @@ -1,5 +1,7 @@ package org.tensorflow.framework.regularizers; +import static org.junit.jupiter.api.Assertions.assertEquals; + import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -7,8 +9,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import static org.junit.jupiter.api.Assertions.assertEquals; - class L1Test extends CommonTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -17,16 +17,16 @@ public void testCreate() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1 instance = new L1(tf, 0.2f); + L1 instance = new L1(0.2f); assertEquals(0.2f, instance.getL1()); assertEquals(0.f, instance.getL2()); - instance = new L1(tf, 0f); + instance = new L1(0f); assertEquals(0.f, instance.getL1()); assertEquals(0.f, instance.getL2()); - instance = new L1(tf); - assertEquals(Regularizer.DEFAULT_REGULARIZATION_PENALTY, instance.getL1()); + instance = new L1(); + assertEquals(AbstractRegularizer.DEFAULT_REGULARIZATION_PENALTY, instance.getL1()); assertEquals(0.f, instance.getL2()); } } @@ -36,10 +36,10 @@ public void testCallL10() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1 instance = new L1(tf, 0.0f); + L1 
instance = new L1(0.0f); float[][] w = {{1.0f, 0.9f, 0.8f}, {1.2f, 0.7f, 1.1f}}; Operand weights = tf.constant(w); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); session.evaluate(0f, result); } } @@ -49,11 +49,11 @@ public void testCallL1TFloat32() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1 instance = new L1(tf); + L1 instance = new L1(); float[][] w = {{1.0f, 0.9f, 0.8f}, {1.2f, 0.7f, 1.1f}}; Operand weights = tf.constant(w); - Operand result = instance.call(weights); - float expected = regularizeL1(w, Regularizer.DEFAULT_REGULARIZATION_PENALTY); + Operand result = instance.call(tf, weights); + float expected = regularizeL1(w, AbstractRegularizer.DEFAULT_REGULARIZATION_PENALTY); session.evaluate(expected, result); } } @@ -63,10 +63,10 @@ public void testCallL1TFloat64() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1 instance = new L1(tf, 0.02f); + L1 instance = new L1(0.02f); double[][] w = {{1.0, 0.9, 0.8}, {1.2, 0.7, 1.1}}; Operand weights = tf.constant(w); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); double expected = regularizeL1(w, 0.02f); session.evaluate(expected, result); } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L2Test.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L2Test.java index aba036ee306..6153c36c38c 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L2Test.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/L2Test.java @@ -1,5 +1,7 @@ package org.tensorflow.framework.regularizers; +import static org.junit.jupiter.api.Assertions.assertEquals; + import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -7,8 +9,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -import static org.junit.jupiter.api.Assertions.assertEquals; - class L2Test extends CommonTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -17,16 +17,16 @@ public void testCreate() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L2 instance = new L2(tf, 0.2f); + L2 instance = new L2(0.2f); assertEquals(0.2f, instance.getL2()); assertEquals(0.f, instance.getL1()); - instance = new L2(tf, 0f); + instance = new L2(0f); assertEquals(0.f, instance.getL2()); assertEquals(0.f, instance.getL1()); - L2 instance64 = new L2(tf); - assertEquals(Regularizer.DEFAULT_REGULARIZATION_PENALTY, instance64.getL2()); + L2 instance64 = new L2(); + assertEquals(AbstractRegularizer.DEFAULT_REGULARIZATION_PENALTY, instance64.getL2()); assertEquals(0.f, instance64.getL1()); } } @@ -36,10 +36,10 @@ public void testCallL20() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L2 instance = new L2(tf, 0.0f); + L2 instance = new L2(0.0f); float[][] w = {{1.0f, 0.9f, 0.8f}, {1.2f, 0.7f, 1.1f}}; Operand weights = tf.constant(w); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); session.evaluate(0, result); } } @@ -49,11 +49,11 @@ public void testCallL2TFloat32() { for 
(TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L2 instance = new L2(tf); + L2 instance = new L2(); float[][] w = {{1.0f, 0.9f, 0.8f}, {1.2f, 0.7f, 1.1f}}; Operand weights = tf.constant(w); - Operand result = instance.call(weights); - float expected = regularizeL2(w, Regularizer.DEFAULT_REGULARIZATION_PENALTY); + Operand result = instance.call(tf, weights); + float expected = regularizeL2(w, AbstractRegularizer.DEFAULT_REGULARIZATION_PENALTY); session.setEpsilon(.01f); session.evaluate(expected, result); } @@ -64,10 +64,10 @@ public void testCallL2TFloat64() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L2 instance = new L2(tf, 0.02f); + L2 instance = new L2(0.02f); double[][] w = {{1.0, 0.9, 0.8}, {1.2, 0.7, 1.1}}; Operand weights = tf.constant(w); - Operand result = instance.call(weights); + Operand result = instance.call(tf, weights); double expected = regularizeL2(w, 0.02f); session.setEpsilon(.01f); session.evaluate(expected, result); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/RegularizerLossTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/RegularizerLossTest.java index fe2624cec3d..6918f631e6a 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/RegularizerLossTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/regularizers/RegularizerLossTest.java @@ -14,13 +14,13 @@ public void testCreate() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - L1L2 regularizer = new L1L2(tf, 0.01f, 0f); + L1L2 regularizer = new L1L2(0.01f, 0f); float[][] w = {{1.0f, 0.9f, 0.8f}, {1.2f, 0.7f, 1.1f}}; Operand weights = tf.constant(w); - Operand regularizerResult = regularizer.call(weights); - RegularizerLoss lossInstance = new RegularizerLoss(tf, regularizer); + Operand regularizerResult = regularizer.call(tf, weights); + RegularizerLoss lossInstance = new RegularizerLoss(regularizer); - Operand loss = lossInstance.call(null, null, weights); + Operand loss = lossInstance.call(tf, null, null, weights); session.evaluate(regularizerResult, loss); } } From 7b5a1ca635a6e45cb091e0abae8fafc061553b02 Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Tue, 8 Jun 2021 13:08:32 -0400 Subject: [PATCH 34/60] Skip tests in check-format job --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 72c045818fe..9a5e466d990 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -62,7 +62,7 @@ jobs: export JAVA_HOME=$(dirname $(dirname $(readlink $(readlink $(which javac))))) echo $JAVA_HOME mvn -version - mvn clean install -Pdev,jdk11 -B -U -e -Dlint.skip=true + mvn clean install -Pdev,jdk11 -B -U -e -Dlint.skip=true -Dmaven.test.skip=true - name: Run format checks run: | mvn spotless:check -Pdev,jdk11 -B -U -e From cea76cdf7be49f643b0865770c10b6502f615e84 Mon Sep 17 00:00:00 2001 From: Samuel Audet Date: Thu, 10 Jun 2021 22:05:44 +0900 Subject: [PATCH 35/60] Upgrade for TensorFlow 2.5.0 (#303) * Upgrade for TensorFlow 2.5.0 * Upgrade version of bazel to build TF2.5 * Disable libiomp5 path fix on MacOSX * Disabling MKL builds on all platforms Co-authored-by: klessard --- .github/workflows/ci.yml | 12 +- 
tensorflow-core/pom.xml | 5 +- tensorflow-core/tensorflow-core-api/.bazelrc | 34 +- tensorflow-core/tensorflow-core-api/WORKSPACE | 33 +- tensorflow-core/tensorflow-core-api/build.sh | 13 +- .../external/tensorflow-proto.patch | 149 +- .../external/tensorflow-visibility.patch | 34 +- .../api_def_CollectiveBcastRecvV2.pbtxt | 6 + .../api_def_CollectiveBcastSendV2.pbtxt | 6 + .../api_def_DataServiceDatasetV2.pbtxt | 6 + .../api_def/api_def_FinalizeDataset.pbtxt | 6 + .../bazel/api_def/api_def_GetOptions.pbtxt | 6 + ...mbeddingFrequencyEstimatorParameters.pbtxt | 6 + ...ncyEstimatorParametersGradAccumDebug.pbtxt | 6 + .../api_def/api_def_OptionsDataset.pbtxt | 6 + .../api_def_ParallelBatchDataset.pbtxt | 6 + ...mbeddingFrequencyEstimatorParameters.pbtxt | 6 + ...ncyEstimatorParametersGradAccumDebug.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscAbs.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscAdd.pbtxt | 6 + .../api_def_RiscBinaryArithmetic.pbtxt | 6 + .../api_def_RiscBinaryComparison.pbtxt | 6 + .../bazel/api_def/api_def_RiscBitcast.pbtxt | 6 + .../bazel/api_def/api_def_RiscBroadcast.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscCast.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscCeil.pbtxt | 6 + .../bazel/api_def/api_def_RiscCholesky.pbtxt | 6 + .../bazel/api_def/api_def_RiscConcat.pbtxt | 6 + .../bazel/api_def/api_def_RiscCondition.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscConv.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscCos.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscDiv.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscDot.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscExp.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscFft.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscFloor.pbtxt | 6 + .../bazel/api_def/api_def_RiscGather.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscImag.pbtxt | 6 + .../bazel/api_def/api_def_RiscIsFinite.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscLog.pbtxt | 6 + .../api_def/api_def_RiscLogicalAnd.pbtxt | 6 + .../api_def/api_def_RiscLogicalNot.pbtxt | 6 + .../bazel/api_def/api_def_RiscLogicalOr.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscMax.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscMin.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscMul.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscNeg.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscPad.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscPool.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscPow.pbtxt | 6 + .../api_def/api_def_RiscRandomUniform.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscReal.pbtxt | 6 + .../bazel/api_def/api_def_RiscReduce.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscRem.pbtxt | 6 + .../bazel/api_def/api_def_RiscReshape.pbtxt | 6 + .../bazel/api_def/api_def_RiscReverse.pbtxt | 6 + .../bazel/api_def/api_def_RiscScatter.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscShape.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscSign.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscSlice.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscSort.pbtxt | 6 + .../bazel/api_def/api_def_RiscSqueeze.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscSub.pbtxt | 6 + .../bazel/api_def/api_def_RiscTranspose.pbtxt | 6 + .../api_def/api_def_RiscTriangularSolve.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscUnary.pbtxt | 6 + .../src/bazel/api_def/api_def_RiscWhile.pbtxt | 6 + .../api_def_StatelessRandomGetAlg.pbtxt | 6 + ...api_def_StatelessRandomGetKeyCounter.pbtxt | 6 + .../api_def/api_def_TPUReshardVariables.pbtxt | 6 + .../src/bazel/api_def/api_def_XlaConvV2.pbtxt | 3 + .../src/bazel/api_def/api_def_XlaDotV2.pbtxt | 3 + 
.../api_def_XlaSetDynamicDimensionSize.pbtxt | 3 + .../api_def/api_def_XlaVariadicSort.pbtxt | 3 + .../src/bazel/api_def/import/api_import.cc | 18 +- .../tensorflow/op/DataExperimentalOps.java | 72 - .../org/tensorflow/op/DataOps.java | 3 - .../org/tensorflow/op/ImageOps.java | 4 +- .../annotations/org/tensorflow/op/IoOps.java | 13 +- .../org/tensorflow/op/LinalgOps.java | 16 +- .../org/tensorflow/op/MathOps.java | 2 +- .../annotations/org/tensorflow/op/NnOps.java | 26 +- .../annotations/org/tensorflow/op/Ops.java | 314 +- .../annotations/org/tensorflow/op/XlaOps.java | 15 +- .../internal/c_api/global/tensorflow.java | 362 +- .../tensorflow/op/collective/GatherV2.java | 32 +- .../tensorflow/op/collective/ReduceV2.java | 33 +- .../op/core/InitializeTableFromTextFile.java | 26 + .../org/tensorflow/op/core/InplaceAdd.java | 3 +- .../op/core/RemoteFusedGraphExecute.java | 99 - .../gen/java/org/tensorflow/op/core/Rpc.java | 228 - .../org/tensorflow/op/core/StopGradient.java | 34 +- .../org/tensorflow/op/core/TensorArray.java | 4 +- .../op/core/TensorForestTreePredict.java | 82 - .../TensorForestTreeResourceHandleOp.java | 141 - .../org/tensorflow/op/core/TopKUnique.java | 4 +- .../tensorflow/op/core/TopKWithUnique.java | 11 +- .../java/org/tensorflow/op/core/TryRpc.java | 252 - .../tensorflow/op/core/UniqueWithCounts.java | 24 +- .../org/tensorflow/op/core/XlaConvV2.java | 108 + .../java/org/tensorflow/op/core/XlaDotV2.java | 94 + .../op/core/XlaSetDynamicDimensionSize.java | 91 + .../data/experimental/DataServiceDataset.java | 32 +- .../op/debugging/CheckNumerics.java | 6 +- .../op/image/CombinedNonMaxSuppression.java | 4 +- .../tensorflow/op/image/ExtractGlimpse.java | 4 +- .../op/image/ImageProjectiveTransformV3.java | 4 +- .../tensorflow/op/io/DecodeJsonExample.java | 13 +- .../org/tensorflow/op/linalg/BandPart.java | 16 +- .../java/org/tensorflow/op/math/Softplus.java | 2 +- .../gen/java/org/tensorflow/op/nn/Elu.java | 22 +- .../gen/java/org/tensorflow/op/nn/Relu.java | 4 +- .../op/ragged/RaggedTensorToTensor.java | 2 +- .../op/rawops/CollectiveBcastRecvV2.java | 157 + .../op/rawops/CollectiveBcastSendV2.java | 153 + .../op/rawops/DataServiceDatasetV2.java | 176 + .../tensorflow/op/rawops/FinalizeDataset.java | 129 + .../GetOptions.java} | 40 +- ...EmbeddingFrequencyEstimatorParameters.java | 161 + ...encyEstimatorParametersGradAccumDebug.java | 164 + .../tensorflow/op/rawops/OptionsDataset.java | 93 + .../op/rawops/ParallelBatchDataset.java | 138 + ...EmbeddingFrequencyEstimatorParameters.java | 181 + ...encyEstimatorParametersGradAccumDebug.java | 194 + .../StatelessRandomGetAlg.java} | 40 +- .../rawops/StatelessRandomGetKeyCounter.java | 86 + .../RiscAbs.java} | 52 +- .../java/org/tensorflow/op/risc/RiscAdd.java | 84 + .../op/risc/RiscBinaryArithmetic.java | 84 + .../op/risc/RiscBinaryComparison.java | 83 + .../org/tensorflow/op/risc/RiscBitcast.java | 83 + .../org/tensorflow/op/risc/RiscBroadcast.java | 83 + .../java/org/tensorflow/op/risc/RiscCast.java | 83 + .../java/org/tensorflow/op/risc/RiscCeil.java | 79 + .../org/tensorflow/op/risc/RiscCholesky.java | 79 + .../org/tensorflow/op/risc/RiscConcat.java | 84 + .../java/org/tensorflow/op/risc/RiscConv.java | 180 + .../java/org/tensorflow/op/risc/RiscCos.java | 79 + .../java/org/tensorflow/op/risc/RiscDiv.java | 81 + .../java/org/tensorflow/op/risc/RiscDot.java | 147 + .../java/org/tensorflow/op/risc/RiscExp.java | 79 + .../RiscFft.java} | 50 +- .../org/tensorflow/op/risc/RiscFloor.java | 79 + .../org/tensorflow/op/risc/RiscGather.java 
| 124 + .../java/org/tensorflow/op/risc/RiscImag.java | 99 + .../RiscIsFinite.java} | 43 +- .../java/org/tensorflow/op/risc/RiscLog.java | 79 + .../tensorflow/op/risc/RiscLogicalAnd.java | 78 + .../tensorflow/op/risc/RiscLogicalNot.java | 76 + .../org/tensorflow/op/risc/RiscLogicalOr.java | 78 + .../java/org/tensorflow/op/risc/RiscMax.java | 83 + .../java/org/tensorflow/op/risc/RiscMin.java | 81 + .../java/org/tensorflow/op/risc/RiscMul.java | 81 + .../java/org/tensorflow/op/risc/RiscNeg.java | 79 + .../java/org/tensorflow/op/risc/RiscPad.java | 84 + .../java/org/tensorflow/op/risc/RiscPool.java | 134 + .../java/org/tensorflow/op/risc/RiscPow.java | 81 + .../tensorflow/op/risc/RiscRandomUniform.java | 117 + .../java/org/tensorflow/op/risc/RiscReal.java | 99 + .../org/tensorflow/op/risc/RiscReduce.java | 84 + .../java/org/tensorflow/op/risc/RiscRem.java | 81 + .../org/tensorflow/op/risc/RiscReshape.java | 82 + .../org/tensorflow/op/risc/RiscReverse.java | 82 + .../org/tensorflow/op/risc/RiscScatter.java | 85 + .../org/tensorflow/op/risc/RiscShape.java | 98 + .../java/org/tensorflow/op/risc/RiscSign.java | 79 + .../org/tensorflow/op/risc/RiscSlice.java | 85 + .../java/org/tensorflow/op/risc/RiscSort.java | 84 + .../org/tensorflow/op/risc/RiscSqueeze.java | 146 + .../java/org/tensorflow/op/risc/RiscSub.java | 81 + .../org/tensorflow/op/risc/RiscTranspose.java | 83 + .../op/risc/RiscTriangularSolve.java | 147 + .../org/tensorflow/op/risc/RiscUnary.java | 81 + .../EnqueueTPUEmbeddingRaggedTensorBatch.java | 51 + .../EnqueueTPUEmbeddingSparseTensorBatch.java | 51 + .../op/tpu/TPUReshardVariables.java | 69 + .../gen/java/org/tensorflow/op/xla/Pad.java | 10 +- .../java/org/tensorflow/op/xla/Sharding.java | 42 +- .../proto/data/AutoShardPolicy.java | 188 + .../proto/data/DatasetOptionsProtos.java | 144 + .../proto/data/DistributeOptions.java | 642 ++ .../data/DistributeOptionsOrBuilder.java | 25 + .../proto/data/ExternalStatePolicy.java | 116 + .../proto/data/MapVectorization.java | 697 ++ .../proto/data/MapVectorizationOrBuilder.java | 23 + .../proto/data/OptimizationOptions.java | 2870 +++++++++ .../data/OptimizationOptionsOrBuilder.java | 146 + .../org/tensorflow/proto/data/Options.java | 1567 +++++ .../proto/data/OptionsOrBuilder.java | 109 + .../proto/data/ThreadingOptions.java | 695 ++ .../proto/data/ThreadingOptionsOrBuilder.java | 23 + .../data/experimental/ServiceConfig.java | 295 +- .../experimental/SnapshotMetadataRecord.java | 2 +- .../SnapshotMetadataRecordOrBuilder.java | 2 +- .../data/experimental/SnapshotProtos.java | 39 +- .../data/experimental/SnapshotRecord.java | 2 +- .../experimental/SnapshotRecordOrBuilder.java | 2 +- .../experimental/SnapshotTensorMetadata.java | 2 +- .../SnapshotTensorMetadataOrBuilder.java | 2 +- .../data/experimental/TensorMetadata.java | 2 +- .../experimental/TensorMetadataOrBuilder.java | 2 +- .../proto/data/model/AutotuneAlgorithm.java | 107 + .../proto/data/model/ModelProto.java | 5592 +++++++++++++++++ .../proto/data/model/ModelProtoOrBuilder.java | 66 + .../proto/data/model/ModelProtos.java | 111 + .../proto/data/model/NodeClass.java | 143 + .../tensorflow/proto/example/BytesList.java | 2 + .../proto/framework/ConfigProto.java | 133 + .../proto/framework/ConfigProtos.java | 101 +- .../tensorflow/proto/framework/DataClass.java | 6 +- .../proto/framework/ExtensionTypeVariant.java | 679 ++ .../proto/framework/FunctionSpec.java | 104 +- .../framework/FunctionSpecOrBuilder.java | 8 +- .../tensorflow/proto/framework/GraphDef.java | 12 - 
.../proto/framework/GraphDefOrBuilder.java | 3 - .../framework/LogNormalDistribution.java | 537 ++ .../LogNormalDistributionOrBuilder.java | 19 + .../proto/framework/NormalDistribution.java | 537 ++ .../NormalDistributionOrBuilder.java | 19 + .../org/tensorflow/proto/framework/OpDef.java | 482 ++ .../proto/framework/OpDefProtos.java | 57 +- .../tensorflow/proto/framework/OpInfo.java | 3048 +++++++++ .../proto/framework/OpInfoOrBuilder.java | 199 + .../proto/framework/OpPerformance.java | 3074 +++++++++ .../framework/OpPerformanceDataProtos.java | 186 + .../proto/framework/OpPerformanceList.java | 773 +++ .../framework/OpPerformanceListOrBuilder.java | 33 + .../framework/OpPerformanceOrBuilder.java | 174 + .../RemoteFusedGraphExecuteInfo.java | 3007 --------- .../RemoteFusedGraphExecuteInfoOrBuilder.java | 239 - .../RemoteFusedGraphExecuteInfoProto.java | 85 - .../proto/framework/RewriterConfig.java | 207 + .../framework/RewriterConfigOrBuilder.java | 31 + .../proto/framework/RewriterConfigProtos.java | 81 +- .../proto/framework/SavedObject.java | 46 +- .../framework/SavedObjectGraphProtos.java | 26 +- .../proto/framework/SavedObjectOrBuilder.java | 10 +- .../proto/framework/SavedUserObject.java | 21 + .../framework/SavedUserObjectOrBuilder.java | 6 + .../proto/framework/SessionInfo.java | 485 ++ .../proto/framework/SessionInfoOrBuilder.java | 14 + .../proto/framework/SignatureDef.java | 12 +- .../proto/framework/SpecializedType.java | 17 + .../proto/framework/StructProtos.java | 14 +- .../proto/framework/TypeSpecProto.java | 17 - .../proto/framework/TypesProtos.java | 12 +- .../proto/profiler/XEventMetadata.java | 204 +- .../profiler/XEventMetadataOrBuilder.java | 25 + .../proto/profiler/XPlaneProtos.java | 17 +- .../java/org/tensorflow/proto/util/Event.java | 108 +- .../tensorflow/proto/util/EventOrBuilder.java | 27 +- .../tensorflow/proto/util/EventProtos.java | 70 +- .../org/tensorflow/proto/util/LogMessage.java | 6 +- .../proto/util/LogMessageOrBuilder.java | 2 +- .../src/gen/resources/ops.pb | Bin 1462296 -> 1480980 bytes .../src/gen/resources/ops.pbtxt | 3397 ++++++++-- .../internal/c_api/presets/tensorflow.java | 23 +- 257 files changed, 35396 insertions(+), 5641 deletions(-) create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecvV2.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSendV2.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV2.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_FinalizeDataset.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetOptions.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_OptionsDataset.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_ParallelBatchDataset.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt create mode 100644 
tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscAbs.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscAdd.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBinaryArithmetic.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBinaryComparison.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBitcast.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBroadcast.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCast.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCeil.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCholesky.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscConcat.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCondition.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscConv.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCos.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscDiv.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscDot.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscExp.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscFft.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscFloor.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscGather.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscImag.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscIsFinite.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLog.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalAnd.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalNot.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalOr.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMax.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMin.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMul.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscNeg.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPad.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPool.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPow.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscRandomUniform.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReal.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReduce.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscRem.pbtxt create mode 100644 
tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReshape.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReverse.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscScatter.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscShape.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSign.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSlice.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSort.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSqueeze.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSub.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscTranspose.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscTriangularSolve.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscUnary.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscWhile.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetAlg.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetKeyCounter.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_TPUReshardVariables.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaConvV2.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaDotV2.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaSetDynamicDimensionSize.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicSort.pbtxt delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataExperimentalOps.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RemoteFusedGraphExecute.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rpc.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreePredict.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeResourceHandleOp.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TryRpc.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaConvV2.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaDotV2.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSetDynamicDimensionSize.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/DataServiceDatasetV2.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/FinalizeDataset.java rename 
tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{core/TensorForestTreeSerialize.java => rawops/GetOptions.java} (55%) create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/OptionsDataset.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/ParallelBatchDataset.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{core/TensorForestTreeSize.java => rawops/StatelessRandomGetAlg.java} (56%) create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{core/TensorForestCreateTreeVariable.java => risc/RiscAbs.java} (51%) create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{core/TensorForestTreeDeserialize.java => risc/RiscFft.java} (53%) create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{core/TensorForestTreeIsInitializedOp.java => risc/RiscIsFinite.java} (52%) create mode 100644 
tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutoShardPolicy.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptions.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptionsOrBuilder.java create mode 100644 
tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ExternalStatePolicy.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/MapVectorization.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/MapVectorizationOrBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptions.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptionsOrBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/Options.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptionsOrBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptions.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptionsOrBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/AutotuneAlgorithm.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtoOrBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/NodeClass.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ExtensionTypeVariant.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/LogNormalDistribution.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/LogNormalDistributionOrBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NormalDistribution.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NormalDistributionOrBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpInfo.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpInfoOrBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformance.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceDataProtos.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceList.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceListOrBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceOrBuilder.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfo.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfoOrBuilder.java delete mode 100644 
tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfoProto.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SessionInfo.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SessionInfoOrBuilder.java diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9a5e466d990..969ebe9aecc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -89,7 +89,7 @@ jobs: needs: prepare strategy: matrix: - ext: ["", -mkl, -gpu] #, -mkl-gpu] + ext: ["", -gpu] #, -mkl, -mkl-gpu] steps: - name: Install environment run: | @@ -104,7 +104,7 @@ jobs: tar xzf $HOME/apache-maven-3.6.3-bin.tar.gz -C /opt/ ln -sf /opt/apache-maven-3.6.3/bin/mvn /usr/bin/mvn echo Downloading Bazel - curl -L https://github.com/bazelbuild/bazel/releases/download/3.1.0/bazel-3.1.0-installer-linux-x86_64.sh -o bazel.sh --retry 10 + curl -L https://github.com/bazelbuild/bazel/releases/download/3.7.2/bazel-3.7.2-installer-linux-x86_64.sh -o bazel.sh --retry 10 bash bazel.sh if [[ "${{ matrix.ext }}" == *-gpu ]]; then echo Installing CUDA @@ -153,13 +153,13 @@ jobs: needs: prepare strategy: matrix: - ext: ["", -mkl] + ext: [""] # , -mkl] steps: - name: Install environment run: | python3 -m pip install numpy six echo Downloading Bazel - curl -L https://github.com/bazelbuild/bazel/releases/download/3.1.0/bazel-3.1.0-installer-darwin-x86_64.sh -o bazel.sh --retry 10 + curl -L https://github.com/bazelbuild/bazel/releases/download/3.7.2/bazel-3.7.2-installer-darwin-x86_64.sh -o bazel.sh --retry 10 bash bazel.sh brew install libomp perl - name: Checkout repository @@ -189,7 +189,7 @@ jobs: needs: prepare strategy: matrix: - ext: ["", -gpu, -mkl] #, -mkl-gpu] + ext: ["", -gpu] #, -mkl, -mkl-gpu] steps: - name: Configure page file uses: al-cheb/configure-pagefile-action@v1.2 @@ -208,7 +208,7 @@ jobs: bash.exe -lc "find 'C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/VC/' -iname '14.1*' -exec rm -Rf {} \;" echo Downloading Bazel mkdir C:\bazel - curl.exe -L https://github.com/bazelbuild/bazel/releases/download/3.1.0/bazel-3.1.0-windows-x86_64.exe -o C:/bazel/bazel.exe --retry 10 + curl.exe -L https://github.com/bazelbuild/bazel/releases/download/3.7.2/bazel-3.7.2-windows-x86_64.exe -o C:/bazel/bazel.exe --retry 10 set "EXT=${{ matrix.ext }}" if "%EXT:~-4%" == "-gpu" ( echo Removing some unused stuff to avoid running out of disk space diff --git a/tensorflow-core/pom.xml b/tensorflow-core/pom.xml index 190ce84ff50..d36b91776c0 100644 --- a/tensorflow-core/pom.xml +++ b/tensorflow-core/pom.xml @@ -102,9 +102,10 @@ tensorflow-core-platform - tensorflow-core-platform-mkl - tensorflow-core-platform-mkl-gpu tensorflow-core-platform-gpu + + + diff --git a/tensorflow-core/tensorflow-core-api/.bazelrc b/tensorflow-core/tensorflow-core-api/.bazelrc index 461b2996401..d15d83ee9a2 100644 --- a/tensorflow-core/tensorflow-core-api/.bazelrc +++ b/tensorflow-core/tensorflow-core-api/.bazelrc @@ -151,8 +151,8 @@ build --define open_source_build=true test --define open_source_build=true # For workaround https://github.com/bazelbuild/bazel/issues/8772 with Bazel >= 0.29.1 -build --java_toolchain=@org_tensorflow//third_party/toolchains/java:tf_java_toolchain -build --host_java_toolchain=@org_tensorflow//third_party/toolchains/java:tf_java_toolchain +build --java_toolchain=@tf_toolchains//toolchains/java:tf_java_toolchain +build 
--host_java_toolchain=@tf_toolchains//toolchains/java:tf_java_toolchain # Please note that MKL on MacOS or windows is still not supported. # If you would like to use a local MKL instead of downloading, please set the @@ -616,3 +616,33 @@ build:release_gpu_linux_cuda_10_1 --config=release_gpu_linux build:release_gpu_linux_cuda_10_1 --action_env CUDA_TOOLKIT_PATH="/usr/local/cuda-10.1" build:release_gpu_linux_cuda_10_1 --action_env=TF_CUDA_VERSION="10" build:release_gpu_linux_cuda_10_1 --action_env=TF_CUDNN_VERSION="7" + +# Address sanitizer +# CC=clang bazel build --config asan +build:asan --strip=never +build:asan --copt -fsanitize=address +build:asan --copt -DADDRESS_SANITIZER +build:asan --copt -g +build:asan --copt -O3 +build:asan --copt -fno-omit-frame-pointer +build:asan --linkopt -fsanitize=address + +# Memory sanitizer +# CC=clang bazel build --config msan +build:msan --strip=never +build:msan --copt -fsanitize=memory +build:msan --copt -DADDRESS_SANITIZER +build:msan --copt -g +build:msan --copt -O3 +build:msan --copt -fno-omit-frame-pointer +build:msan --linkopt -fsanitize=memory + +# Undefined Behavior Sanitizer +# CC=clang bazel build --config ubsan +build:ubsan --strip=never +build:ubsan --copt -fsanitize=undefined +build:ubsan --copt -g +build:ubsan --copt -O3 +build:ubsan --copt -fno-omit-frame-pointer +build:ubsan --linkopt -fsanitize=undefined +build:ubsan --linkopt -lubsan diff --git a/tensorflow-core/tensorflow-core-api/WORKSPACE b/tensorflow-core/tensorflow-core-api/WORKSPACE index bd5ce478f66..8a6251a2a02 100644 --- a/tensorflow-core/tensorflow-core-api/WORKSPACE +++ b/tensorflow-core/tensorflow-core-api/WORKSPACE @@ -17,28 +17,31 @@ http_archive( patch_args = ["-p1"], patch_cmds = ["grep -rl 'java_package' tensorflow/core | xargs sed -i.bak 's/^\(.* java_package = \"org\.tensorflow\.\)\(.*\"\)/\\1proto.\\2'/"], urls = [ - "https://github.com/tensorflow/tensorflow/archive/v2.4.1.tar.gz", + "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.5.0.tar.gz", ], - sha256 = "f681331f8fc0800883761c7709d13cda11942d4ad5ff9f44ad855e9dc78387e0", - strip_prefix = "tensorflow-2.4.1" + sha256 = "233875ea27fc357f6b714b2a0de5f6ff124b50c1ee9b3b41f9e726e9e677b86c", + strip_prefix = "tensorflow-2.5.0" ) # START: Upstream TensorFlow dependencies # TensorFlow build depends on these dependencies. # Needs to be in-sync with TensorFlow sources. 
-http_archive( - name = "io_bazel_rules_closure", - sha256 = "5b00383d08dd71f28503736db0500b6fb4dda47489ff5fc6bed42557c07c6ba9", - strip_prefix = "rules_closure-308b05b2419edb5c8ee0471b67a40403df940149", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_closure/archive/308b05b2419edb5c8ee0471b67a40403df940149.tar.gz", - "https://github.com/bazelbuild/rules_closure/archive/308b05b2419edb5c8ee0471b67a40403df940149.tar.gz", # 2019-06-13 - ], -) -# END: Upstream TensorFlow dependencies +load("@org_tensorflow//tensorflow:workspace3.bzl", "tf_workspace3") + +tf_workspace3() -load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace") -tf_workspace() +load("@org_tensorflow//tensorflow:workspace2.bzl", "tf_workspace2") + +tf_workspace2() + +load("@org_tensorflow//tensorflow:workspace1.bzl", "tf_workspace1") + +tf_workspace1() + +load("@org_tensorflow//tensorflow:workspace0.bzl", "tf_workspace0") + +tf_workspace0() +# END: Upstream TensorFlow dependencies load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps") grpc_deps() diff --git a/tensorflow-core/tensorflow-core-api/build.sh b/tensorflow-core/tensorflow-core-api/build.sh index 895832f3df1..fdddaafa18b 100755 --- a/tensorflow-core/tensorflow-core-api/build.sh +++ b/tensorflow-core/tensorflow-core-api/build.sh @@ -68,12 +68,13 @@ done echo "Listing $TENSORFLOW_BIN:" && ls -l $TENSORFLOW_BIN if [[ -x /usr/bin/install_name_tool ]] && [[ -e $BAZEL_BIN/external/llvm_openmp/libiomp5.dylib ]]; then - # Fix library with correct rpath on Mac - chmod +w $BAZEL_BIN/external/llvm_openmp/libiomp5.dylib $TENSORFLOW_BIN/libtensorflow_cc.2.dylib $TENSORFLOW_BIN/libtensorflow_framework.2.dylib - UGLYPATH=$(otool -L $TENSORFLOW_BIN/libtensorflow_cc.2.dylib | grep @loader_path | cut -f1 -d ' ') - install_name_tool -add_rpath @loader_path/. -id @rpath/libiomp5.dylib $BAZEL_BIN/external/llvm_openmp/libiomp5.dylib - install_name_tool -change $UGLYPATH @rpath/libiomp5.dylib $TENSORFLOW_BIN/libtensorflow_cc.2.dylib - install_name_tool -change $UGLYPATH @rpath/libiomp5.dylib $TENSORFLOW_BIN/libtensorflow_framework.2.dylib + # Fix library with correct rpath on Mac + chmod +w $BAZEL_BIN/external/llvm_openmp/libiomp5.dylib $TENSORFLOW_BIN/libtensorflow_cc.2.dylib $TENSORFLOW_BIN/libtensorflow_framework.2.dylib + UGLYPATH=$(otool -L $TENSORFLOW_BIN/libtensorflow_cc.2.dylib | grep @loader_path | cut -f1 -d ' ') + echo $UGLYPATH + install_name_tool -add_rpath @loader_path/. 
-id @rpath/libiomp5.dylib $BAZEL_BIN/external/llvm_openmp/libiomp5.dylib + install_name_tool -change $UGLYPATH @rpath/libiomp5.dylib $TENSORFLOW_BIN/libtensorflow_cc.2.dylib + install_name_tool -change $UGLYPATH @rpath/libiomp5.dylib $TENSORFLOW_BIN/libtensorflow_framework.2.dylib fi GEN_SRCS_DIR=src/gen/java diff --git a/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch b/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch index 3dac55ccee7..3372dc23a83 100644 --- a/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch +++ b/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch @@ -1,6 +1,6 @@ -diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/bfc_memory_map.proto tensorflow-2.4.1-proto/tensorflow/core/protobuf/bfc_memory_map.proto ---- tensorflow-2.4.1/tensorflow/core/protobuf/bfc_memory_map.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/protobuf/bfc_memory_map.proto 2021-02-08 09:43:41.885495355 +0900 +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/protobuf/bfc_memory_map.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/bfc_memory_map.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/protobuf/bfc_memory_map.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/bfc_memory_map.proto 2021-04-27 10:18:43.910313526 +0900 @@ -3,6 +3,9 @@ package tensorflow; @@ -11,9 +11,9 @@ diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/bfc_memory_map.proto tensorf // Some of the data from AllocatorStats message MemAllocatorStats { -diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/data/experimental/snapshot.proto tensorflow-2.4.1-proto/tensorflow/core/protobuf/data/experimental/snapshot.proto ---- tensorflow-2.4.1/tensorflow/core/protobuf/data/experimental/snapshot.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/protobuf/data/experimental/snapshot.proto 2021-02-08 09:40:24.584065472 +0900 +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/protobuf/snapshot.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/snapshot.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/protobuf/snapshot.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/snapshot.proto 2021-04-27 10:19:52.002448627 +0900 @@ -6,6 +6,10 @@ import "tensorflow/core/framework/tensor_shape.proto"; import "tensorflow/core/framework/types.proto"; @@ -25,9 +25,9 @@ diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/data/experimental/snapshot.p // Each SnapshotRecord represents one batch of pre-processed input data. A batch // consists of a list of tensors that we encode as TensorProtos. This message // doesn't store the structure of the batch. 
-diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/device_properties.proto tensorflow-2.4.1-proto/tensorflow/core/protobuf/device_properties.proto ---- tensorflow-2.4.1/tensorflow/core/protobuf/device_properties.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/protobuf/device_properties.proto 2021-02-08 09:41:23.317918806 +0900 +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/protobuf/device_properties.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/device_properties.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/protobuf/device_properties.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/device_properties.proto 2021-04-27 10:19:52.002448627 +0900 @@ -19,6 +19,8 @@ option cc_enable_arenas = true; @@ -37,9 +37,9 @@ diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/device_properties.proto tens option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; message DeviceProperties { -diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/saved_object_graph.proto tensorflow-2.4.1-proto/tensorflow/core/protobuf/saved_object_graph.proto ---- tensorflow-2.4.1/tensorflow/core/protobuf/saved_object_graph.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/protobuf/saved_object_graph.proto 2021-02-08 09:41:50.066852012 +0900 +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/protobuf/saved_object_graph.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/saved_object_graph.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/protobuf/saved_object_graph.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/saved_object_graph.proto 2021-04-27 10:19:52.003448629 +0900 @@ -11,6 +11,9 @@ option cc_enable_arenas = true; @@ -50,9 +50,9 @@ diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/saved_object_graph.proto ten // A SavedObjectGraph is part of object-based SavedModels in TF 2.0. It // describes the directed graph of Python objects (or equivalent in other -diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/struct.proto tensorflow-2.4.1-proto/tensorflow/core/protobuf/struct.proto ---- tensorflow-2.4.1/tensorflow/core/protobuf/struct.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/protobuf/struct.proto 2021-02-08 09:42:06.645810614 +0900 +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/protobuf/struct.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/struct.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/protobuf/struct.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/struct.proto 2021-04-27 10:19:52.003448629 +0900 @@ -7,6 +7,9 @@ import "tensorflow/core/framework/types.proto"; @@ -63,9 +63,9 @@ diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/struct.proto tensorflow-2.4. 
// `StructuredValue` represents a dynamically typed value representing various // data structures that are inspired by Python data structures typically used in -diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/trackable_object_graph.proto tensorflow-2.4.1-proto/tensorflow/core/protobuf/trackable_object_graph.proto ---- tensorflow-2.4.1/tensorflow/core/protobuf/trackable_object_graph.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/protobuf/trackable_object_graph.proto 2021-02-08 09:42:24.581760720 +0900 +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/protobuf/trackable_object_graph.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/trackable_object_graph.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/protobuf/trackable_object_graph.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/trackable_object_graph.proto 2021-04-27 10:19:52.003448629 +0900 @@ -4,6 +4,9 @@ option cc_enable_arenas = true; @@ -76,29 +76,29 @@ diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/trackable_object_graph.proto // A TensorBundle addition which saves extra information about the objects which // own variables, allowing for more robust checkpoint loading into modified -diff -ruN tensorflow-2.4.1/tensorflow/core/protobuf/transport_options.proto tensorflow-2.4.1-proto/tensorflow/core/protobuf/transport_options.proto ---- tensorflow-2.4.1/tensorflow/core/protobuf/transport_options.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/protobuf/transport_options.proto 2021-02-08 09:42:56.660650580 +0900 -@@ -3,6 +3,7 @@ - package tensorflow; - - option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; -+option java_package = "org.tensorflow.distruntime"; - - // Extra data needed on a non-RDMA RecvBufResponse. - message RecvBufRespExtra { -diff -ruN tensorflow-2.4.1/tensorflow/core/lib/core/error_codes.proto tensorflow-2.4.1-proto/tensorflow/core/lib/core/error_codes.proto ---- tensorflow-2.4.1/tensorflow/core/lib/core/error_codes.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/lib/core/error_codes.proto 2021-02-08 09:40:24.590065457 +0900 + diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/protobuf/transport_options.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/transport_options.proto + --- tensorflow-2.5.0-rc1/tensorflow/core/protobuf/transport_options.proto 2021-01-21 09:25:54.000000000 +0900 + +++ tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/transport_options.proto 2021-02-08 09:42:56.660650580 +0900 + @@ -3,6 +3,7 @@ + package tensorflow; + + option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; + +option java_package = "org.tensorflow.distruntime"; + + // Extra data needed on a non-RDMA RecvBufResponse. 
+ message RecvBufRespExtra { +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/lib/core/error_codes.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/lib/core/error_codes.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/lib/core/error_codes.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/lib/core/error_codes.proto 2021-04-27 10:19:52.003448629 +0900 @@ -1,3 +1,5 @@ syntax = "proto3"; +option java_package = "org.tensorflow.framework"; + import public "tensorflow/core/protobuf/error_codes.proto"; -diff -ruN tensorflow-2.4.1/tensorflow/core/profiler/protobuf/xplane.proto tensorflow-2.4.1-proto/tensorflow/core/profiler/protobuf/xplane.proto ---- tensorflow-2.4.1/tensorflow/core/profiler/protobuf/xplane.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/profiler/protobuf/xplane.proto 2021-02-08 09:40:24.591065455 +0900 +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/profiler/protobuf/xplane.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/profiler/protobuf/xplane.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/profiler/protobuf/xplane.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/profiler/protobuf/xplane.proto 2021-04-27 10:19:52.004448631 +0900 @@ -3,6 +3,9 @@ package tensorflow.profiler; @@ -109,9 +109,9 @@ diff -ruN tensorflow-2.4.1/tensorflow/core/profiler/protobuf/xplane.proto tensor // A container of parallel XPlanes, generated by one or more profiling sources. // Next ID: 5 -diff -ruN tensorflow-2.4.1/tensorflow/core/util/memmapped_file_system.proto tensorflow-2.4.1-proto/tensorflow/core/util/memmapped_file_system.proto ---- tensorflow-2.4.1/tensorflow/core/util/memmapped_file_system.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/util/memmapped_file_system.proto 2021-02-08 09:40:24.592065452 +0900 +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/util/memmapped_file_system.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/util/memmapped_file_system.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/util/memmapped_file_system.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/util/memmapped_file_system.proto 2021-04-27 10:19:52.004448631 +0900 @@ -17,6 +17,9 @@ package tensorflow; @@ -122,9 +122,9 @@ diff -ruN tensorflow-2.4.1/tensorflow/core/util/memmapped_file_system.proto tens // A message that describes one region of memmapped file. 
message MemmappedFileSystemDirectoryElement { -diff -ruN tensorflow-2.4.1/tensorflow/core/profiler/profiler_options.proto tensorflow-2.4.1-proto/tensorflow/core/profiler/profiler_options.proto ---- tensorflow-2.4.1/tensorflow/core/profiler/profiler_options.proto 2021-01-21 09:25:54.000000000 +0900 -+++ tensorflow-2.4.1-proto/tensorflow/core/profiler/profiler_options.proto 2021-02-08 09:40:24.593065450 +0900 +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/profiler/profiler_options.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/profiler/profiler_options.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/profiler/profiler_options.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/profiler/profiler_options.proto 2021-04-27 10:19:52.004448631 +0900 @@ -1,6 +1,9 @@ syntax = "proto3"; @@ -135,16 +135,69 @@ diff -ruN tensorflow-2.4.1/tensorflow/core/profiler/profiler_options.proto tenso // Next ID: 11 message ProfileOptions { - -diff --git a/tensorflow/core/protobuf/data/experimental/service_config.proto b/tensorflow/core/protobuf/data/experimental/service_config.proto -index 3dcd2cd48d..ae2cfdd94f 100644 ---- a/tensorflow/core/protobuf/data/experimental/service_config.proto -+++ b/tensorflow/core/protobuf/data/experimental/service_config.proto +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/protobuf/service_config.proto tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/service_config.proto +--- tensorflow-2.5.0-rc1/tensorflow/core/protobuf/service_config.proto 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-proto/tensorflow/core/protobuf/service_config.proto 2021-04-27 10:20:13.501491398 +0900 @@ -1,6 +1,7 @@ syntax = "proto3"; - + package tensorflow.data.experimental; +option java_package = "org.tensorflow.data.experimental"; - + // Configuration for a tf.data service DispatchServer. message DispatcherConfig { +diff --git a/tensorflow/core/framework/dataset_options.proto b/tensorflow/core/framework/dataset_options.proto +index 3d71a560956..4c427640148 100644 +--- a/tensorflow/core/framework/dataset_options.proto ++++ b/tensorflow/core/framework/dataset_options.proto +@@ -2,6 +2,10 @@ syntax = "proto3"; + + package tensorflow.data; + ++option java_outer_classname = "DatasetOptionsProtos"; ++option java_multiple_files = true; ++option java_package = "org.tensorflow.data"; ++ + // Represents the type of auto-sharding we enable. + enum AutoShardPolicy { + // AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding. +diff --git a/tensorflow/core/framework/model.proto b/tensorflow/core/framework/model.proto +index ba74d7a2b7e..721dee57867 100644 +--- a/tensorflow/core/framework/model.proto ++++ b/tensorflow/core/framework/model.proto +@@ -3,6 +3,9 @@ syntax = "proto3"; + package tensorflow.data.model; + + option cc_enable_arenas = true; ++option java_outer_classname = "ModelProtos"; ++option java_multiple_files = true; ++option java_package = "org.tensorflow.data.model"; + + // Class of a node in the performance model. 
+ enum NodeClass { +diff --git a/tensorflow/core/grappler/costs/op_performance_data.proto b/tensorflow/core/grappler/costs/op_performance_data.proto +index 5ef5fd927b8..7c9a6ca2141 100644 +--- a/tensorflow/core/grappler/costs/op_performance_data.proto ++++ b/tensorflow/core/grappler/costs/op_performance_data.proto +@@ -17,6 +17,9 @@ syntax = "proto3"; + + package tensorflow; + option cc_enable_arenas = true; ++option java_outer_classname = "OpPerformanceDataProtos"; ++option java_multiple_files = true; ++option java_package = "org.tensorflow.framework"; + + import "tensorflow/core/framework/tensor.proto"; + import "tensorflow/core/framework/tensor_shape.proto"; +diff --git a/tensorflow/core/protobuf/extension_type_variant.proto b/tensorflow/core/protobuf/extension_type_variant.proto +index 536db3b2435..88c4701b505 100644 +--- a/tensorflow/core/protobuf/extension_type_variant.proto ++++ b/tensorflow/core/protobuf/extension_type_variant.proto +@@ -3,6 +3,7 @@ syntax = "proto3"; + package tensorflow; + + import "tensorflow/core/protobuf/struct.proto"; ++option java_package = "org.tensorflow.framework"; + + // Metadata for ExtensionTypeVariant, used when serializing as Variant. + // diff --git a/tensorflow-core/tensorflow-core-api/external/tensorflow-visibility.patch b/tensorflow-core/tensorflow-core-api/external/tensorflow-visibility.patch index 03fda9811c3..acfab123fd5 100644 --- a/tensorflow-core/tensorflow-core-api/external/tensorflow-visibility.patch +++ b/tensorflow-core/tensorflow-core-api/external/tensorflow-visibility.patch @@ -1,8 +1,7 @@ -diff --git a/tensorflow/BUILD b/tensorflow/BUILD -index 55406a5686..35d1547dfb 100644 ---- a/tensorflow/BUILD -+++ b/tensorflow/BUILD -@@ -33,7 +33,7 @@ load( +diff -ruN tensorflow-2.5.0-rc1/tensorflow/BUILD tensorflow-2.5.0-rc1-visibility/tensorflow/BUILD +--- tensorflow-2.5.0-rc1/tensorflow/BUILD 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-visibility/tensorflow/BUILD 2021-04-27 10:26:14.196211286 +0900 +@@ -38,7 +38,7 @@ load("@bazel_skylib//:bzl_library.bzl", "bzl_library") package( @@ -11,27 +10,26 @@ index 55406a5686..35d1547dfb 100644 licenses = ["notice"], # Apache 2.0 ) -diff --ruN a/tensorflow/core/api_def/BUILD b/tensorflow/core/api_def/BUILD ---- a/tensorflow/core/api_def/BUILD 2020-03-26 18:19:19.000000000 -0400 -+++ b/tensorflow/core/api_def/BUILD 2020-04-01 22:50:37.000000000 -0400 -@@ -28,7 +28,7 @@ package( - filegroup( +diff -ruN tensorflow-2.5.0-rc1/tensorflow/core/api_def/BUILD tensorflow-2.5.0-rc1-visibility/tensorflow/core/api_def/BUILD +--- tensorflow-2.5.0-rc1/tensorflow/core/api_def/BUILD 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-visibility/tensorflow/core/api_def/BUILD 2021-04-27 10:29:38.712785268 +0900 +@@ -29,7 +29,7 @@ + alias( name = "base_api_def", - srcs = glob(["base_api/*"]), + actual = "//tensorflow/core/api_def/base_api:base_api_def", - visibility = ["//tensorflow:internal"], + visibility = ["//visibility:public"], ) - filegroup( -diff -ruN a/tensorflow/tools/api/lib/BUILD b/tensorflow/tools/api/lib/BUILD ---- a/tensorflow/tools/api/lib/BUILD 2020-03-26 18:19:19.000000000 -0400 -+++ b/tensorflow/tools/api/lib/BUILD 2020-04-01 22:50:37.000000000 -0400 -@@ -13,6 +13,7 @@ + alias( +diff -ruN tensorflow-2.5.0-rc1/tensorflow/tools/api/lib/BUILD tensorflow-2.5.0-rc1-visibility/tensorflow/tools/api/lib/BUILD +--- tensorflow-2.5.0-rc1/tensorflow/tools/api/lib/BUILD 2021-04-13 01:43:40.000000000 +0900 ++++ tensorflow-2.5.0-rc1-visibility/tensorflow/tools/api/lib/BUILD 2021-04-27 
10:26:14.196211286 +0900 +@@ -16,6 +16,7 @@ tf_proto_library( name = "api_objects_proto", srcs = ["api_objects.proto"], + visibility = ["//visibility:public"], ) - + py_library( - \ No newline at end of file diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecvV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecvV2.pbtxt new file mode 100644 index 00000000000..bc995cab1bb --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecvV2.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "CollectiveBcastRecvV2" + endpoint { + name: "rawops.CollectiveBcastRecvV2" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSendV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSendV2.pbtxt new file mode 100644 index 00000000000..226379d303e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSendV2.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "CollectiveBcastSendV2" + endpoint { + name: "rawops.CollectiveBcastSendV2" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV2.pbtxt new file mode 100644 index 00000000000..da39be5c1c1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV2.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "DataServiceDatasetV2" + endpoint { + name: "rawops.DataServiceDatasetV2" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_FinalizeDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_FinalizeDataset.pbtxt new file mode 100644 index 00000000000..ab2a5fa846a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_FinalizeDataset.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "FinalizeDataset" + endpoint { + name: "rawops.FinalizeDataset" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetOptions.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetOptions.pbtxt new file mode 100644 index 00000000000..188a9290620 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetOptions.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "GetOptions" + endpoint { + name: "rawops.GetOptions" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt new file mode 100644 index 00000000000..99f5e920acf --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "LoadTPUEmbeddingFrequencyEstimatorParameters" + endpoint { + name: "rawops.LoadTPUEmbeddingFrequencyEstimatorParameters" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt new file mode 100644 index 00000000000..0ced843d210 --- /dev/null +++ 
b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" + endpoint { + name: "rawops.LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_OptionsDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_OptionsDataset.pbtxt new file mode 100644 index 00000000000..e90dfd7bd04 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_OptionsDataset.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "OptionsDataset" + endpoint { + name: "rawops.OptionsDataset" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_ParallelBatchDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_ParallelBatchDataset.pbtxt new file mode 100644 index 00000000000..f05138a1bd8 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_ParallelBatchDataset.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "ParallelBatchDataset" + endpoint { + name: "rawops.ParallelBatchDataset" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt new file mode 100644 index 00000000000..b69d019664f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RetrieveTPUEmbeddingFrequencyEstimatorParameters" + endpoint { + name: "rawops.RetrieveTPUEmbeddingFrequencyEstimatorParameters" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt new file mode 100644 index 00000000000..734b2cb441e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" + endpoint { + name: "rawops.RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscAbs.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscAbs.pbtxt new file mode 100644 index 00000000000..c2ab94f053b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscAbs.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscAbs" + endpoint { + name: "risc.RiscAbs" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscAdd.pbtxt new file mode 100644 index 00000000000..5694b59c62f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscAdd.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscAdd" + endpoint { + name: "risc.RiscAdd" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBinaryArithmetic.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBinaryArithmetic.pbtxt new 
file mode 100644 index 00000000000..910399fa401 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBinaryArithmetic.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscBinaryArithmetic" + endpoint { + name: "risc.RiscBinaryArithmetic" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBinaryComparison.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBinaryComparison.pbtxt new file mode 100644 index 00000000000..014e43b1444 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBinaryComparison.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscBinaryComparison" + endpoint { + name: "risc.RiscBinaryComparison" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBitcast.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBitcast.pbtxt new file mode 100644 index 00000000000..3393f70a8b5 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBitcast.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscBitcast" + endpoint { + name: "risc.RiscBitcast" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBroadcast.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBroadcast.pbtxt new file mode 100644 index 00000000000..755892ca968 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscBroadcast.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscBroadcast" + endpoint { + name: "risc.RiscBroadcast" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCast.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCast.pbtxt new file mode 100644 index 00000000000..d1bffc26bff --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCast.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscCast" + endpoint { + name: "risc.RiscCast" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCeil.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCeil.pbtxt new file mode 100644 index 00000000000..286b8298d51 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCeil.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscCeil" + endpoint { + name: "risc.RiscCeil" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCholesky.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCholesky.pbtxt new file mode 100644 index 00000000000..cdb5975e035 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCholesky.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscCholesky" + endpoint { + name: "risc.RiscCholesky" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscConcat.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscConcat.pbtxt new file mode 100644 index 00000000000..670cb46be04 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscConcat.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscConcat" + endpoint { + name: "risc.RiscConcat" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCondition.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCondition.pbtxt new file mode 100644 index 00000000000..2284aeed689 --- /dev/null +++ 
b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCondition.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscCondition" + endpoint { + name: "risc.RiscCondition" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscConv.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscConv.pbtxt new file mode 100644 index 00000000000..4e2342a8da9 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscConv.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscConv" + endpoint { + name: "risc.RiscConv" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCos.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCos.pbtxt new file mode 100644 index 00000000000..d9905d7e1b0 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscCos.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscCos" + endpoint { + name: "risc.RiscCos" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscDiv.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscDiv.pbtxt new file mode 100644 index 00000000000..651d569b479 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscDiv.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscDiv" + endpoint { + name: "risc.RiscDiv" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscDot.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscDot.pbtxt new file mode 100644 index 00000000000..4eac65da4f8 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscDot.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscDot" + endpoint { + name: "risc.RiscDot" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscExp.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscExp.pbtxt new file mode 100644 index 00000000000..35bb77b83c6 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscExp.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscExp" + endpoint { + name: "risc.RiscExp" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscFft.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscFft.pbtxt new file mode 100644 index 00000000000..a3dcbe69337 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscFft.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscFft" + endpoint { + name: "risc.RiscFft" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscFloor.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscFloor.pbtxt new file mode 100644 index 00000000000..9f5d762d1a4 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscFloor.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscFloor" + endpoint { + name: "risc.RiscFloor" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscGather.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscGather.pbtxt new file mode 100644 index 00000000000..c4fe724889d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscGather.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscGather" + endpoint { + name: "risc.RiscGather" + } +} diff --git 
a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscImag.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscImag.pbtxt new file mode 100644 index 00000000000..70d8136856b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscImag.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscImag" + endpoint { + name: "risc.RiscImag" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscIsFinite.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscIsFinite.pbtxt new file mode 100644 index 00000000000..5418f7a9906 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscIsFinite.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscIsFinite" + endpoint { + name: "risc.RiscIsFinite" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLog.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLog.pbtxt new file mode 100644 index 00000000000..b0bb8f3aaed --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLog.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscLog" + endpoint { + name: "risc.RiscLog" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalAnd.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalAnd.pbtxt new file mode 100644 index 00000000000..1ccb0264901 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalAnd.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscLogicalAnd" + endpoint { + name: "risc.RiscLogicalAnd" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalNot.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalNot.pbtxt new file mode 100644 index 00000000000..6f97af1c7b6 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalNot.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscLogicalNot" + endpoint { + name: "risc.RiscLogicalNot" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalOr.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalOr.pbtxt new file mode 100644 index 00000000000..97e37710419 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscLogicalOr.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscLogicalOr" + endpoint { + name: "risc.RiscLogicalOr" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMax.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMax.pbtxt new file mode 100644 index 00000000000..240f8119a9e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMax.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscMax" + endpoint { + name: "risc.RiscMax" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMin.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMin.pbtxt new file mode 100644 index 00000000000..a8ccba66ae1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMin.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscMin" + endpoint { + name: "risc.RiscMin" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMul.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMul.pbtxt new file 
mode 100644 index 00000000000..21fc1e0e336 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscMul.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscMul" + endpoint { + name: "risc.RiscMul" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscNeg.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscNeg.pbtxt new file mode 100644 index 00000000000..894b769a72a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscNeg.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscNeg" + endpoint { + name: "risc.RiscNeg" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPad.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPad.pbtxt new file mode 100644 index 00000000000..729bba07740 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPad.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscPad" + endpoint { + name: "risc.RiscPad" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPool.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPool.pbtxt new file mode 100644 index 00000000000..9ed6a55dd07 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPool.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscPool" + endpoint { + name: "risc.RiscPool" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPow.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPow.pbtxt new file mode 100644 index 00000000000..3eac196376f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscPow.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscPow" + endpoint { + name: "risc.RiscPow" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscRandomUniform.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscRandomUniform.pbtxt new file mode 100644 index 00000000000..ef96f0a2796 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscRandomUniform.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscRandomUniform" + endpoint { + name: "risc.RiscRandomUniform" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReal.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReal.pbtxt new file mode 100644 index 00000000000..5b9691512fc --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReal.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscReal" + endpoint { + name: "risc.RiscReal" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReduce.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReduce.pbtxt new file mode 100644 index 00000000000..d5d614c828e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReduce.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscReduce" + endpoint { + name: "risc.RiscReduce" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscRem.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscRem.pbtxt new file mode 100644 index 00000000000..0bb38f8de55 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscRem.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscRem" + endpoint { + name: "risc.RiscRem" 
+ } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReshape.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReshape.pbtxt new file mode 100644 index 00000000000..b2ab27447a3 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReshape.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscReshape" + endpoint { + name: "risc.RiscReshape" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReverse.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReverse.pbtxt new file mode 100644 index 00000000000..ccb027a8859 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscReverse.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscReverse" + endpoint { + name: "risc.RiscReverse" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscScatter.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscScatter.pbtxt new file mode 100644 index 00000000000..0eea45dcf04 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscScatter.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscScatter" + endpoint { + name: "risc.RiscScatter" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscShape.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscShape.pbtxt new file mode 100644 index 00000000000..dab7319a922 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscShape.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscShape" + endpoint { + name: "risc.RiscShape" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSign.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSign.pbtxt new file mode 100644 index 00000000000..a157b69acbb --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSign.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscSign" + endpoint { + name: "risc.RiscSign" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSlice.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSlice.pbtxt new file mode 100644 index 00000000000..fc630149b69 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSlice.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscSlice" + endpoint { + name: "risc.RiscSlice" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSort.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSort.pbtxt new file mode 100644 index 00000000000..2048ca7aab1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSort.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscSort" + endpoint { + name: "risc.RiscSort" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSqueeze.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSqueeze.pbtxt new file mode 100644 index 00000000000..f09b55721f9 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSqueeze.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscSqueeze" + endpoint { + name: "risc.RiscSqueeze" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSub.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSub.pbtxt new file mode 
100644 index 00000000000..924d3f38189 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscSub.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscSub" + endpoint { + name: "risc.RiscSub" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscTranspose.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscTranspose.pbtxt new file mode 100644 index 00000000000..877ee6d6570 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscTranspose.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscTranspose" + endpoint { + name: "risc.RiscTranspose" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscTriangularSolve.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscTriangularSolve.pbtxt new file mode 100644 index 00000000000..f74b9a88a86 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscTriangularSolve.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscTriangularSolve" + endpoint { + name: "risc.RiscTriangularSolve" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscUnary.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscUnary.pbtxt new file mode 100644 index 00000000000..429c93bff49 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscUnary.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscUnary" + endpoint { + name: "risc.RiscUnary" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscWhile.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscWhile.pbtxt new file mode 100644 index 00000000000..e4810438b46 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RiscWhile.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RiscWhile" + endpoint { + name: "risc.RiscWhile" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetAlg.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetAlg.pbtxt new file mode 100644 index 00000000000..276b6f4422e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetAlg.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "StatelessRandomGetAlg" + endpoint { + name: "rawops.StatelessRandomGetAlg" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetKeyCounter.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetKeyCounter.pbtxt new file mode 100644 index 00000000000..e0e2f305b7f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetKeyCounter.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "StatelessRandomGetKeyCounter" + endpoint { + name: "rawops.StatelessRandomGetKeyCounter" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_TPUReshardVariables.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_TPUReshardVariables.pbtxt new file mode 100644 index 00000000000..7a6a824b2bd --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_TPUReshardVariables.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "TPUReshardVariables" + endpoint { + name: "tpu.TPUReshardVariables" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaConvV2.pbtxt 
b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaConvV2.pbtxt new file mode 100644 index 00000000000..d2c9637c0ba --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaConvV2.pbtxt @@ -0,0 +1,3 @@ +op { + graph_op_name: "XlaConvV2" +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaDotV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaDotV2.pbtxt new file mode 100644 index 00000000000..357866b27ac --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaDotV2.pbtxt @@ -0,0 +1,3 @@ +op { + graph_op_name: "XlaDotV2" +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaSetDynamicDimensionSize.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaSetDynamicDimensionSize.pbtxt new file mode 100644 index 00000000000..aeaeb87d701 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaSetDynamicDimensionSize.pbtxt @@ -0,0 +1,3 @@ +op { + graph_op_name: "XlaSetDynamicDimensionSize" +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicSort.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicSort.pbtxt new file mode 100644 index 00000000000..5ae24c7686a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicSort.pbtxt @@ -0,0 +1,3 @@ +op { + graph_op_name: "XlaVariadicSort" +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc index 0d43afb5e6a..429761e1ce7 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc @@ -161,26 +161,26 @@ int main(int argc, char* argv[]) { ApiDefMap python_api_map(op_defs); // Load Python API defs - string base_api_dir = tf_src_dir + "/tensorflow/core/api_def/base_api"; - string python_api_dir = tf_src_dir + "/tensorflow/core/api_def/python_api"; + string base_api_path = tf_src_dir + "/tensorflow/core/api_def/base_api/*.pbtxt"; + string python_api_path = tf_src_dir + "/tensorflow/core/api_def/python_api/*.pbtxt"; vector api_files; - TF_CHECK_OK(env->GetChildren(base_api_dir, &api_files)); + TF_CHECK_OK(env->GetMatchingPaths(base_api_path, &api_files)); LOG(INFO) << "Loading " << api_files.size() << " Base API definition files"; for (const auto& filename : api_files) { - TF_CHECK_OK(python_api_map.LoadFile(env, base_api_dir + "/" + filename)) << filename; + TF_CHECK_OK(python_api_map.LoadFile(env, filename)) << filename; } - TF_CHECK_OK(env->GetChildren(python_api_dir, &api_files)); + TF_CHECK_OK(env->GetMatchingPaths(python_api_path, &api_files)); LOG(INFO) << "Loading " << api_files.size() << " Python API definition files"; for (const auto& filename : api_files) { - TF_CHECK_OK(python_api_map.LoadFile(env, python_api_dir + "/" + filename)) << filename; + TF_CHECK_OK(python_api_map.LoadFile(env, filename)) << filename; } python_api_map.UpdateDocs(); // Load golden API member names with their module path - string golden_api_dir = tf_src_dir + "/tensorflow/tools/api/golden/v1"; + string golden_api_path = tf_src_dir + "/tensorflow/tools/api/golden/v1/*.pbtxt"; vector> golden_api_names; vector golden_api_files; - TF_CHECK_OK(env->GetChildren(golden_api_dir, &golden_api_files)); + TF_CHECK_OK(env->GetMatchingPaths(golden_api_path, 
&golden_api_files)); LOG(INFO) << "Loading " << golden_api_files.size() << " Python API golden files"; for (const auto& filename : golden_api_files) { // Skip the raw_ops API, as it contains all op endpoints @@ -188,7 +188,7 @@ int main(int argc, char* argv[]) { continue; } string contents; - TF_CHECK_OK(ReadFileToString(env, golden_api_dir + "/" + filename, &contents)); + TF_CHECK_OK(ReadFileToString(env, filename, &contents)); third_party::tensorflow::tools::api::TFAPIObject object; google::protobuf::TextFormat::ParseFromString(contents, &object); if (object.has_tf_module()) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataExperimentalOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataExperimentalOps.java deleted file mode 100644 index d512607172b..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataExperimentalOps.java +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2020 The TensorFlow Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// ============================================================================== -// -// This class has been generated, DO NOT EDIT! -// -package org.tensorflow.op; - -import java.util.List; -import org.tensorflow.Operand; -import org.tensorflow.ndarray.Shape; -import org.tensorflow.op.data.experimental.DataServiceDataset; -import org.tensorflow.types.TInt64; -import org.tensorflow.types.TString; -import org.tensorflow.types.family.TType; - -/** - * An API for building {@code data.experimental} operations as {@link Op Op}s - * - * @see {@link Ops} - */ -public final class DataExperimentalOps { - private final Scope scope; - - private final Ops ops; - - DataExperimentalOps(Ops ops) { - this.scope = ops.scope(); - this.ops = ops; - } - - /** - * The DataServiceDataset operation - * - * @param datasetId the datasetId value - * @param processingMode the processingMode value - * @param address the address value - * @param protocol the protocol value - * @param jobName the jobName value - * @param maxOutstandingRequests the maxOutstandingRequests value - * @param iterationCounter the iterationCounter value - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @param options carries optional attribute values - * @return a new instance of DataServiceDataset - */ - public DataServiceDataset dataServiceDataset(Operand datasetId, - Operand processingMode, Operand address, Operand protocol, - Operand jobName, Operand maxOutstandingRequests, - Operand iterationCounter, List> outputTypes, - List outputShapes, DataServiceDataset.Options... options) { - return DataServiceDataset.create(scope, datasetId, processingMode, address, protocol, jobName, maxOutstandingRequests, iterationCounter, outputTypes, outputShapes, options); - } - - /** - * Get the parent {@link Ops} object. 
- */ - public final Ops ops() { - return ops; - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java index 523b1596398..4197dac5fee 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java @@ -55,8 +55,6 @@ * @see {@link Ops} */ public final class DataOps { - public final DataExperimentalOps experimental; - private final Scope scope; private final Ops ops; @@ -64,7 +62,6 @@ public final class DataOps { DataOps(Ops ops) { this.scope = ops.scope(); this.ops = ops; - experimental = new DataExperimentalOps(ops); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java index 0ddfec44759..94bfe32ace0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java @@ -155,7 +155,9 @@ public AdjustSaturation adjustSaturation(Operand image * representing a single score corresponding to each box (each row of boxes). * @param maxOutputSizePerClass A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression per class - * @param maxTotalSize A scalar representing maximum number of boxes retained over all classes. + * @param maxTotalSize An int32 scalar representing the maximum number of boxes retained over all + * classes. Note that setting this value to a large number may result in OOM error + * depending on the system workload. * @param iouThreshold A 0-D float tensor representing the threshold for deciding whether * boxes overlap too much with respect to IOU. * @param scoreThreshold A 0-D float tensor representing the threshold for deciding when to remove diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java index 188270728d9..889c234eff1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java @@ -138,12 +138,13 @@ public DecodeCsv decodeCsv(Operand records, Iterable> record /** * Convert JSON-encoded Example records to binary protocol buffer strings. - * This op translates a tensor containing Example records, encoded using - * the standard JSON - * mapping , - * into a tensor containing the same records encoded as binary protocol - * buffers. The resulting tensor can then be fed to any of the other - * Example-parsing ops. + * Note: This is not a general purpose JSON parsing op. + *

    This op converts JSON-serialized + * {@code tf.train.Example} (created with {@code json_format.MessageToJson}, following the + * standard JSON mapping ) + * to a binary-serialized {@code tf.train.Example} (equivalent to + * {@code Example.SerializeToString()}) suitable for conversion to tensors with + * {@code tf.io.parse_example}. * * @param jsonExamples Each string is a JSON object serialized according to the JSON * mapping of the Example proto. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java index 2180a8b95f1..192973c6a32 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java @@ -95,25 +95,25 @@ public final class LinalgOps { *

    For example: *

        *  # if 'input' is [[ 0,  1,  2, 3]
    -   *                   [-1,  0,  1, 2]
    -   *                   [-2, -1,  0, 1]
    -   *                   [-3, -2, -1, 0]],
    +   *  #                [-1,  0,  1, 2]
    +   *  #                [-2, -1,  0, 1]
    +   *  #                [-3, -2, -1, 0]],
        *
    -   *  tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
    +   *  tf.linalg.band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
        *                                         [-1,  0,  1, 2]
        *                                         [ 0, -1,  0, 1]
        *                                         [ 0,  0, -1, 0]],
        *
    -   *  tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
    +   *  tf.linalg.band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
        *                                        [-1,  0,  1, 0]
        *                                        [-2, -1,  0, 1]
        *                                        [ 0, -2, -1, 0]]
        *  
    *

    Useful special cases: *

    -   *   tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
    -   *   tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
    -   *   tf.matrix_band_part(input, 0, 0) ==> Diagonal.
    +   *   tf.linalg.band_part(input, 0, -1) ==> Upper triangular part.
    +   *   tf.linalg.band_part(input, -1, 0) ==> Lower triangular part.
    +   *   tf.linalg.band_part(input, 0, 0) ==> Diagonal.
        *  
    * * @param data type for {@code band} output diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java index cc90d2e1b0f..9c796755cb4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java @@ -1960,7 +1960,7 @@ public Sinh sinh(Operand x) { } /** - * Computes softplus: {@code log(exp(features) + 1)}. + * The Softplus operation * * @param data type for {@code activations} output * @param features the features value diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index acab988384a..8b25a15522f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -954,8 +954,26 @@ public Dilation2dBackpropInput dilation2dBackpropInput(Op } /** - * Computes exponential linear: {@code exp(features) - 1} if < 0, {@code features} otherwise. - * See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) + * Computes the exponential linear function. + * The ELU function is defined as: + *
      + *
    • $ e ^ x - 1 $ if $ x < 0 $
    • + *
    • $ x $ if $ x >= 0 $
    • + *
    + *

    Examples: + *

    + *
    + *
    + *

    tf.nn.elu(1.0) + * <tf.Tensor: shape=(), dtype=float32, numpy=1.0> + * tf.nn.elu(0.0) + * <tf.Tensor: shape=(), dtype=float32, numpy=0.0> + * tf.nn.elu(-1000.0) + * <tf.Tensor: shape=(), dtype=float32, numpy=-1.0> + *

    + *
    + *
    + *

    See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * * * @param data type for {@code activations} output @@ -1753,8 +1771,8 @@ public QuantizedReluX quantizedReluX(Operand *

    *
    - *

    tf.nn.relu([-2., 0., -0., 3.]).numpy() - * array([ 0., 0., -0., 3.], dtype=float32) + *

    tf.nn.relu([-2., 0., 3.]).numpy() + * array([0., 0., 3.], dtype=float32) *

    *
    * diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index c68b6ee8ff7..a4a7f5d6dbc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -166,7 +166,6 @@ import org.tensorflow.op.core.RefNextIteration; import org.tensorflow.op.core.RefSelect; import org.tensorflow.op.core.RefSwitch; -import org.tensorflow.op.core.RemoteFusedGraphExecute; import org.tensorflow.op.core.Reshape; import org.tensorflow.op.core.ResourceCountUpTo; import org.tensorflow.op.core.ResourceGather; @@ -187,7 +186,6 @@ import org.tensorflow.op.core.Reverse; import org.tensorflow.op.core.ReverseSequence; import org.tensorflow.op.core.Roll; -import org.tensorflow.op.core.Rpc; import org.tensorflow.op.core.ScatterAdd; import org.tensorflow.op.core.ScatterDiv; import org.tensorflow.op.core.ScatterMax; @@ -271,7 +269,6 @@ import org.tensorflow.op.core.Timestamp; import org.tensorflow.op.core.TopKUnique; import org.tensorflow.op.core.TopKWithUnique; -import org.tensorflow.op.core.TryRpc; import org.tensorflow.op.core.Unbatch; import org.tensorflow.op.core.UnbatchGrad; import org.tensorflow.op.core.Unique; @@ -284,6 +281,9 @@ import org.tensorflow.op.core.Variable; import org.tensorflow.op.core.VariableShape; import org.tensorflow.op.core.Where; +import org.tensorflow.op.core.XlaConvV2; +import org.tensorflow.op.core.XlaDotV2; +import org.tensorflow.op.core.XlaSetDynamicDimensionSize; import org.tensorflow.op.core.XlaSpmdFullToShardShape; import org.tensorflow.op.core.XlaSpmdShardToFullShape; import org.tensorflow.op.core.Zeros; @@ -2953,9 +2953,8 @@ public InitializeTableFromTextFile initializeTableFromTextFile( } /** - *
    -   *  Adds v into specified rows of x.
    -   *
    +   * Adds v into specified rows of x.
    +   *  
        *  Computes y = x; y[i, :] += v; return y.
        *  
    * @@ -4159,27 +4158,6 @@ public RefSwitch refSwitch(Operand data, Operand return RefSwitch.create(scope, data, pred); } - /** - * Execute a sub graph on a remote processor. - * The graph specifications(such as graph itself, input tensors and output names) - * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo - * as serialized_remote_fused_graph_execute_info. - * The specifications will be passed to a dedicated registered - * remote fused graph executor. The executor will send the graph specifications - * to a remote processor and execute that graph. The execution results - * will be passed to consumer nodes as outputs of this node. - * - * @param inputs Arbitrary number of tensors with arbitrary data types - * @param Toutputs the value of the Toutputs property - * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer - * of RemoteFusedGraphExecuteInfo which contains graph specifications. - * @return a new instance of RemoteFusedGraphExecute - */ - public RemoteFusedGraphExecute remoteFusedGraphExecute(Iterable> inputs, - List> Toutputs, String serializedRemoteFusedGraphExecuteInfo) { - return RemoteFusedGraphExecute.create(scope, inputs, Toutputs, serializedRemoteFusedGraphExecuteInfo); - } - /** * Reshapes a tensor. * Given {@code tensor}, this operation returns a tensor that has the same values @@ -4857,64 +4835,6 @@ public Roll roll(Operand input, Operand - *
  • {@code address} (the host+port or BNS address of the request)
  • - *
  • {@code method} (the RPC method name for the request)
  • - *
  • {@code request} (the serialized proto string, or vector of strings, - * of the RPC request argument).
  • - *
- *

For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: - *

-   *  service MyService {
-   *    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
-   *    }
-   *  };
-   *  
- *

then call this op with arguments: - *

-   *  address = "localhost:2345"
-   *  method = "MyService/MyMethod"
-   *  
- *

The {@code request} tensor is a string tensor representing serialized {@code MyRequestProto} - * strings; and the output string tensor {@code response} will have the same shape - * and contain (upon successful completion) corresponding serialized - * {@code MyResponseProto} strings. - *

For example, to send a single, empty, {@code MyRequestProto}, call - * this op with {@code request = ""}. To send 5 parallel empty requests, - * call this op with {@code request = ["", "", "", "", ""]}. - *

More generally, one can create a batch of {@code MyRequestProto} serialized protos - * from regular batched tensors using the {@code encode_proto} op, and convert - * the response {@code MyResponseProto} serialized protos to batched tensors - * using the {@code decode_proto} op. - *

NOTE Working with serialized proto strings is faster than instantiating - * actual proto objects in memory, so no performance degradation is expected - * compared to writing custom kernels for this workflow. - *

If the connection fails or the remote worker returns an error - * status, the op reraises this exception locally. - *

See the {@code TryRpc} op if you prefer to handle RPC failures manually in the graph. - * - * @param address {@code 0-D} or {@code 1-D}. The address (i.e. host_name:port) of the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code method} and {@code request}. - * @param method {@code 0-D} or {@code 1-D}. The method address on the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code address} and {@code request}. - * @param request {@code 0-D} or {@code 1-D}. Serialized proto strings: the rpc request argument. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code address} and {@code method}. - * @param options carries optional attribute values - * @return a new instance of Rpc - */ - public Rpc rpc(Operand address, Operand method, Operand request, - Rpc.Options... options) { - return Rpc.create(scope, address, method, request, options); - } - /** * Adds sparse updates to a variable reference. * This operation computes @@ -5930,7 +5850,39 @@ public StageSize stageSize(List> dtypes, StageSize.Option * in the graph it inputs are masked from the gradient generator. They are not * taken into account for computing gradients. *

This is useful any time you want to compute a value with TensorFlow but need - * to pretend that the value was a constant. Some examples include: + * to pretend that the value was a constant. For example, the softmax function + * for a vector x can be written as + *

+   *
+   *    def softmax(x):
+   *      numerator = tf.exp(x)
+   *      denominator = tf.reduce_sum(numerator)
+   *      return numerator / denominator
+   *  
+ *

This, however, is susceptible to overflow if the values in x are large. An + * alternative, more stable way is to subtract the maximum of x from each of the + * values. + *

+   *
+   *    def stable_softmax(x):
+   *      z = x - tf.reduce_max(x)
+   *      numerator = tf.exp(z)
+   *      denominator = tf.reduce_sum(numerator)
+   *      return numerator / denominator
+   *  
+ *

However, when we backprop through the softmax to x, we don't want to backprop + * through the {@code tf.reduce_max(x)} (if the max values are not unique then the + * gradient could flow to the wrong input) calculation and treat that as a + * constant. Therefore, we should write this out as + *

+   *
+   *    def stable_softmax(x):
+   *      z = x - tf.stop_gradient(tf.reduce_max(x))
+   *      numerator = tf.exp(z)
+   *      denominator = tf.reduce_sum(numerator)
+   *      return numerator / denominator
+   *  
+ *
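For reference, a rough TensorFlow-Java sketch of the same stop-gradient trick follows; the class, method, and helper names below are illustrative only and are not part of this patch.

    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.types.TFloat32;

    public class StableSoftmaxSketch {
      // Mirrors the stable_softmax pseudocode above: the reduce_max result is
      // wrapped in stopGradient so it is treated as a constant during backprop.
      static Operand<TFloat32> stableSoftmax(Ops tf, Operand<TFloat32> x) {
        Operand<TFloat32> z =
            tf.math.sub(x, tf.stopGradient(tf.reduceMax(x, tf.constant(-1))));
        Operand<TFloat32> numerator = tf.math.exp(z);
        Operand<TFloat32> denominator = tf.reduceSum(numerator, tf.constant(-1));
        return tf.math.div(numerator, denominator);
      }

      public static void main(String[] args) {
        Ops tf = Ops.create();  // eager mode, for a quick check
        System.out.println(
            stableSoftmax(tf, tf.constant(new float[] {1f, 2f, 3f})).shape());
      }
    }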

Some other examples include: *

For an HeUniform equivalent initializer, use {@link VarianceScaling.Distribution#UNIFORM} for + * the distribution parameter. * * @param The TType for the call operation * @see extends VarianceScaling { /** * Creates an He Initializer * - * @param tf the TensorFlow Ops * @param distribution The distribution type for the He initializer. * @param seed the seed for random number generation. An initializer created with a given seed * will always produce the same random tensor for a given shape and dtype. * @see VarianceScaling.Distribution */ - public He(Ops tf, Distribution distribution, long seed) { - super(tf, SCALE, Mode.FAN_IN, distribution, seed); + public He(Distribution distribution, long seed) { + super(SCALE, Mode.FAN_IN, distribution, seed); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java index f672c9f1e85..ea73f764a38 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java @@ -14,6 +14,8 @@ =======================================================================*/ package org.tensorflow.framework.initializers; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.utils.ShapeUtils; import org.tensorflow.ndarray.Shape; @@ -32,40 +34,34 @@ * Identity<TFloat32> initializer = * new org.tensorflow.framework.initializers.Identity<>(tf); * Operand<TFloat32> values = - * initializer.call(tf.constant(Shape.of(2,2)), TFloat32.class); + * initializer.call(Ops tf, tf.constant(Shape.of(2,2)), TFloat32.class); * * * @param The TType for the call operation */ public class Identity extends BaseInitializer { public static final double GAIN_DEFAULT = 1.0; - private final double gain; - /** - * Creates an Initializer that generates the identity matrix. - * - * @param tf the TensorFlow Ops - */ - public Identity(Ops tf) { - super(tf); - this.gain = GAIN_DEFAULT; + /** Creates an Initializer that generates the identity matrix. */ + public Identity() { + this(GAIN_DEFAULT); } /** * Creates an Initializer that generates the identity matrix. 
* - * @param tf the TensorFlow Ops * @param gain the gain to be applied to the Identity Matrix */ - public Identity(Ops tf, double gain) { - super(tf); + public Identity(double gain) { + super(); this.gain = gain; } /** {@inheritDoc} */ @Override - public Operand call(Operand dims, Class type) { + public Operand call(Ops tf, Operand dims, Class type) { + Shape shape = ShapeUtils.toShape(tf.scope(), dims); if (shape.numDimensions() != 2) { throw new IllegalArgumentException("2D matrix required, got " + shape.numDimensions()); @@ -75,9 +71,9 @@ public Operand call(Operand dims, Class type) { Shape diagShape = Shape.of(diagSize); Operand op; - Operand zero = tf.dtypes.cast(tf.constant(0), type); + Operand zero = cast(tf, tf.constant(0), type); Operand diagOnes = - tf.fill(tf.constant(diagShape.asArray()), tf.dtypes.cast(tf.constant(1.0), type)); + tf.fill(tf.constant(diagShape.asArray()), cast(tf, tf.constant(1.0), type)); if (isSquare) { op = tf.linalg.matrixDiag( @@ -91,6 +87,6 @@ public Operand call(Operand dims, Class type) { op = tf.linalg.matrixSetDiag(zeroMatrix, diagOnes, tf.constant(0)); } - return tf.math.mul(op, tf.dtypes.cast(tf.constant(gain), type)); + return tf.math.mul(op, cast(tf, tf.constant(gain), type)); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Initializer.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Initializer.java index 4beb218783b..d6593b770e2 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Initializer.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Initializer.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.initializers; import org.tensorflow.Operand; +import org.tensorflow.op.Ops; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -23,14 +24,18 @@ * * @param The data Type for initializer operation */ +@FunctionalInterface public interface Initializer { /** * Generates the operation used to perform the initialization. * + * @param tf the TensorFlow Ops * @param dims the shape dimensions * @param type the type of tensor + * @throws IllegalStateException if the object has not been initialized with the TensorFlow + * Platform. * @return An operand for the initialization. */ - Operand call(Operand dims, Class type); + Operand call(Ops tf, Operand dims, Class type); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/LeCun.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/LeCun.java index 38e68ef688b..364c5fb9285 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/LeCun.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/LeCun.java @@ -14,7 +14,6 @@ =======================================================================*/ package org.tensorflow.framework.initializers; -import org.tensorflow.op.Ops; import org.tensorflow.types.family.TFloating; /** @@ -27,7 +26,7 @@ * stddev = sqrt(1 / fanIn) where fanIn is the number of input units in the * weight tensor. * - *

    *
  • The EM algorithm where the M-step should not involve backpropagation * through the output of the E-step.
  • @@ -7193,8 +7145,8 @@ public Timestamp timestamp() { } /** - * Returns the TopK unique values in the array in sorted order. The - * running time is proportional to the product of K and the input + * Returns the TopK unique values in the array in sorted order. + * The running time is proportional to the product of K and the input * size. Sorting the whole array is more efficient for sufficiently large * values of K. The median-of-medians algorithm is probably faster, but * difficult to implement efficiently in XLA. If there are fewer than K @@ -7216,11 +7168,12 @@ public TopKUnique topKUnique(Operand input, Long k) { } /** - * Returns the TopK values in the array in sorted order. This is a combination - * of MakeUnique and TopKUnique. The returned top-K will have its lower bits - * replaced by iota, thus it will be close to the original value but not exactly - * the same. The running time is proportional to the product of K and the input - * size. NaNs are never returned. Subnormal numbers are flushed to zero. + * Returns the TopK values in the array in sorted order. + * This is a combination of MakeUnique and TopKUnique. The returned top-K will + * have its lower bits replaced by iota, thus it will be close to the original + * value but not exactly the same. The running time is proportional to the product + * of K and the input size. NaNs are never returned. Subnormal numbers are flushed + * to zero. * * @param input the input value * @param k the value of the k property @@ -7230,67 +7183,6 @@ public TopKWithUnique topKWithUnique(Operand input, Long k) { return TopKWithUnique.create(scope, input, k); } - /** - * Perform batches of RPC requests. - * This op asynchronously performs either a single RPC request, or a batch - * of requests. RPC requests are defined by three main parameters: - *
      - *
    • {@code address} (the host+port or BNS address of the request)
    • - *
    • {@code method} (the method name for the request)
    • - *
    • {@code request} (the serialized proto string, or vector of strings, - * of the RPC request argument).
    • - *
    - *

    For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: - *

    -   *  service MyService {
    -   *    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
    -   *    }
    -   *  };
    -   *  
    - *

    then call this op with arguments: - *

    -   *  address = "localhost:2345"
    -   *  method = "MyService/MyMethod"
    -   *  
    - *

    The {@code request} tensor is a string tensor representing serialized {@code MyRequestProto} - * strings; and the output string tensor {@code response} will have the same shape - * and contain (upon successful completion) corresponding serialized - * {@code MyResponseProto} strings. - *

    For example, to send a single, empty, {@code MyRequestProto}, call - * this op with {@code request = ""}. To send 5 parallel empty requests, - * call this op with {@code request = ["", "", "", "", ""]}. - *

    More generally, one can create a batch of {@code MyRequestProto} serialized protos - * from regular batched tensors using the {@code encode_proto} op, and convert - * the response {@code MyResponseProto} serialized protos to batched tensors - * using the {@code decode_proto} op. - *

    NOTE Working with serialized proto strings is faster than instantiating - * actual proto objects in memory, so no performance degradation is expected - * compared to writing custom kernels for this workflow. - *

    Unlike the standard {@code Rpc} op, if the connection fails or the remote worker - * returns an error status, this op does not reraise the exception. - * Instead, the {@code status_code} and {@code status_message} entry for the corresponding RPC - * call is set with the error returned from the RPC call. The {@code response} tensor - * will contain valid response values for those minibatch entries whose RPCs did - * not fail; the rest of the entries will have empty strings. - * - * @param address {@code 0-D} or {@code 1-D}. The address (i.e. host_name:port) of the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code method} and {@code request}. - * @param method {@code 0-D} or {@code 1-D}. The method address on the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code address} and {@code request}. - * @param request {@code 0-D} or {@code 1-D}. Serialized proto strings: the rpc request argument. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code address} and {@code method}. - * @param options carries optional attribute values - * @return a new instance of TryRpc - */ - public TryRpc tryRpc(Operand address, Operand method, Operand request, - TryRpc.Options... options) { - return TryRpc.create(scope, address, method, request, options); - } - /** * Reverses the operation of Batch for a single output Tensor. * An instance of Unbatch either receives an empty batched_tensor, in which case it @@ -7474,29 +7366,29 @@ public Unique unique(Operand x, *

    {@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]} *

    For example: *

    -   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
    -   *  y, idx, count = unique_with_counts(x)
    +   *  x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
    +   *  y, idx, count = UniqueWithCountsV2(x, axis = [0])
        *  y ==> [1, 2, 4, 7, 8]
        *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
        *  count ==> [2, 1, 3, 1, 2]
        *  
    - *

    For an {@code 2-D} tensor {@code x} with {@code axis = 0}: + *

    For a {@code 2-D} tensor {@code x} with {@code axis = 0}: *

    -   *  # tensor 'x' is [[1, 0, 0],
    -   *  #                [1, 0, 0],
    -   *  #                [2, 0, 0]]
    -   *  y, idx, count = unique_with_counts(x, axis=0)
    +   *  x = tf.constant([[1, 0, 0],
    +   *                  [1, 0, 0],
    +   *                  [2, 0, 0]])
    +   *  y, idx, count = UniqueWithCountsV2(x, axis=[0])
        *  y ==> [[1, 0, 0],
        *         [2, 0, 0]]
        *  idx ==> [0, 0, 1]
        *  count ==> [2, 1]
        *  
    - *

    For an {@code 2-D} tensor {@code x} with {@code axis = 1}: + *

    For a {@code 2-D} tensor {@code x} with {@code axis = 1}: *

    -   *  # tensor 'x' is [[1, 0, 0],
    -   *  #                [1, 0, 0],
    -   *  #                [2, 0, 0]]
    -   *  y, idx, count = unique_with_counts(x, axis=1)
    +   *  x = tf.constant([[1, 0, 0],
    +   *                  [1, 0, 0],
    +   *                  [2, 0, 0]])
    +   *  y, idx, count = UniqueWithCountsV2(x, axis=[1])
        *  y ==> [[1, 0],
        *         [1, 0],
        *         [2, 0]]
    @@ -7530,29 +7422,29 @@ public  UniqueWithCounts uniqueWithCounts(Operand
        *  

    {@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]} *

    For example: *

    -   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
    -   *  y, idx, count = unique_with_counts(x)
    +   *  x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
    +   *  y, idx, count = UniqueWithCountsV2(x, axis = [0])
        *  y ==> [1, 2, 4, 7, 8]
        *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
        *  count ==> [2, 1, 3, 1, 2]
        *  
    - *

    For an {@code 2-D} tensor {@code x} with {@code axis = 0}: + *

    For a {@code 2-D} tensor {@code x} with {@code axis = 0}: *

    -   *  # tensor 'x' is [[1, 0, 0],
    -   *  #                [1, 0, 0],
    -   *  #                [2, 0, 0]]
    -   *  y, idx, count = unique_with_counts(x, axis=0)
    +   *  x = tf.constant([[1, 0, 0],
    +   *                  [1, 0, 0],
    +   *                  [2, 0, 0]])
    +   *  y, idx, count = UniqueWithCountsV2(x, axis=[0])
        *  y ==> [[1, 0, 0],
        *         [2, 0, 0]]
        *  idx ==> [0, 0, 1]
        *  count ==> [2, 1]
        *  
    - *

    For an {@code 2-D} tensor {@code x} with {@code axis = 1}: + *

    For a {@code 2-D} tensor {@code x} with {@code axis = 1}: *

    -   *  # tensor 'x' is [[1, 0, 0],
    -   *  #                [1, 0, 0],
    -   *  #                [2, 0, 0]]
    -   *  y, idx, count = unique_with_counts(x, axis=1)
    +   *  x = tf.constant([[1, 0, 0],
    +   *                  [1, 0, 0],
    +   *                  [2, 0, 0]])
    +   *  y, idx, count = UniqueWithCountsV2(x, axis=[1])
        *  y ==> [[1, 0],
        *         [1, 0],
        *         [2, 0]]
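For reference, a hedged Java sketch of the endpoint documented above (eager setup and the generated accessors {@code y()}, {@code idx()} and {@code count()} are assumed), reproducing the 1-D example:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.UniqueWithCounts;
import org.tensorflow.types.TInt32;

public class UniqueWithCountsSketch {
  public static void main(String[] args) {
    Ops tf = Ops.create();
    Operand<TInt32> x = tf.constant(new int[] {1, 1, 2, 4, 4, 4, 7, 8, 8});
    // axis = [0], as in the 1-D example above.
    UniqueWithCounts<TInt32, TInt32> result = tf.uniqueWithCounts(x, tf.constant(new int[] {0}));
    Operand<TInt32> y = result.y();         // ==> [1, 2, 4, 7, 8]
    Operand<TInt32> idx = result.idx();     // ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
    Operand<TInt32> count = result.count(); // ==> [2, 1, 3, 1, 2]
  }
}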
    @@ -7805,6 +7697,72 @@ public Where where(Operand condition) {
         return Where.create(scope, condition);
       }
     
    +  /**
    +   * Wraps the XLA ConvGeneralDilated operator, documented at
    +   *  https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
    +   *  .
    +   *
    +   * @param  data type for {@code output} output
    +   * @param lhs the input tensor
    +   * @param rhs the kernel tensor
    +   * @param windowStrides the inter-window strides
+   * @param padding the padding to apply at the start and end of each input dimension
    +   * @param lhsDilation dilation to apply between input elements
    +   * @param rhsDilation dilation to apply between kernel elements
    +   * @param featureGroupCount number of feature groups for grouped convolution.
    +   * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto.
    +   * @param precisionConfig a serialized xla::PrecisionConfig proto.
    +   * @param preferredElementType The type of the tensor.
    +   * @param  data type for {@code XlaConvV2} output and operands
    +   * @param  data type for {@code XlaConvV2} output and operands
    +   * @return a new instance of XlaConvV2
    +   */
    +  public  XlaConvV2 xlaConvV2(Operand lhs,
    +      Operand rhs, Operand windowStrides, Operand padding,
    +      Operand lhsDilation, Operand rhsDilation, Operand featureGroupCount,
    +      String dimensionNumbers, String precisionConfig, Class preferredElementType) {
    +    return XlaConvV2.create(scope, lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, featureGroupCount, dimensionNumbers, precisionConfig, preferredElementType);
    +  }
    +
    +  /**
    +   * Wraps the XLA DotGeneral operator, documented at
    +   *  https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
    +   *  .
    +   *
    +   * @param  data type for {@code output} output
    +   * @param lhs the LHS tensor
    +   * @param rhs the RHS tensor
    +   * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto.
    +   * @param precisionConfig a serialized xla::PrecisionConfig proto.
    +   * @param preferredElementType The type of the tensor.
    +   * @param  data type for {@code XlaDotV2} output and operands
    +   * @return a new instance of XlaDotV2
    +   */
    +  public  XlaDotV2 xlaDotV2(Operand lhs,
    +      Operand rhs, String dimensionNumbers, String precisionConfig,
    +      Class preferredElementType) {
    +    return XlaDotV2.create(scope, lhs, rhs, dimensionNumbers, precisionConfig, preferredElementType);
    +  }
    +
    +  /**
+   * Make a static dimension into an xla bounded dynamic dimension.
    +   *  
    +   *      The current static dimension size will become the bound and the second
    +   *      operand becomes the dynamic size of the dimension.
    +   *  
    + * + * @param data type for {@code output} output + * @param input the input value + * @param dimIndex the dimIndex value + * @param sizeOutput the sizeOutput value + * @param data type for {@code XlaSetDynamicDimensionSize} output and operands + * @return a new instance of XlaSetDynamicDimensionSize + */ + public XlaSetDynamicDimensionSize xlaSetDynamicDimensionSize( + Operand input, Operand dimIndex, Operand sizeOutput) { + return XlaSetDynamicDimensionSize.create(scope, input, dimIndex, sizeOutput); + } + /** * An op used by XLA SPMD partitioner to switch from automatic partitioning to * manual partitioning. It annotates the input (full-shape, to be automatically diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java index d838cc65c48..99caae1fdc2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java @@ -261,9 +261,13 @@ public KeyValueSort keyValueSort(Oper * @param data type for {@code output} output * @param input A {@code Tensor} of type T. * @param paddingValue A scalar {@code Tensor} of type T. - * @param paddingLow the padding to apply at the start of each input dimensions - * @param paddingHigh the padding to apply at the end of each input dimension. - * @param paddingInterior the padding to apply between each input element. + * @param paddingLow the padding to apply at the start of each input dimensions. Must + * be a compile-time constant 1D tensor of length equal to rank of input. + * @param paddingHigh the padding to apply at the end of each input dimension. Must + * be a compile-time constant 1D tensor of length equal to rank of input. + * @param paddingInterior the padding to apply between each input element. Must + * be a compile-time constant 1D tensor of length equal to rank of input, + * containing only non-negative values. * @param data type for {@code XlaPad} output and operands * @param data type for {@code XlaPad} output and operands * @return a new instance of Pad @@ -340,11 +344,12 @@ public Send send(Operand tensor, String tensorName) { * * @param data type for {@code output} output * @param input the input value + * @param options carries optional attribute values * @param data type for {@code XlaSharding} output and operands * @return a new instance of Sharding */ - public Sharding sharding(Operand input) { - return Sharding.create(scope, input); + public Sharding sharding(Operand input, Sharding.Options... 
options) { + return Sharding.create(scope, input, options); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java index f83bc6c1394..2441bc1af65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java @@ -781,8 +781,52 @@ public static native void TF_TensorBitcastFrom(@Const TF_Tensor from, // #ifndef TENSORFLOW_C_TF_TSTRING_H_ // #define TENSORFLOW_C_TF_TSTRING_H_ +// #include "tensorflow/c/tf_tensor.h" // #include "tensorflow/core/platform/ctstring.h" +// #ifdef SWIG +// #define TF_CAPI_EXPORT +// #else +// #if defined(_WIN32) +// #ifdef TF_COMPILE_LIBRARY +// #define TF_CAPI_EXPORT __declspec(dllexport) +// #else +// #define TF_CAPI_EXPORT __declspec(dllimport) +// #endif // TF_COMPILE_LIBRARY +// #else +// #define TF_CAPI_EXPORT __attribute__((visibility("default"))) +// #endif // _WIN32 +// #endif // SWIG + +// #ifdef __cplusplus +// #endif + +public static native void TF_StringInit(TF_TString t); + +public static native void TF_StringCopy(TF_TString dst, @Cast("const char*") BytePointer src, + @Cast("size_t") long size); +public static native void TF_StringCopy(TF_TString dst, String src, + @Cast("size_t") long size); + +public static native void TF_StringAssignView(TF_TString dst, @Cast("const char*") BytePointer src, + @Cast("size_t") long size); +public static native void TF_StringAssignView(TF_TString dst, String src, + @Cast("size_t") long size); + +public static native @Cast("const char*") BytePointer TF_StringGetDataPointer( + @Const TF_TString tstr); + +public static native @Cast("TF_TString_Type") int TF_StringGetType(@Const TF_TString str); + +public static native @Cast("size_t") long TF_StringGetSize(@Const TF_TString tstr); + +public static native @Cast("size_t") long TF_StringGetCapacity(@Const TF_TString str); + +public static native void TF_StringDealloc(TF_TString tstr); + +// #ifdef __cplusplus /* end extern "C" */ +// #endif + // #endif // THIRD_PARTY_TENSORFLOW_C_TF_TSTRING_H_ @@ -2530,7 +2574,7 @@ public static native void TF_FunctionGetAttrValueProto( // Return a new execution session with the associated graph, or NULL on // error. Does not take ownership of any input parameters. // -// *`graph` must be a valid graph (not deleted or nullptr). `graph` will be be +// *`graph` must be a valid graph (not deleted or nullptr). `graph` will be // kept alive for the lifetime of the returned TF_Session. New nodes can still // be added to `graph` after this call. public static native TF_Session TF_NewSession(TF_Graph graph, @@ -3067,6 +3111,7 @@ public static native void TF_RegisterFilesystemPlugin( // #include // #include "tensorflow/c/c_api.h" +// #include "tensorflow/c/experimental/stream_executor/stream_executor.h" // #include "tensorflow/c/tf_datatype.h" // #include "tensorflow/c/tf_status.h" // #include "tensorflow/c/tf_tensor.h" @@ -3101,6 +3146,11 @@ public static native void TF_RegisterFilesystemPlugin( // Targeting ../TF_OpKernelContext.java + +// TF_InitKernel to do op/kernel registration. +// Plugin should implement TF_InitKernel to register kernels. This function +// should register all kernels in a plugin. 
+public static native void TF_InitKernel(); // Targeting ../Create_func_TF_OpKernelConstruction.java @@ -3158,6 +3208,16 @@ public static native void TF_RegisterKernelBuilder(String kernel_name, // -------------------------------------------------------------------------- // OpKernelContext routines +// TF_GetStream returns the SP_Stream available in ctx. +// This function returns a stream only for devices registered using the +// StreamExecutor C API +// (tensorflow/c/experimental/stream_executor/stream_executor.h). It will return +// nullptr and set error status in all other cases. +// Experimental: this function doesn't have compatibility guarantees and subject +// to change at any time. +public static native @ByVal @Cast("SP_Stream*") Pointer TF_GetStream(TF_OpKernelContext ctx, + TF_Status status); + // TF_NumInputs returns the number of inputs available in ctx. public static native int TF_NumInputs(TF_OpKernelContext ctx); @@ -3200,6 +3260,39 @@ public static native void TF_OpKernelContext_Failure(TF_OpKernelContext ctx, // Returns the step ID of the given context. public static native @Cast("int64_t") long TF_StepId(TF_OpKernelContext ctx); +// Get the list_size and total_size of the attribute `attr_name` of `oper`. +// list_size - the length of the list. +// total_size - total size of the list. +// (1) If attr_type == TF_ATTR_STRING +// then total_size is the cumulative byte size +// of all the strings in the list. +// (3) If attr_type == TF_ATTR_SHAPE +// then total_size is the number of dimensions +// of the shape valued attribute, or -1 +// if its rank is unknown. +// (4) If attr_type == TF_ATTR_SHAPE +// then total_size is the cumulative number +// of dimensions of all shapes in the list. +// (5) Otherwise, total_size is undefined. +public static native void TF_OpKernelConstruction_GetAttrSize( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, IntPointer list_size, + IntPointer total_size, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrSize( + TF_OpKernelConstruction ctx, String attr_name, IntBuffer list_size, + IntBuffer total_size, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrSize( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, int[] list_size, + int[] total_size, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrSize( + TF_OpKernelConstruction ctx, String attr_name, IntPointer list_size, + IntPointer total_size, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrSize( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, IntBuffer list_size, + IntBuffer total_size, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrSize( + TF_OpKernelConstruction ctx, String attr_name, int[] list_size, + int[] total_size, TF_Status status); + // Interprets the named kernel construction attribute as a TF_DataType and // places it into *val. *status is set to TF_OK. // @@ -3248,6 +3341,273 @@ public static native void TF_OpKernelConstruction_GetAttrInt32( TF_OpKernelConstruction ctx, String attr_name, int[] val, TF_Status status); +// Interprets the named kernel construction attribute as int64_t and +// places it into *val. *status is set to TF_OK. +// +// If the attribute could not be found or could not be interpreted as +// int64, *status is populated with an error. 
+public static native void TF_OpKernelConstruction_GetAttrInt64( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") LongPointer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt64( + TF_OpKernelConstruction ctx, String attr_name, @Cast("int64_t*") LongBuffer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt64( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") long[] val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt64( + TF_OpKernelConstruction ctx, String attr_name, @Cast("int64_t*") LongPointer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt64( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") LongBuffer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt64( + TF_OpKernelConstruction ctx, String attr_name, @Cast("int64_t*") long[] val, + TF_Status status); + +// Interprets the named kernel construction attribute as float and +// places it into *val. *status is set to TF_OK. +// +// If the attribute could not be found or could not be interpreted as +// float, *status is populated with an error. +public static native void TF_OpKernelConstruction_GetAttrFloat( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, FloatPointer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrFloat( + TF_OpKernelConstruction ctx, String attr_name, FloatBuffer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrFloat( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, float[] val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrFloat( + TF_OpKernelConstruction ctx, String attr_name, FloatPointer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrFloat( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, FloatBuffer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrFloat( + TF_OpKernelConstruction ctx, String attr_name, float[] val, + TF_Status status); + +// Interprets the named kernel construction attribute as bool and +// places it into *val. *status is set to TF_OK. +// +// If the attribute could not be found or could not be interpreted as +// bool, *status is populated with an error. 
+public static native void TF_OpKernelConstruction_GetAttrBool( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") BytePointer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrBool( + TF_OpKernelConstruction ctx, String attr_name, @Cast("unsigned char*") ByteBuffer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrBool( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") byte[] val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrBool( + TF_OpKernelConstruction ctx, String attr_name, @Cast("unsigned char*") BytePointer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrBool( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") ByteBuffer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrBool( + TF_OpKernelConstruction ctx, String attr_name, @Cast("unsigned char*") byte[] val, + TF_Status status); + +// Interprets the named kernel construction attribute as string and +// places it into *val. `val` must +// point to an array of length at least `max_length` (ideally set to +// total_size from TF_OpKernelConstruction_GetAttrSize(ctx, +// attr_name, list_size, total_size)). *status is set to TF_OK. +// +// If the attribute could not be found or could not be interpreted as +// string, *status is populated with an error. +public static native void TF_OpKernelConstruction_GetAttrString( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("char*") BytePointer val, + @Cast("size_t") long max_length, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrString( + TF_OpKernelConstruction ctx, String attr_name, @Cast("char*") ByteBuffer val, + @Cast("size_t") long max_length, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrString( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("char*") byte[] val, + @Cast("size_t") long max_length, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrString( + TF_OpKernelConstruction ctx, String attr_name, @Cast("char*") BytePointer val, + @Cast("size_t") long max_length, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrString( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("char*") ByteBuffer val, + @Cast("size_t") long max_length, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrString( + TF_OpKernelConstruction ctx, String attr_name, @Cast("char*") byte[] val, + @Cast("size_t") long max_length, TF_Status status); + +// Interprets the named kernel construction attribute as a TF_DataType array and +// places it into *vals. *status is set to TF_OK. +// `vals` must point to an array of length at least `max_values` (ideally set +// to list_size from +// TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size, +// total_size)). 
+public static native void TF_OpKernelConstruction_GetAttrTypeList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("TF_DataType*") IntPointer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrTypeList( + TF_OpKernelConstruction ctx, String attr_name, @Cast("TF_DataType*") IntBuffer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrTypeList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("TF_DataType*") int[] vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrTypeList( + TF_OpKernelConstruction ctx, String attr_name, @Cast("TF_DataType*") IntPointer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrTypeList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("TF_DataType*") IntBuffer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrTypeList( + TF_OpKernelConstruction ctx, String attr_name, @Cast("TF_DataType*") int[] vals, + int max_vals, TF_Status status); + +// Interprets the named kernel construction attribute as int32_t array and +// places it into *vals. *status is set to TF_OK. +// `vals` must point to an array of length at least `max_values` (ideally set +// to list_size from +// TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size, +// total_size)). +public static native void TF_OpKernelConstruction_GetAttrInt32List( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, IntPointer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt32List( + TF_OpKernelConstruction ctx, String attr_name, IntBuffer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt32List( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, int[] vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt32List( + TF_OpKernelConstruction ctx, String attr_name, IntPointer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt32List( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, IntBuffer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt32List( + TF_OpKernelConstruction ctx, String attr_name, int[] vals, + int max_vals, TF_Status status); + +// Interprets the named kernel construction attribute as int64_t array and +// places it into *vals. *status is set to TF_OK. +// `vals` must point to an array of length at least `max_values` (ideally set +// to list_size from +// TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size, +// total_size)). 
+public static native void TF_OpKernelConstruction_GetAttrInt64List( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") LongPointer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt64List( + TF_OpKernelConstruction ctx, String attr_name, @Cast("int64_t*") LongBuffer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt64List( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") long[] vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt64List( + TF_OpKernelConstruction ctx, String attr_name, @Cast("int64_t*") LongPointer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt64List( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") LongBuffer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrInt64List( + TF_OpKernelConstruction ctx, String attr_name, @Cast("int64_t*") long[] vals, + int max_vals, TF_Status status); + +// Interprets the named kernel construction attribute as float array and +// places it into *vals. *status is set to TF_OK. +// `vals` must point to an array of length at least `max_values` (ideally set +// to list_size from +// TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size, +// total_size)). +public static native void TF_OpKernelConstruction_GetAttrFloatList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, FloatPointer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrFloatList( + TF_OpKernelConstruction ctx, String attr_name, FloatBuffer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrFloatList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, float[] vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrFloatList( + TF_OpKernelConstruction ctx, String attr_name, FloatPointer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrFloatList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, FloatBuffer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrFloatList( + TF_OpKernelConstruction ctx, String attr_name, float[] vals, + int max_vals, TF_Status status); + +// Interprets the named kernel construction attribute as bool array and +// places it into *vals. *status is set to TF_OK. +// `vals` must point to an array of length at least `max_values` (ideally set +// to list_size from +// TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size, +// total_size)). 
+public static native void TF_OpKernelConstruction_GetAttrBoolList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") BytePointer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrBoolList( + TF_OpKernelConstruction ctx, String attr_name, @Cast("unsigned char*") ByteBuffer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrBoolList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") byte[] vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrBoolList( + TF_OpKernelConstruction ctx, String attr_name, @Cast("unsigned char*") BytePointer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrBoolList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") ByteBuffer vals, + int max_vals, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrBoolList( + TF_OpKernelConstruction ctx, String attr_name, @Cast("unsigned char*") byte[] vals, + int max_vals, TF_Status status); + +// Interprets the named kernel construction attribute as string array and fills +// in `vals` and `lengths`, each of which must point to an array of length at +// least `max_values`. *status is set to TF_OK. The elements of values will +// point to addresses in `storage` which must be at least `storage_size` bytes +// in length. Ideally, max_values would be set to list_size and `storage` would +// be at least total_size, obtained from +// TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, list_size, +// total_size). +public static native void TF_OpKernelConstruction_GetAttrStringList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("char**") PointerPointer vals, + @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, @Cast("size_t") long storage_size, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrStringList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("char**") @ByPtrPtr BytePointer vals, + @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, @Cast("size_t") long storage_size, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrStringList( + TF_OpKernelConstruction ctx, String attr_name, @Cast("char**") @ByPtrPtr ByteBuffer vals, + @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, @Cast("size_t") long storage_size, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrStringList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("char**") @ByPtrPtr byte[] vals, + @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, @Cast("size_t") long storage_size, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrStringList( + TF_OpKernelConstruction ctx, String attr_name, @Cast("char**") @ByPtrPtr BytePointer vals, + @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, @Cast("size_t") long storage_size, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrStringList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("char**") @ByPtrPtr ByteBuffer vals, + @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, 
@Cast("size_t") long storage_size, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrStringList( + TF_OpKernelConstruction ctx, String attr_name, @Cast("char**") @ByPtrPtr byte[] vals, + @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, @Cast("size_t") long storage_size, + TF_Status status); + +// Return true if the kernel construction has the attr_name +public static native @Cast("bool") boolean TF_OpKernelConstruction_HasAttr( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, TF_Status status); +public static native @Cast("bool") boolean TF_OpKernelConstruction_HasAttr( + TF_OpKernelConstruction ctx, String attr_name, TF_Status status); + // Returns the unique operation name for this OpKernel. public static native @ByVal TF_StringView TF_OpKernelConstruction_GetName( TF_OpKernelConstruction ctx); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java index aaeee6f00f8..346216e935b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java @@ -21,11 +21,13 @@ import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; import org.tensorflow.Output; +import org.tensorflow.op.Operands; import org.tensorflow.op.RawOp; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; /** * Mutually accumulates multiple tensors of identical type and shape. @@ -54,6 +56,7 @@ private GatherV2(Operation operation) { * @param groupSize the groupSize value * @param groupKey the groupKey value * @param instanceKey the instanceKey value + * @param orderingToken the orderingToken value * @param options carries optional attribute values * @param data type for {@code CollectiveGatherV2} output and operands * @return a new instance of GatherV2 @@ -63,12 +66,13 @@ private GatherV2(Operation operation) { ) public static GatherV2 create(Scope scope, Operand input, Operand groupSize, Operand groupKey, Operand instanceKey, - Options... options) { + Iterable> orderingToken, Options... options) { OperationBuilder opBuilder = scope.env().opBuilder("CollectiveGatherV2", scope.makeOpName("GatherV2")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(groupSize.asOutput()); opBuilder.addInput(groupKey.asOutput()); opBuilder.addInput(instanceKey.asOutput()); + opBuilder.addInputList(Operands.asOutputs(orderingToken)); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { @@ -78,6 +82,9 @@ public static GatherV2 create(Scope scope, Operand inp if (opts.timeoutSeconds != null) { opBuilder.setAttr("timeout_seconds", opts.timeoutSeconds); } + if (opts.NorderingToken != null) { + opBuilder.setAttr("Nordering_token", opts.NorderingToken); + } } } return new GatherV2<>(opBuilder.build()); @@ -103,6 +110,16 @@ public static Options timeoutSeconds(Float timeoutSeconds) { return new Options().timeoutSeconds(timeoutSeconds); } + /** + * Sets the NorderingToken option. + * + * @param NorderingToken the NorderingToken option + * @return this Options instance. 
+ */ + public static Options NorderingToken(Long NorderingToken) { + return new Options().NorderingToken(NorderingToken); + } + /** * Gets data. * @@ -125,6 +142,8 @@ public static class Options { private Float timeoutSeconds; + private Long NorderingToken; + private Options() { } @@ -149,5 +168,16 @@ public Options timeoutSeconds(Float timeoutSeconds) { this.timeoutSeconds = timeoutSeconds; return this; } + + /** + * Sets the NorderingToken option. + * + * @param NorderingToken the NorderingToken option + * @return this Options instance. + */ + public Options NorderingToken(Long NorderingToken) { + this.NorderingToken = NorderingToken; + return this; + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java index 33c7a1da9f7..f6b7321ac66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java @@ -21,11 +21,13 @@ import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; import org.tensorflow.Output; +import org.tensorflow.op.Operands; import org.tensorflow.op.RawOp; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; /** * Mutually reduces multiple tensors of identical type and shape. @@ -54,6 +56,7 @@ private ReduceV2(Operation operation) { * @param groupSize the groupSize value * @param groupKey the groupKey value * @param instanceKey the instanceKey value + * @param orderingToken the orderingToken value * @param mergeOp the value of the mergeOp property * @param finalOp the value of the finalOp property * @param options carries optional attribute values @@ -65,12 +68,14 @@ private ReduceV2(Operation operation) { ) public static ReduceV2 create(Scope scope, Operand input, Operand groupSize, Operand groupKey, Operand instanceKey, - String mergeOp, String finalOp, Options... options) { + Iterable> orderingToken, String mergeOp, String finalOp, + Options... options) { OperationBuilder opBuilder = scope.env().opBuilder("CollectiveReduceV2", scope.makeOpName("ReduceV2")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(groupSize.asOutput()); opBuilder.addInput(groupKey.asOutput()); opBuilder.addInput(instanceKey.asOutput()); + opBuilder.addInputList(Operands.asOutputs(orderingToken)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("merge_op", mergeOp); opBuilder.setAttr("final_op", finalOp); @@ -82,6 +87,9 @@ public static ReduceV2 create(Scope scope, Operand inp if (opts.timeoutSeconds != null) { opBuilder.setAttr("timeout_seconds", opts.timeoutSeconds); } + if (opts.NorderingToken != null) { + opBuilder.setAttr("Nordering_token", opts.NorderingToken); + } } } return new ReduceV2<>(opBuilder.build()); @@ -107,6 +115,16 @@ public static Options timeoutSeconds(Float timeoutSeconds) { return new Options().timeoutSeconds(timeoutSeconds); } + /** + * Sets the NorderingToken option. + * + * @param NorderingToken the NorderingToken option + * @return this Options instance. + */ + public static Options NorderingToken(Long NorderingToken) { + return new Options().NorderingToken(NorderingToken); + } + /** * Gets data. 
* @@ -129,6 +147,8 @@ public static class Options { private Float timeoutSeconds; + private Long NorderingToken; + private Options() { } @@ -153,5 +173,16 @@ public Options timeoutSeconds(Float timeoutSeconds) { this.timeoutSeconds = timeoutSeconds; return this; } + + /** + * Sets the NorderingToken option. + * + * @param NorderingToken the NorderingToken option + * @return this Options instance. + */ + public Options NorderingToken(Long NorderingToken) { + this.NorderingToken = NorderingToken; + return this; + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java index 529ba4c4ac1..eb3cf4e2c03 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java @@ -84,6 +84,9 @@ public static InitializeTableFromTextFile create(Scope scope, if (opts.delimiter != null) { opBuilder.setAttr("delimiter", opts.delimiter); } + if (opts.offset != null) { + opBuilder.setAttr("offset", opts.offset); + } } } return new InitializeTableFromTextFile(opBuilder.build()); @@ -109,6 +112,16 @@ public static Options delimiter(String delimiter) { return new Options().delimiter(delimiter); } + /** + * Sets the offset option. + * + * @param offset the offset option + * @return this Options instance. + */ + public static Options offset(Long offset) { + return new Options().offset(offset); + } + /** * Optional attributes for {@link org.tensorflow.op.core.InitializeTableFromTextFile} */ @@ -117,6 +130,8 @@ public static class Options { private String delimiter; + private Long offset; + private Options() { } @@ -141,5 +156,16 @@ public Options delimiter(String delimiter) { this.delimiter = delimiter; return this; } + + /** + * Sets the offset option. + * + * @param offset the offset option + * @return this Options instance. + */ + public Options offset(Long offset) { + this.offset = offset; + return this; + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java index 9fc6cc0c0b0..c7cc00f87cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java @@ -29,9 +29,8 @@ import org.tensorflow.types.family.TType; /** - *
      * Adds v into specified rows of x.
    - *
    + * 
      * Computes y = x; y[i, :] += v; return y.
      * 
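As a concrete illustration of the {@code y = x; y[i, :] += v} semantics described above, a hedged sketch against the generated endpoint (eager setup assumed):

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

public class InplaceAddSketch {
  public static void main(String[] args) {
    Ops tf = Ops.create();
    Operand<TFloat32> x = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}, {5f, 6f}});
    Operand<TInt32> i = tf.constant(new int[] {0, 2});                        // rows of x to update
    Operand<TFloat32> v = tf.constant(new float[][] {{10f, 10f}, {20f, 20f}});
    Operand<TFloat32> y = tf.inplaceAdd(x, i, v);
    // Expected y: [[11, 12], [3, 4], [25, 26]]
  }
}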
    * diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RemoteFusedGraphExecute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RemoteFusedGraphExecute.java deleted file mode 100644 index fda684ab296..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RemoteFusedGraphExecute.java +++ /dev/null @@ -1,99 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.core; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; -import org.tensorflow.types.family.TType; - -/** - * Execute a sub graph on a remote processor. - * The graph specifications(such as graph itself, input tensors and output names) - * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo - * as serialized_remote_fused_graph_execute_info. - * The specifications will be passed to a dedicated registered - * remote fused graph executor. The executor will send the graph specifications - * to a remote processor and execute that graph. The execution results - * will be passed to consumer nodes as outputs of this node. - */ -@Operator -public final class RemoteFusedGraphExecute extends RawOp implements Iterable> { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RemoteFusedGraphExecute"; - - private List> outputs; - - @SuppressWarnings("unchecked") - private RemoteFusedGraphExecute(Operation operation) { - super(operation); - int outputIdx = 0; - int outputsLength = operation.outputListLength("outputs"); - outputs = Arrays.asList(operation.outputList(outputIdx, outputsLength)); - outputIdx += outputsLength; - } - - /** - * Factory method to create a class wrapping a new RemoteFusedGraphExecute operation. - * - * @param scope current scope - * @param inputs Arbitrary number of tensors with arbitrary data types - * @param Toutputs the value of the Toutputs property - * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer - * of RemoteFusedGraphExecuteInfo which contains graph specifications. 
- * @return a new instance of RemoteFusedGraphExecute - */ - @Endpoint( - describeByClass = true - ) - public static RemoteFusedGraphExecute create(Scope scope, Iterable> inputs, - List> Toutputs, String serializedRemoteFusedGraphExecuteInfo) { - OperationBuilder opBuilder = scope.env().opBuilder("RemoteFusedGraphExecute", scope.makeOpName("RemoteFusedGraphExecute")); - opBuilder.addInputList(Operands.asOutputs(inputs)); - opBuilder = scope.apply(opBuilder); - opBuilder.setAttr("Toutputs", Operands.toDataTypes(Toutputs)); - opBuilder.setAttr("serialized_remote_fused_graph_execute_info", serializedRemoteFusedGraphExecuteInfo); - return new RemoteFusedGraphExecute(opBuilder.build()); - } - - /** - * Gets outputs. - * Arbitrary number of tensors with arbitrary data types - * @return outputs. - */ - public List> outputs() { - return outputs; - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public Iterator> iterator() { - return (Iterator) outputs.iterator(); - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rpc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rpc.java deleted file mode 100644 index 248a76e6104..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rpc.java +++ /dev/null @@ -1,228 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.core; - -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; -import org.tensorflow.types.TString; - -/** - * Perform batches of RPC requests. - * This op asynchronously performs either a single RPC request, or a batch - * of requests. RPC requests are defined by three main parameters: - *
      - *
    • {@code address} (the host+port or BNS address of the request)
    • - *
    • {@code method} (the RPC method name for the request)
    • - *
    • {@code request} (the serialized proto string, or vector of strings, - * of the RPC request argument).
    • - *
    - *

    For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: - *

    - * service MyService {
    - *   rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
    - *   }
    - * };
    - * 
    - *

    then call this op with arguments: - *

    - * address = "localhost:2345"
    - * method = "MyService/MyMethod"
    - * 
    - *

    The {@code request} tensor is a string tensor representing serialized {@code MyRequestProto} - * strings; and the output string tensor {@code response} will have the same shape - * and contain (upon successful completion) corresponding serialized - * {@code MyResponseProto} strings. - *

    For example, to send a single, empty, {@code MyRequestProto}, call - * this op with {@code request = ""}. To send 5 parallel empty requests, - * call this op with {@code request = ["", "", "", "", ""]}. - *

    More generally, one can create a batch of {@code MyRequestProto} serialized protos - * from regular batched tensors using the {@code encode_proto} op, and convert - * the response {@code MyResponseProto} serialized protos to batched tensors - * using the {@code decode_proto} op. - *

    NOTE Working with serialized proto strings is faster than instantiating - * actual proto objects in memory, so no performance degradation is expected - * compared to writing custom kernels for this workflow. - *

    If the connection fails or the remote worker returns an error - * status, the op reraises this exception locally. - *

    See the {@code TryRpc} op if you prefer to handle RPC failures manually in the graph. - */ -@Operator -public final class Rpc extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "Rpc"; - - private Output response; - - private Rpc(Operation operation) { - super(operation); - int outputIdx = 0; - response = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new Rpc operation. - * - * @param scope current scope - * @param address {@code 0-D} or {@code 1-D}. The address (i.e. host_name:port) of the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code method} and {@code request}. - * @param method {@code 0-D} or {@code 1-D}. The method address on the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code address} and {@code request}. - * @param request {@code 0-D} or {@code 1-D}. Serialized proto strings: the rpc request argument. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code address} and {@code method}. - * @param options carries optional attribute values - * @return a new instance of Rpc - */ - @Endpoint( - describeByClass = true - ) - public static Rpc create(Scope scope, Operand address, Operand method, - Operand request, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Rpc", scope.makeOpName("Rpc")); - opBuilder.addInput(address.asOutput()); - opBuilder.addInput(method.asOutput()); - opBuilder.addInput(request.asOutput()); - opBuilder = scope.apply(opBuilder); - if (options != null) { - for (Options opts : options) { - if (opts.protocol != null) { - opBuilder.setAttr("protocol", opts.protocol); - } - if (opts.failFast != null) { - opBuilder.setAttr("fail_fast", opts.failFast); - } - if (opts.timeoutInMs != null) { - opBuilder.setAttr("timeout_in_ms", opts.timeoutInMs); - } - } - } - return new Rpc(opBuilder.build()); - } - - /** - * Sets the protocol option. - * - * @param protocol RPC protocol to use. Empty string means use the default protocol. - * Options include 'grpc'. - * @return this Options instance. - */ - public static Options protocol(String protocol) { - return new Options().protocol(protocol); - } - - /** - * Sets the failFast option. - * - * @param failFast {@code boolean}. If {@code true} (default), then failures to connect - * (i.e., the server does not immediately respond) cause an RPC failure. - * @return this Options instance. - */ - public static Options failFast(Boolean failFast) { - return new Options().failFast(failFast); - } - - /** - * Sets the timeoutInMs option. - * - * @param timeoutInMs {@code int}. If {@code 0} (default), then the kernel will run the RPC - * request and only time out if the RPC deadline passes or the session times out. - * If this value is greater than {@code 0}, then the op will raise an exception if - * the RPC takes longer than {@code timeout_in_ms}. - * @return this Options instance. - */ - public static Options timeoutInMs(Long timeoutInMs) { - return new Options().timeoutInMs(timeoutInMs); - } - - /** - * Gets response. - * Same shape as {@code request}. Serialized proto strings: the rpc responses. - * @return response. 
- */ - public Output response() { - return response; - } - - @Override - public Output asOutput() { - return response; - } - - /** - * Optional attributes for {@link org.tensorflow.op.core.Rpc} - */ - public static class Options { - private String protocol; - - private Boolean failFast; - - private Long timeoutInMs; - - private Options() { - } - - /** - * Sets the protocol option. - * - * @param protocol RPC protocol to use. Empty string means use the default protocol. - * Options include 'grpc'. - * @return this Options instance. - */ - public Options protocol(String protocol) { - this.protocol = protocol; - return this; - } - - /** - * Sets the failFast option. - * - * @param failFast {@code boolean}. If {@code true} (default), then failures to connect - * (i.e., the server does not immediately respond) cause an RPC failure. - * @return this Options instance. - */ - public Options failFast(Boolean failFast) { - this.failFast = failFast; - return this; - } - - /** - * Sets the timeoutInMs option. - * - * @param timeoutInMs {@code int}. If {@code 0} (default), then the kernel will run the RPC - * request and only time out if the RPC deadline passes or the session times out. - * If this value is greater than {@code 0}, then the op will raise an exception if - * the RPC takes longer than {@code timeout_in_ms}. - * @return this Options instance. - */ - public Options timeoutInMs(Long timeoutInMs) { - this.timeoutInMs = timeoutInMs; - return this; - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java index 521be5199ec..2ae5a7b5431 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java @@ -37,7 +37,39 @@ * in the graph it inputs are masked from the gradient generator. They are not * taken into account for computing gradients. *

    This is useful any time you want to compute a value with TensorFlow but need - * to pretend that the value was a constant. Some examples include: + * to pretend that the value was a constant. For example, the softmax function + * for a vector x can be written as + *

    + *
    + *   def softmax(x):
    + *     numerator = tf.exp(x)
    + *     denominator = tf.reduce_sum(numerator)
    + *     return numerator / denominator
    + * 
    + *

    This however is susceptible to overflow if the values in x are large. An + * alternative more stable way is to subtract the maximum of x from each of the + * values. + *

    + *
    + *   def stable_softmax(x):
    + *     z = x - tf.reduce_max(x)
    + *     numerator = tf.exp(z)
    + *     denominator = tf.reduce_sum(numerator)
    + *     return numerator / denominator
    + * 
    + *

However, when we backprop through the softmax to x, we don't want to backprop + * through the {@code tf.reduce_max(x)} calculation (if the max values are not unique, + * the gradient could flow to the wrong input); instead we want to treat it as a + * constant. Therefore, we should write this out as + *

    + *
    + *   def stable_softmax(x):
    + *     z = x - tf.stop_gradient(tf.reduce_max(x))
    + *     numerator = tf.exp(z)
    + *     denominator = tf.reduce_sum(numerator)
    + *     return numerator / denominator
    + * 
    + *
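The same pattern can be written with the Java API; the snippet below is only a sketch, assuming an {@code Ops} instance named {@code tf} (from {@code org.tensorflow.op.Ops}) together with {@code org.tensorflow.Operand} and {@code org.tensorflow.types.TFloat32}, and using axis {@code 0} purely for illustration:

    // Numerically stable softmax over a 1-D float vector; the max is wrapped in
    // stopGradient so no gradient flows through the reduce-max, mirroring the
    // Python sketch above.
    static Operand<TFloat32> stableSoftmax(Ops tf, Operand<TFloat32> x) {
      Operand<TFloat32> max = tf.reduceMax(x, tf.constant(0));
      Operand<TFloat32> z = tf.math.sub(x, tf.stopGradient(max));
      Operand<TFloat32> numerator = tf.math.exp(z);
      Operand<TFloat32> denominator = tf.reduceSum(numerator, tf.constant(0));
      return tf.math.div(numerator, denominator);
    }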

    Some other examples include: *

      *
    • The EM algorithm where the M-step should not involve backpropagation * through the output of the E-step.
    • diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java index 1dab0aa5619..28fe76db26b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java @@ -134,7 +134,7 @@ public static Options clearAfterRead(Boolean clearAfterRead) { * Sets the identicalElementShapes option. * * @param identicalElementShapes If true (default is false), then all - * elements in the TensorArray will be expected to have have identical shapes. + * elements in the TensorArray will be expected to have identical shapes. * This allows certain behaviors, like dynamically checking for * consistent shapes on write, and being able to fill in properly * shaped zero tensors on stack -- even if the element_shape attribute @@ -234,7 +234,7 @@ public Options clearAfterRead(Boolean clearAfterRead) { * Sets the identicalElementShapes option. * * @param identicalElementShapes If true (default is false), then all - * elements in the TensorArray will be expected to have have identical shapes. + * elements in the TensorArray will be expected to have identical shapes. * This allows certain behaviors, like dynamically checking for * consistent shapes on write, and being able to fill in properly * shaped zero tensors on stack -- even if the element_shape attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreePredict.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreePredict.java deleted file mode 100644 index da8cf52fc05..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreePredict.java +++ /dev/null @@ -1,82 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.core; - -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.family.TType; - -/** - * Output the logits for the given input data - */ -public final class TensorForestTreePredict extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "TensorForestTreePredict"; - - private Output logits; - - private TensorForestTreePredict(Operation operation) { - super(operation); - int outputIdx = 0; - logits = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new TensorForestTreePredict operation. - * - * @param scope current scope - * @param treeHandle Handle to the tree resource. - * @param denseFeatures Rank 2 dense features tensor. - * @param logitsDimension Scalar, dimension of the logits. - * @return a new instance of TensorForestTreePredict - */ - @Endpoint( - describeByClass = true - ) - public static TensorForestTreePredict create(Scope scope, Operand treeHandle, - Operand denseFeatures, Long logitsDimension) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorForestTreePredict", scope.makeOpName("TensorForestTreePredict")); - opBuilder.addInput(treeHandle.asOutput()); - opBuilder.addInput(denseFeatures.asOutput()); - opBuilder = scope.apply(opBuilder); - opBuilder.setAttr("logits_dimension", logitsDimension); - return new TensorForestTreePredict(opBuilder.build()); - } - - /** - * Gets logits. - * The logits predictions from the tree for each instance in the batch. - * @return logits. - */ - public Output logits() { - return logits; - } - - @Override - public Output asOutput() { - return logits; - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeResourceHandleOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeResourceHandleOp.java deleted file mode 100644 index 645b79e00c1..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeResourceHandleOp.java +++ /dev/null @@ -1,141 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.core; - -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.types.family.TType; - -/** - * Creates a handle to a TensorForestTreeResource - */ -public final class TensorForestTreeResourceHandleOp extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "TensorForestTreeResourceHandleOp"; - - private Output resource; - - @SuppressWarnings("unchecked") - private TensorForestTreeResourceHandleOp(Operation operation) { - super(operation); - int outputIdx = 0; - resource = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new TensorForestTreeResourceHandleOp operation. - * - * @param scope current scope - * @param options carries optional attribute values - * @return a new instance of TensorForestTreeResourceHandleOp - */ - @Endpoint( - describeByClass = true - ) - public static TensorForestTreeResourceHandleOp create(Scope scope, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorForestTreeResourceHandleOp", scope.makeOpName("TensorForestTreeResourceHandleOp")); - opBuilder = scope.apply(opBuilder); - if (options != null) { - for (Options opts : options) { - if (opts.container != null) { - opBuilder.setAttr("container", opts.container); - } - if (opts.sharedName != null) { - opBuilder.setAttr("shared_name", opts.sharedName); - } - } - } - return new TensorForestTreeResourceHandleOp(opBuilder.build()); - } - - /** - * Sets the container option. - * - * @param container the container option - * @return this Options instance. - */ - public static Options container(String container) { - return new Options().container(container); - } - - /** - * Sets the sharedName option. - * - * @param sharedName the sharedName option - * @return this Options instance. - */ - public static Options sharedName(String sharedName) { - return new Options().sharedName(sharedName); - } - - /** - * Gets resource. - * - * @return resource. - */ - public Output resource() { - return resource; - } - - @Override - @SuppressWarnings("unchecked") - public Output asOutput() { - return (Output) resource; - } - - /** - * Optional attributes for {@link org.tensorflow.op.core.TensorForestTreeResourceHandleOp} - */ - public static class Options { - private String container; - - private String sharedName; - - private Options() { - } - - /** - * Sets the container option. - * - * @param container the container option - * @return this Options instance. - */ - public Options container(String container) { - this.container = container; - return this; - } - - /** - * Sets the sharedName option. - * - * @param sharedName the sharedName option - * @return this Options instance. 
- */ - public Options sharedName(String sharedName) { - this.sharedName = sharedName; - return this; - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java index 8627f1eab1b..0e74b1a7745 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java @@ -29,8 +29,8 @@ import org.tensorflow.types.TInt32; /** - * Returns the TopK unique values in the array in sorted order. The - * running time is proportional to the product of K and the input + * Returns the TopK unique values in the array in sorted order. + * The running time is proportional to the product of K and the input * size. Sorting the whole array is more efficient for sufficiently large * values of K. The median-of-medians algorithm is probably faster, but * difficult to implement efficiently in XLA. If there are fewer than K diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java index b984e2741f7..89b0c4ea5dc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java @@ -29,11 +29,12 @@ import org.tensorflow.types.TInt32; /** - * Returns the TopK values in the array in sorted order. This is a combination - * of MakeUnique and TopKUnique. The returned top-K will have its lower bits - * replaced by iota, thus it will be close to the original value but not exactly - * the same. The running time is proportional to the product of K and the input - * size. NaNs are never returned. Subnormal numbers are flushed to zero. + * Returns the TopK values in the array in sorted order. + * This is a combination of MakeUnique and TopKUnique. The returned top-K will + * have its lower bits replaced by iota, thus it will be close to the original + * value but not exactly the same. The running time is proportional to the product + * of K and the input size. NaNs are never returned. Subnormal numbers are flushed + * to zero. */ @Operator public final class TopKWithUnique extends RawOp { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TryRpc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TryRpc.java deleted file mode 100644 index b41e0204f36..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TryRpc.java +++ /dev/null @@ -1,252 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.core; - -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.TString; - -/** - * Perform batches of RPC requests. - * This op asynchronously performs either a single RPC request, or a batch - * of requests. RPC requests are defined by three main parameters: - *
        - *
      • {@code address} (the host+port or BNS address of the request)
      • - *
      • {@code method} (the method name for the request)
      • - *
      • {@code request} (the serialized proto string, or vector of strings, - * of the RPC request argument).
      • - *
      - *

      For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: - *

      - * service MyService {
      - *   rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
      - *   }
      - * };
      - * 
      - *

      then call this op with arguments: - *

      - * address = "localhost:2345"
      - * method = "MyService/MyMethod"
      - * 
      - *

      The {@code request} tensor is a string tensor representing serialized {@code MyRequestProto} - * strings; and the output string tensor {@code response} will have the same shape - * and contain (upon successful completion) corresponding serialized - * {@code MyResponseProto} strings. - *

      For example, to send a single, empty, {@code MyRequestProto}, call - * this op with {@code request = ""}. To send 5 parallel empty requests, - * call this op with {@code request = ["", "", "", "", ""]}. - *

      More generally, one can create a batch of {@code MyRequestProto} serialized protos - * from regular batched tensors using the {@code encode_proto} op, and convert - * the response {@code MyResponseProto} serialized protos to batched tensors - * using the {@code decode_proto} op. - *

      NOTE Working with serialized proto strings is faster than instantiating - * actual proto objects in memory, so no performance degradation is expected - * compared to writing custom kernels for this workflow. - *

      Unlike the standard {@code Rpc} op, if the connection fails or the remote worker - * returns an error status, this op does not reraise the exception. - * Instead, the {@code status_code} and {@code status_message} entry for the corresponding RPC - * call is set with the error returned from the RPC call. The {@code response} tensor - * will contain valid response values for those minibatch entries whose RPCs did - * not fail; the rest of the entries will have empty strings. - */ -@Operator -public final class TryRpc extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "TryRpc"; - - private Output response; - - private Output statusCode; - - private Output statusMessage; - - private TryRpc(Operation operation) { - super(operation); - int outputIdx = 0; - response = operation.output(outputIdx++); - statusCode = operation.output(outputIdx++); - statusMessage = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new TryRpc operation. - * - * @param scope current scope - * @param address {@code 0-D} or {@code 1-D}. The address (i.e. host_name:port) of the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code method} and {@code request}. - * @param method {@code 0-D} or {@code 1-D}. The method address on the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code address} and {@code request}. - * @param request {@code 0-D} or {@code 1-D}. Serialized proto strings: the rpc request argument. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with {@code address} and {@code method}. - * @param options carries optional attribute values - * @return a new instance of TryRpc - */ - @Endpoint( - describeByClass = true - ) - public static TryRpc create(Scope scope, Operand address, Operand method, - Operand request, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TryRpc", scope.makeOpName("TryRpc")); - opBuilder.addInput(address.asOutput()); - opBuilder.addInput(method.asOutput()); - opBuilder.addInput(request.asOutput()); - opBuilder = scope.apply(opBuilder); - if (options != null) { - for (Options opts : options) { - if (opts.protocol != null) { - opBuilder.setAttr("protocol", opts.protocol); - } - if (opts.failFast != null) { - opBuilder.setAttr("fail_fast", opts.failFast); - } - if (opts.timeoutInMs != null) { - opBuilder.setAttr("timeout_in_ms", opts.timeoutInMs); - } - } - } - return new TryRpc(opBuilder.build()); - } - - /** - * Sets the protocol option. - * - * @param protocol RPC protocol to use. Empty string means use the default protocol. - * Options include 'grpc'. - * @return this Options instance. - */ - public static Options protocol(String protocol) { - return new Options().protocol(protocol); - } - - /** - * Sets the failFast option. - * - * @param failFast {@code boolean}. If {@code true} (default), then failures to connect - * (i.e., the server does not immediately respond) cause an RPC failure. - * @return this Options instance. - */ - public static Options failFast(Boolean failFast) { - return new Options().failFast(failFast); - } - - /** - * Sets the timeoutInMs option. - * - * @param timeoutInMs {@code int}. 
If {@code 0} (default), then the kernel will run the RPC - * request and only time out if the RPC deadline passes or the session times out. - * If this value is greater than {@code 0}, then the op will raise an exception if - * the RPC takes longer than {@code timeout_in_ms}. - * @return this Options instance. - */ - public static Options timeoutInMs(Long timeoutInMs) { - return new Options().timeoutInMs(timeoutInMs); - } - - /** - * Gets response. - * Same shape as {@code request}. Serialized proto strings: the rpc responses. - * @return response. - */ - public Output response() { - return response; - } - - /** - * Gets statusCode. - * Same shape as {@code request}. Values correspond to tensorflow Status enum codes. - * @return statusCode. - */ - public Output statusCode() { - return statusCode; - } - - /** - * Gets statusMessage. - * Same shape as {@code request}. Values correspond to Status messages - * returned from the RPC calls. - * @return statusMessage. - */ - public Output statusMessage() { - return statusMessage; - } - - /** - * Optional attributes for {@link org.tensorflow.op.core.TryRpc} - */ - public static class Options { - private String protocol; - - private Boolean failFast; - - private Long timeoutInMs; - - private Options() { - } - - /** - * Sets the protocol option. - * - * @param protocol RPC protocol to use. Empty string means use the default protocol. - * Options include 'grpc'. - * @return this Options instance. - */ - public Options protocol(String protocol) { - this.protocol = protocol; - return this; - } - - /** - * Sets the failFast option. - * - * @param failFast {@code boolean}. If {@code true} (default), then failures to connect - * (i.e., the server does not immediately respond) cause an RPC failure. - * @return this Options instance. - */ - public Options failFast(Boolean failFast) { - this.failFast = failFast; - return this; - } - - /** - * Sets the timeoutInMs option. - * - * @param timeoutInMs {@code int}. If {@code 0} (default), then the kernel will run the RPC - * request and only time out if the RPC deadline passes or the session times out. - * If this value is greater than {@code 0}, then the op will raise an exception if - * the RPC takes longer than {@code timeout_in_ms}. - * @return this Options instance. - */ - public Options timeoutInMs(Long timeoutInMs) { - this.timeoutInMs = timeoutInMs; - return this; - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java index 95de8a7d015..54a7e8b4e54 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java @@ -43,29 +43,29 @@ *

      {@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]} *

      For example: *

      - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
      - * y, idx, count = unique_with_counts(x)
      + * x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
      + * y, idx, count = UniqueWithCountsV2(x, axis = [0])
        * y ==> [1, 2, 4, 7, 8]
        * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
        * count ==> [2, 1, 3, 1, 2]
        * 
      - *

      For an {@code 2-D} tensor {@code x} with {@code axis = 0}: + *

      For a {@code 2-D} tensor {@code x} with {@code axis = 0}: *

      - * # tensor 'x' is [[1, 0, 0],
      - * #                [1, 0, 0],
      - * #                [2, 0, 0]]
      - * y, idx, count = unique_with_counts(x, axis=0)
      + * x = tf.constant([[1, 0, 0],
      + *                 [1, 0, 0],
      + *                 [2, 0, 0]])
      + * y, idx, count = UniqueWithCountsV2(x, axis=[0])
        * y ==> [[1, 0, 0],
        *        [2, 0, 0]]
        * idx ==> [0, 0, 1]
        * count ==> [2, 1]
        * 
      - *

      For an {@code 2-D} tensor {@code x} with {@code axis = 1}: + *

      For a {@code 2-D} tensor {@code x} with {@code axis = 1}: *

      - * # tensor 'x' is [[1, 0, 0],
      - * #                [1, 0, 0],
      - * #                [2, 0, 0]]
      - * y, idx, count = unique_with_counts(x, axis=1)
      + * x = tf.constant([[1, 0, 0],
      + *                 [1, 0, 0],
      + *                 [2, 0, 0]])
      + * y, idx, count = UniqueWithCountsV2(x, axis=[1])
        * y ==> [[1, 0],
        *        [1, 0],
        *        [2, 0]]
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaConvV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaConvV2.java
      new file mode 100644
      index 00000000000..529a0d092c6
      --- /dev/null
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaConvV2.java
      @@ -0,0 +1,108 @@
      +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +    http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +=======================================================================*/
      +
      +// This class has been generated, DO NOT EDIT!
      +
      +package org.tensorflow.op.core;
      +
      +import org.tensorflow.Operand;
      +import org.tensorflow.Operation;
      +import org.tensorflow.OperationBuilder;
      +import org.tensorflow.Output;
      +import org.tensorflow.op.Operands;
      +import org.tensorflow.op.RawOp;
      +import org.tensorflow.op.Scope;
      +import org.tensorflow.op.annotation.Endpoint;
      +import org.tensorflow.op.annotation.Operator;
      +import org.tensorflow.types.family.TNumber;
      +import org.tensorflow.types.family.TType;
      +
      +/**
      + * Wraps the XLA ConvGeneralDilated operator, documented at
      + * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
      + * .
      + *
      + * @param  data type for {@code output} output
      + */
      +@Operator
      +public final class XlaConvV2 extends RawOp implements Operand {
      +  /**
      +   * The name of this op, as known by TensorFlow core engine
      +   */
      +  public static final String OP_NAME = "XlaConvV2";
      +
      +  private Output output;
      +
      +  private XlaConvV2(Operation operation) {
      +    super(operation);
      +    int outputIdx = 0;
      +    output = operation.output(outputIdx++);
      +  }
      +
      +  /**
      +   * Factory method to create a class wrapping a new XlaConvV2 operation.
      +   *
      +   * @param scope current scope
      +   * @param lhs the input tensor
      +   * @param rhs the kernel tensor
      +   * @param windowStrides the inter-window strides
      +   * @param padding the padding to apply at the start and end of each input dimensions
      +   * @param lhsDilation dilation to apply between input elements
      +   * @param rhsDilation dilation to apply between kernel elements
      +   * @param featureGroupCount number of feature groups for grouped convolution.
      +   * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto.
      +   * @param precisionConfig a serialized xla::PrecisionConfig proto.
      +   * @param preferredElementType The type of the tensor.
      +   * @param  data type for {@code XlaConvV2} output and operands
      +   * @param  data type for {@code XlaConvV2} output and operands
      +   * @return a new instance of XlaConvV2
      +   */
      +  @Endpoint(
      +      describeByClass = true
      +  )
      +  public static  XlaConvV2 create(Scope scope,
      +      Operand lhs, Operand rhs, Operand windowStrides,
      +      Operand padding, Operand lhsDilation, Operand rhsDilation,
      +      Operand featureGroupCount, String dimensionNumbers, String precisionConfig,
      +      Class preferredElementType) {
      +    OperationBuilder opBuilder = scope.env().opBuilder("XlaConvV2", scope.makeOpName("XlaConvV2"));
      +    opBuilder.addInput(lhs.asOutput());
      +    opBuilder.addInput(rhs.asOutput());
      +    opBuilder.addInput(windowStrides.asOutput());
      +    opBuilder.addInput(padding.asOutput());
      +    opBuilder.addInput(lhsDilation.asOutput());
      +    opBuilder.addInput(rhsDilation.asOutput());
      +    opBuilder.addInput(featureGroupCount.asOutput());
      +    opBuilder = scope.apply(opBuilder);
      +    opBuilder.setAttr("dimension_numbers", dimensionNumbers);
      +    opBuilder.setAttr("precision_config", precisionConfig);
      +    opBuilder.setAttr("preferred_element_type", Operands.toDataType(preferredElementType));
      +    return new XlaConvV2<>(opBuilder.build());
      +  }
      +
      +  /**
      +   * Gets output.
      +   *
      +   * @return output.
      +   */
      +  public Output output() {
      +    return output;
      +  }
      +
      +  @Override
      +  public Output asOutput() {
      +    return output;
      +  }
      +}
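A minimal usage sketch for the factory added above (not part of the generated file): the class name and shapes are made up for illustration, and the empty {@code dimensionNumbers}/{@code precisionConfig} strings are placeholders; a real call must pass valid serialized {@code xla::ConvolutionDimensionNumbers} and {@code xla::PrecisionConfig} protos.

    import org.tensorflow.Graph;
    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.XlaConvV2;
    import org.tensorflow.types.TFloat32;
    import org.tensorflow.types.TInt32;

    public class XlaConvV2Sketch {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          Operand<TFloat32> lhs = tf.placeholder(TFloat32.class);   // input tensor
          Operand<TFloat32> rhs = tf.placeholder(TFloat32.class);   // kernel tensor
          Operand<TInt32> windowStrides = tf.constant(new int[] {1, 1});
          Operand<TInt32> padding = tf.constant(new int[][] {{0, 0}, {0, 0}});
          Operand<TInt32> lhsDilation = tf.constant(new int[] {1, 1});
          Operand<TInt32> rhsDilation = tf.constant(new int[] {1, 1});
          Operand<TInt32> featureGroupCount = tf.constant(1);
          // Placeholder attribute values; real calls need serialized protos here.
          String dimensionNumbers = "";
          String precisionConfig = "";
          XlaConvV2<TFloat32> conv = XlaConvV2.create(
              tf.scope(), lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation,
              featureGroupCount, dimensionNumbers, precisionConfig, TFloat32.class);
          Operand<TFloat32> output = conv.output();
        }
      }
    }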
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaDotV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaDotV2.java
      new file mode 100644
      index 00000000000..de90ac10d8e
      --- /dev/null
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaDotV2.java
      @@ -0,0 +1,94 @@
      +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +    http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +=======================================================================*/
      +
      +// This class has been generated, DO NOT EDIT!
      +
      +package org.tensorflow.op.core;
      +
      +import org.tensorflow.Operand;
      +import org.tensorflow.Operation;
      +import org.tensorflow.OperationBuilder;
      +import org.tensorflow.Output;
      +import org.tensorflow.op.Operands;
      +import org.tensorflow.op.RawOp;
      +import org.tensorflow.op.Scope;
      +import org.tensorflow.op.annotation.Endpoint;
      +import org.tensorflow.op.annotation.Operator;
      +import org.tensorflow.types.family.TType;
      +
      +/**
      + * Wraps the XLA DotGeneral operator, documented at
      + * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
      + * .
      + *
      + * @param  data type for {@code output} output
      + */
      +@Operator
      +public final class XlaDotV2 extends RawOp implements Operand {
      +  /**
      +   * The name of this op, as known by TensorFlow core engine
      +   */
      +  public static final String OP_NAME = "XlaDotV2";
      +
      +  private Output output;
      +
      +  private XlaDotV2(Operation operation) {
      +    super(operation);
      +    int outputIdx = 0;
      +    output = operation.output(outputIdx++);
      +  }
      +
      +  /**
      +   * Factory method to create a class wrapping a new XlaDotV2 operation.
      +   *
      +   * @param scope current scope
      +   * @param lhs the LHS tensor
      +   * @param rhs the RHS tensor
      +   * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto.
      +   * @param precisionConfig a serialized xla::PrecisionConfig proto.
      +   * @param preferredElementType The type of the tensor.
      +   * @param  data type for {@code XlaDotV2} output and operands
      +   * @return a new instance of XlaDotV2
      +   */
      +  @Endpoint(
      +      describeByClass = true
      +  )
      +  public static  XlaDotV2 create(Scope scope, Operand lhs,
      +      Operand rhs, String dimensionNumbers, String precisionConfig,
      +      Class preferredElementType) {
      +    OperationBuilder opBuilder = scope.env().opBuilder("XlaDotV2", scope.makeOpName("XlaDotV2"));
      +    opBuilder.addInput(lhs.asOutput());
      +    opBuilder.addInput(rhs.asOutput());
      +    opBuilder = scope.apply(opBuilder);
      +    opBuilder.setAttr("dimension_numbers", dimensionNumbers);
      +    opBuilder.setAttr("precision_config", precisionConfig);
      +    opBuilder.setAttr("preferred_element_type", Operands.toDataType(preferredElementType));
      +    return new XlaDotV2<>(opBuilder.build());
      +  }
      +
      +  /**
      +   * Gets output.
      +   *
      +   * @return output.
      +   */
      +  public Output output() {
      +    return output;
      +  }
      +
      +  @Override
      +  public Output asOutput() {
      +    return output;
      +  }
      +}
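Similarly, a sketch of the new {@code XlaDotV2} factory (again not generated code; the empty attribute strings stand in for serialized {@code xla::DotDimensionNumbers} and {@code xla::PrecisionConfig} protos, and the input values are arbitrary):

    import org.tensorflow.Graph;
    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.XlaDotV2;
    import org.tensorflow.types.TFloat32;

    public class XlaDotV2Sketch {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          Operand<TFloat32> lhs = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
          Operand<TFloat32> rhs = tf.constant(new float[][] {{5f, 6f}, {7f, 8f}});
          String dimensionNumbers = "";  // placeholder, not a valid serialized proto
          String precisionConfig = "";   // placeholder, not a valid serialized proto
          XlaDotV2<TFloat32> dot = XlaDotV2.create(
              tf.scope(), lhs, rhs, dimensionNumbers, precisionConfig, TFloat32.class);
          Operand<TFloat32> output = dot.output();
        }
      }
    }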
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSetDynamicDimensionSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSetDynamicDimensionSize.java
      new file mode 100644
      index 00000000000..0fcb7229afa
      --- /dev/null
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSetDynamicDimensionSize.java
      @@ -0,0 +1,91 @@
      +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +    http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +=======================================================================*/
      +
      +// This class has been generated, DO NOT EDIT!
      +
      +package org.tensorflow.op.core;
      +
      +import org.tensorflow.Operand;
      +import org.tensorflow.Operation;
      +import org.tensorflow.OperationBuilder;
      +import org.tensorflow.Output;
      +import org.tensorflow.op.RawOp;
      +import org.tensorflow.op.Scope;
      +import org.tensorflow.op.annotation.Endpoint;
      +import org.tensorflow.op.annotation.Operator;
      +import org.tensorflow.types.TInt32;
      +import org.tensorflow.types.family.TType;
      +
      +/**
+ * Make a static dimension into an XLA bounded dynamic dimension.
      + * 
      + *     The current static dimension size will become the bound and the second
      + *     operand becomes the dynamic size of the dimension.
      + * 
      + * + * @param data type for {@code output} output + */ +@Operator +public final class XlaSetDynamicDimensionSize extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSetDynamicDimensionSize"; + + private Output output; + + private XlaSetDynamicDimensionSize(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSetDynamicDimensionSize operation. + * + * @param scope current scope + * @param input the input value + * @param dimIndex the dimIndex value + * @param sizeOutput the sizeOutput value + * @param data type for {@code XlaSetDynamicDimensionSize} output and operands + * @return a new instance of XlaSetDynamicDimensionSize + */ + @Endpoint( + describeByClass = true + ) + public static XlaSetDynamicDimensionSize create(Scope scope, + Operand input, Operand dimIndex, Operand sizeOutput) { + OperationBuilder opBuilder = scope.env().opBuilder("XlaSetDynamicDimensionSize", scope.makeOpName("XlaSetDynamicDimensionSize")); + opBuilder.addInput(input.asOutput()); + opBuilder.addInput(dimIndex.asOutput()); + opBuilder.addInput(sizeOutput.asOutput()); + opBuilder = scope.apply(opBuilder); + return new XlaSetDynamicDimensionSize<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DataServiceDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DataServiceDataset.java index 056c6bb0c21..eec5392bda5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DataServiceDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DataServiceDataset.java @@ -27,17 +27,13 @@ import org.tensorflow.op.RawOp; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** - * The DataServiceDataset operation + * Creates a dataset that reads data from the tf.data service. */ -@Operator( - group = "data.experimental" -) public final class DataServiceDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -97,6 +93,9 @@ public static DataServiceDataset create(Scope scope, Operand datasetId, if (opts.taskRefreshIntervalHintMs != null) { opBuilder.setAttr("task_refresh_interval_hint_ms", opts.taskRefreshIntervalHintMs); } + if (opts.dataTransferProtocol != null) { + opBuilder.setAttr("data_transfer_protocol", opts.dataTransferProtocol); + } } } return new DataServiceDataset(opBuilder.build()); @@ -112,6 +111,16 @@ public static Options taskRefreshIntervalHintMs(Long taskRefreshIntervalHintMs) return new Options().taskRefreshIntervalHintMs(taskRefreshIntervalHintMs); } + /** + * Sets the dataTransferProtocol option. + * + * @param dataTransferProtocol the dataTransferProtocol option + * @return this Options instance. + */ + public static Options dataTransferProtocol(String dataTransferProtocol) { + return new Options().dataTransferProtocol(dataTransferProtocol); + } + /** * Gets handle. 
* @@ -133,6 +142,8 @@ public Output asOutput() { public static class Options { private Long taskRefreshIntervalHintMs; + private String dataTransferProtocol; + private Options() { } @@ -146,5 +157,16 @@ public Options taskRefreshIntervalHintMs(Long taskRefreshIntervalHintMs) { this.taskRefreshIntervalHintMs = taskRefreshIntervalHintMs; return this; } + + /** + * Sets the dataTransferProtocol option. + * + * @param dataTransferProtocol the dataTransferProtocol option + * @return this Options instance. + */ + public Options dataTransferProtocol(String dataTransferProtocol) { + this.dataTransferProtocol = dataTransferProtocol; + return this; + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java index 50eb425c04f..068892293e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java @@ -29,9 +29,9 @@ /** * Checks a tensor for NaN, -Inf and +Inf values. * When run, reports an {@code InvalidArgument} error if {@code tensor} has any values - * that are not a number (NaN) or infinity (Inf). Otherwise, passes {@code tensor} as-is. - * Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf in the - * errors it throws. + * that are not a number (NaN) or infinity (Inf). Otherwise, returns the input + * tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf + * in the errors it throws. * * @param data type for {@code output} output */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java index c20c3f981bd..acf5dadb78e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java @@ -81,7 +81,9 @@ private CombinedNonMaxSuppression(Operation operation) { * representing a single score corresponding to each box (each row of boxes). * @param maxOutputSizePerClass A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression per class - * @param maxTotalSize A scalar representing maximum number of boxes retained over all classes. + * @param maxTotalSize An int32 scalar representing the maximum number of boxes retained over all + * classes. Note that setting this value to a large number may result in OOM error + * depending on the system workload. * @param iouThreshold A 0-D float tensor representing the threshold for deciding whether * boxes overlap too much with respect to IOU. 
* @param scoreThreshold A 0-D float tensor representing the threshold for deciding when to remove diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java index 7c038ac4fd9..82b134b39d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java @@ -143,7 +143,7 @@ public static Options uniformNoise(Boolean uniformNoise) { * Sets the noise option. * * @param noise indicates if the noise should {@code uniform}, {@code gaussian}, or - * {@code zero}. The default is {@code uniform} which means the the noise type + * {@code zero}. The default is {@code uniform} which means the noise type * will be decided by {@code uniform_noise}. * @return this Options instance. */ @@ -221,7 +221,7 @@ public Options uniformNoise(Boolean uniformNoise) { * Sets the noise option. * * @param noise indicates if the noise should {@code uniform}, {@code gaussian}, or - * {@code zero}. The default is {@code uniform} which means the the noise type + * {@code zero}. The default is {@code uniform} which means the noise type * will be decided by {@code uniform_noise}. * @return this Options instance. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java index 94cef1d7502..143c787038e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java @@ -93,7 +93,7 @@ public static ImageProjectiveTransformV3 create(Scope sco /** * Sets the fillMode option. * - * @param fillMode Fill mode, "REFLECT", "WRAP", or "CONSTANT". + * @param fillMode Fill mode, "REFLECT", "WRAP", "CONSTANT", or "NEAREST". * @return this Options instance. */ public static Options fillMode(String fillMode) { @@ -127,7 +127,7 @@ private Options() { /** * Sets the fillMode option. * - * @param fillMode Fill mode, "REFLECT", "WRAP", or "CONSTANT". + * @param fillMode Fill mode, "REFLECT", "WRAP", "CONSTANT", or "NEAREST". * @return this Options instance. */ public Options fillMode(String fillMode) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java index d2cde0a1a6c..3781c269b84 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java @@ -29,12 +29,13 @@ /** * Convert JSON-encoded Example records to binary protocol buffer strings. - * This op translates a tensor containing Example records, encoded using - * the standard JSON - * mapping , - * into a tensor containing the same records encoded as binary protocol - * buffers. The resulting tensor can then be fed to any of the other - * Example-parsing ops. + * Note: This is not a general purpose JSON parsing op. + *

      This op converts JSON-serialized + * {@code tf.train.Example} (created with {@code json_format.MessageToJson}, following the + * standard JSON mapping ) + * to a binary-serialized {@code tf.train.Example} (equivalent to + * {@code Example.SerializeToString()}) suitable for conversion to tensors with + * {@code tf.io.parse_example}. */ @Operator( group = "io" diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java index 24c1d6334aa..4205f873deb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java @@ -39,25 +39,25 @@ *

      For example: *

        * # if 'input' is [[ 0,  1,  2, 3]
      - *                  [-1,  0,  1, 2]
      - *                  [-2, -1,  0, 1]
      - *                  [-3, -2, -1, 0]],
      + * #                [-1,  0,  1, 2]
      + * #                [-2, -1,  0, 1]
      + * #                [-3, -2, -1, 0]],
        *
      - * tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
      + * tf.linalg.band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
        *                                        [-1,  0,  1, 2]
        *                                        [ 0, -1,  0, 1]
        *                                        [ 0,  0, -1, 0]],
        *
      - * tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
      + * tf.linalg.band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
        *                                       [-1,  0,  1, 0]
        *                                       [-2, -1,  0, 1]
        *                                       [ 0, -2, -1, 0]]
        * 
      *

      Useful special cases: *

      - *  tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
      - *  tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
      - *  tf.matrix_band_part(input, 0, 0) ==> Diagonal.
      + *  tf.linalg.band_part(input, 0, -1) ==> Upper triangular part.
      + *  tf.linalg.band_part(input, -1, 0) ==> Lower triangular part.
      + *  tf.linalg.band_part(input, 0, 0) ==> Diagonal.
        * 
      * * @param data type for {@code band} output diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java index c1be1a20816..00e59eadad2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java @@ -28,7 +28,7 @@ import org.tensorflow.types.family.TNumber; /** - * Computes softplus: {@code log(exp(features) + 1)}. + * The Softplus operation * * @param data type for {@code activations} output */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java index 66ce871ca7a..d7afc873a44 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java @@ -28,8 +28,26 @@ import org.tensorflow.types.family.TNumber; /** - * Computes exponential linear: {@code exp(features) - 1} if < 0, {@code features} otherwise. - * See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) + * Computes the exponential linear function. + * The ELU function is defined as: + *
        + *
      • $ e ^ x - 1 $ if $ x < 0 $
      • + *
      • $ x $ if $ x >= 0 $
      • + *
      + *

      Examples: + *

      + *
      + *
      + *

      tf.nn.elu(1.0) + * <tf.Tensor: shape=(), dtype=float32, numpy=1.0> + * tf.nn.elu(0.0) + * <tf.Tensor: shape=(), dtype=float32, numpy=0.0> + * tf.nn.elu(-1000.0) + * <tf.Tensor: shape=(), dtype=float32, numpy=-1.0> + *

      + *
      + *
      + *

      See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * * * @param data type for {@code activations} output diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java index b85617c997f..6963a4905cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java @@ -34,8 +34,8 @@ *

      *
      *
      - *

      tf.nn.relu([-2., 0., -0., 3.]).numpy() - * array([ 0., 0., -0., 3.], dtype=float32) + *

      tf.nn.relu([-2., 0., 3.]).numpy() + * array([0., 0., 3.], dtype=float32) *

      *
      *
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java index 6386ed591b4..6b269656c5d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java @@ -68,7 +68,7 @@ private RaggedTensorToTensor(Operation operation) { * Factory method to create a class wrapping a new RaggedTensorToTensor operation. * * @param scope current scope - * @param shape The desired shape of the the output tensor. If left unspecified (empty), + * @param shape The desired shape of the output tensor. If left unspecified (empty), * the minimal shape required to contain all the elements in the ragged tensor * (the natural shape) will be used. If some dimensions are left unspecified, then * the size of the natural shape is used in that dimension. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java new file mode 100644 index 00000000000..aba0663c0d1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java @@ -0,0 +1,157 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.rawops; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * Receives a tensor value broadcast from another device. + * + * @param data type for {@code data} output + */ +public final class CollectiveBcastRecvV2 extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "CollectiveBcastRecvV2"; + + private Output data; + + private CollectiveBcastRecvV2(Operation operation) { + super(operation); + int outputIdx = 0; + data = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new CollectiveBcastRecvV2 operation. 
+ * + * @param scope current scope + * @param groupSize the groupSize value + * @param groupKey the groupKey value + * @param instanceKey the instanceKey value + * @param shape the shape value + * @param T the value of the T property + * @param options carries optional attribute values + * @param data type for {@code CollectiveBcastRecvV2} output and operands + * @return a new instance of CollectiveBcastRecvV2 + */ + @Endpoint( + describeByClass = true + ) + public static CollectiveBcastRecvV2 create(Scope scope, + Operand groupSize, Operand groupKey, Operand instanceKey, + Operand shape, Class T, Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("CollectiveBcastRecvV2", scope.makeOpName("CollectiveBcastRecvV2")); + opBuilder.addInput(groupSize.asOutput()); + opBuilder.addInput(groupKey.asOutput()); + opBuilder.addInput(instanceKey.asOutput()); + opBuilder.addInput(shape.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("T", Operands.toDataType(T)); + if (options != null) { + for (Options opts : options) { + if (opts.communicationHint != null) { + opBuilder.setAttr("communication_hint", opts.communicationHint); + } + if (opts.timeoutSeconds != null) { + opBuilder.setAttr("timeout_seconds", opts.timeoutSeconds); + } + } + } + return new CollectiveBcastRecvV2<>(opBuilder.build()); + } + + /** + * Sets the communicationHint option. + * + * @param communicationHint the communicationHint option + * @return this Options instance. + */ + public static Options communicationHint(String communicationHint) { + return new Options().communicationHint(communicationHint); + } + + /** + * Sets the timeoutSeconds option. + * + * @param timeoutSeconds the timeoutSeconds option + * @return this Options instance. + */ + public static Options timeoutSeconds(Float timeoutSeconds) { + return new Options().timeoutSeconds(timeoutSeconds); + } + + /** + * Gets data. + * + * @return data. + */ + public Output data() { + return data; + } + + @Override + public Output asOutput() { + return data; + } + + /** + * Optional attributes for {@link org.tensorflow.op.rawops.CollectiveBcastRecvV2} + */ + public static class Options { + private String communicationHint; + + private Float timeoutSeconds; + + private Options() { + } + + /** + * Sets the communicationHint option. + * + * @param communicationHint the communicationHint option + * @return this Options instance. + */ + public Options communicationHint(String communicationHint) { + this.communicationHint = communicationHint; + return this; + } + + /** + * Sets the timeoutSeconds option. + * + * @param timeoutSeconds the timeoutSeconds option + * @return this Options instance. + */ + public Options timeoutSeconds(Float timeoutSeconds) { + this.timeoutSeconds = timeoutSeconds; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java new file mode 100644 index 00000000000..28a8a714995 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java @@ -0,0 +1,153 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.rawops; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TType; + +/** + * Broadcasts a tensor value to one or more other devices. + * + * @param data type for {@code data} output + */ +public final class CollectiveBcastSendV2 extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "CollectiveBcastSendV2"; + + private Output data; + + private CollectiveBcastSendV2(Operation operation) { + super(operation); + int outputIdx = 0; + data = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new CollectiveBcastSendV2 operation. + * + * @param scope current scope + * @param input the input value + * @param groupSize the groupSize value + * @param groupKey the groupKey value + * @param instanceKey the instanceKey value + * @param options carries optional attribute values + * @param data type for {@code CollectiveBcastSendV2} output and operands + * @return a new instance of CollectiveBcastSendV2 + */ + @Endpoint( + describeByClass = true + ) + public static CollectiveBcastSendV2 create(Scope scope, Operand input, + Operand groupSize, Operand groupKey, Operand instanceKey, + Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("CollectiveBcastSendV2", scope.makeOpName("CollectiveBcastSendV2")); + opBuilder.addInput(input.asOutput()); + opBuilder.addInput(groupSize.asOutput()); + opBuilder.addInput(groupKey.asOutput()); + opBuilder.addInput(instanceKey.asOutput()); + opBuilder = scope.apply(opBuilder); + if (options != null) { + for (Options opts : options) { + if (opts.communicationHint != null) { + opBuilder.setAttr("communication_hint", opts.communicationHint); + } + if (opts.timeoutSeconds != null) { + opBuilder.setAttr("timeout_seconds", opts.timeoutSeconds); + } + } + } + return new CollectiveBcastSendV2<>(opBuilder.build()); + } + + /** + * Sets the communicationHint option. + * + * @param communicationHint the communicationHint option + * @return this Options instance. + */ + public static Options communicationHint(String communicationHint) { + return new Options().communicationHint(communicationHint); + } + + /** + * Sets the timeoutSeconds option. + * + * @param timeoutSeconds the timeoutSeconds option + * @return this Options instance. + */ + public static Options timeoutSeconds(Float timeoutSeconds) { + return new Options().timeoutSeconds(timeoutSeconds); + } + + /** + * Gets data. + * + * @return data. 
+ */ + public Output data() { + return data; + } + + @Override + public Output asOutput() { + return data; + } + + /** + * Optional attributes for {@link org.tensorflow.op.rawops.CollectiveBcastSendV2} + */ + public static class Options { + private String communicationHint; + + private Float timeoutSeconds; + + private Options() { + } + + /** + * Sets the communicationHint option. + * + * @param communicationHint the communicationHint option + * @return this Options instance. + */ + public Options communicationHint(String communicationHint) { + this.communicationHint = communicationHint; + return this; + } + + /** + * Sets the timeoutSeconds option. + * + * @param timeoutSeconds the timeoutSeconds option + * @return this Options instance. + */ + public Options timeoutSeconds(Float timeoutSeconds) { + this.timeoutSeconds = timeoutSeconds; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/DataServiceDatasetV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/DataServiceDatasetV2.java new file mode 100644 index 00000000000..ebd5f4220b8 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/DataServiceDatasetV2.java @@ -0,0 +1,176 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.rawops; + +import java.util.List; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.TString; +import org.tensorflow.types.family.TType; + +/** + * Creates a dataset that reads data from the tf.data service. + */ +public final class DataServiceDatasetV2 extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "DataServiceDatasetV2"; + + private Output handle; + + @SuppressWarnings("unchecked") + private DataServiceDatasetV2(Operation operation) { + super(operation); + int outputIdx = 0; + handle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new DataServiceDatasetV2 operation. 
+ * + * @param scope current scope + * @param datasetId the datasetId value + * @param processingMode the processingMode value + * @param address the address value + * @param protocol the protocol value + * @param jobName the jobName value + * @param consumerIndex the consumerIndex value + * @param numConsumers the numConsumers value + * @param maxOutstandingRequests the maxOutstandingRequests value + * @param iterationCounter the iterationCounter value + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property + * @param options carries optional attribute values + * @return a new instance of DataServiceDatasetV2 + */ + @Endpoint( + describeByClass = true + ) + public static DataServiceDatasetV2 create(Scope scope, Operand datasetId, + Operand processingMode, Operand address, Operand protocol, + Operand jobName, Operand consumerIndex, Operand numConsumers, + Operand maxOutstandingRequests, Operand iterationCounter, + List> outputTypes, List outputShapes, Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("DataServiceDatasetV2", scope.makeOpName("DataServiceDatasetV2")); + opBuilder.addInput(datasetId.asOutput()); + opBuilder.addInput(processingMode.asOutput()); + opBuilder.addInput(address.asOutput()); + opBuilder.addInput(protocol.asOutput()); + opBuilder.addInput(jobName.asOutput()); + opBuilder.addInput(consumerIndex.asOutput()); + opBuilder.addInput(numConsumers.asOutput()); + opBuilder.addInput(maxOutstandingRequests.asOutput()); + opBuilder.addInput(iterationCounter.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); + Shape[] outputShapesArray = new Shape[outputShapes.size()]; + for (int i = 0 ; i < outputShapesArray.length ; i++) { + outputShapesArray[i] = outputShapes.get(i); + } + opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.taskRefreshIntervalHintMs != null) { + opBuilder.setAttr("task_refresh_interval_hint_ms", opts.taskRefreshIntervalHintMs); + } + if (opts.dataTransferProtocol != null) { + opBuilder.setAttr("data_transfer_protocol", opts.dataTransferProtocol); + } + } + } + return new DataServiceDatasetV2(opBuilder.build()); + } + + /** + * Sets the taskRefreshIntervalHintMs option. + * + * @param taskRefreshIntervalHintMs the taskRefreshIntervalHintMs option + * @return this Options instance. + */ + public static Options taskRefreshIntervalHintMs(Long taskRefreshIntervalHintMs) { + return new Options().taskRefreshIntervalHintMs(taskRefreshIntervalHintMs); + } + + /** + * Sets the dataTransferProtocol option. + * + * @param dataTransferProtocol the dataTransferProtocol option + * @return this Options instance. + */ + public static Options dataTransferProtocol(String dataTransferProtocol) { + return new Options().dataTransferProtocol(dataTransferProtocol); + } + + /** + * Gets handle. + * + * @return handle. + */ + public Output handle() { + return handle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; + } + + /** + * Optional attributes for {@link org.tensorflow.op.rawops.DataServiceDatasetV2} + */ + public static class Options { + private Long taskRefreshIntervalHintMs; + + private String dataTransferProtocol; + + private Options() { + } + + /** + * Sets the taskRefreshIntervalHintMs option. 
+ * + * @param taskRefreshIntervalHintMs the taskRefreshIntervalHintMs option + * @return this Options instance. + */ + public Options taskRefreshIntervalHintMs(Long taskRefreshIntervalHintMs) { + this.taskRefreshIntervalHintMs = taskRefreshIntervalHintMs; + return this; + } + + /** + * Sets the dataTransferProtocol option. + * + * @param dataTransferProtocol the dataTransferProtocol option + * @return this Options instance. + */ + public Options dataTransferProtocol(String dataTransferProtocol) { + this.dataTransferProtocol = dataTransferProtocol; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/FinalizeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/FinalizeDataset.java new file mode 100644 index 00000000000..9ef62abad9c --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/FinalizeDataset.java @@ -0,0 +1,129 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.rawops; + +import java.util.List; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TType; + +/** + * Creates a dataset by applying {@code tf.data.Options} to {@code input_dataset}. + */ +public final class FinalizeDataset extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "FinalizeDataset"; + + private Output handle; + + @SuppressWarnings("unchecked") + private FinalizeDataset(Operation operation) { + super(operation); + int outputIdx = 0; + handle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new FinalizeDataset operation. + * + * @param scope current scope + * @param inputDataset A variant tensor representing the input dataset. + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property + * @param options carries optional attribute values + * @return a new instance of FinalizeDataset + */ + @Endpoint( + describeByClass = true + ) + public static FinalizeDataset create(Scope scope, Operand inputDataset, + List> outputTypes, List outputShapes, Options... 
options) { + OperationBuilder opBuilder = scope.env().opBuilder("FinalizeDataset", scope.makeOpName("FinalizeDataset")); + opBuilder.addInput(inputDataset.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); + Shape[] outputShapesArray = new Shape[outputShapes.size()]; + for (int i = 0 ; i < outputShapesArray.length ; i++) { + outputShapesArray[i] = outputShapes.get(i); + } + opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.hasCapturedRef != null) { + opBuilder.setAttr("has_captured_ref", opts.hasCapturedRef); + } + } + } + return new FinalizeDataset(opBuilder.build()); + } + + /** + * Sets the hasCapturedRef option. + * + * @param hasCapturedRef the hasCapturedRef option + * @return this Options instance. + */ + public static Options hasCapturedRef(Boolean hasCapturedRef) { + return new Options().hasCapturedRef(hasCapturedRef); + } + + /** + * Gets handle. + * + * @return handle. + */ + public Output handle() { + return handle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; + } + + /** + * Optional attributes for {@link org.tensorflow.op.rawops.FinalizeDataset} + */ + public static class Options { + private Boolean hasCapturedRef; + + private Options() { + } + + /** + * Sets the hasCapturedRef option. + * + * @param hasCapturedRef the hasCapturedRef option + * @return this Options instance. + */ + public Options hasCapturedRef(Boolean hasCapturedRef) { + this.hasCapturedRef = hasCapturedRef; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeSerialize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java similarity index 55% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeSerialize.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java index 81e0658f425..ad7ec23eb00 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeSerialize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.core; +package org.tensorflow.op.rawops; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -28,50 +28,50 @@ import org.tensorflow.types.family.TType; /** - * Serializes the tree handle to a proto + * Returns the {@code tf.data.Options} attached to {@code input_dataset}. */ -public final class TensorForestTreeSerialize extends RawOp implements Operand { +public final class GetOptions extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "TensorForestTreeSerialize"; + public static final String OP_NAME = "GetOptions"; - private Output treeConfig; + private Output serializedOptions; - private TensorForestTreeSerialize(Operation operation) { + private GetOptions(Operation operation) { super(operation); int outputIdx = 0; - treeConfig = operation.output(outputIdx++); + serializedOptions = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new TensorForestTreeSerialize operation. + * Factory method to create a class wrapping a new GetOptions operation. 
* * @param scope current scope - * @param treeHandle Handle to the tree resource to be serialized. - * @return a new instance of TensorForestTreeSerialize + * @param inputDataset A variant tensor representing the input dataset. + * @return a new instance of GetOptions */ @Endpoint( describeByClass = true ) - public static TensorForestTreeSerialize create(Scope scope, Operand treeHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorForestTreeSerialize", scope.makeOpName("TensorForestTreeSerialize")); - opBuilder.addInput(treeHandle.asOutput()); + public static GetOptions create(Scope scope, Operand inputDataset) { + OperationBuilder opBuilder = scope.env().opBuilder("GetOptions", scope.makeOpName("GetOptions")); + opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); - return new TensorForestTreeSerialize(opBuilder.build()); + return new GetOptions(opBuilder.build()); } /** - * Gets treeConfig. - * Serialied proto string of the tree resource. - * @return treeConfig. + * Gets serializedOptions. + * + * @return serializedOptions. */ - public Output treeConfig() { - return treeConfig; + public Output serializedOptions() { + return serializedOptions; } @Override public Output asOutput() { - return treeConfig; + return serializedOptions; } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java new file mode 100644 index 00000000000..be8583f352c --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java @@ -0,0 +1,161 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.rawops; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TFloat32; + +/** + * Load frequency estimator embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. 
+ */ +public final class LoadTPUEmbeddingFrequencyEstimatorParameters extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "LoadTPUEmbeddingFrequencyEstimatorParameters"; + + private LoadTPUEmbeddingFrequencyEstimatorParameters(Operation operation) { + super(operation); + } + + /** + * Factory method to create a class wrapping a new LoadTPUEmbeddingFrequencyEstimatorParameters operation. + * + * @param scope current scope + * @param parameters Value of parameters used in the frequency estimator optimization algorithm. + * @param lastHitStep Value of last_hit_step used in the frequency estimator optimization algorithm. + * @param numShards the value of the numShards property + * @param shardId the value of the shardId property + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingFrequencyEstimatorParameters + */ + @Endpoint( + describeByClass = true + ) + public static LoadTPUEmbeddingFrequencyEstimatorParameters create(Scope scope, + Operand parameters, Operand lastHitStep, Long numShards, Long shardId, + Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingFrequencyEstimatorParameters", scope.makeOpName("LoadTPUEmbeddingFrequencyEstimatorParameters")); + opBuilder.addInput(parameters.asOutput()); + opBuilder.addInput(lastHitStep.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("num_shards", numShards); + opBuilder.setAttr("shard_id", shardId); + if (options != null) { + for (Options opts : options) { + if (opts.tableId != null) { + opBuilder.setAttr("table_id", opts.tableId); + } + if (opts.tableName != null) { + opBuilder.setAttr("table_name", opts.tableName); + } + if (opts.config != null) { + opBuilder.setAttr("config", opts.config); + } + } + } + return new LoadTPUEmbeddingFrequencyEstimatorParameters(opBuilder.build()); + } + + /** + * Sets the tableId option. + * + * @param tableId the tableId option + * @return this Options instance. + */ + public static Options tableId(Long tableId) { + return new Options().tableId(tableId); + } + + /** + * Sets the tableName option. + * + * @param tableName the tableName option + * @return this Options instance. + */ + public static Options tableName(String tableName) { + return new Options().tableName(tableName); + } + + /** + * Sets the config option. + * + * @param config the config option + * @return this Options instance. + */ + public static Options config(String config) { + return new Options().config(config); + } + + /** + * Optional attributes for {@link org.tensorflow.op.rawops.LoadTPUEmbeddingFrequencyEstimatorParameters} + */ + public static class Options { + private Long tableId; + + private String tableName; + + private String config; + + private Options() { + } + + /** + * Sets the tableId option. + * + * @param tableId the tableId option + * @return this Options instance. + */ + public Options tableId(Long tableId) { + this.tableId = tableId; + return this; + } + + /** + * Sets the tableName option. + * + * @param tableName the tableName option + * @return this Options instance. + */ + public Options tableName(String tableName) { + this.tableName = tableName; + return this; + } + + /** + * Sets the config option. + * + * @param config the config option + * @return this Options instance. 
+ */ + public Options config(String config) { + this.config = config; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java new file mode 100644 index 00000000000..cb5dcd87eef --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java @@ -0,0 +1,164 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.rawops; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TFloat32; + +/** + * Load frequency estimator embedding parameters with debug support. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + */ +public final class LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"; + + private LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(Operation operation) { + super(operation); + } + + /** + * Factory method to create a class wrapping a new LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug operation. + * + * @param scope current scope + * @param parameters Value of parameters used in the frequency estimator optimization algorithm. + * @param lastHitStep Value of last_hit_step used in the frequency estimator optimization algorithm. + * @param gradientAccumulators Value of gradient_accumulators used in the frequency estimator optimization + * algorithm. + * @param numShards the value of the numShards property + * @param shardId the value of the shardId property + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug + */ + @Endpoint( + describeByClass = true + ) + public static LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug create(Scope scope, + Operand parameters, Operand lastHitStep, + Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { + OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug")); + opBuilder.addInput(parameters.asOutput()); + opBuilder.addInput(lastHitStep.asOutput()); + opBuilder.addInput(gradientAccumulators.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("num_shards", numShards); + opBuilder.setAttr("shard_id", shardId); + if (options != null) { + for (Options opts : options) { + if (opts.tableId != null) { + opBuilder.setAttr("table_id", opts.tableId); + } + if (opts.tableName != null) { + opBuilder.setAttr("table_name", opts.tableName); + } + if (opts.config != null) { + opBuilder.setAttr("config", opts.config); + } + } + } + return new LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(opBuilder.build()); + } + + /** + * Sets the tableId option. + * + * @param tableId the tableId option + * @return this Options instance. + */ + public static Options tableId(Long tableId) { + return new Options().tableId(tableId); + } + + /** + * Sets the tableName option. + * + * @param tableName the tableName option + * @return this Options instance. + */ + public static Options tableName(String tableName) { + return new Options().tableName(tableName); + } + + /** + * Sets the config option. + * + * @param config the config option + * @return this Options instance. + */ + public static Options config(String config) { + return new Options().config(config); + } + + /** + * Optional attributes for {@link org.tensorflow.op.rawops.LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug} + */ + public static class Options { + private Long tableId; + + private String tableName; + + private String config; + + private Options() { + } + + /** + * Sets the tableId option. + * + * @param tableId the tableId option + * @return this Options instance. + */ + public Options tableId(Long tableId) { + this.tableId = tableId; + return this; + } + + /** + * Sets the tableName option. + * + * @param tableName the tableName option + * @return this Options instance. + */ + public Options tableName(String tableName) { + this.tableName = tableName; + return this; + } + + /** + * Sets the config option. + * + * @param config the config option + * @return this Options instance. + */ + public Options config(String config) { + this.config = config; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/OptionsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/OptionsDataset.java new file mode 100644 index 00000000000..a8a2bea05a3 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/OptionsDataset.java @@ -0,0 +1,93 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.rawops; + +import java.util.List; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TType; + +/** + * Creates a dataset by attaching tf.data.Options to {@code input_dataset}. + */ +public final class OptionsDataset extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "OptionsDataset"; + + private Output handle; + + @SuppressWarnings("unchecked") + private OptionsDataset(Operation operation) { + super(operation); + int outputIdx = 0; + handle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new OptionsDataset operation. + * + * @param scope current scope + * @param inputDataset A variant tensor representing the input dataset. + * @param serializedOptions A {@code tf.string} scalar {@code tf.Tensor} of serialized {@code tf.data.Options} protocol buffer. + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property + * @return a new instance of OptionsDataset + */ + @Endpoint( + describeByClass = true + ) + public static OptionsDataset create(Scope scope, Operand inputDataset, + String serializedOptions, List> outputTypes, + List outputShapes) { + OperationBuilder opBuilder = scope.env().opBuilder("OptionsDataset", scope.makeOpName("OptionsDataset")); + opBuilder.addInput(inputDataset.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("serialized_options", serializedOptions); + opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); + Shape[] outputShapesArray = new Shape[outputShapes.size()]; + for (int i = 0 ; i < outputShapesArray.length ; i++) { + outputShapesArray[i] = outputShapes.get(i); + } + opBuilder.setAttr("output_shapes", outputShapesArray); + return new OptionsDataset(opBuilder.build()); + } + + /** + * Gets handle. + * + * @return handle. + */ + public Output handle() { + return handle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/ParallelBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/ParallelBatchDataset.java new file mode 100644 index 00000000000..49b2b56b1c6 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/ParallelBatchDataset.java @@ -0,0 +1,138 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.rawops; + +import java.util.List; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TBool; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TType; + +/** + * The ParallelBatchDataset operation + */ +public final class ParallelBatchDataset extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "ParallelBatchDataset"; + + private Output handle; + + @SuppressWarnings("unchecked") + private ParallelBatchDataset(Operation operation) { + super(operation); + int outputIdx = 0; + handle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new ParallelBatchDataset operation. + * + * @param scope current scope + * @param inputDataset the inputDataset value + * @param batchSize the batchSize value + * @param numParallelCalls the numParallelCalls value + * @param dropRemainder the dropRemainder value + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property + * @param options carries optional attribute values + * @return a new instance of ParallelBatchDataset + */ + @Endpoint( + describeByClass = true + ) + public static ParallelBatchDataset create(Scope scope, Operand inputDataset, + Operand batchSize, Operand numParallelCalls, Operand dropRemainder, + List> outputTypes, List outputShapes, Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("ParallelBatchDataset", scope.makeOpName("ParallelBatchDataset")); + opBuilder.addInput(inputDataset.asOutput()); + opBuilder.addInput(batchSize.asOutput()); + opBuilder.addInput(numParallelCalls.asOutput()); + opBuilder.addInput(dropRemainder.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); + Shape[] outputShapesArray = new Shape[outputShapes.size()]; + for (int i = 0 ; i < outputShapesArray.length ; i++) { + outputShapesArray[i] = outputShapes.get(i); + } + opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.deterministic != null) { + opBuilder.setAttr("deterministic", opts.deterministic); + } + } + } + return new ParallelBatchDataset(opBuilder.build()); + } + + /** + * Sets the deterministic option. + * + * @param deterministic the deterministic option + * @return this Options instance. + */ + public static Options deterministic(String deterministic) { + return new Options().deterministic(deterministic); + } + + /** + * Gets handle. + * + * @return handle. + */ + public Output handle() { + return handle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; + } + + /** + * Optional attributes for {@link org.tensorflow.op.rawops.ParallelBatchDataset} + */ + public static class Options { + private String deterministic; + + private Options() { + } + + /** + * Sets the deterministic option. + * + * @param deterministic the deterministic option + * @return this Options instance. 
+ */ + public Options deterministic(String deterministic) { + this.deterministic = deterministic; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java new file mode 100644 index 00000000000..56a60afbde8 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java @@ -0,0 +1,181 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.rawops; + +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TFloat32; + +/** + * Retrieve frequency estimator embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. + */ +public final class RetrieveTPUEmbeddingFrequencyEstimatorParameters extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RetrieveTPUEmbeddingFrequencyEstimatorParameters"; + + private Output parameters; + + private Output lastHitStep; + + private RetrieveTPUEmbeddingFrequencyEstimatorParameters(Operation operation) { + super(operation); + int outputIdx = 0; + parameters = operation.output(outputIdx++); + lastHitStep = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RetrieveTPUEmbeddingFrequencyEstimatorParameters operation. + * + * @param scope current scope + * @param numShards the value of the numShards property + * @param shardId the value of the shardId property + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingFrequencyEstimatorParameters + */ + @Endpoint( + describeByClass = true + ) + public static RetrieveTPUEmbeddingFrequencyEstimatorParameters create(Scope scope, Long numShards, + Long shardId, Options... 
options) { + OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingFrequencyEstimatorParameters", scope.makeOpName("RetrieveTPUEmbeddingFrequencyEstimatorParameters")); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("num_shards", numShards); + opBuilder.setAttr("shard_id", shardId); + if (options != null) { + for (Options opts : options) { + if (opts.tableId != null) { + opBuilder.setAttr("table_id", opts.tableId); + } + if (opts.tableName != null) { + opBuilder.setAttr("table_name", opts.tableName); + } + if (opts.config != null) { + opBuilder.setAttr("config", opts.config); + } + } + } + return new RetrieveTPUEmbeddingFrequencyEstimatorParameters(opBuilder.build()); + } + + /** + * Sets the tableId option. + * + * @param tableId the tableId option + * @return this Options instance. + */ + public static Options tableId(Long tableId) { + return new Options().tableId(tableId); + } + + /** + * Sets the tableName option. + * + * @param tableName the tableName option + * @return this Options instance. + */ + public static Options tableName(String tableName) { + return new Options().tableName(tableName); + } + + /** + * Sets the config option. + * + * @param config the config option + * @return this Options instance. + */ + public static Options config(String config) { + return new Options().config(config); + } + + /** + * Gets parameters. + * Parameter parameters updated by the frequency estimator optimization algorithm. + * @return parameters. + */ + public Output parameters() { + return parameters; + } + + /** + * Gets lastHitStep. + * Parameter last_hit_step updated by the frequency estimator optimization + * algorithm. + * @return lastHitStep. + */ + public Output lastHitStep() { + return lastHitStep; + } + + /** + * Optional attributes for {@link org.tensorflow.op.rawops.RetrieveTPUEmbeddingFrequencyEstimatorParameters} + */ + public static class Options { + private Long tableId; + + private String tableName; + + private String config; + + private Options() { + } + + /** + * Sets the tableId option. + * + * @param tableId the tableId option + * @return this Options instance. + */ + public Options tableId(Long tableId) { + this.tableId = tableId; + return this; + } + + /** + * Sets the tableName option. + * + * @param tableName the tableName option + * @return this Options instance. + */ + public Options tableName(String tableName) { + this.tableName = tableName; + return this; + } + + /** + * Sets the config option. + * + * @param config the config option + * @return this Options instance. + */ + public Options config(String config) { + this.config = config; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java new file mode 100644 index 00000000000..dfa83fc4ee2 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java @@ -0,0 +1,194 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.rawops; + +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TFloat32; + +/** + * Retrieve frequency estimator embedding parameters with debug support. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. + */ +public final class RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"; + + private Output parameters; + + private Output lastHitStep; + + private Output gradientAccumulators; + + private RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(Operation operation) { + super(operation); + int outputIdx = 0; + parameters = operation.output(outputIdx++); + lastHitStep = operation.output(outputIdx++); + gradientAccumulators = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug operation. + * + * @param scope current scope + * @param numShards the value of the numShards property + * @param shardId the value of the shardId property + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug + */ + @Endpoint( + describeByClass = true + ) + public static RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug create(Scope scope, + Long numShards, Long shardId, Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug")); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("num_shards", numShards); + opBuilder.setAttr("shard_id", shardId); + if (options != null) { + for (Options opts : options) { + if (opts.tableId != null) { + opBuilder.setAttr("table_id", opts.tableId); + } + if (opts.tableName != null) { + opBuilder.setAttr("table_name", opts.tableName); + } + if (opts.config != null) { + opBuilder.setAttr("config", opts.config); + } + } + } + return new RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(opBuilder.build()); + } + + /** + * Sets the tableId option. + * + * @param tableId the tableId option + * @return this Options instance. + */ + public static Options tableId(Long tableId) { + return new Options().tableId(tableId); + } + + /** + * Sets the tableName option. 
+ * + * @param tableName the tableName option + * @return this Options instance. + */ + public static Options tableName(String tableName) { + return new Options().tableName(tableName); + } + + /** + * Sets the config option. + * + * @param config the config option + * @return this Options instance. + */ + public static Options config(String config) { + return new Options().config(config); + } + + /** + * Gets parameters. + * Parameter parameters updated by the frequency estimator optimization algorithm. + * @return parameters. + */ + public Output parameters() { + return parameters; + } + + /** + * Gets lastHitStep. + * Parameter last_hit_step updated by the frequency estimator optimization + * algorithm. + * @return lastHitStep. + */ + public Output lastHitStep() { + return lastHitStep; + } + + /** + * Gets gradientAccumulators. + * Parameter gradient_accumulators updated by the frequency estimator optimization + * algorithm. + * @return gradientAccumulators. + */ + public Output gradientAccumulators() { + return gradientAccumulators; + } + + /** + * Optional attributes for {@link org.tensorflow.op.rawops.RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug} + */ + public static class Options { + private Long tableId; + + private String tableName; + + private String config; + + private Options() { + } + + /** + * Sets the tableId option. + * + * @param tableId the tableId option + * @return this Options instance. + */ + public Options tableId(Long tableId) { + this.tableId = tableId; + return this; + } + + /** + * Sets the tableName option. + * + * @param tableName the tableName option + * @return this Options instance. + */ + public Options tableName(String tableName) { + this.tableName = tableName; + return this; + } + + /** + * Sets the config option. + * + * @param config the config option + * @return this Options instance. + */ + public Options config(String config) { + this.config = config; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java similarity index 56% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeSize.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java index d6082e3be89..f2fae0ede92 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.core; +package org.tensorflow.op.rawops; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -25,53 +25,51 @@ import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.types.TInt32; -import org.tensorflow.types.family.TType; /** - * Get the number of nodes in a tree + * Picks the best counter-based RNG algorithm based on device. + * This op picks the best counter-based RNG algorithm based on device. 
*/ -public final class TensorForestTreeSize extends RawOp implements Operand { +public final class StatelessRandomGetAlg extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "TensorForestTreeSize"; + public static final String OP_NAME = "StatelessRandomGetAlg"; - private Output treeSize; + private Output alg; - private TensorForestTreeSize(Operation operation) { + private StatelessRandomGetAlg(Operation operation) { super(operation); int outputIdx = 0; - treeSize = operation.output(outputIdx++); + alg = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new TensorForestTreeSize operation. + * Factory method to create a class wrapping a new StatelessRandomGetAlg operation. * * @param scope current scope - * @param treeHandle Handle to the tree resource. - * @return a new instance of TensorForestTreeSize + * @return a new instance of StatelessRandomGetAlg */ @Endpoint( describeByClass = true ) - public static TensorForestTreeSize create(Scope scope, Operand treeHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorForestTreeSize", scope.makeOpName("TensorForestTreeSize")); - opBuilder.addInput(treeHandle.asOutput()); + public static StatelessRandomGetAlg create(Scope scope) { + OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomGetAlg", scope.makeOpName("StatelessRandomGetAlg")); opBuilder = scope.apply(opBuilder); - return new TensorForestTreeSize(opBuilder.build()); + return new StatelessRandomGetAlg(opBuilder.build()); } /** - * Gets treeSize. - * The size of the tree. - * @return treeSize. + * Gets alg. + * The RNG algorithm (shape int32[]). + * @return alg. */ - public Output treeSize() { - return treeSize; + public Output alg() { + return alg; } @Override public Output asOutput() { - return treeSize; + return alg; } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java new file mode 100644 index 00000000000..96e06befdc7 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java @@ -0,0 +1,86 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.rawops; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * Scrambles seed into key and counter, using the best algorithm based on device. 
+ * This op scrambles a shape-[2] seed into a key and a counter, both needed by counter-based RNG algorithms. The scrambing uses the best algorithm based on device. The scrambling is opaque but approximately satisfies the property that different seed results in different key/counter pair (which will in turn result in different random numbers). + */ +public final class StatelessRandomGetKeyCounter extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "StatelessRandomGetKeyCounter"; + + private Output key; + + private Output counter; + + @SuppressWarnings("unchecked") + private StatelessRandomGetKeyCounter(Operation operation) { + super(operation); + int outputIdx = 0; + key = operation.output(outputIdx++); + counter = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new StatelessRandomGetKeyCounter operation. + * + * @param scope current scope + * @param seed 2 seeds (shape [2]). + * @return a new instance of StatelessRandomGetKeyCounter + */ + @Endpoint( + describeByClass = true + ) + public static StatelessRandomGetKeyCounter create(Scope scope, Operand seed) { + OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomGetKeyCounter", scope.makeOpName("StatelessRandomGetKeyCounter")); + opBuilder.addInput(seed.asOutput()); + opBuilder = scope.apply(opBuilder); + return new StatelessRandomGetKeyCounter(opBuilder.build()); + } + + /** + * Gets key. + * Key for the counter-based RNG algorithm (shape uint64[1]). + * @return key. + */ + public Output key() { + return key; + } + + /** + * Gets counter. + * Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms). + * @return counter. + */ + public Output counter() { + return counter; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestCreateTreeVariable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java similarity index 51% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestCreateTreeVariable.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java index def3e63a22f..19b042663bd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestCreateTreeVariable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java @@ -15,47 +15,65 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.core; +package org.tensorflow.op.risc; import org.tensorflow.Operand; import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; import org.tensorflow.op.RawOp; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.types.TString; -import org.tensorflow.types.family.TType; +import org.tensorflow.types.family.TNumber; /** - * Creates a tree resource and returns a handle to it. 
+ * The RiscAbs operation + * + * @param data type for {@code y} output */ -public final class TensorForestCreateTreeVariable extends RawOp { +public final class RiscAbs extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "TensorForestCreateTreeVariable"; + public static final String OP_NAME = "RiscAbs"; - private TensorForestCreateTreeVariable(Operation operation) { + private Output y; + + private RiscAbs(Operation operation) { super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new TensorForestCreateTreeVariable operation. + * Factory method to create a class wrapping a new RiscAbs operation. * * @param scope current scope - * @param treeHandle Handle to the tree resource to be created. - * @param treeConfig Serialized proto string of the boosted_trees.Tree. - * @return a new instance of TensorForestCreateTreeVariable + * @param x the x value + * @param data type for {@code RiscAbs} output and operands + * @return a new instance of RiscAbs */ @Endpoint( describeByClass = true ) - public static TensorForestCreateTreeVariable create(Scope scope, - Operand treeHandle, Operand treeConfig) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorForestCreateTreeVariable", scope.makeOpName("TensorForestCreateTreeVariable")); - opBuilder.addInput(treeHandle.asOutput()); - opBuilder.addInput(treeConfig.asOutput()); + public static RiscAbs create(Scope scope, Operand x) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscAbs", scope.makeOpName("RiscAbs")); + opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); - return new TensorForestCreateTreeVariable(opBuilder.build()); + return new RiscAbs<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. + */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java new file mode 100644 index 00000000000..33a9471e18b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java @@ -0,0 +1,84 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * Returns x + y element-wise. + * NOTE: {@code risc.RiscAdd} does not support broadcasting. + *

      Given two input tensors, the {@code tf.risc_add} operation computes the sum for every element in the tensor. + *

      Both input and output have a range {@code (-inf, inf)}. + * + * @param data type for {@code z} output + */ +public final class RiscAdd extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscAdd"; + + private Output z; + + private RiscAdd(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscAdd operation. + * + * @param scope current scope + * @param x the x value + * @param y the y value + * @param data type for {@code RiscAdd} output and operands + * @return a new instance of RiscAdd + */ + @Endpoint( + describeByClass = true + ) + public static RiscAdd create(Scope scope, Operand x, Operand y) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscAdd", scope.makeOpName("RiscAdd")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscAdd<>(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. + */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java new file mode 100644 index 00000000000..94c493dcea1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java @@ -0,0 +1,84 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscBinaryArithmetic operation + * + * @param data type for {@code z} output + */ +public final class RiscBinaryArithmetic extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscBinaryArithmetic"; + + private Output z; + + private RiscBinaryArithmetic(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscBinaryArithmetic operation. 
+ * + * @param scope current scope + * @param x the x value + * @param y the y value + * @param opType the value of the opType property + * @param data type for {@code RiscBinaryArithmetic} output and operands + * @return a new instance of RiscBinaryArithmetic + */ + @Endpoint( + describeByClass = true + ) + public static RiscBinaryArithmetic create(Scope scope, Operand x, + Operand y, String opType) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscBinaryArithmetic", scope.makeOpName("RiscBinaryArithmetic")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("op_type", opType); + return new RiscBinaryArithmetic<>(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. + */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java new file mode 100644 index 00000000000..8ef2fcd4e79 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java @@ -0,0 +1,83 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TBool; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscBinaryComparison operation + */ +public final class RiscBinaryComparison extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscBinaryComparison"; + + private Output z; + + private RiscBinaryComparison(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscBinaryComparison operation. 
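A small sketch of how the element-wise RiscAdd wrapper and the attribute-driven RiscBinaryArithmetic wrapper defined above might be invoked (illustrative only; the "ADD" op_type value and the `tf` Ops instance from the earlier sketch are assumptions, inputs must share a shape since RiscAdd does not broadcast, and whether kernels are registered for these experimental RISC ops is outside this patch):

    Operand<TFloat32> x = tf.constant(new float[] {1f, 2f, 3f});
    Operand<TFloat32> y = tf.constant(new float[] {4f, 5f, 6f});
    RiscAdd<TFloat32> sum = RiscAdd.create(tf.scope(), x, y);                  // element-wise x + y
    RiscBinaryArithmetic<TFloat32> viaAttr =
        RiscBinaryArithmetic.create(tf.scope(), x, y, "ADD");                  // operation selected by op_type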
+ * + * @param scope current scope + * @param x the x value + * @param y the y value + * @param opType the value of the opType property + * @param data type for {@code RiscBinaryComparison} output and operands + * @return a new instance of RiscBinaryComparison + */ + @Endpoint( + describeByClass = true + ) + public static RiscBinaryComparison create(Scope scope, Operand x, + Operand y, String opType) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscBinaryComparison", scope.makeOpName("RiscBinaryComparison")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("op_type", opType); + return new RiscBinaryComparison(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. + */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java new file mode 100644 index 00000000000..9692477dfd4 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java @@ -0,0 +1,83 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TType; + +/** + * The RiscBitcast operation + * + * @param data type for {@code y} output + */ +public final class RiscBitcast extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscBitcast"; + + private Output y; + + private RiscBitcast(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscBitcast operation. + * + * @param scope current scope + * @param x the x value + * @param DstT the value of the DstT property + * @param data type for {@code RiscBitcast} output and operands + * @return a new instance of RiscBitcast + */ + @Endpoint( + describeByClass = true + ) + public static RiscBitcast create(Scope scope, Operand x, + Class DstT) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscBitcast", scope.makeOpName("RiscBitcast")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("DstT", Operands.toDataType(DstT)); + return new RiscBitcast<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. 
+ */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java new file mode 100644 index 00000000000..b8f1bfe7086 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java @@ -0,0 +1,83 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * The RiscBroadcast operation + * + * @param data type for {@code output} output + */ +public final class RiscBroadcast extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscBroadcast"; + + private Output output; + + private RiscBroadcast(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscBroadcast operation. + * + * @param scope current scope + * @param input the input value + * @param shape the shape value + * @param data type for {@code RiscBroadcast} output and operands + * @return a new instance of RiscBroadcast + */ + @Endpoint( + describeByClass = true + ) + public static RiscBroadcast create(Scope scope, Operand input, + Operand shape) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscBroadcast", scope.makeOpName("RiscBroadcast")); + opBuilder.addInput(input.asOutput()); + opBuilder.addInput(shape.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscBroadcast<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java new file mode 100644 index 00000000000..fcdf9c65520 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java @@ -0,0 +1,83 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TType; + +/** + * The RiscCast operation + * + * @param data type for {@code y} output + */ +public final class RiscCast extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscCast"; + + private Output y; + + private RiscCast(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscCast operation. + * + * @param scope current scope + * @param x the x value + * @param DstT the value of the DstT property + * @param data type for {@code RiscCast} output and operands + * @return a new instance of RiscCast + */ + @Endpoint( + describeByClass = true + ) + public static RiscCast create(Scope scope, Operand x, + Class DstT) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscCast", scope.makeOpName("RiscCast")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("DstT", Operands.toDataType(DstT)); + return new RiscCast<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. + */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java new file mode 100644 index 00000000000..cff47bfb7aa --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java @@ -0,0 +1,79 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
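Both RiscBitcast and RiscCast above take the destination type as a Class argument that the generated code turns into the DstT attribute via Operands.toDataType. A hypothetical call site, reusing the assumed `tf` Ops instance, could look like:

    Operand<TFloat32> v = tf.constant(new float[] {1.5f, -2.5f});
    RiscCast<TInt32> cast = RiscCast.create(tf.scope(), v, TInt32.class);       // value-converting cast
    RiscBitcast<TInt32> bits = RiscBitcast.create(tf.scope(), v, TInt32.class); // reinterprets the underlying bytes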
+ +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscCeil operation + * + * @param data type for {@code y} output + */ +public final class RiscCeil extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscCeil"; + + private Output y; + + private RiscCeil(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscCeil operation. + * + * @param scope current scope + * @param x the x value + * @param data type for {@code RiscCeil} output and operands + * @return a new instance of RiscCeil + */ + @Endpoint( + describeByClass = true + ) + public static RiscCeil create(Scope scope, Operand x) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscCeil", scope.makeOpName("RiscCeil")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscCeil<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. + */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java new file mode 100644 index 00000000000..a862bfda954 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java @@ -0,0 +1,79 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscCholesky operation + * + * @param data type for {@code output} output + */ +public final class RiscCholesky extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscCholesky"; + + private Output output; + + private RiscCholesky(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscCholesky operation. 
+ * + * @param scope current scope + * @param input the input value + * @param data type for {@code RiscCholesky} output and operands + * @return a new instance of RiscCholesky + */ + @Endpoint( + describeByClass = true + ) + public static RiscCholesky create(Scope scope, Operand input) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscCholesky", scope.makeOpName("RiscCholesky")); + opBuilder.addInput(input.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscCholesky<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java new file mode 100644 index 00000000000..c5432006fcd --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java @@ -0,0 +1,84 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * The RiscConcat operation + * + * @param data type for {@code output} output + */ +public final class RiscConcat extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscConcat"; + + private Output output; + + private RiscConcat(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscConcat operation. + * + * @param scope current scope + * @param values the values value + * @param axis the axis value + * @param data type for {@code RiscConcat} output and operands + * @return a new instance of RiscConcat + */ + @Endpoint( + describeByClass = true + ) + public static RiscConcat create(Scope scope, Iterable> values, + Operand axis) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscConcat", scope.makeOpName("RiscConcat")); + opBuilder.addInputList(Operands.asOutputs(values)); + opBuilder.addInput(axis.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscConcat<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. 
+ */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java new file mode 100644 index 00000000000..7f320f37311 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java @@ -0,0 +1,180 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscConv operation + * + * @param data type for {@code output} output + */ +public final class RiscConv extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscConv"; + + private Output output; + + private RiscConv(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscConv operation. + * + * @param scope current scope + * @param input the input value + * @param filter the filter value + * @param strides the value of the strides property + * @param options carries optional attribute values + * @param data type for {@code RiscConv} output and operands + * @return a new instance of RiscConv + */ + @Endpoint( + describeByClass = true + ) + public static RiscConv create(Scope scope, Operand input, + Operand filter, List strides, Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscConv", scope.makeOpName("RiscConv")); + opBuilder.addInput(input.asOutput()); + opBuilder.addInput(filter.asOutput()); + opBuilder = scope.apply(opBuilder); + long[] stridesArray = new long[strides.size()]; + for (int i = 0 ; i < stridesArray.length ; i++) { + stridesArray[i] = strides.get(i); + } + opBuilder.setAttr("strides", stridesArray); + if (options != null) { + for (Options opts : options) { + if (opts.dataFormat != null) { + opBuilder.setAttr("data_format", opts.dataFormat); + } + if (opts.dilations != null) { + long[] dilationsArray = new long[opts.dilations.size()]; + for (int i = 0 ; i < dilationsArray.length ; i++) { + dilationsArray[i] = opts.dilations.get(i); + } + opBuilder.setAttr("dilations", dilationsArray); + } + } + } + return new RiscConv<>(opBuilder.build()); + } + + /** + * Sets the dataFormat option. + * + * @param dataFormat the dataFormat option + * @return this Options instance. 
+ */ + public static Options dataFormat(String dataFormat) { + return new Options().dataFormat(dataFormat); + } + + /** + * Sets the dilations option. + * + * @param dilations the dilations option + * @return this Options instance. + */ + public static Options dilations(List dilations) { + return new Options().dilations(dilations); + } + + /** + * Sets the dilations option. + * + * @param dilations the dilations option + * @return this Options instance. + */ + public static Options dilations(Long[] dilations) { + return new Options().dilations(dilations); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + /** + * Optional attributes for {@link org.tensorflow.op.risc.RiscConv} + */ + public static class Options { + private String dataFormat; + + private List dilations; + + private Options() { + } + + /** + * Sets the dataFormat option. + * + * @param dataFormat the dataFormat option + * @return this Options instance. + */ + public Options dataFormat(String dataFormat) { + this.dataFormat = dataFormat; + return this; + } + + /** + * Sets the dilations option. + * + * @param dilations the dilations option + * @return this Options instance. + */ + public Options dilations(List dilations) { + this.dilations = dilations; + return this; + } + + /** + * Sets the dilations option. + * + * @param dilations the dilations option + * @return this Options instance. + */ + public Options dilations(Long... dilations) { + this.dilations = Arrays.asList(dilations); + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java new file mode 100644 index 00000000000..f81bc3b4040 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java @@ -0,0 +1,79 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscCos operation + * + * @param data type for {@code y} output + */ +public final class RiscCos extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscCos"; + + private Output y; + + private RiscCos(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscCos operation. 
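RiscConv above follows the generator's usual Options pattern: the required strides attribute is positional while data_format and dilations are supplied as varargs Options. A hedged sketch with placeholder NHWC shapes (the `tf` Ops instance and a java.util.Arrays import are assumed):

    Operand<TFloat32> input  = tf.fill(tf.constant(new long[] {1, 8, 8, 3}), tf.constant(0f));
    Operand<TFloat32> filter = tf.fill(tf.constant(new long[] {3, 3, 3, 16}), tf.constant(0f));
    RiscConv<TFloat32> conv = RiscConv.create(
        tf.scope(), input, filter,
        Arrays.asList(1L, 1L, 1L, 1L),                          // strides (required attribute)
        RiscConv.dataFormat("NHWC"),                            // optional attribute
        RiscConv.dilations(Arrays.asList(1L, 1L, 1L, 1L)));     // optional attribute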
+ * + * @param scope current scope + * @param x the x value + * @param data type for {@code RiscCos} output and operands + * @return a new instance of RiscCos + */ + @Endpoint( + describeByClass = true + ) + public static RiscCos create(Scope scope, Operand x) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscCos", scope.makeOpName("RiscCos")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscCos<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. + */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java new file mode 100644 index 00000000000..621e4213f56 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java @@ -0,0 +1,81 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscDiv operation + * + * @param data type for {@code z} output + */ +public final class RiscDiv extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscDiv"; + + private Output z; + + private RiscDiv(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscDiv operation. + * + * @param scope current scope + * @param x the x value + * @param y the y value + * @param data type for {@code RiscDiv} output and operands + * @return a new instance of RiscDiv + */ + @Endpoint( + describeByClass = true + ) + public static RiscDiv create(Scope scope, Operand x, Operand y) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscDiv", scope.makeOpName("RiscDiv")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscDiv<>(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. 
+ */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java new file mode 100644 index 00000000000..650d2db5157 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java @@ -0,0 +1,147 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscDot operation + * + * @param data type for {@code product} output + */ +public final class RiscDot extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscDot"; + + private Output product; + + private RiscDot(Operation operation) { + super(operation); + int outputIdx = 0; + product = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscDot operation. + * + * @param scope current scope + * @param a the a value + * @param b the b value + * @param options carries optional attribute values + * @param data type for {@code RiscDot} output and operands + * @return a new instance of RiscDot + */ + @Endpoint( + describeByClass = true + ) + public static RiscDot create(Scope scope, Operand a, Operand b, + Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscDot", scope.makeOpName("RiscDot")); + opBuilder.addInput(a.asOutput()); + opBuilder.addInput(b.asOutput()); + opBuilder = scope.apply(opBuilder); + if (options != null) { + for (Options opts : options) { + if (opts.transposeA != null) { + opBuilder.setAttr("transpose_a", opts.transposeA); + } + if (opts.transposeB != null) { + opBuilder.setAttr("transpose_b", opts.transposeB); + } + } + } + return new RiscDot<>(opBuilder.build()); + } + + /** + * Sets the transposeA option. + * + * @param transposeA the transposeA option + * @return this Options instance. + */ + public static Options transposeA(Boolean transposeA) { + return new Options().transposeA(transposeA); + } + + /** + * Sets the transposeB option. + * + * @param transposeB the transposeB option + * @return this Options instance. + */ + public static Options transposeB(Boolean transposeB) { + return new Options().transposeB(transposeB); + } + + /** + * Gets product. + * + * @return product. 
+ */ + public Output product() { + return product; + } + + @Override + public Output asOutput() { + return product; + } + + /** + * Optional attributes for {@link org.tensorflow.op.risc.RiscDot} + */ + public static class Options { + private Boolean transposeA; + + private Boolean transposeB; + + private Options() { + } + + /** + * Sets the transposeA option. + * + * @param transposeA the transposeA option + * @return this Options instance. + */ + public Options transposeA(Boolean transposeA) { + this.transposeA = transposeA; + return this; + } + + /** + * Sets the transposeB option. + * + * @param transposeB the transposeB option + * @return this Options instance. + */ + public Options transposeB(Boolean transposeB) { + this.transposeB = transposeB; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java new file mode 100644 index 00000000000..ab6b1388b52 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java @@ -0,0 +1,79 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscExp operation + * + * @param data type for {@code y} output + */ +public final class RiscExp extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscExp"; + + private Output y; + + private RiscExp(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscExp operation. + * + * @param scope current scope + * @param x the x value + * @param data type for {@code RiscExp} output and operands + * @return a new instance of RiscExp + */ + @Endpoint( + describeByClass = true + ) + public static RiscExp create(Scope scope, Operand x) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscExp", scope.makeOpName("RiscExp")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscExp<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. 
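RiscDot above is a matrix product whose transpose_a and transpose_b attributes are exposed through the same Options mechanism, for example (sketch only, assumed `tf` Ops instance):

    Operand<TFloat32> a = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
    Operand<TFloat32> b = tf.constant(new float[][] {{5f, 6f}, {7f, 8f}});
    RiscDot<TFloat32> product =
        RiscDot.create(tf.scope(), a, b, RiscDot.transposeB(true));            // computes a * transpose(b)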
+ */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeDeserialize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java similarity index 53% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeDeserialize.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java index 690cd0fba7b..9096bcb2f33 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeDeserialize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java @@ -15,47 +15,65 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.core; +package org.tensorflow.op.risc; import org.tensorflow.Operand; import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; import org.tensorflow.op.RawOp; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** - * Deserializes a proto into the tree handle + * The RiscFft operation + * + * @param data type for {@code output} output */ -public final class TensorForestTreeDeserialize extends RawOp { +public final class RiscFft extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "TensorForestTreeDeserialize"; + public static final String OP_NAME = "RiscFft"; - private TensorForestTreeDeserialize(Operation operation) { + private Output output; + + private RiscFft(Operation operation) { super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new TensorForestTreeDeserialize operation. + * Factory method to create a class wrapping a new RiscFft operation. * * @param scope current scope - * @param treeHandle Handle to the tree resource to be restored. - * @param treeConfig Serialied proto string of the boosted_trees.Tree proto. - * @return a new instance of TensorForestTreeDeserialize + * @param input the input value + * @param data type for {@code RiscFft} output and operands + * @return a new instance of RiscFft */ @Endpoint( describeByClass = true ) - public static TensorForestTreeDeserialize create(Scope scope, Operand treeHandle, - Operand treeConfig) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorForestTreeDeserialize", scope.makeOpName("TensorForestTreeDeserialize")); - opBuilder.addInput(treeHandle.asOutput()); - opBuilder.addInput(treeConfig.asOutput()); + public static RiscFft create(Scope scope, Operand input) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscFft", scope.makeOpName("RiscFft")); + opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); - return new TensorForestTreeDeserialize(opBuilder.build()); + return new RiscFft<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. 
+ */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java new file mode 100644 index 00000000000..ceac25a2609 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java @@ -0,0 +1,79 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscFloor operation + * + * @param data type for {@code y} output + */ +public final class RiscFloor extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscFloor"; + + private Output y; + + private RiscFloor(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscFloor operation. + * + * @param scope current scope + * @param x the x value + * @param data type for {@code RiscFloor} output and operands + * @return a new instance of RiscFloor + */ + @Endpoint( + describeByClass = true + ) + public static RiscFloor create(Scope scope, Operand x) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscFloor", scope.makeOpName("RiscFloor")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscFloor<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. + */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java new file mode 100644 index 00000000000..9b6f39aa2ac --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java @@ -0,0 +1,124 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * The RiscGather operation + * + * @param data type for {@code output} output + */ +public final class RiscGather extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscGather"; + + private Output output; + + private RiscGather(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscGather operation. + * + * @param scope current scope + * @param params the params value + * @param indices the indices value + * @param axis the axis value + * @param options carries optional attribute values + * @param data type for {@code RiscGather} output and operands + * @return a new instance of RiscGather + */ + @Endpoint( + describeByClass = true + ) + public static RiscGather create(Scope scope, Operand params, + Operand indices, Operand axis, Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscGather", scope.makeOpName("RiscGather")); + opBuilder.addInput(params.asOutput()); + opBuilder.addInput(indices.asOutput()); + opBuilder.addInput(axis.asOutput()); + opBuilder = scope.apply(opBuilder); + if (options != null) { + for (Options opts : options) { + if (opts.batchDims != null) { + opBuilder.setAttr("batch_dims", opts.batchDims); + } + } + } + return new RiscGather<>(opBuilder.build()); + } + + /** + * Sets the batchDims option. + * + * @param batchDims the batchDims option + * @return this Options instance. + */ + public static Options batchDims(Long batchDims) { + return new Options().batchDims(batchDims); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + /** + * Optional attributes for {@link org.tensorflow.op.risc.RiscGather} + */ + public static class Options { + private Long batchDims; + + private Options() { + } + + /** + * Sets the batchDims option. + * + * @param batchDims the batchDims option + * @return this Options instance. + */ + public Options batchDims(Long batchDims) { + this.batchDims = batchDims; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java new file mode 100644 index 00000000000..ba7c79dd7e7 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java @@ -0,0 +1,99 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * The RiscImag operation + * + * @param data type for {@code output} output + */ +public final class RiscImag extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscImag"; + + private Output output; + + private RiscImag(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscImag operation. + * + * @param scope current scope + * @param input the input value + * @param Tout the value of the Tout property + * @param data type for {@code RiscImag} output and operands + * @return a new instance of RiscImag + */ + @Endpoint( + describeByClass = true + ) + public static RiscImag create(Scope scope, Operand input, + Class Tout) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscImag", scope.makeOpName("RiscImag")); + opBuilder.addInput(input.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("Tout", Operands.toDataType(Tout)); + return new RiscImag<>(opBuilder.build()); + } + + /** + * Factory method to create a class wrapping a new RiscImag operation, with the default output types. + * + * @param scope current scope + * @param input the input value + * @return a new instance of RiscImag, with default output types + */ + @Endpoint( + describeByClass = true + ) + public static RiscImag create(Scope scope, Operand input) { + return create(scope, input, TFloat32.class); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeIsInitializedOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java similarity index 52% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeIsInitializedOp.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java index 42a8b748515..d9f88c6006a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorForestTreeIsInitializedOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
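RiscGather above mirrors the familiar gather(params, indices, axis) shape with an optional batch_dims attribute, and RiscImag offers a typed overload plus a default overload that fixes Tout to TFloat32. A hypothetical call, again assuming the `tf` Ops instance and illustrative values:

    Operand<TFloat32> params  = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
    Operand<TInt32>   indices = tf.constant(new int[] {1, 0});
    RiscGather<TFloat32> rows = RiscGather.create(
        tf.scope(), params, indices, tf.constant(0),            // gather along axis 0
        RiscGather.batchDims(0L));                               // optional batch_dims attribute
    // RiscImag.create(tf.scope(), complexInput) would default Tout to TFloat32.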
-package org.tensorflow.op.core; +package org.tensorflow.op.risc; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -25,54 +25,53 @@ import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.types.TBool; -import org.tensorflow.types.family.TType; +import org.tensorflow.types.family.TNumber; /** - * Checks whether a tree has been initialized. + * The RiscIsFinite operation */ -public final class TensorForestTreeIsInitializedOp extends RawOp implements Operand { +public final class RiscIsFinite extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "TensorForestTreeIsInitializedOp"; + public static final String OP_NAME = "RiscIsFinite"; - private Output isInitialized; + private Output y; - private TensorForestTreeIsInitializedOp(Operation operation) { + private RiscIsFinite(Operation operation) { super(operation); int outputIdx = 0; - isInitialized = operation.output(outputIdx++); + y = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new TensorForestTreeIsInitializedOp operation. + * Factory method to create a class wrapping a new RiscIsFinite operation. * * @param scope current scope - * @param treeHandle Handle to the tree. - * @return a new instance of TensorForestTreeIsInitializedOp + * @param x the x value + * @return a new instance of RiscIsFinite */ @Endpoint( describeByClass = true ) - public static TensorForestTreeIsInitializedOp create(Scope scope, - Operand treeHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorForestTreeIsInitializedOp", scope.makeOpName("TensorForestTreeIsInitializedOp")); - opBuilder.addInput(treeHandle.asOutput()); + public static RiscIsFinite create(Scope scope, Operand x) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscIsFinite", scope.makeOpName("RiscIsFinite")); + opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); - return new TensorForestTreeIsInitializedOp(opBuilder.build()); + return new RiscIsFinite(opBuilder.build()); } /** - * Gets isInitialized. - * Whether the tree is initialized. - * @return isInitialized. + * Gets y. + * + * @return y. */ - public Output isInitialized() { - return isInitialized; + public Output y() { + return y; } @Override public Output asOutput() { - return isInitialized; + return y; } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java new file mode 100644 index 00000000000..97fac206298 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java @@ -0,0 +1,79 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscLog operation + * + * @param data type for {@code y} output + */ +public final class RiscLog extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscLog"; + + private Output y; + + private RiscLog(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscLog operation. + * + * @param scope current scope + * @param x the x value + * @param data type for {@code RiscLog} output and operands + * @return a new instance of RiscLog + */ + @Endpoint( + describeByClass = true + ) + public static RiscLog create(Scope scope, Operand x) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscLog", scope.makeOpName("RiscLog")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscLog<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. + */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java new file mode 100644 index 00000000000..71ea8855546 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java @@ -0,0 +1,78 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TBool; + +/** + * The RiscLogicalAnd operation + */ +public final class RiscLogicalAnd extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscLogicalAnd"; + + private Output z; + + private RiscLogicalAnd(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscLogicalAnd operation. 
+ * + * @param scope current scope + * @param x the x value + * @param y the y value + * @return a new instance of RiscLogicalAnd + */ + @Endpoint( + describeByClass = true + ) + public static RiscLogicalAnd create(Scope scope, Operand x, Operand y) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscLogicalAnd", scope.makeOpName("RiscLogicalAnd")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscLogicalAnd(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. + */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java new file mode 100644 index 00000000000..053a64376a5 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java @@ -0,0 +1,76 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TBool; + +/** + * The RiscLogicalNot operation + */ +public final class RiscLogicalNot extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscLogicalNot"; + + private Output z; + + private RiscLogicalNot(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscLogicalNot operation. + * + * @param scope current scope + * @param x the x value + * @return a new instance of RiscLogicalNot + */ + @Endpoint( + describeByClass = true + ) + public static RiscLogicalNot create(Scope scope, Operand x) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscLogicalNot", scope.makeOpName("RiscLogicalNot")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscLogicalNot(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. 
+ */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java new file mode 100644 index 00000000000..e3ffe0141f2 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java @@ -0,0 +1,78 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TBool; + +/** + * The RiscLogicalOr operation + */ +public final class RiscLogicalOr extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscLogicalOr"; + + private Output z; + + private RiscLogicalOr(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscLogicalOr operation. + * + * @param scope current scope + * @param x the x value + * @param y the y value + * @return a new instance of RiscLogicalOr + */ + @Endpoint( + describeByClass = true + ) + public static RiscLogicalOr create(Scope scope, Operand x, Operand y) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscLogicalOr", scope.makeOpName("RiscLogicalOr")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscLogicalOr(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. + */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java new file mode 100644 index 00000000000..f036460b85e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java @@ -0,0 +1,83 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * Returns max(x, y) element-wise. + * NOTE: {@code risc.RiscMax} does not support broadcasting. + *
      Given two input tensors, the {@code tf.risc_max} operation computes the maximum for every element in the tensor. + * + * @param data type for {@code max} output + */ +public final class RiscMax extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscMax"; + + private Output max; + + private RiscMax(Operation operation) { + super(operation); + int outputIdx = 0; + max = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscMax operation. + * + * @param scope current scope + * @param x the x value + * @param y the y value + * @param data type for {@code RiscMax} output and operands + * @return a new instance of RiscMax + */ + @Endpoint( + describeByClass = true + ) + public static RiscMax create(Scope scope, Operand x, Operand y) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscMax", scope.makeOpName("RiscMax")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscMax<>(opBuilder.build()); + } + + /** + * Gets max. + * + * @return max. + */ + public Output max() { + return max; + } + + @Override + public Output asOutput() { + return max; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java new file mode 100644 index 00000000000..259bc140e93 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java @@ -0,0 +1,81 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscMin operation + * + * @param data type for {@code z} output + */ +public final class RiscMin extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscMin"; + + private Output z; + + private RiscMin(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscMin operation. 
+ * + * @param scope current scope + * @param x the x value + * @param y the y value + * @param data type for {@code RiscMin} output and operands + * @return a new instance of RiscMin + */ + @Endpoint( + describeByClass = true + ) + public static RiscMin create(Scope scope, Operand x, Operand y) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscMin", scope.makeOpName("RiscMin")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscMin<>(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. + */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java new file mode 100644 index 00000000000..16518324a8d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java @@ -0,0 +1,81 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscMul operation + * + * @param data type for {@code z} output + */ +public final class RiscMul extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscMul"; + + private Output z; + + private RiscMul(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscMul operation. + * + * @param scope current scope + * @param x the x value + * @param y the y value + * @param data type for {@code RiscMul} output and operands + * @return a new instance of RiscMul + */ + @Endpoint( + describeByClass = true + ) + public static RiscMul create(Scope scope, Operand x, Operand y) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscMul", scope.makeOpName("RiscMul")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscMul<>(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. 
+ */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java new file mode 100644 index 00000000000..8b2833592c1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java @@ -0,0 +1,79 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscNeg operation + * + * @param data type for {@code y} output + */ +public final class RiscNeg extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscNeg"; + + private Output y; + + private RiscNeg(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscNeg operation. + * + * @param scope current scope + * @param x the x value + * @param data type for {@code RiscNeg} output and operands + * @return a new instance of RiscNeg + */ + @Endpoint( + describeByClass = true + ) + public static RiscNeg create(Scope scope, Operand x) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscNeg", scope.makeOpName("RiscNeg")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscNeg<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. + */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java new file mode 100644 index 00000000000..34dfbd72afe --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java @@ -0,0 +1,84 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscPad operation + * + * @param data type for {@code output} output + */ +public final class RiscPad extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscPad"; + + private Output output; + + private RiscPad(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscPad operation. + * + * @param scope current scope + * @param input the input value + * @param paddings the paddings value + * @param constantValues the constantValues value + * @param data type for {@code RiscPad} output and operands + * @return a new instance of RiscPad + */ + @Endpoint( + describeByClass = true + ) + public static RiscPad create(Scope scope, Operand input, + Operand paddings, Operand constantValues) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscPad", scope.makeOpName("RiscPad")); + opBuilder.addInput(input.asOutput()); + opBuilder.addInput(paddings.asOutput()); + opBuilder.addInput(constantValues.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscPad<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java new file mode 100644 index 00000000000..83762058d15 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java @@ -0,0 +1,134 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.risc; + +import java.util.List; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscPool operation + * + * @param data type for {@code output} output + */ +public final class RiscPool extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscPool"; + + private Output output; + + private RiscPool(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscPool operation. + * + * @param scope current scope + * @param value the value value + * @param ksize the value of the ksize property + * @param strides the value of the strides property + * @param poolingType the value of the poolingType property + * @param options carries optional attribute values + * @param data type for {@code RiscPool} output and operands + * @return a new instance of RiscPool + */ + @Endpoint( + describeByClass = true + ) + public static RiscPool create(Scope scope, Operand value, + List ksize, List strides, String poolingType, Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscPool", scope.makeOpName("RiscPool")); + opBuilder.addInput(value.asOutput()); + opBuilder = scope.apply(opBuilder); + long[] ksizeArray = new long[ksize.size()]; + for (int i = 0 ; i < ksizeArray.length ; i++) { + ksizeArray[i] = ksize.get(i); + } + opBuilder.setAttr("ksize", ksizeArray); + long[] stridesArray = new long[strides.size()]; + for (int i = 0 ; i < stridesArray.length ; i++) { + stridesArray[i] = strides.get(i); + } + opBuilder.setAttr("strides", stridesArray); + opBuilder.setAttr("pooling_type", poolingType); + if (options != null) { + for (Options opts : options) { + if (opts.dataFormat != null) { + opBuilder.setAttr("data_format", opts.dataFormat); + } + } + } + return new RiscPool<>(opBuilder.build()); + } + + /** + * Sets the dataFormat option. + * + * @param dataFormat the dataFormat option + * @return this Options instance. + */ + public static Options dataFormat(String dataFormat) { + return new Options().dataFormat(dataFormat); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + /** + * Optional attributes for {@link org.tensorflow.op.risc.RiscPool} + */ + public static class Options { + private String dataFormat; + + private Options() { + } + + /** + * Sets the dataFormat option. + * + * @param dataFormat the dataFormat option + * @return this Options instance. + */ + public Options dataFormat(String dataFormat) { + this.dataFormat = dataFormat; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java new file mode 100644 index 00000000000..657dc0dd80a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java @@ -0,0 +1,81 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscPow operation + * + * @param data type for {@code z} output + */ +public final class RiscPow extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscPow"; + + private Output z; + + private RiscPow(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscPow operation. + * + * @param scope current scope + * @param x the x value + * @param y the y value + * @param data type for {@code RiscPow} output and operands + * @return a new instance of RiscPow + */ + @Endpoint( + describeByClass = true + ) + public static RiscPow create(Scope scope, Operand x, Operand y) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscPow", scope.makeOpName("RiscPow")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscPow<>(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. + */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java new file mode 100644 index 00000000000..af8f26b7802 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java @@ -0,0 +1,117 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscRandomUniform operation + */ +public final class RiscRandomUniform extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscRandomUniform"; + + private Output output; + + private RiscRandomUniform(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscRandomUniform operation. + * + * @param scope current scope + * @param shape the shape value + * @param options carries optional attribute values + * @return a new instance of RiscRandomUniform + */ + @Endpoint( + describeByClass = true + ) + public static RiscRandomUniform create(Scope scope, Operand shape, + Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscRandomUniform", scope.makeOpName("RiscRandomUniform")); + opBuilder.addInput(shape.asOutput()); + opBuilder = scope.apply(opBuilder); + if (options != null) { + for (Options opts : options) { + if (opts.seed != null) { + opBuilder.setAttr("seed", opts.seed); + } + } + } + return new RiscRandomUniform(opBuilder.build()); + } + + /** + * Sets the seed option. + * + * @param seed the seed option + * @return this Options instance. + */ + public static Options seed(Long seed) { + return new Options().seed(seed); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + /** + * Optional attributes for {@link org.tensorflow.op.risc.RiscRandomUniform} + */ + public static class Options { + private Long seed; + + private Options() { + } + + /** + * Sets the seed option. + * + * @param seed the seed option + * @return this Options instance. + */ + public Options seed(Long seed) { + this.seed = seed; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java new file mode 100644 index 00000000000..962dde9f7c2 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java @@ -0,0 +1,99 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * The RiscReal operation + * + * @param data type for {@code output} output + */ +public final class RiscReal extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscReal"; + + private Output output; + + private RiscReal(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscReal operation. + * + * @param scope current scope + * @param input the input value + * @param Tout the value of the Tout property + * @param data type for {@code RiscReal} output and operands + * @return a new instance of RiscReal + */ + @Endpoint( + describeByClass = true + ) + public static RiscReal create(Scope scope, Operand input, + Class Tout) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscReal", scope.makeOpName("RiscReal")); + opBuilder.addInput(input.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("Tout", Operands.toDataType(Tout)); + return new RiscReal<>(opBuilder.build()); + } + + /** + * Factory method to create a class wrapping a new RiscReal operation, with the default output types. + * + * @param scope current scope + * @param input the input value + * @return a new instance of RiscReal, with default output types + */ + @Endpoint( + describeByClass = true + ) + public static RiscReal create(Scope scope, Operand input) { + return create(scope, input, TFloat32.class); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java new file mode 100644 index 00000000000..f8a66eff454 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java @@ -0,0 +1,84 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscReduce operation + * + * @param data type for {@code output} output + */ +public final class RiscReduce extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscReduce"; + + private Output output; + + private RiscReduce(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscReduce operation. + * + * @param scope current scope + * @param tensor the tensor value + * @param axis the axis value + * @param reduceType the value of the reduceType property + * @param data type for {@code RiscReduce} output and operands + * @return a new instance of RiscReduce + */ + @Endpoint( + describeByClass = true + ) + public static RiscReduce create(Scope scope, Operand tensor, + Operand axis, String reduceType) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscReduce", scope.makeOpName("RiscReduce")); + opBuilder.addInput(tensor.asOutput()); + opBuilder.addInput(axis.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("reduce_type", reduceType); + return new RiscReduce<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java new file mode 100644 index 00000000000..3cd94a30933 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java @@ -0,0 +1,81 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscRem operation + * + * @param data type for {@code z} output + */ +public final class RiscRem extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscRem"; + + private Output z; + + private RiscRem(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscRem operation. + * + * @param scope current scope + * @param x the x value + * @param y the y value + * @param data type for {@code RiscRem} output and operands + * @return a new instance of RiscRem + */ + @Endpoint( + describeByClass = true + ) + public static RiscRem create(Scope scope, Operand x, Operand y) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscRem", scope.makeOpName("RiscRem")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscRem<>(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. + */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java new file mode 100644 index 00000000000..8e0690bf7c1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java @@ -0,0 +1,82 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscReshape operation + * + * @param data type for {@code output} output + */ +public final class RiscReshape extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscReshape"; + + private Output output; + + private RiscReshape(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscReshape operation. 
+ * + * @param scope current scope + * @param tensor the tensor value + * @param shape the shape value + * @param data type for {@code RiscReshape} output and operands + * @return a new instance of RiscReshape + */ + @Endpoint( + describeByClass = true + ) + public static RiscReshape create(Scope scope, Operand tensor, + Operand shape) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscReshape", scope.makeOpName("RiscReshape")); + opBuilder.addInput(tensor.asOutput()); + opBuilder.addInput(shape.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscReshape<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java new file mode 100644 index 00000000000..faacbbc2e0d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java @@ -0,0 +1,82 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscReverse operation + * + * @param data type for {@code output} output + */ +public final class RiscReverse extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscReverse"; + + private Output output; + + private RiscReverse(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscReverse operation. + * + * @param scope current scope + * @param tensor the tensor value + * @param axis the axis value + * @param data type for {@code RiscReverse} output and operands + * @return a new instance of RiscReverse + */ + @Endpoint( + describeByClass = true + ) + public static RiscReverse create(Scope scope, Operand tensor, + Operand axis) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscReverse", scope.makeOpName("RiscReverse")); + opBuilder.addInput(tensor.asOutput()); + opBuilder.addInput(axis.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscReverse<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. 
+ */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java new file mode 100644 index 00000000000..6b412dc25d4 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java @@ -0,0 +1,85 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscScatter operation + * + * @param data type for {@code output} output + */ +public final class RiscScatter extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscScatter"; + + private Output output; + + private RiscScatter(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscScatter operation. + * + * @param scope current scope + * @param indices the indices value + * @param updates the updates value + * @param shape the shape value + * @param data type for {@code RiscScatter} output and operands + * @param data type for {@code RiscScatter} output and operands + * @return a new instance of RiscScatter + */ + @Endpoint( + describeByClass = true + ) + public static RiscScatter create(Scope scope, + Operand indices, Operand updates, Operand shape) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscScatter", scope.makeOpName("RiscScatter")); + opBuilder.addInput(indices.asOutput()); + opBuilder.addInput(updates.asOutput()); + opBuilder.addInput(shape.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscScatter<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java new file mode 100644 index 00000000000..0075fbabc2e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java @@ -0,0 +1,98 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscShape operation + * + * @param data type for {@code output} output + */ +public final class RiscShape extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscShape"; + + private Output output; + + private RiscShape(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscShape operation. + * + * @param scope current scope + * @param input the input value + * @param outType the value of the outType property + * @param data type for {@code RiscShape} output and operands + * @return a new instance of RiscShape + */ + @Endpoint( + describeByClass = true + ) + public static RiscShape create(Scope scope, + Operand input, Class outType) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscShape", scope.makeOpName("RiscShape")); + opBuilder.addInput(input.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("out_type", Operands.toDataType(outType)); + return new RiscShape<>(opBuilder.build()); + } + + /** + * Factory method to create a class wrapping a new RiscShape operation, with the default output types. + * + * @param scope current scope + * @param input the input value + * @return a new instance of RiscShape, with default output types + */ + @Endpoint( + describeByClass = true + ) + public static RiscShape create(Scope scope, Operand input) { + return create(scope, input, TInt32.class); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java new file mode 100644 index 00000000000..c4fbabe08ff --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java @@ -0,0 +1,79 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscSign operation + * + * @param data type for {@code y} output + */ +public final class RiscSign extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscSign"; + + private Output y; + + private RiscSign(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscSign operation. + * + * @param scope current scope + * @param x the x value + * @param data type for {@code RiscSign} output and operands + * @return a new instance of RiscSign + */ + @Endpoint( + describeByClass = true + ) + public static RiscSign create(Scope scope, Operand x) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscSign", scope.makeOpName("RiscSign")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscSign<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. + */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java new file mode 100644 index 00000000000..cbeb1e65a9d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java @@ -0,0 +1,85 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscSlice operation + * + * @param data type for {@code output} output + */ +public final class RiscSlice extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscSlice"; + + private Output output; + + private RiscSlice(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscSlice operation. + * + * @param scope current scope + * @param input the input value + * @param begin the begin value + * @param sizeOutput the sizeOutput value + * @param data type for {@code RiscSlice} output and operands + * @param data type for {@code RiscSlice} output and operands + * @return a new instance of RiscSlice + */ + @Endpoint( + describeByClass = true + ) + public static RiscSlice create(Scope scope, + Operand input, Operand begin, Operand sizeOutput) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscSlice", scope.makeOpName("RiscSlice")); + opBuilder.addInput(input.asOutput()); + opBuilder.addInput(begin.asOutput()); + opBuilder.addInput(sizeOutput.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscSlice<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java new file mode 100644 index 00000000000..54ed2a79e69 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java @@ -0,0 +1,84 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscSort operation + * + * @param data type for {@code output} output + */ +public final class RiscSort extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscSort"; + + private Output output; + + private RiscSort(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscSort operation. + * + * @param scope current scope + * @param input the input value + * @param axis the axis value + * @param direction the value of the direction property + * @param data type for {@code RiscSort} output and operands + * @return a new instance of RiscSort + */ + @Endpoint( + describeByClass = true + ) + public static RiscSort create(Scope scope, Operand input, + Operand axis, String direction) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscSort", scope.makeOpName("RiscSort")); + opBuilder.addInput(input.asOutput()); + opBuilder.addInput(axis.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("direction", direction); + return new RiscSort<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java new file mode 100644 index 00000000000..1e14f2ea36d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java @@ -0,0 +1,146 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
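// A sketch for the RiscSort factory above, again assuming an existing Ops handle tf.
// The direction string is assumed here to follow the usual "ASCENDING"/"DESCENDING"
// convention; that value is not confirmed by this patch and should be checked against
// the op definition.
Operand<TFloat32> values = tf.constant(new float[] {3f, 1f, 2f});
Operand<TInt32> axis = tf.constant(-1);  // sort along the last axis
RiscSort<TFloat32> sorted = RiscSort.create(tf.scope(), values, axis, "ASCENDING");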
+ +package org.tensorflow.op.risc; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TType; + +/** + * The RiscSqueeze operation + * + * @param data type for {@code output} output + */ +public final class RiscSqueeze extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscSqueeze"; + + private Output output; + + private RiscSqueeze(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscSqueeze operation. + * + * @param scope current scope + * @param input the input value + * @param options carries optional attribute values + * @param data type for {@code RiscSqueeze} output and operands + * @return a new instance of RiscSqueeze + */ + @Endpoint( + describeByClass = true + ) + public static RiscSqueeze create(Scope scope, Operand input, + Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscSqueeze", scope.makeOpName("RiscSqueeze")); + opBuilder.addInput(input.asOutput()); + opBuilder = scope.apply(opBuilder); + if (options != null) { + for (Options opts : options) { + if (opts.squeezeDims != null) { + long[] squeezeDimsArray = new long[opts.squeezeDims.size()]; + for (int i = 0 ; i < squeezeDimsArray.length ; i++) { + squeezeDimsArray[i] = opts.squeezeDims.get(i); + } + opBuilder.setAttr("squeeze_dims", squeezeDimsArray); + } + } + } + return new RiscSqueeze<>(opBuilder.build()); + } + + /** + * Sets the squeezeDims option. + * + * @param squeezeDims the squeezeDims option + * @return this Options instance. + */ + public static Options squeezeDims(List squeezeDims) { + return new Options().squeezeDims(squeezeDims); + } + + /** + * Sets the squeezeDims option. + * + * @param squeezeDims the squeezeDims option + * @return this Options instance. + */ + public static Options squeezeDims(Long[] squeezeDims) { + return new Options().squeezeDims(squeezeDims); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + /** + * Optional attributes for {@link org.tensorflow.op.risc.RiscSqueeze} + */ + public static class Options { + private List squeezeDims; + + private Options() { + } + + /** + * Sets the squeezeDims option. + * + * @param squeezeDims the squeezeDims option + * @return this Options instance. + */ + public Options squeezeDims(List squeezeDims) { + this.squeezeDims = squeezeDims; + return this; + } + + /** + * Sets the squeezeDims option. + * + * @param squeezeDims the squeezeDims option + * @return this Options instance. + */ + public Options squeezeDims(Long... squeezeDims) { + this.squeezeDims = Arrays.asList(squeezeDims); + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java new file mode 100644 index 00000000000..4f1a9515914 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java @@ -0,0 +1,81 @@ +/* Copyright 2018 The TensorFlow Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscSub operation + * + * @param data type for {@code z} output + */ +public final class RiscSub extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscSub"; + + private Output z; + + private RiscSub(Operation operation) { + super(operation); + int outputIdx = 0; + z = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscSub operation. + * + * @param scope current scope + * @param x the x value + * @param y the y value + * @param data type for {@code RiscSub} output and operands + * @return a new instance of RiscSub + */ + @Endpoint( + describeByClass = true + ) + public static RiscSub create(Scope scope, Operand x, Operand y) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscSub", scope.makeOpName("RiscSub")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(y.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscSub<>(opBuilder.build()); + } + + /** + * Gets z. + * + * @return z. + */ + public Output z() { + return z; + } + + @Override + public Output asOutput() { + return z; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java new file mode 100644 index 00000000000..27a0a2879fc --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java @@ -0,0 +1,83 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
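// RiscSqueeze above follows the standard generated Options pattern; a sketch of passing
// the optional squeeze_dims attribute (input shape and dims are assumptions, tf as in the
// earlier sketches, java.util.Arrays imported).
Operand<TFloat32> batched = tf.constant(new float[][][] {{{1f}, {2f}, {3f}}}); // shape [1, 3, 1]
RiscSqueeze<TFloat32> squeezed = RiscSqueeze.create(
    tf.scope(), batched, RiscSqueeze.squeezeDims(Arrays.asList(0L, 2L)));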
+ +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * The RiscTranspose operation + * + * @param data type for {@code y} output + */ +public final class RiscTranspose extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscTranspose"; + + private Output y; + + private RiscTranspose(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscTranspose operation. + * + * @param scope current scope + * @param x the x value + * @param perm the perm value + * @param data type for {@code RiscTranspose} output and operands + * @return a new instance of RiscTranspose + */ + @Endpoint( + describeByClass = true + ) + public static RiscTranspose create(Scope scope, Operand x, + Operand perm) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscTranspose", scope.makeOpName("RiscTranspose")); + opBuilder.addInput(x.asOutput()); + opBuilder.addInput(perm.asOutput()); + opBuilder = scope.apply(opBuilder); + return new RiscTranspose<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. + */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java new file mode 100644 index 00000000000..000b5c22b66 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java @@ -0,0 +1,147 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscTriangularSolve operation + * + * @param data type for {@code output} output + */ +public final class RiscTriangularSolve extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscTriangularSolve"; + + private Output output; + + private RiscTriangularSolve(Operation operation) { + super(operation); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscTriangularSolve operation. + * + * @param scope current scope + * @param matrix the matrix value + * @param rhs the rhs value + * @param options carries optional attribute values + * @param data type for {@code RiscTriangularSolve} output and operands + * @return a new instance of RiscTriangularSolve + */ + @Endpoint( + describeByClass = true + ) + public static RiscTriangularSolve create(Scope scope, Operand matrix, + Operand rhs, Options... options) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscTriangularSolve", scope.makeOpName("RiscTriangularSolve")); + opBuilder.addInput(matrix.asOutput()); + opBuilder.addInput(rhs.asOutput()); + opBuilder = scope.apply(opBuilder); + if (options != null) { + for (Options opts : options) { + if (opts.lower != null) { + opBuilder.setAttr("lower", opts.lower); + } + if (opts.adjoint != null) { + opBuilder.setAttr("adjoint", opts.adjoint); + } + } + } + return new RiscTriangularSolve<>(opBuilder.build()); + } + + /** + * Sets the lower option. + * + * @param lower the lower option + * @return this Options instance. + */ + public static Options lower(Boolean lower) { + return new Options().lower(lower); + } + + /** + * Sets the adjoint option. + * + * @param adjoint the adjoint option + * @return this Options instance. + */ + public static Options adjoint(Boolean adjoint) { + return new Options().adjoint(adjoint); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + /** + * Optional attributes for {@link org.tensorflow.op.risc.RiscTriangularSolve} + */ + public static class Options { + private Boolean lower; + + private Boolean adjoint; + + private Options() { + } + + /** + * Sets the lower option. + * + * @param lower the lower option + * @return this Options instance. + */ + public Options lower(Boolean lower) { + this.lower = lower; + return this; + } + + /** + * Sets the adjoint option. + * + * @param adjoint the adjoint option + * @return this Options instance. + */ + public Options adjoint(Boolean adjoint) { + this.adjoint = adjoint; + return this; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java new file mode 100644 index 00000000000..ef530d473a4 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java @@ -0,0 +1,81 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
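// RiscTriangularSolve above exposes two boolean options; a sketch of combining them
// (matrix and rhs values are illustrative only, tf as in the earlier sketches).
Operand<TFloat32> matrix = tf.constant(new float[][] {{2f, 0f}, {3f, 1f}});
Operand<TFloat32> rhs = tf.constant(new float[][] {{4f}, {7f}});
RiscTriangularSolve<TFloat32> solve = RiscTriangularSolve.create(
    tf.scope(), matrix, rhs,
    RiscTriangularSolve.lower(true),
    RiscTriangularSolve.adjoint(false));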
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.risc; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.family.TNumber; + +/** + * The RiscUnary operation + * + * @param data type for {@code y} output + */ +public final class RiscUnary extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "RiscUnary"; + + private Output y; + + private RiscUnary(Operation operation) { + super(operation); + int outputIdx = 0; + y = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new RiscUnary operation. + * + * @param scope current scope + * @param x the x value + * @param opType the value of the opType property + * @param data type for {@code RiscUnary} output and operands + * @return a new instance of RiscUnary + */ + @Endpoint( + describeByClass = true + ) + public static RiscUnary create(Scope scope, Operand x, String opType) { + OperationBuilder opBuilder = scope.env().opBuilder("RiscUnary", scope.makeOpName("RiscUnary")); + opBuilder.addInput(x.asOutput()); + opBuilder = scope.apply(opBuilder); + opBuilder.setAttr("op_type", opType); + return new RiscUnary<>(opBuilder.build()); + } + + /** + * Gets y. + * + * @return y. + */ + public Output y() { + return y; + } + + @Override + public Output asOutput() { + return y; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java index d8cff616735..7ad19e303bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java @@ -113,6 +113,13 @@ public static EnqueueTPUEmbeddingRaggedTensorBatch create(Scope scope, } opBuilder.setAttr("max_sequence_lengths", maxSequenceLengthsArray); } + if (opts.numFeatures != null) { + long[] numFeaturesArray = new long[opts.numFeatures.size()]; + for (int i = 0 ; i < numFeaturesArray.length ; i++) { + numFeaturesArray[i] = opts.numFeatures.get(i); + } + opBuilder.setAttr("num_features", numFeaturesArray); + } } } return new EnqueueTPUEmbeddingRaggedTensorBatch(opBuilder.build()); @@ -179,6 +186,26 @@ public static Options maxSequenceLengths(Long[] maxSequenceLengths) { return new Options().maxSequenceLengths(maxSequenceLengths); } + /** + * Sets the numFeatures option. + * + * @param numFeatures the numFeatures option + * @return this Options instance. 
+ */ + public static Options numFeatures(List numFeatures) { + return new Options().numFeatures(numFeatures); + } + + /** + * Sets the numFeatures option. + * + * @param numFeatures the numFeatures option + * @return this Options instance. + */ + public static Options numFeatures(Long[] numFeatures) { + return new Options().numFeatures(numFeatures); + } + /** * Optional attributes for {@link org.tensorflow.op.tpu.EnqueueTPUEmbeddingRaggedTensorBatch} */ @@ -189,6 +216,8 @@ public static class Options { private List maxSequenceLengths; + private List numFeatures; + private Options() { } @@ -257,5 +286,27 @@ public Options maxSequenceLengths(Long... maxSequenceLengths) { this.maxSequenceLengths = Arrays.asList(maxSequenceLengths); return this; } + + /** + * Sets the numFeatures option. + * + * @param numFeatures the numFeatures option + * @return this Options instance. + */ + public Options numFeatures(List numFeatures) { + this.numFeatures = numFeatures; + return this; + } + + /** + * Sets the numFeatures option. + * + * @param numFeatures the numFeatures option + * @return this Options instance. + */ + public Options numFeatures(Long... numFeatures) { + this.numFeatures = Arrays.asList(numFeatures); + return this; + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java index 79226f5998f..42f1a114864 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java @@ -111,6 +111,13 @@ public static EnqueueTPUEmbeddingSparseTensorBatch create(Scope scope, } opBuilder.setAttr("max_sequence_lengths", maxSequenceLengthsArray); } + if (opts.numFeatures != null) { + long[] numFeaturesArray = new long[opts.numFeatures.size()]; + for (int i = 0 ; i < numFeaturesArray.length ; i++) { + numFeaturesArray[i] = opts.numFeatures.get(i); + } + opBuilder.setAttr("num_features", numFeaturesArray); + } } } return new EnqueueTPUEmbeddingSparseTensorBatch(opBuilder.build()); @@ -177,6 +184,26 @@ public static Options maxSequenceLengths(Long[] maxSequenceLengths) { return new Options().maxSequenceLengths(maxSequenceLengths); } + /** + * Sets the numFeatures option. + * + * @param numFeatures the numFeatures option + * @return this Options instance. + */ + public static Options numFeatures(List numFeatures) { + return new Options().numFeatures(numFeatures); + } + + /** + * Sets the numFeatures option. + * + * @param numFeatures the numFeatures option + * @return this Options instance. + */ + public static Options numFeatures(Long[] numFeatures) { + return new Options().numFeatures(numFeatures); + } + /** * Optional attributes for {@link org.tensorflow.op.tpu.EnqueueTPUEmbeddingSparseTensorBatch} */ @@ -187,6 +214,8 @@ public static class Options { private List maxSequenceLengths; + private List numFeatures; + private Options() { } @@ -255,5 +284,27 @@ public Options maxSequenceLengths(Long... maxSequenceLengths) { this.maxSequenceLengths = Arrays.asList(maxSequenceLengths); return this; } + + /** + * Sets the numFeatures option. + * + * @param numFeatures the numFeatures option + * @return this Options instance. 
+ */ + public Options numFeatures(List numFeatures) { + this.numFeatures = numFeatures; + return this; + } + + /** + * Sets the numFeatures option. + * + * @param numFeatures the numFeatures option + * @return this Options instance. + */ + public Options numFeatures(Long... numFeatures) { + this.numFeatures = Arrays.asList(numFeatures); + return this; + } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java new file mode 100644 index 00000000000..55f908d985b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java @@ -0,0 +1,69 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.tpu; + +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.types.TString; +import org.tensorflow.types.family.TType; + +/** + * Op that reshards on-device TPU variables to specified state. + * Op that reshards on-device TPU variables to specified state. Internal use only. + *
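// The new num_features attribute added to both enqueue ops above is reached through the
// usual generated Options setters; a sketch (the per-table values are assumptions, and the
// required positional arguments of create(...) are omitted here).
EnqueueTPUEmbeddingRaggedTensorBatch.Options opts =
    EnqueueTPUEmbeddingRaggedTensorBatch.numFeatures(Arrays.asList(1L, 1L, 2L));
// opts is then passed as the trailing Options... argument of
// EnqueueTPUEmbeddingRaggedTensorBatch.create(scope, ..., opts).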

      The sharding state is represented as the key of the compilation that generated + * the sharding/unsharding programs along with the main program. new_format_key + * specifies the desired state, and format_state_var is the current state of the + * variables. + */ +public final class TPUReshardVariables extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "TPUReshardVariables"; + + private TPUReshardVariables(Operation operation) { + super(operation); + } + + /** + * Factory method to create a class wrapping a new TPUReshardVariables operation. + * + * @param scope current scope + * @param vars the vars value + * @param newFormatKey the newFormatKey value + * @param formatStateVar the formatStateVar value + * @return a new instance of TPUReshardVariables + */ + @Endpoint( + describeByClass = true + ) + public static TPUReshardVariables create(Scope scope, Iterable> vars, + Operand newFormatKey, Operand formatStateVar) { + OperationBuilder opBuilder = scope.env().opBuilder("TPUReshardVariables", scope.makeOpName("TPUReshardVariables")); + opBuilder.addInputList(Operands.asOutputs(vars)); + opBuilder.addInput(newFormatKey.asOutput()); + opBuilder.addInput(formatStateVar.asOutput()); + opBuilder = scope.apply(opBuilder); + return new TPUReshardVariables(opBuilder.build()); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java index c801e1fc9c1..78bf07d3a42 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java @@ -58,9 +58,13 @@ private Pad(Operation operation) { * @param scope current scope * @param input A {@code Tensor} of type T. * @param paddingValue A scalar {@code Tensor} of type T. - * @param paddingLow the padding to apply at the start of each input dimensions - * @param paddingHigh the padding to apply at the end of each input dimension. - * @param paddingInterior the padding to apply between each input element. + * @param paddingLow the padding to apply at the start of each input dimensions. Must + * be a compile-time constant 1D tensor of length equal to rank of input. + * @param paddingHigh the padding to apply at the end of each input dimension. Must + * be a compile-time constant 1D tensor of length equal to rank of input. + * @param paddingInterior the padding to apply between each input element. Must + * be a compile-time constant 1D tensor of length equal to rank of input, + * containing only non-negative values. 
* @param data type for {@code XlaPad} output and operands * @param data type for {@code XlaPad} output and operands * @return a new instance of Pad diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java index d62e4741a94..bf5e754d5a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java @@ -54,19 +54,38 @@ private Sharding(Operation operation) { * * @param scope current scope * @param input the input value + * @param options carries optional attribute values * @param data type for {@code XlaSharding} output and operands * @return a new instance of Sharding */ @Endpoint( describeByClass = true ) - public static Sharding create(Scope scope, Operand input) { + public static Sharding create(Scope scope, Operand input, + Options... options) { OperationBuilder opBuilder = scope.env().opBuilder("XlaSharding", scope.makeOpName("Sharding")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); + if (options != null) { + for (Options opts : options) { + if (opts.sharding != null) { + opBuilder.setAttr("sharding", opts.sharding); + } + } + } return new Sharding<>(opBuilder.build()); } + /** + * Sets the sharding option. + * + * @param sharding the sharding option + * @return this Options instance. + */ + public static Options sharding(String sharding) { + return new Options().sharding(sharding); + } + /** * Gets output. * @@ -80,4 +99,25 @@ public Output output() { public Output asOutput() { return output; } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.Sharding} + */ + public static class Options { + private String sharding; + + private Options() { + } + + /** + * Sets the sharding option. + * + * @param sharding the sharding option + * @return this Options instance. + */ + public Options sharding(String sharding) { + this.sharding = sharding; + return this; + } + } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutoShardPolicy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutoShardPolicy.java new file mode 100644 index 00000000000..a45271b08eb --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutoShardPolicy.java @@ -0,0 +1,188 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +/** + *
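// The XlaSharding wrapper above gains an optional sharding attribute; a sketch of setting
// it (the value is assumed to be a serialized sharding proto, shown only as a placeholder,
// with tf as in the earlier sketches).
Operand<TFloat32> input = tf.constant(new float[] {1f, 2f, 3f, 4f});
String shardingAttr = "";  // placeholder for a serialized sharding proto, not a real value
Sharding<TFloat32> sharded =
    Sharding.create(tf.scope(), input, Sharding.sharding(shardingAttr));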

      + * Represents the type of auto-sharding we enable.
      + * 
      + * + * Protobuf enum {@code tensorflow.data.AutoShardPolicy} + */ +public enum AutoShardPolicy + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
      +   * AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding.
      +   * 
      + * + * AUTO = 0; + */ + AUTO(0), + /** + *
      +   * FILE: Shards by input files (i.e. each worker will get a set of files to
      +   * process). When this option is selected, make sure that there are at least as
      +   * many files as workers. If there are fewer input files than workers, a
      +   * runtime error will be raised.
      +   * 
      + * + * FILE = 1; + */ + FILE(1), + /** + *
      +   * DATA: Shards by elements produced by the dataset. Each worker will process
      +   * the whole dataset and discard the portion that is not for itself. Note that
      +   * for this mode to correctly partition the dataset elements, the dataset
      +   * needs to produce elements in a deterministic order.
      +   * 
      + * + * DATA = 2; + */ + DATA(2), + /** + *
      +   * HINT: Looks for the presence of `shard(SHARD_HINT, ...)` which is treated
      +   * as a placeholder to replace with `shard(num_workers, worker_index)`.
      +   * 
      + * + * HINT = 3; + */ + HINT(3), + /** + *
      +   * OFF: No sharding will be performed.
      +   * 
      + * + * OFF = -1; + */ + OFF(-1), + UNRECOGNIZED(-1), + ; + + /** + *
      +   * AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding.
      +   * 
      + * + * AUTO = 0; + */ + public static final int AUTO_VALUE = 0; + /** + *
      +   * FILE: Shards by input files (i.e. each worker will get a set of files to
      +   * process). When this option is selected, make sure that there are at least as
      +   * many files as workers. If there are fewer input files than workers, a
      +   * runtime error will be raised.
      +   * 
      + * + * FILE = 1; + */ + public static final int FILE_VALUE = 1; + /** + *
      +   * DATA: Shards by elements produced by the dataset. Each worker will process
      +   * the whole dataset and discard the portion that is not for itself. Note that
      +   * for this mode to correctly partition the dataset elements, the dataset
      +   * needs to produce elements in a deterministic order.
      +   * 
      + * + * DATA = 2; + */ + public static final int DATA_VALUE = 2; + /** + *
      +   * HINT: Looks for the presence of `shard(SHARD_HINT, ...)` which is treated
      +   * as a placeholder to replace with `shard(num_workers, worker_index)`.
      +   * 
      + * + * HINT = 3; + */ + public static final int HINT_VALUE = 3; + /** + *
      +   * OFF: No sharding will be performed.
      +   * 
      + * + * OFF = -1; + */ + public static final int OFF_VALUE = -1; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static AutoShardPolicy valueOf(int value) { + return forNumber(value); + } + + public static AutoShardPolicy forNumber(int value) { + switch (value) { + case 0: return AUTO; + case 1: return FILE; + case 2: return DATA; + case 3: return HINT; + case -1: return OFF; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + AutoShardPolicy> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public AutoShardPolicy findValueByNumber(int number) { + return AutoShardPolicy.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final AutoShardPolicy[] VALUES = values(); + + public static AutoShardPolicy valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private AutoShardPolicy(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:tensorflow.data.AutoShardPolicy) +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java new file mode 100644 index 00000000000..4426fbe9187 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java @@ -0,0 +1,144 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
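// The generated AutoShardPolicy enum above maps directly onto the proto field numbers;
// a small sketch of the constant/number round trip, using only values defined above.
AutoShardPolicy file = AutoShardPolicy.forNumber(1);      // FILE
int dataNumber = AutoShardPolicy.DATA.getNumber();        // 2
AutoShardPolicy off = AutoShardPolicy.forNumber(-1);      // OFF
AutoShardPolicy unknown = AutoShardPolicy.forNumber(7);   // null: not a defined policy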
+// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +public final class DatasetOptionsProtos { + private DatasetOptionsProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_DistributeOptions_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_DistributeOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_MapVectorization_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_MapVectorization_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_OptimizationOptions_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_OptimizationOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_ThreadingOptions_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_ThreadingOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_Options_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_Options_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n/tensorflow/core/framework/dataset_opti" + + "ons.proto\022\017tensorflow.data\"\177\n\021Distribute" + + "Options\022;\n\021auto_shard_policy\030\001 \001(\0162 .ten" + + "sorflow.data.AutoShardPolicy\022\025\n\013num_devi" + + "ces\030\002 \001(\005H\000B\026\n\024optional_num_devices\"v\n\020M" + + "apVectorization\022\021\n\007enabled\030\001 \001(\010H\000\022\034\n\022us" + + "e_choose_fastest\030\002 \001(\010H\001B\022\n\020optional_ena" + + "bledB\035\n\033optional_use_choose_fastest\"\311\010\n\023" + + "OptimizationOptions\022%\n\033apply_default_opt" + + "imizations\030\001 \001(\010H\000\022\022\n\010autotune\030\002 \001(\010H\001\022\032" + + "\n\020autotune_buffers\030\003 \001(\010H\002\022\035\n\023autotune_c" + + "pu_budget\030\004 \001(\005H\003\022\035\n\023autotune_ram_budget" + + "\030\005 \001(\005H\004\022\027\n\rfilter_fusion\030\006 \001(\010H\005\022+\n!fil" + + "ter_with_random_uniform_fusion\030\007 \001(\010H\006\022\036" + + "\n\024hoist_random_uniform\030\010 \001(\010H\007\022\036\n\024map_an" + + "d_batch_fusion\030\t \001(\010H\010\022\037\n\025map_and_filter" + + "_fusion\030\n \001(\010H\t\022\024\n\nmap_fusion\030\013 \001(\010H\n\022\035\n" + + "\023map_parallelization\030\014 \001(\010H\013\022<\n\021map_vect" + + "orization\030\r \001(\0132!.tensorflow.data.MapVec" + + "torization\022\032\n\020noop_elimination\030\016 \001(\010H\014\022\030" + + "\n\016parallel_batch\030\017 \001(\010H\r\022%\n\033reorder_data" + + "_discarding_ops\030\020 
\001(\010H\016\022#\n\031shuffle_and_r" + + "epeat_fusion\030\021 \001(\010H\017B&\n$optional_apply_d" + + "efault_optimizationsB\023\n\021optional_autotun" + + "eB\033\n\031optional_autotune_buffersB\036\n\034option" + + "al_autotune_cpu_budgetB\036\n\034optional_autot" + + "une_ram_budgetB\030\n\026optional_filter_fusion" + + "B,\n*optional_filter_with_random_uniform_" + + "fusionB\037\n\035optional_hoist_random_uniformB" + + "\037\n\035optional_map_and_batch_fusionB \n\036opti" + + "onal_map_and_filter_fusionB\025\n\023optional_m" + + "ap_fusionB\036\n\034optional_map_parallelizatio" + + "nB\033\n\031optional_noop_eliminationB\031\n\027option" + + "al_parallel_batchB&\n$optional_reorder_da" + + "ta_discarding_opsB$\n\"optional_shuffle_an" + + "d_repeat_fusion\"\242\001\n\020ThreadingOptions\022\"\n\030" + + "max_intra_op_parallelism\030\001 \001(\005H\000\022!\n\027priv" + + "ate_threadpool_size\030\002 \001(\005H\001B#\n!optional_" + + "max_intra_op_parallelismB\"\n optional_pri" + + "vate_threadpool_size\"\212\003\n\007Options\022\027\n\rdete" + + "rministic\030\001 \001(\010H\000\022>\n\022distribute_options\030" + + "\002 \001(\0132\".tensorflow.data.DistributeOption" + + "s\022B\n\024optimization_options\030\003 \001(\0132$.tensor" + + "flow.data.OptimizationOptions\022\017\n\005slack\030\004" + + " \001(\010H\001\022<\n\021threading_options\030\005 \001(\0132!.tens" + + "orflow.data.ThreadingOptions\022E\n\025external" + + "_state_policy\030\006 \001(\0162$.tensorflow.data.Ex" + + "ternalStatePolicyH\002B\030\n\026optional_determin" + + "isticB\020\n\016optional_slackB \n\036optional_exte" + + "rnal_state_policy*K\n\017AutoShardPolicy\022\010\n\004" + + "AUTO\020\000\022\010\n\004FILE\020\001\022\010\n\004DATA\020\002\022\010\n\004HINT\020\003\022\020\n\003" + + "OFF\020\377\377\377\377\377\377\377\377\377\001*J\n\023ExternalStatePolicy\022\017\n" + + "\013POLICY_WARN\020\000\022\021\n\rPOLICY_IGNORE\020\001\022\017\n\013POL" + + "ICY_FAIL\020\002B3\n\031org.tensorflow.proto.dataB" + + "\024DatasetOptionsProtosP\001b\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }); + internal_static_tensorflow_data_DistributeOptions_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_data_DistributeOptions_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_DistributeOptions_descriptor, + new java.lang.String[] { "AutoShardPolicy", "NumDevices", "OptionalNumDevices", }); + internal_static_tensorflow_data_MapVectorization_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_tensorflow_data_MapVectorization_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_MapVectorization_descriptor, + new java.lang.String[] { "Enabled", "UseChooseFastest", "OptionalEnabled", "OptionalUseChooseFastest", }); + internal_static_tensorflow_data_OptimizationOptions_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_tensorflow_data_OptimizationOptions_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_OptimizationOptions_descriptor, + new java.lang.String[] { "ApplyDefaultOptimizations", "Autotune", "AutotuneBuffers", "AutotuneCpuBudget", "AutotuneRamBudget", "FilterFusion", 
"FilterWithRandomUniformFusion", "HoistRandomUniform", "MapAndBatchFusion", "MapAndFilterFusion", "MapFusion", "MapParallelization", "MapVectorization", "NoopElimination", "ParallelBatch", "ReorderDataDiscardingOps", "ShuffleAndRepeatFusion", "OptionalApplyDefaultOptimizations", "OptionalAutotune", "OptionalAutotuneBuffers", "OptionalAutotuneCpuBudget", "OptionalAutotuneRamBudget", "OptionalFilterFusion", "OptionalFilterWithRandomUniformFusion", "OptionalHoistRandomUniform", "OptionalMapAndBatchFusion", "OptionalMapAndFilterFusion", "OptionalMapFusion", "OptionalMapParallelization", "OptionalNoopElimination", "OptionalParallelBatch", "OptionalReorderDataDiscardingOps", "OptionalShuffleAndRepeatFusion", }); + internal_static_tensorflow_data_ThreadingOptions_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_tensorflow_data_ThreadingOptions_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_ThreadingOptions_descriptor, + new java.lang.String[] { "MaxIntraOpParallelism", "PrivateThreadpoolSize", "OptionalMaxIntraOpParallelism", "OptionalPrivateThreadpoolSize", }); + internal_static_tensorflow_data_Options_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_tensorflow_data_Options_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_Options_descriptor, + new java.lang.String[] { "Deterministic", "DistributeOptions", "OptimizationOptions", "Slack", "ThreadingOptions", "ExternalStatePolicy", "OptionalDeterministic", "OptionalSlack", "OptionalExternalStatePolicy", }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptions.java new file mode 100644 index 00000000000..dba5ebe1b32 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptions.java @@ -0,0 +1,642 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +/** + * Protobuf type {@code tensorflow.data.DistributeOptions} + */ +public final class DistributeOptions extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.DistributeOptions) + DistributeOptionsOrBuilder { +private static final long serialVersionUID = 0L; + // Use DistributeOptions.newBuilder() to construct. 
+ private DistributeOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DistributeOptions() { + autoShardPolicy_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new DistributeOptions(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DistributeOptions( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + int rawValue = input.readEnum(); + + autoShardPolicy_ = rawValue; + break; + } + case 16: { + optionalNumDevicesCase_ = 2; + optionalNumDevices_ = input.readInt32(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_DistributeOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_DistributeOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.DistributeOptions.class, org.tensorflow.proto.data.DistributeOptions.Builder.class); + } + + private int optionalNumDevicesCase_ = 0; + private java.lang.Object optionalNumDevices_; + public enum OptionalNumDevicesCase + implements com.google.protobuf.Internal.EnumLite { + NUM_DEVICES(2), + OPTIONALNUMDEVICES_NOT_SET(0); + private final int value; + private OptionalNumDevicesCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalNumDevicesCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalNumDevicesCase forNumber(int value) { + switch (value) { + case 2: return NUM_DEVICES; + case 0: return OPTIONALNUMDEVICES_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalNumDevicesCase + getOptionalNumDevicesCase() { + return OptionalNumDevicesCase.forNumber( + optionalNumDevicesCase_); + } + + public static final int AUTO_SHARD_POLICY_FIELD_NUMBER = 1; + private int autoShardPolicy_; + /** + * .tensorflow.data.AutoShardPolicy auto_shard_policy = 1; + */ + public int getAutoShardPolicyValue() { + return autoShardPolicy_; + } + /** + * .tensorflow.data.AutoShardPolicy auto_shard_policy = 1; + */ + public org.tensorflow.proto.data.AutoShardPolicy getAutoShardPolicy() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.AutoShardPolicy result = org.tensorflow.proto.data.AutoShardPolicy.valueOf(autoShardPolicy_); + return result == null ? org.tensorflow.proto.data.AutoShardPolicy.UNRECOGNIZED : result; + } + + public static final int NUM_DEVICES_FIELD_NUMBER = 2; + /** + * int32 num_devices = 2; + */ + public int getNumDevices() { + if (optionalNumDevicesCase_ == 2) { + return (java.lang.Integer) optionalNumDevices_; + } + return 0; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (autoShardPolicy_ != org.tensorflow.proto.data.AutoShardPolicy.AUTO.getNumber()) { + output.writeEnum(1, autoShardPolicy_); + } + if (optionalNumDevicesCase_ == 2) { + output.writeInt32( + 2, (int)((java.lang.Integer) optionalNumDevices_)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (autoShardPolicy_ != org.tensorflow.proto.data.AutoShardPolicy.AUTO.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, autoShardPolicy_); + } + if (optionalNumDevicesCase_ == 2) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size( + 2, (int)((java.lang.Integer) optionalNumDevices_)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.DistributeOptions)) { + return super.equals(obj); + } + org.tensorflow.proto.data.DistributeOptions other = (org.tensorflow.proto.data.DistributeOptions) obj; + + if (autoShardPolicy_ != other.autoShardPolicy_) return false; + if (!getOptionalNumDevicesCase().equals(other.getOptionalNumDevicesCase())) return false; + switch (optionalNumDevicesCase_) { + case 2: + if (getNumDevices() + != other.getNumDevices()) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + 
AUTO_SHARD_POLICY_FIELD_NUMBER; + hash = (53 * hash) + autoShardPolicy_; + switch (optionalNumDevicesCase_) { + case 2: + hash = (37 * hash) + NUM_DEVICES_FIELD_NUMBER; + hash = (53 * hash) + getNumDevices(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.DistributeOptions parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DistributeOptions parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DistributeOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DistributeOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DistributeOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DistributeOptions parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DistributeOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DistributeOptions parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.DistributeOptions parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DistributeOptions parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.DistributeOptions parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DistributeOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return 
DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.DistributeOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code tensorflow.data.DistributeOptions} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.DistributeOptions) + org.tensorflow.proto.data.DistributeOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_DistributeOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_DistributeOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.DistributeOptions.class, org.tensorflow.proto.data.DistributeOptions.Builder.class); + } + + // Construct using org.tensorflow.proto.data.DistributeOptions.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + autoShardPolicy_ = 0; + + optionalNumDevicesCase_ = 0; + optionalNumDevices_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_DistributeOptions_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.DistributeOptions getDefaultInstanceForType() { + return org.tensorflow.proto.data.DistributeOptions.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.DistributeOptions build() { + org.tensorflow.proto.data.DistributeOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.DistributeOptions buildPartial() { + org.tensorflow.proto.data.DistributeOptions result = new org.tensorflow.proto.data.DistributeOptions(this); + result.autoShardPolicy_ = autoShardPolicy_; + if (optionalNumDevicesCase_ == 2) { + result.optionalNumDevices_ = optionalNumDevices_; + } + result.optionalNumDevicesCase_ = optionalNumDevicesCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return 
super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.DistributeOptions) { + return mergeFrom((org.tensorflow.proto.data.DistributeOptions)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.DistributeOptions other) { + if (other == org.tensorflow.proto.data.DistributeOptions.getDefaultInstance()) return this; + if (other.autoShardPolicy_ != 0) { + setAutoShardPolicyValue(other.getAutoShardPolicyValue()); + } + switch (other.getOptionalNumDevicesCase()) { + case NUM_DEVICES: { + setNumDevices(other.getNumDevices()); + break; + } + case OPTIONALNUMDEVICES_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.DistributeOptions parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.DistributeOptions) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int optionalNumDevicesCase_ = 0; + private java.lang.Object optionalNumDevices_; + public OptionalNumDevicesCase + getOptionalNumDevicesCase() { + return OptionalNumDevicesCase.forNumber( + optionalNumDevicesCase_); + } + + public Builder clearOptionalNumDevices() { + optionalNumDevicesCase_ = 0; + optionalNumDevices_ = null; + onChanged(); + return this; + } + + + private int autoShardPolicy_ = 0; + /** + * .tensorflow.data.AutoShardPolicy auto_shard_policy = 1; + */ + public int getAutoShardPolicyValue() { + return autoShardPolicy_; + } + /** + * .tensorflow.data.AutoShardPolicy auto_shard_policy = 1; + */ + public Builder setAutoShardPolicyValue(int value) { + autoShardPolicy_ = value; + onChanged(); + return this; + } + /** + * .tensorflow.data.AutoShardPolicy auto_shard_policy = 1; + */ + public org.tensorflow.proto.data.AutoShardPolicy getAutoShardPolicy() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.AutoShardPolicy result = org.tensorflow.proto.data.AutoShardPolicy.valueOf(autoShardPolicy_); + return result == null ? 
org.tensorflow.proto.data.AutoShardPolicy.UNRECOGNIZED : result; + } + /** + * .tensorflow.data.AutoShardPolicy auto_shard_policy = 1; + */ + public Builder setAutoShardPolicy(org.tensorflow.proto.data.AutoShardPolicy value) { + if (value == null) { + throw new NullPointerException(); + } + + autoShardPolicy_ = value.getNumber(); + onChanged(); + return this; + } + /** + * .tensorflow.data.AutoShardPolicy auto_shard_policy = 1; + */ + public Builder clearAutoShardPolicy() { + + autoShardPolicy_ = 0; + onChanged(); + return this; + } + + /** + * int32 num_devices = 2; + */ + public int getNumDevices() { + if (optionalNumDevicesCase_ == 2) { + return (java.lang.Integer) optionalNumDevices_; + } + return 0; + } + /** + * int32 num_devices = 2; + */ + public Builder setNumDevices(int value) { + optionalNumDevicesCase_ = 2; + optionalNumDevices_ = value; + onChanged(); + return this; + } + /** + * int32 num_devices = 2; + */ + public Builder clearNumDevices() { + if (optionalNumDevicesCase_ == 2) { + optionalNumDevicesCase_ = 0; + optionalNumDevices_ = null; + onChanged(); + } + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.DistributeOptions) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.DistributeOptions) + private static final org.tensorflow.proto.data.DistributeOptions DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.DistributeOptions(); + } + + public static org.tensorflow.proto.data.DistributeOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DistributeOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DistributeOptions(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.DistributeOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptionsOrBuilder.java new file mode 100644 index 00000000000..84cd08668e6 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptionsOrBuilder.java @@ -0,0 +1,25 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
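Not part of the generated sources: a minimal usage sketch of the DistributeOptions message introduced above, assuming only the builder and accessor methods visible in this diff. The class name DistributeOptionsSketch, the demo method, and the value 4 are illustrative placeholders.

import org.tensorflow.proto.data.AutoShardPolicy;
import org.tensorflow.proto.data.DistributeOptions;

class DistributeOptionsSketch {
  static void demo() {
    DistributeOptions opts = DistributeOptions.newBuilder()
        .setAutoShardPolicy(AutoShardPolicy.AUTO) // enum field, tag 1
        .setNumDevices(4)                         // sets the optional_num_devices oneof, tag 2
        .build();

    // Because num_devices lives in a oneof, its case distinguishes "unset" from an explicit 0.
    if (opts.getOptionalNumDevicesCase()
        == DistributeOptions.OptionalNumDevicesCase.NUM_DEVICES) {
      int numDevices = opts.getNumDevices(); // 4
    }
  }
}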
+// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +public interface DistributeOptionsOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.DistributeOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * .tensorflow.data.AutoShardPolicy auto_shard_policy = 1; + */ + int getAutoShardPolicyValue(); + /** + * .tensorflow.data.AutoShardPolicy auto_shard_policy = 1; + */ + org.tensorflow.proto.data.AutoShardPolicy getAutoShardPolicy(); + + /** + * int32 num_devices = 2; + */ + int getNumDevices(); + + public org.tensorflow.proto.data.DistributeOptions.OptionalNumDevicesCase getOptionalNumDevicesCase(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ExternalStatePolicy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ExternalStatePolicy.java new file mode 100644 index 00000000000..2f7375ae00d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ExternalStatePolicy.java @@ -0,0 +1,116 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +/** + *
<pre>
+ * Represents how to handle external state during serialization.
+ * </pre>
      + * + * Protobuf enum {@code tensorflow.data.ExternalStatePolicy} + */ +public enum ExternalStatePolicy + implements com.google.protobuf.ProtocolMessageEnum { + /** + * POLICY_WARN = 0; + */ + POLICY_WARN(0), + /** + * POLICY_IGNORE = 1; + */ + POLICY_IGNORE(1), + /** + * POLICY_FAIL = 2; + */ + POLICY_FAIL(2), + UNRECOGNIZED(-1), + ; + + /** + * POLICY_WARN = 0; + */ + public static final int POLICY_WARN_VALUE = 0; + /** + * POLICY_IGNORE = 1; + */ + public static final int POLICY_IGNORE_VALUE = 1; + /** + * POLICY_FAIL = 2; + */ + public static final int POLICY_FAIL_VALUE = 2; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ExternalStatePolicy valueOf(int value) { + return forNumber(value); + } + + public static ExternalStatePolicy forNumber(int value) { + switch (value) { + case 0: return POLICY_WARN; + case 1: return POLICY_IGNORE; + case 2: return POLICY_FAIL; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + ExternalStatePolicy> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ExternalStatePolicy findValueByNumber(int number) { + return ExternalStatePolicy.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final ExternalStatePolicy[] VALUES = values(); + + public static ExternalStatePolicy valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ExternalStatePolicy(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:tensorflow.data.ExternalStatePolicy) +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/MapVectorization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/MapVectorization.java new file mode 100644 index 00000000000..235c0895047 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/MapVectorization.java @@ -0,0 +1,697 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +/** + * Protobuf type {@code tensorflow.data.MapVectorization} + */ +public final class MapVectorization extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.MapVectorization) + MapVectorizationOrBuilder { +private static final long serialVersionUID = 0L; + // Use MapVectorization.newBuilder() to construct. 
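Not part of the generated sources: a minimal sketch of round-tripping the ExternalStatePolicy enum above between its wire number and the Java constant, assuming only the members declared in this diff. The class name ExternalStatePolicySketch and the demo method are illustrative.

import org.tensorflow.proto.data.ExternalStatePolicy;

class ExternalStatePolicySketch {
  static void demo() {
    // forNumber maps a wire value back to the constant; it returns null for out-of-range numbers.
    ExternalStatePolicy policy =
        ExternalStatePolicy.forNumber(ExternalStatePolicy.POLICY_FAIL_VALUE);
    int wire = policy.getNumber(); // 2; getNumber() throws for UNRECOGNIZED
  }
}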
+ private MapVectorization(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private MapVectorization() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new MapVectorization(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MapVectorization( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + optionalEnabledCase_ = 1; + optionalEnabled_ = input.readBool(); + break; + } + case 16: { + optionalUseChooseFastestCase_ = 2; + optionalUseChooseFastest_ = input.readBool(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_MapVectorization_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_MapVectorization_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.MapVectorization.class, org.tensorflow.proto.data.MapVectorization.Builder.class); + } + + private int optionalEnabledCase_ = 0; + private java.lang.Object optionalEnabled_; + public enum OptionalEnabledCase + implements com.google.protobuf.Internal.EnumLite { + ENABLED(1), + OPTIONALENABLED_NOT_SET(0); + private final int value; + private OptionalEnabledCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalEnabledCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalEnabledCase forNumber(int value) { + switch (value) { + case 1: return ENABLED; + case 0: return OPTIONALENABLED_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalEnabledCase + getOptionalEnabledCase() { + return OptionalEnabledCase.forNumber( + optionalEnabledCase_); + } + + private int optionalUseChooseFastestCase_ = 0; + private java.lang.Object optionalUseChooseFastest_; + public enum OptionalUseChooseFastestCase + implements com.google.protobuf.Internal.EnumLite { + USE_CHOOSE_FASTEST(2), + OPTIONALUSECHOOSEFASTEST_NOT_SET(0); + private final int value; + private OptionalUseChooseFastestCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalUseChooseFastestCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalUseChooseFastestCase forNumber(int value) { + switch (value) { + case 2: return USE_CHOOSE_FASTEST; + case 0: return OPTIONALUSECHOOSEFASTEST_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalUseChooseFastestCase + getOptionalUseChooseFastestCase() { + return OptionalUseChooseFastestCase.forNumber( + optionalUseChooseFastestCase_); + } + + public static final int ENABLED_FIELD_NUMBER = 1; + /** + * bool enabled = 1; + */ + public boolean getEnabled() { + if (optionalEnabledCase_ == 1) { + return (java.lang.Boolean) optionalEnabled_; + } + return false; + } + + public static final int USE_CHOOSE_FASTEST_FIELD_NUMBER = 2; + /** + * bool use_choose_fastest = 2; + */ + public boolean getUseChooseFastest() { + if (optionalUseChooseFastestCase_ == 2) { + return (java.lang.Boolean) optionalUseChooseFastest_; + } + return false; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (optionalEnabledCase_ == 1) { + output.writeBool( + 1, (boolean)((java.lang.Boolean) optionalEnabled_)); + } + if (optionalUseChooseFastestCase_ == 2) { + output.writeBool( + 2, (boolean)((java.lang.Boolean) optionalUseChooseFastest_)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (optionalEnabledCase_ == 1) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 1, (boolean)((java.lang.Boolean) optionalEnabled_)); + } + if (optionalUseChooseFastestCase_ == 2) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 2, (boolean)((java.lang.Boolean) optionalUseChooseFastest_)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.MapVectorization)) { + return super.equals(obj); + } + org.tensorflow.proto.data.MapVectorization other = (org.tensorflow.proto.data.MapVectorization) obj; + + if (!getOptionalEnabledCase().equals(other.getOptionalEnabledCase())) return false; + switch (optionalEnabledCase_) { + case 1: + if (getEnabled() + != other.getEnabled()) return false; + break; + case 0: + default: + } + if (!getOptionalUseChooseFastestCase().equals(other.getOptionalUseChooseFastestCase())) return false; + switch (optionalUseChooseFastestCase_) { + case 2: + if (getUseChooseFastest() + != other.getUseChooseFastest()) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (optionalEnabledCase_) { + case 1: + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getEnabled()); + 
break; + case 0: + default: + } + switch (optionalUseChooseFastestCase_) { + case 2: + hash = (37 * hash) + USE_CHOOSE_FASTEST_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getUseChooseFastest()); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.MapVectorization parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.MapVectorization parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.MapVectorization parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.MapVectorization parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.MapVectorization parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.MapVectorization parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.MapVectorization parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.MapVectorization parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.MapVectorization parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.MapVectorization parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.MapVectorization parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.MapVectorization parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + 
return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.MapVectorization prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code tensorflow.data.MapVectorization} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.MapVectorization) + org.tensorflow.proto.data.MapVectorizationOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_MapVectorization_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_MapVectorization_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.MapVectorization.class, org.tensorflow.proto.data.MapVectorization.Builder.class); + } + + // Construct using org.tensorflow.proto.data.MapVectorization.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + optionalEnabledCase_ = 0; + optionalEnabled_ = null; + optionalUseChooseFastestCase_ = 0; + optionalUseChooseFastest_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_MapVectorization_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.MapVectorization getDefaultInstanceForType() { + return org.tensorflow.proto.data.MapVectorization.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.MapVectorization build() { + org.tensorflow.proto.data.MapVectorization result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.MapVectorization buildPartial() { + org.tensorflow.proto.data.MapVectorization result = new org.tensorflow.proto.data.MapVectorization(this); + if (optionalEnabledCase_ == 1) { + result.optionalEnabled_ = optionalEnabled_; + } + if (optionalUseChooseFastestCase_ == 2) { + result.optionalUseChooseFastest_ = optionalUseChooseFastest_; + } + result.optionalEnabledCase_ = optionalEnabledCase_; + result.optionalUseChooseFastestCase_ = optionalUseChooseFastestCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return 
super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.MapVectorization) { + return mergeFrom((org.tensorflow.proto.data.MapVectorization)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.MapVectorization other) { + if (other == org.tensorflow.proto.data.MapVectorization.getDefaultInstance()) return this; + switch (other.getOptionalEnabledCase()) { + case ENABLED: { + setEnabled(other.getEnabled()); + break; + } + case OPTIONALENABLED_NOT_SET: { + break; + } + } + switch (other.getOptionalUseChooseFastestCase()) { + case USE_CHOOSE_FASTEST: { + setUseChooseFastest(other.getUseChooseFastest()); + break; + } + case OPTIONALUSECHOOSEFASTEST_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.MapVectorization parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.MapVectorization) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int optionalEnabledCase_ = 0; + private java.lang.Object optionalEnabled_; + public OptionalEnabledCase + getOptionalEnabledCase() { + return OptionalEnabledCase.forNumber( + optionalEnabledCase_); + } + + public Builder clearOptionalEnabled() { + optionalEnabledCase_ = 0; + optionalEnabled_ = null; + onChanged(); + return this; + } + + private int optionalUseChooseFastestCase_ = 0; + private java.lang.Object optionalUseChooseFastest_; + public OptionalUseChooseFastestCase + getOptionalUseChooseFastestCase() { + return OptionalUseChooseFastestCase.forNumber( + optionalUseChooseFastestCase_); + } + + public Builder clearOptionalUseChooseFastest() { + optionalUseChooseFastestCase_ = 0; + optionalUseChooseFastest_ = null; + onChanged(); + return this; + } + + + /** + * bool enabled = 1; + */ + public boolean getEnabled() { + if (optionalEnabledCase_ == 1) { + return (java.lang.Boolean) optionalEnabled_; + } + return false; + } + /** + * bool enabled = 1; + */ + public Builder setEnabled(boolean value) { + optionalEnabledCase_ = 1; + optionalEnabled_ = value; + onChanged(); + return this; + } + /** + * bool enabled = 1; + */ + public Builder clearEnabled() { + if 
(optionalEnabledCase_ == 1) { + optionalEnabledCase_ = 0; + optionalEnabled_ = null; + onChanged(); + } + return this; + } + + /** + * bool use_choose_fastest = 2; + */ + public boolean getUseChooseFastest() { + if (optionalUseChooseFastestCase_ == 2) { + return (java.lang.Boolean) optionalUseChooseFastest_; + } + return false; + } + /** + * bool use_choose_fastest = 2; + */ + public Builder setUseChooseFastest(boolean value) { + optionalUseChooseFastestCase_ = 2; + optionalUseChooseFastest_ = value; + onChanged(); + return this; + } + /** + * bool use_choose_fastest = 2; + */ + public Builder clearUseChooseFastest() { + if (optionalUseChooseFastestCase_ == 2) { + optionalUseChooseFastestCase_ = 0; + optionalUseChooseFastest_ = null; + onChanged(); + } + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.MapVectorization) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.MapVectorization) + private static final org.tensorflow.proto.data.MapVectorization DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.MapVectorization(); + } + + public static org.tensorflow.proto.data.MapVectorization getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MapVectorization parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MapVectorization(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.MapVectorization getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/MapVectorizationOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/MapVectorizationOrBuilder.java new file mode 100644 index 00000000000..0a09b9379c8 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/MapVectorizationOrBuilder.java @@ -0,0 +1,23 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
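Not part of the generated sources: a minimal sketch of the MapVectorization message completed above, including a round trip through one of the parseFrom overloads it declares. The class name MapVectorizationSketch and the demo method are illustrative; toByteArray() is the standard protobuf MessageLite serializer, not something added by this patch.

import org.tensorflow.proto.data.MapVectorization;

class MapVectorizationSketch {
  static void demo() throws com.google.protobuf.InvalidProtocolBufferException {
    MapVectorization vec = MapVectorization.newBuilder()
        .setEnabled(true)           // optional_enabled oneof, field 1
        .setUseChooseFastest(false) // optional_use_choose_fastest oneof, field 2
        .build();

    // Round-trip through the wire format; unset oneof fields stay distinguishable from defaults.
    MapVectorization parsed = MapVectorization.parseFrom(vec.toByteArray());
    boolean explicitlyEnabled =
        parsed.getOptionalEnabledCase() == MapVectorization.OptionalEnabledCase.ENABLED;
  }
}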
+// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +public interface MapVectorizationOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.MapVectorization) + com.google.protobuf.MessageOrBuilder { + + /** + * bool enabled = 1; + */ + boolean getEnabled(); + + /** + * bool use_choose_fastest = 2; + */ + boolean getUseChooseFastest(); + + public org.tensorflow.proto.data.MapVectorization.OptionalEnabledCase getOptionalEnabledCase(); + + public org.tensorflow.proto.data.MapVectorization.OptionalUseChooseFastestCase getOptionalUseChooseFastestCase(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptions.java new file mode 100644 index 00000000000..f470fdd27a4 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptions.java @@ -0,0 +1,2870 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +/** + * Protobuf type {@code tensorflow.data.OptimizationOptions} + */ +public final class OptimizationOptions extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.OptimizationOptions) + OptimizationOptionsOrBuilder { +private static final long serialVersionUID = 0L; + // Use OptimizationOptions.newBuilder() to construct. + private OptimizationOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private OptimizationOptions() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new OptimizationOptions(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private OptimizationOptions( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + optionalApplyDefaultOptimizationsCase_ = 1; + optionalApplyDefaultOptimizations_ = input.readBool(); + break; + } + case 16: { + optionalAutotuneCase_ = 2; + optionalAutotune_ = input.readBool(); + break; + } + case 24: { + optionalAutotuneBuffersCase_ = 3; + optionalAutotuneBuffers_ = input.readBool(); + break; + } + case 32: { + optionalAutotuneCpuBudgetCase_ = 4; + optionalAutotuneCpuBudget_ = input.readInt32(); + break; + } + case 40: { + optionalAutotuneRamBudgetCase_ = 5; + optionalAutotuneRamBudget_ = input.readInt32(); + break; + } + case 48: { + optionalFilterFusionCase_ = 6; + optionalFilterFusion_ = input.readBool(); + break; + } + case 56: { + optionalFilterWithRandomUniformFusionCase_ = 7; + optionalFilterWithRandomUniformFusion_ = input.readBool(); + break; + } + case 64: { + optionalHoistRandomUniformCase_ = 8; + optionalHoistRandomUniform_ = input.readBool(); + break; + } + case 72: { + optionalMapAndBatchFusionCase_ = 
9; + optionalMapAndBatchFusion_ = input.readBool(); + break; + } + case 80: { + optionalMapAndFilterFusionCase_ = 10; + optionalMapAndFilterFusion_ = input.readBool(); + break; + } + case 88: { + optionalMapFusionCase_ = 11; + optionalMapFusion_ = input.readBool(); + break; + } + case 96: { + optionalMapParallelizationCase_ = 12; + optionalMapParallelization_ = input.readBool(); + break; + } + case 106: { + org.tensorflow.proto.data.MapVectorization.Builder subBuilder = null; + if (mapVectorization_ != null) { + subBuilder = mapVectorization_.toBuilder(); + } + mapVectorization_ = input.readMessage(org.tensorflow.proto.data.MapVectorization.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(mapVectorization_); + mapVectorization_ = subBuilder.buildPartial(); + } + + break; + } + case 112: { + optionalNoopEliminationCase_ = 14; + optionalNoopElimination_ = input.readBool(); + break; + } + case 120: { + optionalParallelBatchCase_ = 15; + optionalParallelBatch_ = input.readBool(); + break; + } + case 128: { + optionalReorderDataDiscardingOpsCase_ = 16; + optionalReorderDataDiscardingOps_ = input.readBool(); + break; + } + case 136: { + optionalShuffleAndRepeatFusionCase_ = 17; + optionalShuffleAndRepeatFusion_ = input.readBool(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_OptimizationOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_OptimizationOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.OptimizationOptions.class, org.tensorflow.proto.data.OptimizationOptions.Builder.class); + } + + private int optionalApplyDefaultOptimizationsCase_ = 0; + private java.lang.Object optionalApplyDefaultOptimizations_; + public enum OptionalApplyDefaultOptimizationsCase + implements com.google.protobuf.Internal.EnumLite { + APPLY_DEFAULT_OPTIMIZATIONS(1), + OPTIONALAPPLYDEFAULTOPTIMIZATIONS_NOT_SET(0); + private final int value; + private OptionalApplyDefaultOptimizationsCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalApplyDefaultOptimizationsCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalApplyDefaultOptimizationsCase forNumber(int value) { + switch (value) { + case 1: return APPLY_DEFAULT_OPTIMIZATIONS; + case 0: return OPTIONALAPPLYDEFAULTOPTIMIZATIONS_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalApplyDefaultOptimizationsCase + getOptionalApplyDefaultOptimizationsCase() { + return OptionalApplyDefaultOptimizationsCase.forNumber( + optionalApplyDefaultOptimizationsCase_); + } + + private int optionalAutotuneCase_ = 0; + private java.lang.Object optionalAutotune_; + public enum OptionalAutotuneCase + implements com.google.protobuf.Internal.EnumLite { + AUTOTUNE(2), + OPTIONALAUTOTUNE_NOT_SET(0); + private final int value; + private OptionalAutotuneCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalAutotuneCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalAutotuneCase forNumber(int value) { + switch (value) { + case 2: return AUTOTUNE; + case 0: return OPTIONALAUTOTUNE_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalAutotuneCase + getOptionalAutotuneCase() { + return OptionalAutotuneCase.forNumber( + optionalAutotuneCase_); + } + + private int optionalAutotuneBuffersCase_ = 0; + private java.lang.Object optionalAutotuneBuffers_; + public enum OptionalAutotuneBuffersCase + implements com.google.protobuf.Internal.EnumLite { + AUTOTUNE_BUFFERS(3), + OPTIONALAUTOTUNEBUFFERS_NOT_SET(0); + private final int value; + private OptionalAutotuneBuffersCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalAutotuneBuffersCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalAutotuneBuffersCase forNumber(int value) { + switch (value) { + case 3: return AUTOTUNE_BUFFERS; + case 0: return OPTIONALAUTOTUNEBUFFERS_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalAutotuneBuffersCase + getOptionalAutotuneBuffersCase() { + return OptionalAutotuneBuffersCase.forNumber( + optionalAutotuneBuffersCase_); + } + + private int optionalAutotuneCpuBudgetCase_ = 0; + private java.lang.Object optionalAutotuneCpuBudget_; + public enum OptionalAutotuneCpuBudgetCase + implements com.google.protobuf.Internal.EnumLite { + AUTOTUNE_CPU_BUDGET(4), + OPTIONALAUTOTUNECPUBUDGET_NOT_SET(0); + private final int value; + private OptionalAutotuneCpuBudgetCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalAutotuneCpuBudgetCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalAutotuneCpuBudgetCase forNumber(int value) { + switch (value) { + case 4: return AUTOTUNE_CPU_BUDGET; + case 0: return OPTIONALAUTOTUNECPUBUDGET_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalAutotuneCpuBudgetCase + getOptionalAutotuneCpuBudgetCase() { + return OptionalAutotuneCpuBudgetCase.forNumber( + optionalAutotuneCpuBudgetCase_); + } + + private int optionalAutotuneRamBudgetCase_ = 0; + private java.lang.Object optionalAutotuneRamBudget_; + public enum OptionalAutotuneRamBudgetCase + implements com.google.protobuf.Internal.EnumLite { + AUTOTUNE_RAM_BUDGET(5), + OPTIONALAUTOTUNERAMBUDGET_NOT_SET(0); + private final int value; + private OptionalAutotuneRamBudgetCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalAutotuneRamBudgetCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalAutotuneRamBudgetCase forNumber(int value) { + switch (value) { + case 5: return AUTOTUNE_RAM_BUDGET; + case 0: return OPTIONALAUTOTUNERAMBUDGET_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalAutotuneRamBudgetCase + getOptionalAutotuneRamBudgetCase() { + return OptionalAutotuneRamBudgetCase.forNumber( + optionalAutotuneRamBudgetCase_); + } + + private int optionalFilterFusionCase_ = 0; + private java.lang.Object optionalFilterFusion_; + public enum OptionalFilterFusionCase + implements com.google.protobuf.Internal.EnumLite { + FILTER_FUSION(6), + OPTIONALFILTERFUSION_NOT_SET(0); + private final int value; + private OptionalFilterFusionCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalFilterFusionCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalFilterFusionCase forNumber(int value) { + switch (value) { + case 6: return FILTER_FUSION; + case 0: return OPTIONALFILTERFUSION_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalFilterFusionCase + getOptionalFilterFusionCase() { + return OptionalFilterFusionCase.forNumber( + optionalFilterFusionCase_); + } + + private int optionalFilterWithRandomUniformFusionCase_ = 0; + private java.lang.Object optionalFilterWithRandomUniformFusion_; + public enum OptionalFilterWithRandomUniformFusionCase + implements com.google.protobuf.Internal.EnumLite { + FILTER_WITH_RANDOM_UNIFORM_FUSION(7), + OPTIONALFILTERWITHRANDOMUNIFORMFUSION_NOT_SET(0); + private final int value; + private OptionalFilterWithRandomUniformFusionCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalFilterWithRandomUniformFusionCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalFilterWithRandomUniformFusionCase forNumber(int value) { + switch (value) { + case 7: return FILTER_WITH_RANDOM_UNIFORM_FUSION; + case 0: return OPTIONALFILTERWITHRANDOMUNIFORMFUSION_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalFilterWithRandomUniformFusionCase + getOptionalFilterWithRandomUniformFusionCase() { + return OptionalFilterWithRandomUniformFusionCase.forNumber( + optionalFilterWithRandomUniformFusionCase_); + } + + private int optionalHoistRandomUniformCase_ = 0; + private java.lang.Object optionalHoistRandomUniform_; + public enum OptionalHoistRandomUniformCase + implements com.google.protobuf.Internal.EnumLite { + HOIST_RANDOM_UNIFORM(8), + OPTIONALHOISTRANDOMUNIFORM_NOT_SET(0); + private final int value; + private OptionalHoistRandomUniformCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalHoistRandomUniformCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalHoistRandomUniformCase forNumber(int value) { + switch (value) { + case 8: return HOIST_RANDOM_UNIFORM; + case 0: return OPTIONALHOISTRANDOMUNIFORM_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalHoistRandomUniformCase + getOptionalHoistRandomUniformCase() { + return OptionalHoistRandomUniformCase.forNumber( + optionalHoistRandomUniformCase_); + } + + private int optionalMapAndBatchFusionCase_ = 0; + private java.lang.Object optionalMapAndBatchFusion_; + public enum OptionalMapAndBatchFusionCase + implements com.google.protobuf.Internal.EnumLite { + MAP_AND_BATCH_FUSION(9), + OPTIONALMAPANDBATCHFUSION_NOT_SET(0); + private final int value; + private OptionalMapAndBatchFusionCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalMapAndBatchFusionCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalMapAndBatchFusionCase forNumber(int value) { + switch (value) { + case 9: return MAP_AND_BATCH_FUSION; + case 0: return OPTIONALMAPANDBATCHFUSION_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalMapAndBatchFusionCase + getOptionalMapAndBatchFusionCase() { + return OptionalMapAndBatchFusionCase.forNumber( + optionalMapAndBatchFusionCase_); + } + + private int optionalMapAndFilterFusionCase_ = 0; + private java.lang.Object optionalMapAndFilterFusion_; + public enum OptionalMapAndFilterFusionCase + implements com.google.protobuf.Internal.EnumLite { + MAP_AND_FILTER_FUSION(10), + OPTIONALMAPANDFILTERFUSION_NOT_SET(0); + private final int value; + private OptionalMapAndFilterFusionCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalMapAndFilterFusionCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalMapAndFilterFusionCase forNumber(int value) { + switch (value) { + case 10: return MAP_AND_FILTER_FUSION; + case 0: return OPTIONALMAPANDFILTERFUSION_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalMapAndFilterFusionCase + getOptionalMapAndFilterFusionCase() { + return OptionalMapAndFilterFusionCase.forNumber( + optionalMapAndFilterFusionCase_); + } + + private int optionalMapFusionCase_ = 0; + private java.lang.Object optionalMapFusion_; + public enum OptionalMapFusionCase + implements com.google.protobuf.Internal.EnumLite { + MAP_FUSION(11), + OPTIONALMAPFUSION_NOT_SET(0); + private final int value; + private OptionalMapFusionCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalMapFusionCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalMapFusionCase forNumber(int value) { + switch (value) { + case 11: return MAP_FUSION; + case 0: return OPTIONALMAPFUSION_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalMapFusionCase + getOptionalMapFusionCase() { + return OptionalMapFusionCase.forNumber( + optionalMapFusionCase_); + } + + private int optionalMapParallelizationCase_ = 0; + private java.lang.Object optionalMapParallelization_; + public enum OptionalMapParallelizationCase + implements com.google.protobuf.Internal.EnumLite { + MAP_PARALLELIZATION(12), + OPTIONALMAPPARALLELIZATION_NOT_SET(0); + private final int value; + private OptionalMapParallelizationCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalMapParallelizationCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalMapParallelizationCase forNumber(int value) { + switch (value) { + case 12: return MAP_PARALLELIZATION; + case 0: return OPTIONALMAPPARALLELIZATION_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalMapParallelizationCase + getOptionalMapParallelizationCase() { + return OptionalMapParallelizationCase.forNumber( + optionalMapParallelizationCase_); + } + + private int optionalNoopEliminationCase_ = 0; + private java.lang.Object optionalNoopElimination_; + public enum OptionalNoopEliminationCase + implements com.google.protobuf.Internal.EnumLite { + NOOP_ELIMINATION(14), + OPTIONALNOOPELIMINATION_NOT_SET(0); + private final int value; + private OptionalNoopEliminationCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalNoopEliminationCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalNoopEliminationCase forNumber(int value) { + switch (value) { + case 14: return NOOP_ELIMINATION; + case 0: return OPTIONALNOOPELIMINATION_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalNoopEliminationCase + getOptionalNoopEliminationCase() { + return OptionalNoopEliminationCase.forNumber( + optionalNoopEliminationCase_); + } + + private int optionalParallelBatchCase_ = 0; + private java.lang.Object optionalParallelBatch_; + public enum OptionalParallelBatchCase + implements com.google.protobuf.Internal.EnumLite { + PARALLEL_BATCH(15), + OPTIONALPARALLELBATCH_NOT_SET(0); + private final int value; + private OptionalParallelBatchCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalParallelBatchCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalParallelBatchCase forNumber(int value) { + switch (value) { + case 15: return PARALLEL_BATCH; + case 0: return OPTIONALPARALLELBATCH_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalParallelBatchCase + getOptionalParallelBatchCase() { + return OptionalParallelBatchCase.forNumber( + optionalParallelBatchCase_); + } + + private int optionalReorderDataDiscardingOpsCase_ = 0; + private java.lang.Object optionalReorderDataDiscardingOps_; + public enum OptionalReorderDataDiscardingOpsCase + implements com.google.protobuf.Internal.EnumLite { + REORDER_DATA_DISCARDING_OPS(16), + OPTIONALREORDERDATADISCARDINGOPS_NOT_SET(0); + private final int value; + private OptionalReorderDataDiscardingOpsCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalReorderDataDiscardingOpsCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalReorderDataDiscardingOpsCase forNumber(int value) { + switch (value) { + case 16: return REORDER_DATA_DISCARDING_OPS; + case 0: return OPTIONALREORDERDATADISCARDINGOPS_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalReorderDataDiscardingOpsCase + getOptionalReorderDataDiscardingOpsCase() { + return OptionalReorderDataDiscardingOpsCase.forNumber( + optionalReorderDataDiscardingOpsCase_); + } + + private int optionalShuffleAndRepeatFusionCase_ = 0; + private java.lang.Object optionalShuffleAndRepeatFusion_; + public enum OptionalShuffleAndRepeatFusionCase + implements com.google.protobuf.Internal.EnumLite { + SHUFFLE_AND_REPEAT_FUSION(17), + OPTIONALSHUFFLEANDREPEATFUSION_NOT_SET(0); + private final int value; + private OptionalShuffleAndRepeatFusionCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalShuffleAndRepeatFusionCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalShuffleAndRepeatFusionCase forNumber(int value) { + switch (value) { + case 17: return SHUFFLE_AND_REPEAT_FUSION; + case 0: return OPTIONALSHUFFLEANDREPEATFUSION_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalShuffleAndRepeatFusionCase + getOptionalShuffleAndRepeatFusionCase() { + return OptionalShuffleAndRepeatFusionCase.forNumber( + optionalShuffleAndRepeatFusionCase_); + } + + public static final int APPLY_DEFAULT_OPTIMIZATIONS_FIELD_NUMBER = 1; + /** + * bool apply_default_optimizations = 1; + */ + public boolean getApplyDefaultOptimizations() { + if (optionalApplyDefaultOptimizationsCase_ == 1) { + return (java.lang.Boolean) optionalApplyDefaultOptimizations_; + } + return false; + } + + public static final int AUTOTUNE_FIELD_NUMBER = 2; + /** + * bool autotune = 2; + */ + public boolean getAutotune() { + if (optionalAutotuneCase_ == 2) { + return (java.lang.Boolean) optionalAutotune_; + } + return false; + } + + public static final int AUTOTUNE_BUFFERS_FIELD_NUMBER = 3; + /** + * bool autotune_buffers = 3; + */ + public boolean getAutotuneBuffers() { + if (optionalAutotuneBuffersCase_ == 3) { + return (java.lang.Boolean) optionalAutotuneBuffers_; + } + return false; + } + + public static final int AUTOTUNE_CPU_BUDGET_FIELD_NUMBER = 4; + /** + * int32 autotune_cpu_budget = 4; + */ + public int getAutotuneCpuBudget() { + if (optionalAutotuneCpuBudgetCase_ == 4) { + return (java.lang.Integer) optionalAutotuneCpuBudget_; + } + return 0; + } + + public static final int AUTOTUNE_RAM_BUDGET_FIELD_NUMBER = 5; + /** + * int32 autotune_ram_budget = 5; + */ + public int getAutotuneRamBudget() { + if (optionalAutotuneRamBudgetCase_ == 5) { + return (java.lang.Integer) optionalAutotuneRamBudget_; + } + return 0; + } + + public static final int FILTER_FUSION_FIELD_NUMBER = 6; + /** + * bool filter_fusion = 6; + */ + public boolean getFilterFusion() { + if (optionalFilterFusionCase_ == 6) { + return (java.lang.Boolean) optionalFilterFusion_; + } + return false; + } + + public static final int FILTER_WITH_RANDOM_UNIFORM_FUSION_FIELD_NUMBER = 7; + /** + * bool filter_with_random_uniform_fusion = 7; + */ + public boolean getFilterWithRandomUniformFusion() { + if (optionalFilterWithRandomUniformFusionCase_ == 7) { + return (java.lang.Boolean) optionalFilterWithRandomUniformFusion_; + } + return false; + } + + public static final int HOIST_RANDOM_UNIFORM_FIELD_NUMBER = 8; + /** + * bool hoist_random_uniform = 8; + */ + public boolean getHoistRandomUniform() { + if (optionalHoistRandomUniformCase_ == 8) { + return (java.lang.Boolean) optionalHoistRandomUniform_; + } + return false; + } + + public static final int MAP_AND_BATCH_FUSION_FIELD_NUMBER = 9; + /** + * bool map_and_batch_fusion = 9; + */ + public boolean getMapAndBatchFusion() { + if (optionalMapAndBatchFusionCase_ == 9) { + return (java.lang.Boolean) optionalMapAndBatchFusion_; + } + return false; + } + + public static final int MAP_AND_FILTER_FUSION_FIELD_NUMBER = 10; + /** + * bool map_and_filter_fusion = 10; + */ + public boolean getMapAndFilterFusion() { + if (optionalMapAndFilterFusionCase_ == 10) { + return (java.lang.Boolean) optionalMapAndFilterFusion_; + } + return false; + } + + public static final int MAP_FUSION_FIELD_NUMBER = 11; + /** + * bool map_fusion = 11; + */ + public boolean getMapFusion() { + if 
(optionalMapFusionCase_ == 11) { + return (java.lang.Boolean) optionalMapFusion_; + } + return false; + } + + public static final int MAP_PARALLELIZATION_FIELD_NUMBER = 12; + /** + * bool map_parallelization = 12; + */ + public boolean getMapParallelization() { + if (optionalMapParallelizationCase_ == 12) { + return (java.lang.Boolean) optionalMapParallelization_; + } + return false; + } + + public static final int MAP_VECTORIZATION_FIELD_NUMBER = 13; + private org.tensorflow.proto.data.MapVectorization mapVectorization_; + /** + *
      +   * The map vectorization options associated with the dataset.
      +   *
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public boolean hasMapVectorization() { + return mapVectorization_ != null; + } + /** + *
      +   * The map vectorization options associated with the dataset.
      +   *
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public org.tensorflow.proto.data.MapVectorization getMapVectorization() { + return mapVectorization_ == null ? org.tensorflow.proto.data.MapVectorization.getDefaultInstance() : mapVectorization_; + } + /** + *
      +   * The map vectorization options associated with the dataset.
      +   *
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public org.tensorflow.proto.data.MapVectorizationOrBuilder getMapVectorizationOrBuilder() { + return getMapVectorization(); + } + + public static final int NOOP_ELIMINATION_FIELD_NUMBER = 14; + /** + * bool noop_elimination = 14; + */ + public boolean getNoopElimination() { + if (optionalNoopEliminationCase_ == 14) { + return (java.lang.Boolean) optionalNoopElimination_; + } + return false; + } + + public static final int PARALLEL_BATCH_FIELD_NUMBER = 15; + /** + * bool parallel_batch = 15; + */ + public boolean getParallelBatch() { + if (optionalParallelBatchCase_ == 15) { + return (java.lang.Boolean) optionalParallelBatch_; + } + return false; + } + + public static final int REORDER_DATA_DISCARDING_OPS_FIELD_NUMBER = 16; + /** + * bool reorder_data_discarding_ops = 16; + */ + public boolean getReorderDataDiscardingOps() { + if (optionalReorderDataDiscardingOpsCase_ == 16) { + return (java.lang.Boolean) optionalReorderDataDiscardingOps_; + } + return false; + } + + public static final int SHUFFLE_AND_REPEAT_FUSION_FIELD_NUMBER = 17; + /** + * bool shuffle_and_repeat_fusion = 17; + */ + public boolean getShuffleAndRepeatFusion() { + if (optionalShuffleAndRepeatFusionCase_ == 17) { + return (java.lang.Boolean) optionalShuffleAndRepeatFusion_; + } + return false; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (optionalApplyDefaultOptimizationsCase_ == 1) { + output.writeBool( + 1, (boolean)((java.lang.Boolean) optionalApplyDefaultOptimizations_)); + } + if (optionalAutotuneCase_ == 2) { + output.writeBool( + 2, (boolean)((java.lang.Boolean) optionalAutotune_)); + } + if (optionalAutotuneBuffersCase_ == 3) { + output.writeBool( + 3, (boolean)((java.lang.Boolean) optionalAutotuneBuffers_)); + } + if (optionalAutotuneCpuBudgetCase_ == 4) { + output.writeInt32( + 4, (int)((java.lang.Integer) optionalAutotuneCpuBudget_)); + } + if (optionalAutotuneRamBudgetCase_ == 5) { + output.writeInt32( + 5, (int)((java.lang.Integer) optionalAutotuneRamBudget_)); + } + if (optionalFilterFusionCase_ == 6) { + output.writeBool( + 6, (boolean)((java.lang.Boolean) optionalFilterFusion_)); + } + if (optionalFilterWithRandomUniformFusionCase_ == 7) { + output.writeBool( + 7, (boolean)((java.lang.Boolean) optionalFilterWithRandomUniformFusion_)); + } + if (optionalHoistRandomUniformCase_ == 8) { + output.writeBool( + 8, (boolean)((java.lang.Boolean) optionalHoistRandomUniform_)); + } + if (optionalMapAndBatchFusionCase_ == 9) { + output.writeBool( + 9, (boolean)((java.lang.Boolean) optionalMapAndBatchFusion_)); + } + if (optionalMapAndFilterFusionCase_ == 10) { + output.writeBool( + 10, (boolean)((java.lang.Boolean) optionalMapAndFilterFusion_)); + } + if (optionalMapFusionCase_ == 11) { + output.writeBool( + 11, (boolean)((java.lang.Boolean) optionalMapFusion_)); + } + if (optionalMapParallelizationCase_ == 12) { + output.writeBool( + 12, (boolean)((java.lang.Boolean) optionalMapParallelization_)); + } + if (mapVectorization_ != null) { + output.writeMessage(13, getMapVectorization()); + } + if (optionalNoopEliminationCase_ == 14) { + output.writeBool( + 14, 
(boolean)((java.lang.Boolean) optionalNoopElimination_)); + } + if (optionalParallelBatchCase_ == 15) { + output.writeBool( + 15, (boolean)((java.lang.Boolean) optionalParallelBatch_)); + } + if (optionalReorderDataDiscardingOpsCase_ == 16) { + output.writeBool( + 16, (boolean)((java.lang.Boolean) optionalReorderDataDiscardingOps_)); + } + if (optionalShuffleAndRepeatFusionCase_ == 17) { + output.writeBool( + 17, (boolean)((java.lang.Boolean) optionalShuffleAndRepeatFusion_)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (optionalApplyDefaultOptimizationsCase_ == 1) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 1, (boolean)((java.lang.Boolean) optionalApplyDefaultOptimizations_)); + } + if (optionalAutotuneCase_ == 2) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 2, (boolean)((java.lang.Boolean) optionalAutotune_)); + } + if (optionalAutotuneBuffersCase_ == 3) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 3, (boolean)((java.lang.Boolean) optionalAutotuneBuffers_)); + } + if (optionalAutotuneCpuBudgetCase_ == 4) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size( + 4, (int)((java.lang.Integer) optionalAutotuneCpuBudget_)); + } + if (optionalAutotuneRamBudgetCase_ == 5) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size( + 5, (int)((java.lang.Integer) optionalAutotuneRamBudget_)); + } + if (optionalFilterFusionCase_ == 6) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 6, (boolean)((java.lang.Boolean) optionalFilterFusion_)); + } + if (optionalFilterWithRandomUniformFusionCase_ == 7) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 7, (boolean)((java.lang.Boolean) optionalFilterWithRandomUniformFusion_)); + } + if (optionalHoistRandomUniformCase_ == 8) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 8, (boolean)((java.lang.Boolean) optionalHoistRandomUniform_)); + } + if (optionalMapAndBatchFusionCase_ == 9) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 9, (boolean)((java.lang.Boolean) optionalMapAndBatchFusion_)); + } + if (optionalMapAndFilterFusionCase_ == 10) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 10, (boolean)((java.lang.Boolean) optionalMapAndFilterFusion_)); + } + if (optionalMapFusionCase_ == 11) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 11, (boolean)((java.lang.Boolean) optionalMapFusion_)); + } + if (optionalMapParallelizationCase_ == 12) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 12, (boolean)((java.lang.Boolean) optionalMapParallelization_)); + } + if (mapVectorization_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(13, getMapVectorization()); + } + if (optionalNoopEliminationCase_ == 14) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 14, (boolean)((java.lang.Boolean) optionalNoopElimination_)); + } + if (optionalParallelBatchCase_ == 15) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 15, (boolean)((java.lang.Boolean) optionalParallelBatch_)); + } + if (optionalReorderDataDiscardingOpsCase_ == 16) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 16, (boolean)((java.lang.Boolean) optionalReorderDataDiscardingOps_)); + } + if 
(optionalShuffleAndRepeatFusionCase_ == 17) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 17, (boolean)((java.lang.Boolean) optionalShuffleAndRepeatFusion_)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.OptimizationOptions)) { + return super.equals(obj); + } + org.tensorflow.proto.data.OptimizationOptions other = (org.tensorflow.proto.data.OptimizationOptions) obj; + + if (hasMapVectorization() != other.hasMapVectorization()) return false; + if (hasMapVectorization()) { + if (!getMapVectorization() + .equals(other.getMapVectorization())) return false; + } + if (!getOptionalApplyDefaultOptimizationsCase().equals(other.getOptionalApplyDefaultOptimizationsCase())) return false; + switch (optionalApplyDefaultOptimizationsCase_) { + case 1: + if (getApplyDefaultOptimizations() + != other.getApplyDefaultOptimizations()) return false; + break; + case 0: + default: + } + if (!getOptionalAutotuneCase().equals(other.getOptionalAutotuneCase())) return false; + switch (optionalAutotuneCase_) { + case 2: + if (getAutotune() + != other.getAutotune()) return false; + break; + case 0: + default: + } + if (!getOptionalAutotuneBuffersCase().equals(other.getOptionalAutotuneBuffersCase())) return false; + switch (optionalAutotuneBuffersCase_) { + case 3: + if (getAutotuneBuffers() + != other.getAutotuneBuffers()) return false; + break; + case 0: + default: + } + if (!getOptionalAutotuneCpuBudgetCase().equals(other.getOptionalAutotuneCpuBudgetCase())) return false; + switch (optionalAutotuneCpuBudgetCase_) { + case 4: + if (getAutotuneCpuBudget() + != other.getAutotuneCpuBudget()) return false; + break; + case 0: + default: + } + if (!getOptionalAutotuneRamBudgetCase().equals(other.getOptionalAutotuneRamBudgetCase())) return false; + switch (optionalAutotuneRamBudgetCase_) { + case 5: + if (getAutotuneRamBudget() + != other.getAutotuneRamBudget()) return false; + break; + case 0: + default: + } + if (!getOptionalFilterFusionCase().equals(other.getOptionalFilterFusionCase())) return false; + switch (optionalFilterFusionCase_) { + case 6: + if (getFilterFusion() + != other.getFilterFusion()) return false; + break; + case 0: + default: + } + if (!getOptionalFilterWithRandomUniformFusionCase().equals(other.getOptionalFilterWithRandomUniformFusionCase())) return false; + switch (optionalFilterWithRandomUniformFusionCase_) { + case 7: + if (getFilterWithRandomUniformFusion() + != other.getFilterWithRandomUniformFusion()) return false; + break; + case 0: + default: + } + if (!getOptionalHoistRandomUniformCase().equals(other.getOptionalHoistRandomUniformCase())) return false; + switch (optionalHoistRandomUniformCase_) { + case 8: + if (getHoistRandomUniform() + != other.getHoistRandomUniform()) return false; + break; + case 0: + default: + } + if (!getOptionalMapAndBatchFusionCase().equals(other.getOptionalMapAndBatchFusionCase())) return false; + switch (optionalMapAndBatchFusionCase_) { + case 9: + if (getMapAndBatchFusion() + != other.getMapAndBatchFusion()) return false; + break; + case 0: + default: + } + if (!getOptionalMapAndFilterFusionCase().equals(other.getOptionalMapAndFilterFusionCase())) return false; + switch (optionalMapAndFilterFusionCase_) { + case 10: + if (getMapAndFilterFusion() + != other.getMapAndFilterFusion()) return false; + break; + case 0: + default: + } + 
if (!getOptionalMapFusionCase().equals(other.getOptionalMapFusionCase())) return false; + switch (optionalMapFusionCase_) { + case 11: + if (getMapFusion() + != other.getMapFusion()) return false; + break; + case 0: + default: + } + if (!getOptionalMapParallelizationCase().equals(other.getOptionalMapParallelizationCase())) return false; + switch (optionalMapParallelizationCase_) { + case 12: + if (getMapParallelization() + != other.getMapParallelization()) return false; + break; + case 0: + default: + } + if (!getOptionalNoopEliminationCase().equals(other.getOptionalNoopEliminationCase())) return false; + switch (optionalNoopEliminationCase_) { + case 14: + if (getNoopElimination() + != other.getNoopElimination()) return false; + break; + case 0: + default: + } + if (!getOptionalParallelBatchCase().equals(other.getOptionalParallelBatchCase())) return false; + switch (optionalParallelBatchCase_) { + case 15: + if (getParallelBatch() + != other.getParallelBatch()) return false; + break; + case 0: + default: + } + if (!getOptionalReorderDataDiscardingOpsCase().equals(other.getOptionalReorderDataDiscardingOpsCase())) return false; + switch (optionalReorderDataDiscardingOpsCase_) { + case 16: + if (getReorderDataDiscardingOps() + != other.getReorderDataDiscardingOps()) return false; + break; + case 0: + default: + } + if (!getOptionalShuffleAndRepeatFusionCase().equals(other.getOptionalShuffleAndRepeatFusionCase())) return false; + switch (optionalShuffleAndRepeatFusionCase_) { + case 17: + if (getShuffleAndRepeatFusion() + != other.getShuffleAndRepeatFusion()) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasMapVectorization()) { + hash = (37 * hash) + MAP_VECTORIZATION_FIELD_NUMBER; + hash = (53 * hash) + getMapVectorization().hashCode(); + } + switch (optionalApplyDefaultOptimizationsCase_) { + case 1: + hash = (37 * hash) + APPLY_DEFAULT_OPTIMIZATIONS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getApplyDefaultOptimizations()); + break; + case 0: + default: + } + switch (optionalAutotuneCase_) { + case 2: + hash = (37 * hash) + AUTOTUNE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getAutotune()); + break; + case 0: + default: + } + switch (optionalAutotuneBuffersCase_) { + case 3: + hash = (37 * hash) + AUTOTUNE_BUFFERS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getAutotuneBuffers()); + break; + case 0: + default: + } + switch (optionalAutotuneCpuBudgetCase_) { + case 4: + hash = (37 * hash) + AUTOTUNE_CPU_BUDGET_FIELD_NUMBER; + hash = (53 * hash) + getAutotuneCpuBudget(); + break; + case 0: + default: + } + switch (optionalAutotuneRamBudgetCase_) { + case 5: + hash = (37 * hash) + AUTOTUNE_RAM_BUDGET_FIELD_NUMBER; + hash = (53 * hash) + getAutotuneRamBudget(); + break; + case 0: + default: + } + switch (optionalFilterFusionCase_) { + case 6: + hash = (37 * hash) + FILTER_FUSION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getFilterFusion()); + break; + case 0: + default: + } + switch (optionalFilterWithRandomUniformFusionCase_) { + case 7: + hash = (37 * hash) + FILTER_WITH_RANDOM_UNIFORM_FUSION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + 
getFilterWithRandomUniformFusion()); + break; + case 0: + default: + } + switch (optionalHoistRandomUniformCase_) { + case 8: + hash = (37 * hash) + HOIST_RANDOM_UNIFORM_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getHoistRandomUniform()); + break; + case 0: + default: + } + switch (optionalMapAndBatchFusionCase_) { + case 9: + hash = (37 * hash) + MAP_AND_BATCH_FUSION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getMapAndBatchFusion()); + break; + case 0: + default: + } + switch (optionalMapAndFilterFusionCase_) { + case 10: + hash = (37 * hash) + MAP_AND_FILTER_FUSION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getMapAndFilterFusion()); + break; + case 0: + default: + } + switch (optionalMapFusionCase_) { + case 11: + hash = (37 * hash) + MAP_FUSION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getMapFusion()); + break; + case 0: + default: + } + switch (optionalMapParallelizationCase_) { + case 12: + hash = (37 * hash) + MAP_PARALLELIZATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getMapParallelization()); + break; + case 0: + default: + } + switch (optionalNoopEliminationCase_) { + case 14: + hash = (37 * hash) + NOOP_ELIMINATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getNoopElimination()); + break; + case 0: + default: + } + switch (optionalParallelBatchCase_) { + case 15: + hash = (37 * hash) + PARALLEL_BATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getParallelBatch()); + break; + case 0: + default: + } + switch (optionalReorderDataDiscardingOpsCase_) { + case 16: + hash = (37 * hash) + REORDER_DATA_DISCARDING_OPS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getReorderDataDiscardingOps()); + break; + case 0: + default: + } + switch (optionalShuffleAndRepeatFusionCase_) { + case 17: + hash = (37 * hash) + SHUFFLE_AND_REPEAT_FUSION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getShuffleAndRepeatFusion()); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.OptimizationOptions parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.OptimizationOptions parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.OptimizationOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.OptimizationOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.OptimizationOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.OptimizationOptions parseFrom( 
+ byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.OptimizationOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.OptimizationOptions parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.OptimizationOptions parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.OptimizationOptions parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.OptimizationOptions parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.OptimizationOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.OptimizationOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code tensorflow.data.OptimizationOptions} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.OptimizationOptions) + org.tensorflow.proto.data.OptimizationOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_OptimizationOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_OptimizationOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.OptimizationOptions.class, org.tensorflow.proto.data.OptimizationOptions.Builder.class); + } + + // Construct using org.tensorflow.proto.data.OptimizationOptions.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (mapVectorizationBuilder_ == null) { + mapVectorization_ = null; + } else { + mapVectorization_ = null; + mapVectorizationBuilder_ = null; + } + optionalApplyDefaultOptimizationsCase_ = 0; + optionalApplyDefaultOptimizations_ = null; + optionalAutotuneCase_ = 0; + optionalAutotune_ = null; + optionalAutotuneBuffersCase_ = 0; + optionalAutotuneBuffers_ = null; + optionalAutotuneCpuBudgetCase_ = 0; + optionalAutotuneCpuBudget_ = null; + optionalAutotuneRamBudgetCase_ = 0; + optionalAutotuneRamBudget_ = null; + optionalFilterFusionCase_ = 0; + optionalFilterFusion_ = null; + optionalFilterWithRandomUniformFusionCase_ = 0; + optionalFilterWithRandomUniformFusion_ = null; + optionalHoistRandomUniformCase_ = 0; + optionalHoistRandomUniform_ = null; + optionalMapAndBatchFusionCase_ = 0; + optionalMapAndBatchFusion_ = null; + optionalMapAndFilterFusionCase_ = 0; + optionalMapAndFilterFusion_ = null; + optionalMapFusionCase_ = 0; + optionalMapFusion_ = null; + optionalMapParallelizationCase_ = 0; + optionalMapParallelization_ = null; + optionalNoopEliminationCase_ = 0; + optionalNoopElimination_ = null; + optionalParallelBatchCase_ = 0; + optionalParallelBatch_ = null; + optionalReorderDataDiscardingOpsCase_ = 0; + optionalReorderDataDiscardingOps_ = null; + optionalShuffleAndRepeatFusionCase_ = 0; + optionalShuffleAndRepeatFusion_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_OptimizationOptions_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.OptimizationOptions getDefaultInstanceForType() { + return org.tensorflow.proto.data.OptimizationOptions.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.OptimizationOptions 
build() { + org.tensorflow.proto.data.OptimizationOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.OptimizationOptions buildPartial() { + org.tensorflow.proto.data.OptimizationOptions result = new org.tensorflow.proto.data.OptimizationOptions(this); + if (optionalApplyDefaultOptimizationsCase_ == 1) { + result.optionalApplyDefaultOptimizations_ = optionalApplyDefaultOptimizations_; + } + if (optionalAutotuneCase_ == 2) { + result.optionalAutotune_ = optionalAutotune_; + } + if (optionalAutotuneBuffersCase_ == 3) { + result.optionalAutotuneBuffers_ = optionalAutotuneBuffers_; + } + if (optionalAutotuneCpuBudgetCase_ == 4) { + result.optionalAutotuneCpuBudget_ = optionalAutotuneCpuBudget_; + } + if (optionalAutotuneRamBudgetCase_ == 5) { + result.optionalAutotuneRamBudget_ = optionalAutotuneRamBudget_; + } + if (optionalFilterFusionCase_ == 6) { + result.optionalFilterFusion_ = optionalFilterFusion_; + } + if (optionalFilterWithRandomUniformFusionCase_ == 7) { + result.optionalFilterWithRandomUniformFusion_ = optionalFilterWithRandomUniformFusion_; + } + if (optionalHoistRandomUniformCase_ == 8) { + result.optionalHoistRandomUniform_ = optionalHoistRandomUniform_; + } + if (optionalMapAndBatchFusionCase_ == 9) { + result.optionalMapAndBatchFusion_ = optionalMapAndBatchFusion_; + } + if (optionalMapAndFilterFusionCase_ == 10) { + result.optionalMapAndFilterFusion_ = optionalMapAndFilterFusion_; + } + if (optionalMapFusionCase_ == 11) { + result.optionalMapFusion_ = optionalMapFusion_; + } + if (optionalMapParallelizationCase_ == 12) { + result.optionalMapParallelization_ = optionalMapParallelization_; + } + if (mapVectorizationBuilder_ == null) { + result.mapVectorization_ = mapVectorization_; + } else { + result.mapVectorization_ = mapVectorizationBuilder_.build(); + } + if (optionalNoopEliminationCase_ == 14) { + result.optionalNoopElimination_ = optionalNoopElimination_; + } + if (optionalParallelBatchCase_ == 15) { + result.optionalParallelBatch_ = optionalParallelBatch_; + } + if (optionalReorderDataDiscardingOpsCase_ == 16) { + result.optionalReorderDataDiscardingOps_ = optionalReorderDataDiscardingOps_; + } + if (optionalShuffleAndRepeatFusionCase_ == 17) { + result.optionalShuffleAndRepeatFusion_ = optionalShuffleAndRepeatFusion_; + } + result.optionalApplyDefaultOptimizationsCase_ = optionalApplyDefaultOptimizationsCase_; + result.optionalAutotuneCase_ = optionalAutotuneCase_; + result.optionalAutotuneBuffersCase_ = optionalAutotuneBuffersCase_; + result.optionalAutotuneCpuBudgetCase_ = optionalAutotuneCpuBudgetCase_; + result.optionalAutotuneRamBudgetCase_ = optionalAutotuneRamBudgetCase_; + result.optionalFilterFusionCase_ = optionalFilterFusionCase_; + result.optionalFilterWithRandomUniformFusionCase_ = optionalFilterWithRandomUniformFusionCase_; + result.optionalHoistRandomUniformCase_ = optionalHoistRandomUniformCase_; + result.optionalMapAndBatchFusionCase_ = optionalMapAndBatchFusionCase_; + result.optionalMapAndFilterFusionCase_ = optionalMapAndFilterFusionCase_; + result.optionalMapFusionCase_ = optionalMapFusionCase_; + result.optionalMapParallelizationCase_ = optionalMapParallelizationCase_; + result.optionalNoopEliminationCase_ = optionalNoopEliminationCase_; + result.optionalParallelBatchCase_ = optionalParallelBatchCase_; + result.optionalReorderDataDiscardingOpsCase_ = optionalReorderDataDiscardingOpsCase_; + 
result.optionalShuffleAndRepeatFusionCase_ = optionalShuffleAndRepeatFusionCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.OptimizationOptions) { + return mergeFrom((org.tensorflow.proto.data.OptimizationOptions)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.OptimizationOptions other) { + if (other == org.tensorflow.proto.data.OptimizationOptions.getDefaultInstance()) return this; + if (other.hasMapVectorization()) { + mergeMapVectorization(other.getMapVectorization()); + } + switch (other.getOptionalApplyDefaultOptimizationsCase()) { + case APPLY_DEFAULT_OPTIMIZATIONS: { + setApplyDefaultOptimizations(other.getApplyDefaultOptimizations()); + break; + } + case OPTIONALAPPLYDEFAULTOPTIMIZATIONS_NOT_SET: { + break; + } + } + switch (other.getOptionalAutotuneCase()) { + case AUTOTUNE: { + setAutotune(other.getAutotune()); + break; + } + case OPTIONALAUTOTUNE_NOT_SET: { + break; + } + } + switch (other.getOptionalAutotuneBuffersCase()) { + case AUTOTUNE_BUFFERS: { + setAutotuneBuffers(other.getAutotuneBuffers()); + break; + } + case OPTIONALAUTOTUNEBUFFERS_NOT_SET: { + break; + } + } + switch (other.getOptionalAutotuneCpuBudgetCase()) { + case AUTOTUNE_CPU_BUDGET: { + setAutotuneCpuBudget(other.getAutotuneCpuBudget()); + break; + } + case OPTIONALAUTOTUNECPUBUDGET_NOT_SET: { + break; + } + } + switch (other.getOptionalAutotuneRamBudgetCase()) { + case AUTOTUNE_RAM_BUDGET: { + setAutotuneRamBudget(other.getAutotuneRamBudget()); + break; + } + case OPTIONALAUTOTUNERAMBUDGET_NOT_SET: { + break; + } + } + switch (other.getOptionalFilterFusionCase()) { + case FILTER_FUSION: { + setFilterFusion(other.getFilterFusion()); + break; + } + case OPTIONALFILTERFUSION_NOT_SET: { + break; + } + } + switch (other.getOptionalFilterWithRandomUniformFusionCase()) { + case FILTER_WITH_RANDOM_UNIFORM_FUSION: { + setFilterWithRandomUniformFusion(other.getFilterWithRandomUniformFusion()); + break; + } + case OPTIONALFILTERWITHRANDOMUNIFORMFUSION_NOT_SET: { + break; + } + } + switch (other.getOptionalHoistRandomUniformCase()) { + case HOIST_RANDOM_UNIFORM: { + setHoistRandomUniform(other.getHoistRandomUniform()); + break; + } + case OPTIONALHOISTRANDOMUNIFORM_NOT_SET: { + break; + } + } + switch (other.getOptionalMapAndBatchFusionCase()) { + case MAP_AND_BATCH_FUSION: { + setMapAndBatchFusion(other.getMapAndBatchFusion()); + break; + } + case OPTIONALMAPANDBATCHFUSION_NOT_SET: { 
+ break; + } + } + switch (other.getOptionalMapAndFilterFusionCase()) { + case MAP_AND_FILTER_FUSION: { + setMapAndFilterFusion(other.getMapAndFilterFusion()); + break; + } + case OPTIONALMAPANDFILTERFUSION_NOT_SET: { + break; + } + } + switch (other.getOptionalMapFusionCase()) { + case MAP_FUSION: { + setMapFusion(other.getMapFusion()); + break; + } + case OPTIONALMAPFUSION_NOT_SET: { + break; + } + } + switch (other.getOptionalMapParallelizationCase()) { + case MAP_PARALLELIZATION: { + setMapParallelization(other.getMapParallelization()); + break; + } + case OPTIONALMAPPARALLELIZATION_NOT_SET: { + break; + } + } + switch (other.getOptionalNoopEliminationCase()) { + case NOOP_ELIMINATION: { + setNoopElimination(other.getNoopElimination()); + break; + } + case OPTIONALNOOPELIMINATION_NOT_SET: { + break; + } + } + switch (other.getOptionalParallelBatchCase()) { + case PARALLEL_BATCH: { + setParallelBatch(other.getParallelBatch()); + break; + } + case OPTIONALPARALLELBATCH_NOT_SET: { + break; + } + } + switch (other.getOptionalReorderDataDiscardingOpsCase()) { + case REORDER_DATA_DISCARDING_OPS: { + setReorderDataDiscardingOps(other.getReorderDataDiscardingOps()); + break; + } + case OPTIONALREORDERDATADISCARDINGOPS_NOT_SET: { + break; + } + } + switch (other.getOptionalShuffleAndRepeatFusionCase()) { + case SHUFFLE_AND_REPEAT_FUSION: { + setShuffleAndRepeatFusion(other.getShuffleAndRepeatFusion()); + break; + } + case OPTIONALSHUFFLEANDREPEATFUSION_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.OptimizationOptions parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.OptimizationOptions) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int optionalApplyDefaultOptimizationsCase_ = 0; + private java.lang.Object optionalApplyDefaultOptimizations_; + public OptionalApplyDefaultOptimizationsCase + getOptionalApplyDefaultOptimizationsCase() { + return OptionalApplyDefaultOptimizationsCase.forNumber( + optionalApplyDefaultOptimizationsCase_); + } + + public Builder clearOptionalApplyDefaultOptimizations() { + optionalApplyDefaultOptimizationsCase_ = 0; + optionalApplyDefaultOptimizations_ = null; + onChanged(); + return this; + } + + private int optionalAutotuneCase_ = 0; + private java.lang.Object optionalAutotune_; + public OptionalAutotuneCase + getOptionalAutotuneCase() { + return OptionalAutotuneCase.forNumber( + optionalAutotuneCase_); + } + + public Builder clearOptionalAutotune() { + optionalAutotuneCase_ = 0; + optionalAutotune_ = null; + onChanged(); + return this; + } + + private int optionalAutotuneBuffersCase_ = 0; + private java.lang.Object optionalAutotuneBuffers_; + public OptionalAutotuneBuffersCase + getOptionalAutotuneBuffersCase() { + return OptionalAutotuneBuffersCase.forNumber( + optionalAutotuneBuffersCase_); + } + + public Builder clearOptionalAutotuneBuffers() { + optionalAutotuneBuffersCase_ = 0; + optionalAutotuneBuffers_ = null; + 
onChanged(); + return this; + } + + private int optionalAutotuneCpuBudgetCase_ = 0; + private java.lang.Object optionalAutotuneCpuBudget_; + public OptionalAutotuneCpuBudgetCase + getOptionalAutotuneCpuBudgetCase() { + return OptionalAutotuneCpuBudgetCase.forNumber( + optionalAutotuneCpuBudgetCase_); + } + + public Builder clearOptionalAutotuneCpuBudget() { + optionalAutotuneCpuBudgetCase_ = 0; + optionalAutotuneCpuBudget_ = null; + onChanged(); + return this; + } + + private int optionalAutotuneRamBudgetCase_ = 0; + private java.lang.Object optionalAutotuneRamBudget_; + public OptionalAutotuneRamBudgetCase + getOptionalAutotuneRamBudgetCase() { + return OptionalAutotuneRamBudgetCase.forNumber( + optionalAutotuneRamBudgetCase_); + } + + public Builder clearOptionalAutotuneRamBudget() { + optionalAutotuneRamBudgetCase_ = 0; + optionalAutotuneRamBudget_ = null; + onChanged(); + return this; + } + + private int optionalFilterFusionCase_ = 0; + private java.lang.Object optionalFilterFusion_; + public OptionalFilterFusionCase + getOptionalFilterFusionCase() { + return OptionalFilterFusionCase.forNumber( + optionalFilterFusionCase_); + } + + public Builder clearOptionalFilterFusion() { + optionalFilterFusionCase_ = 0; + optionalFilterFusion_ = null; + onChanged(); + return this; + } + + private int optionalFilterWithRandomUniformFusionCase_ = 0; + private java.lang.Object optionalFilterWithRandomUniformFusion_; + public OptionalFilterWithRandomUniformFusionCase + getOptionalFilterWithRandomUniformFusionCase() { + return OptionalFilterWithRandomUniformFusionCase.forNumber( + optionalFilterWithRandomUniformFusionCase_); + } + + public Builder clearOptionalFilterWithRandomUniformFusion() { + optionalFilterWithRandomUniformFusionCase_ = 0; + optionalFilterWithRandomUniformFusion_ = null; + onChanged(); + return this; + } + + private int optionalHoistRandomUniformCase_ = 0; + private java.lang.Object optionalHoistRandomUniform_; + public OptionalHoistRandomUniformCase + getOptionalHoistRandomUniformCase() { + return OptionalHoistRandomUniformCase.forNumber( + optionalHoistRandomUniformCase_); + } + + public Builder clearOptionalHoistRandomUniform() { + optionalHoistRandomUniformCase_ = 0; + optionalHoistRandomUniform_ = null; + onChanged(); + return this; + } + + private int optionalMapAndBatchFusionCase_ = 0; + private java.lang.Object optionalMapAndBatchFusion_; + public OptionalMapAndBatchFusionCase + getOptionalMapAndBatchFusionCase() { + return OptionalMapAndBatchFusionCase.forNumber( + optionalMapAndBatchFusionCase_); + } + + public Builder clearOptionalMapAndBatchFusion() { + optionalMapAndBatchFusionCase_ = 0; + optionalMapAndBatchFusion_ = null; + onChanged(); + return this; + } + + private int optionalMapAndFilterFusionCase_ = 0; + private java.lang.Object optionalMapAndFilterFusion_; + public OptionalMapAndFilterFusionCase + getOptionalMapAndFilterFusionCase() { + return OptionalMapAndFilterFusionCase.forNumber( + optionalMapAndFilterFusionCase_); + } + + public Builder clearOptionalMapAndFilterFusion() { + optionalMapAndFilterFusionCase_ = 0; + optionalMapAndFilterFusion_ = null; + onChanged(); + return this; + } + + private int optionalMapFusionCase_ = 0; + private java.lang.Object optionalMapFusion_; + public OptionalMapFusionCase + getOptionalMapFusionCase() { + return OptionalMapFusionCase.forNumber( + optionalMapFusionCase_); + } + + public Builder clearOptionalMapFusion() { + optionalMapFusionCase_ = 0; + optionalMapFusion_ = null; + onChanged(); + return this; + } + + private int 
optionalMapParallelizationCase_ = 0; + private java.lang.Object optionalMapParallelization_; + public OptionalMapParallelizationCase + getOptionalMapParallelizationCase() { + return OptionalMapParallelizationCase.forNumber( + optionalMapParallelizationCase_); + } + + public Builder clearOptionalMapParallelization() { + optionalMapParallelizationCase_ = 0; + optionalMapParallelization_ = null; + onChanged(); + return this; + } + + private int optionalNoopEliminationCase_ = 0; + private java.lang.Object optionalNoopElimination_; + public OptionalNoopEliminationCase + getOptionalNoopEliminationCase() { + return OptionalNoopEliminationCase.forNumber( + optionalNoopEliminationCase_); + } + + public Builder clearOptionalNoopElimination() { + optionalNoopEliminationCase_ = 0; + optionalNoopElimination_ = null; + onChanged(); + return this; + } + + private int optionalParallelBatchCase_ = 0; + private java.lang.Object optionalParallelBatch_; + public OptionalParallelBatchCase + getOptionalParallelBatchCase() { + return OptionalParallelBatchCase.forNumber( + optionalParallelBatchCase_); + } + + public Builder clearOptionalParallelBatch() { + optionalParallelBatchCase_ = 0; + optionalParallelBatch_ = null; + onChanged(); + return this; + } + + private int optionalReorderDataDiscardingOpsCase_ = 0; + private java.lang.Object optionalReorderDataDiscardingOps_; + public OptionalReorderDataDiscardingOpsCase + getOptionalReorderDataDiscardingOpsCase() { + return OptionalReorderDataDiscardingOpsCase.forNumber( + optionalReorderDataDiscardingOpsCase_); + } + + public Builder clearOptionalReorderDataDiscardingOps() { + optionalReorderDataDiscardingOpsCase_ = 0; + optionalReorderDataDiscardingOps_ = null; + onChanged(); + return this; + } + + private int optionalShuffleAndRepeatFusionCase_ = 0; + private java.lang.Object optionalShuffleAndRepeatFusion_; + public OptionalShuffleAndRepeatFusionCase + getOptionalShuffleAndRepeatFusionCase() { + return OptionalShuffleAndRepeatFusionCase.forNumber( + optionalShuffleAndRepeatFusionCase_); + } + + public Builder clearOptionalShuffleAndRepeatFusion() { + optionalShuffleAndRepeatFusionCase_ = 0; + optionalShuffleAndRepeatFusion_ = null; + onChanged(); + return this; + } + + + /** + * bool apply_default_optimizations = 1; + */ + public boolean getApplyDefaultOptimizations() { + if (optionalApplyDefaultOptimizationsCase_ == 1) { + return (java.lang.Boolean) optionalApplyDefaultOptimizations_; + } + return false; + } + /** + * bool apply_default_optimizations = 1; + */ + public Builder setApplyDefaultOptimizations(boolean value) { + optionalApplyDefaultOptimizationsCase_ = 1; + optionalApplyDefaultOptimizations_ = value; + onChanged(); + return this; + } + /** + * bool apply_default_optimizations = 1; + */ + public Builder clearApplyDefaultOptimizations() { + if (optionalApplyDefaultOptimizationsCase_ == 1) { + optionalApplyDefaultOptimizationsCase_ = 0; + optionalApplyDefaultOptimizations_ = null; + onChanged(); + } + return this; + } + + /** + * bool autotune = 2; + */ + public boolean getAutotune() { + if (optionalAutotuneCase_ == 2) { + return (java.lang.Boolean) optionalAutotune_; + } + return false; + } + /** + * bool autotune = 2; + */ + public Builder setAutotune(boolean value) { + optionalAutotuneCase_ = 2; + optionalAutotune_ = value; + onChanged(); + return this; + } + /** + * bool autotune = 2; + */ + public Builder clearAutotune() { + if (optionalAutotuneCase_ == 2) { + optionalAutotuneCase_ = 0; + optionalAutotune_ = null; + onChanged(); + } + return this; 
+ } + + /** + * bool autotune_buffers = 3; + */ + public boolean getAutotuneBuffers() { + if (optionalAutotuneBuffersCase_ == 3) { + return (java.lang.Boolean) optionalAutotuneBuffers_; + } + return false; + } + /** + * bool autotune_buffers = 3; + */ + public Builder setAutotuneBuffers(boolean value) { + optionalAutotuneBuffersCase_ = 3; + optionalAutotuneBuffers_ = value; + onChanged(); + return this; + } + /** + * bool autotune_buffers = 3; + */ + public Builder clearAutotuneBuffers() { + if (optionalAutotuneBuffersCase_ == 3) { + optionalAutotuneBuffersCase_ = 0; + optionalAutotuneBuffers_ = null; + onChanged(); + } + return this; + } + + /** + * int32 autotune_cpu_budget = 4; + */ + public int getAutotuneCpuBudget() { + if (optionalAutotuneCpuBudgetCase_ == 4) { + return (java.lang.Integer) optionalAutotuneCpuBudget_; + } + return 0; + } + /** + * int32 autotune_cpu_budget = 4; + */ + public Builder setAutotuneCpuBudget(int value) { + optionalAutotuneCpuBudgetCase_ = 4; + optionalAutotuneCpuBudget_ = value; + onChanged(); + return this; + } + /** + * int32 autotune_cpu_budget = 4; + */ + public Builder clearAutotuneCpuBudget() { + if (optionalAutotuneCpuBudgetCase_ == 4) { + optionalAutotuneCpuBudgetCase_ = 0; + optionalAutotuneCpuBudget_ = null; + onChanged(); + } + return this; + } + + /** + * int32 autotune_ram_budget = 5; + */ + public int getAutotuneRamBudget() { + if (optionalAutotuneRamBudgetCase_ == 5) { + return (java.lang.Integer) optionalAutotuneRamBudget_; + } + return 0; + } + /** + * int32 autotune_ram_budget = 5; + */ + public Builder setAutotuneRamBudget(int value) { + optionalAutotuneRamBudgetCase_ = 5; + optionalAutotuneRamBudget_ = value; + onChanged(); + return this; + } + /** + * int32 autotune_ram_budget = 5; + */ + public Builder clearAutotuneRamBudget() { + if (optionalAutotuneRamBudgetCase_ == 5) { + optionalAutotuneRamBudgetCase_ = 0; + optionalAutotuneRamBudget_ = null; + onChanged(); + } + return this; + } + + /** + * bool filter_fusion = 6; + */ + public boolean getFilterFusion() { + if (optionalFilterFusionCase_ == 6) { + return (java.lang.Boolean) optionalFilterFusion_; + } + return false; + } + /** + * bool filter_fusion = 6; + */ + public Builder setFilterFusion(boolean value) { + optionalFilterFusionCase_ = 6; + optionalFilterFusion_ = value; + onChanged(); + return this; + } + /** + * bool filter_fusion = 6; + */ + public Builder clearFilterFusion() { + if (optionalFilterFusionCase_ == 6) { + optionalFilterFusionCase_ = 0; + optionalFilterFusion_ = null; + onChanged(); + } + return this; + } + + /** + * bool filter_with_random_uniform_fusion = 7; + */ + public boolean getFilterWithRandomUniformFusion() { + if (optionalFilterWithRandomUniformFusionCase_ == 7) { + return (java.lang.Boolean) optionalFilterWithRandomUniformFusion_; + } + return false; + } + /** + * bool filter_with_random_uniform_fusion = 7; + */ + public Builder setFilterWithRandomUniformFusion(boolean value) { + optionalFilterWithRandomUniformFusionCase_ = 7; + optionalFilterWithRandomUniformFusion_ = value; + onChanged(); + return this; + } + /** + * bool filter_with_random_uniform_fusion = 7; + */ + public Builder clearFilterWithRandomUniformFusion() { + if (optionalFilterWithRandomUniformFusionCase_ == 7) { + optionalFilterWithRandomUniformFusionCase_ = 0; + optionalFilterWithRandomUniformFusion_ = null; + onChanged(); + } + return this; + } + + /** + * bool hoist_random_uniform = 8; + */ + public boolean getHoistRandomUniform() { + if (optionalHoistRandomUniformCase_ == 8) { + 
return (java.lang.Boolean) optionalHoistRandomUniform_; + } + return false; + } + /** + * bool hoist_random_uniform = 8; + */ + public Builder setHoistRandomUniform(boolean value) { + optionalHoistRandomUniformCase_ = 8; + optionalHoistRandomUniform_ = value; + onChanged(); + return this; + } + /** + * bool hoist_random_uniform = 8; + */ + public Builder clearHoistRandomUniform() { + if (optionalHoistRandomUniformCase_ == 8) { + optionalHoistRandomUniformCase_ = 0; + optionalHoistRandomUniform_ = null; + onChanged(); + } + return this; + } + + /** + * bool map_and_batch_fusion = 9; + */ + public boolean getMapAndBatchFusion() { + if (optionalMapAndBatchFusionCase_ == 9) { + return (java.lang.Boolean) optionalMapAndBatchFusion_; + } + return false; + } + /** + * bool map_and_batch_fusion = 9; + */ + public Builder setMapAndBatchFusion(boolean value) { + optionalMapAndBatchFusionCase_ = 9; + optionalMapAndBatchFusion_ = value; + onChanged(); + return this; + } + /** + * bool map_and_batch_fusion = 9; + */ + public Builder clearMapAndBatchFusion() { + if (optionalMapAndBatchFusionCase_ == 9) { + optionalMapAndBatchFusionCase_ = 0; + optionalMapAndBatchFusion_ = null; + onChanged(); + } + return this; + } + + /** + * bool map_and_filter_fusion = 10; + */ + public boolean getMapAndFilterFusion() { + if (optionalMapAndFilterFusionCase_ == 10) { + return (java.lang.Boolean) optionalMapAndFilterFusion_; + } + return false; + } + /** + * bool map_and_filter_fusion = 10; + */ + public Builder setMapAndFilterFusion(boolean value) { + optionalMapAndFilterFusionCase_ = 10; + optionalMapAndFilterFusion_ = value; + onChanged(); + return this; + } + /** + * bool map_and_filter_fusion = 10; + */ + public Builder clearMapAndFilterFusion() { + if (optionalMapAndFilterFusionCase_ == 10) { + optionalMapAndFilterFusionCase_ = 0; + optionalMapAndFilterFusion_ = null; + onChanged(); + } + return this; + } + + /** + * bool map_fusion = 11; + */ + public boolean getMapFusion() { + if (optionalMapFusionCase_ == 11) { + return (java.lang.Boolean) optionalMapFusion_; + } + return false; + } + /** + * bool map_fusion = 11; + */ + public Builder setMapFusion(boolean value) { + optionalMapFusionCase_ = 11; + optionalMapFusion_ = value; + onChanged(); + return this; + } + /** + * bool map_fusion = 11; + */ + public Builder clearMapFusion() { + if (optionalMapFusionCase_ == 11) { + optionalMapFusionCase_ = 0; + optionalMapFusion_ = null; + onChanged(); + } + return this; + } + + /** + * bool map_parallelization = 12; + */ + public boolean getMapParallelization() { + if (optionalMapParallelizationCase_ == 12) { + return (java.lang.Boolean) optionalMapParallelization_; + } + return false; + } + /** + * bool map_parallelization = 12; + */ + public Builder setMapParallelization(boolean value) { + optionalMapParallelizationCase_ = 12; + optionalMapParallelization_ = value; + onChanged(); + return this; + } + /** + * bool map_parallelization = 12; + */ + public Builder clearMapParallelization() { + if (optionalMapParallelizationCase_ == 12) { + optionalMapParallelizationCase_ = 0; + optionalMapParallelization_ = null; + onChanged(); + } + return this; + } + + private org.tensorflow.proto.data.MapVectorization mapVectorization_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.MapVectorization, org.tensorflow.proto.data.MapVectorization.Builder, org.tensorflow.proto.data.MapVectorizationOrBuilder> mapVectorizationBuilder_; + /** + *
      +     * The map vectorization options associated with the dataset.
      +     *
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public boolean hasMapVectorization() { + return mapVectorizationBuilder_ != null || mapVectorization_ != null; + } + /** + *
      +     * The map vectorization options associated with the dataset.
      +     *
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public org.tensorflow.proto.data.MapVectorization getMapVectorization() { + if (mapVectorizationBuilder_ == null) { + return mapVectorization_ == null ? org.tensorflow.proto.data.MapVectorization.getDefaultInstance() : mapVectorization_; + } else { + return mapVectorizationBuilder_.getMessage(); + } + } + /** + *
      +     * The map vectorization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public Builder setMapVectorization(org.tensorflow.proto.data.MapVectorization value) { + if (mapVectorizationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mapVectorization_ = value; + onChanged(); + } else { + mapVectorizationBuilder_.setMessage(value); + } + + return this; + } + /** + *
      +     * The map vectorization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public Builder setMapVectorization( + org.tensorflow.proto.data.MapVectorization.Builder builderForValue) { + if (mapVectorizationBuilder_ == null) { + mapVectorization_ = builderForValue.build(); + onChanged(); + } else { + mapVectorizationBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
      +     * The map vectorization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public Builder mergeMapVectorization(org.tensorflow.proto.data.MapVectorization value) { + if (mapVectorizationBuilder_ == null) { + if (mapVectorization_ != null) { + mapVectorization_ = + org.tensorflow.proto.data.MapVectorization.newBuilder(mapVectorization_).mergeFrom(value).buildPartial(); + } else { + mapVectorization_ = value; + } + onChanged(); + } else { + mapVectorizationBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
      +     * The map vectorization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public Builder clearMapVectorization() { + if (mapVectorizationBuilder_ == null) { + mapVectorization_ = null; + onChanged(); + } else { + mapVectorization_ = null; + mapVectorizationBuilder_ = null; + } + + return this; + } + /** + *
      +     * The map vectorization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public org.tensorflow.proto.data.MapVectorization.Builder getMapVectorizationBuilder() { + + onChanged(); + return getMapVectorizationFieldBuilder().getBuilder(); + } + /** + *
      +     * The map vectorization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + public org.tensorflow.proto.data.MapVectorizationOrBuilder getMapVectorizationOrBuilder() { + if (mapVectorizationBuilder_ != null) { + return mapVectorizationBuilder_.getMessageOrBuilder(); + } else { + return mapVectorization_ == null ? + org.tensorflow.proto.data.MapVectorization.getDefaultInstance() : mapVectorization_; + } + } + /** + *
      +     * The map vectorization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.MapVectorization, org.tensorflow.proto.data.MapVectorization.Builder, org.tensorflow.proto.data.MapVectorizationOrBuilder> + getMapVectorizationFieldBuilder() { + if (mapVectorizationBuilder_ == null) { + mapVectorizationBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.MapVectorization, org.tensorflow.proto.data.MapVectorization.Builder, org.tensorflow.proto.data.MapVectorizationOrBuilder>( + getMapVectorization(), + getParentForChildren(), + isClean()); + mapVectorization_ = null; + } + return mapVectorizationBuilder_; + } + + /** + * bool noop_elimination = 14; + */ + public boolean getNoopElimination() { + if (optionalNoopEliminationCase_ == 14) { + return (java.lang.Boolean) optionalNoopElimination_; + } + return false; + } + /** + * bool noop_elimination = 14; + */ + public Builder setNoopElimination(boolean value) { + optionalNoopEliminationCase_ = 14; + optionalNoopElimination_ = value; + onChanged(); + return this; + } + /** + * bool noop_elimination = 14; + */ + public Builder clearNoopElimination() { + if (optionalNoopEliminationCase_ == 14) { + optionalNoopEliminationCase_ = 0; + optionalNoopElimination_ = null; + onChanged(); + } + return this; + } + + /** + * bool parallel_batch = 15; + */ + public boolean getParallelBatch() { + if (optionalParallelBatchCase_ == 15) { + return (java.lang.Boolean) optionalParallelBatch_; + } + return false; + } + /** + * bool parallel_batch = 15; + */ + public Builder setParallelBatch(boolean value) { + optionalParallelBatchCase_ = 15; + optionalParallelBatch_ = value; + onChanged(); + return this; + } + /** + * bool parallel_batch = 15; + */ + public Builder clearParallelBatch() { + if (optionalParallelBatchCase_ == 15) { + optionalParallelBatchCase_ = 0; + optionalParallelBatch_ = null; + onChanged(); + } + return this; + } + + /** + * bool reorder_data_discarding_ops = 16; + */ + public boolean getReorderDataDiscardingOps() { + if (optionalReorderDataDiscardingOpsCase_ == 16) { + return (java.lang.Boolean) optionalReorderDataDiscardingOps_; + } + return false; + } + /** + * bool reorder_data_discarding_ops = 16; + */ + public Builder setReorderDataDiscardingOps(boolean value) { + optionalReorderDataDiscardingOpsCase_ = 16; + optionalReorderDataDiscardingOps_ = value; + onChanged(); + return this; + } + /** + * bool reorder_data_discarding_ops = 16; + */ + public Builder clearReorderDataDiscardingOps() { + if (optionalReorderDataDiscardingOpsCase_ == 16) { + optionalReorderDataDiscardingOpsCase_ = 0; + optionalReorderDataDiscardingOps_ = null; + onChanged(); + } + return this; + } + + /** + * bool shuffle_and_repeat_fusion = 17; + */ + public boolean getShuffleAndRepeatFusion() { + if (optionalShuffleAndRepeatFusionCase_ == 17) { + return (java.lang.Boolean) optionalShuffleAndRepeatFusion_; + } + return false; + } + /** + * bool shuffle_and_repeat_fusion = 17; + */ + public Builder setShuffleAndRepeatFusion(boolean value) { + optionalShuffleAndRepeatFusionCase_ = 17; + optionalShuffleAndRepeatFusion_ = value; + onChanged(); + return this; + } + /** + * bool shuffle_and_repeat_fusion = 17; + */ + public Builder clearShuffleAndRepeatFusion() { + if (optionalShuffleAndRepeatFusionCase_ == 17) { + optionalShuffleAndRepeatFusionCase_ = 0; + optionalShuffleAndRepeatFusion_ = null; + onChanged(); + } + return this; + } + @java.lang.Override + public final 
Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.OptimizationOptions) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.OptimizationOptions) + private static final org.tensorflow.proto.data.OptimizationOptions DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.OptimizationOptions(); + } + + public static org.tensorflow.proto.data.OptimizationOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public OptimizationOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new OptimizationOptions(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.OptimizationOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptionsOrBuilder.java new file mode 100644 index 00000000000..2197a9617fa --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptionsOrBuilder.java @@ -0,0 +1,146 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +public interface OptimizationOptionsOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.OptimizationOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * bool apply_default_optimizations = 1; + */ + boolean getApplyDefaultOptimizations(); + + /** + * bool autotune = 2; + */ + boolean getAutotune(); + + /** + * bool autotune_buffers = 3; + */ + boolean getAutotuneBuffers(); + + /** + * int32 autotune_cpu_budget = 4; + */ + int getAutotuneCpuBudget(); + + /** + * int32 autotune_ram_budget = 5; + */ + int getAutotuneRamBudget(); + + /** + * bool filter_fusion = 6; + */ + boolean getFilterFusion(); + + /** + * bool filter_with_random_uniform_fusion = 7; + */ + boolean getFilterWithRandomUniformFusion(); + + /** + * bool hoist_random_uniform = 8; + */ + boolean getHoistRandomUniform(); + + /** + * bool map_and_batch_fusion = 9; + */ + boolean getMapAndBatchFusion(); + + /** + * bool map_and_filter_fusion = 10; + */ + boolean getMapAndFilterFusion(); + + /** + * bool map_fusion = 11; + */ + boolean getMapFusion(); + + /** + * bool map_parallelization = 12; + */ + boolean getMapParallelization(); + + /** + *
      +   * The map vectorization options associated with the dataset.
      +   * </pre>
      +   *
      +   * <code>.tensorflow.data.MapVectorization map_vectorization = 13;</code>
      +   */
      +  boolean hasMapVectorization();
      +  /**
      +   * <pre>
      +   * The map vectorization options associated with the dataset.
      +   * </pre>
      +   *
      +   * <code>.tensorflow.data.MapVectorization map_vectorization = 13;</code>
      +   */
      +  org.tensorflow.proto.data.MapVectorization getMapVectorization();
      +  /**
      +   * <pre>
      +   * The map vectorization options associated with the dataset.
      +   * </pre>
      + * + * .tensorflow.data.MapVectorization map_vectorization = 13; + */ + org.tensorflow.proto.data.MapVectorizationOrBuilder getMapVectorizationOrBuilder(); + + /** + * bool noop_elimination = 14; + */ + boolean getNoopElimination(); + + /** + * bool parallel_batch = 15; + */ + boolean getParallelBatch(); + + /** + * bool reorder_data_discarding_ops = 16; + */ + boolean getReorderDataDiscardingOps(); + + /** + * bool shuffle_and_repeat_fusion = 17; + */ + boolean getShuffleAndRepeatFusion(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalApplyDefaultOptimizationsCase getOptionalApplyDefaultOptimizationsCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalAutotuneCase getOptionalAutotuneCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalAutotuneBuffersCase getOptionalAutotuneBuffersCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalAutotuneCpuBudgetCase getOptionalAutotuneCpuBudgetCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalAutotuneRamBudgetCase getOptionalAutotuneRamBudgetCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalFilterFusionCase getOptionalFilterFusionCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalFilterWithRandomUniformFusionCase getOptionalFilterWithRandomUniformFusionCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalHoistRandomUniformCase getOptionalHoistRandomUniformCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalMapAndBatchFusionCase getOptionalMapAndBatchFusionCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalMapAndFilterFusionCase getOptionalMapAndFilterFusionCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalMapFusionCase getOptionalMapFusionCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalMapParallelizationCase getOptionalMapParallelizationCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalNoopEliminationCase getOptionalNoopEliminationCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalParallelBatchCase getOptionalParallelBatchCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalReorderDataDiscardingOpsCase getOptionalReorderDataDiscardingOpsCase(); + + public org.tensorflow.proto.data.OptimizationOptions.OptionalShuffleAndRepeatFusionCase getOptionalShuffleAndRepeatFusionCase(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/Options.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/Options.java new file mode 100644 index 00000000000..b0b8481e67b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/Options.java @@ -0,0 +1,1567 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +/** + *
      + * Message stored with Dataset objects to control how datasets are processed and
      + * optimized.
      + * 
      + * + * Protobuf type {@code tensorflow.data.Options} + */ +public final class Options extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.Options) + OptionsOrBuilder { +private static final long serialVersionUID = 0L; + // Use Options.newBuilder() to construct. + private Options(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Options() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new Options(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Options( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + optionalDeterministicCase_ = 1; + optionalDeterministic_ = input.readBool(); + break; + } + case 18: { + org.tensorflow.proto.data.DistributeOptions.Builder subBuilder = null; + if (distributeOptions_ != null) { + subBuilder = distributeOptions_.toBuilder(); + } + distributeOptions_ = input.readMessage(org.tensorflow.proto.data.DistributeOptions.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(distributeOptions_); + distributeOptions_ = subBuilder.buildPartial(); + } + + break; + } + case 26: { + org.tensorflow.proto.data.OptimizationOptions.Builder subBuilder = null; + if (optimizationOptions_ != null) { + subBuilder = optimizationOptions_.toBuilder(); + } + optimizationOptions_ = input.readMessage(org.tensorflow.proto.data.OptimizationOptions.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(optimizationOptions_); + optimizationOptions_ = subBuilder.buildPartial(); + } + + break; + } + case 32: { + optionalSlackCase_ = 4; + optionalSlack_ = input.readBool(); + break; + } + case 42: { + org.tensorflow.proto.data.ThreadingOptions.Builder subBuilder = null; + if (threadingOptions_ != null) { + subBuilder = threadingOptions_.toBuilder(); + } + threadingOptions_ = input.readMessage(org.tensorflow.proto.data.ThreadingOptions.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(threadingOptions_); + threadingOptions_ = subBuilder.buildPartial(); + } + + break; + } + case 48: { + int rawValue = input.readEnum(); + optionalExternalStatePolicyCase_ = 6; + optionalExternalStatePolicy_ = rawValue; + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_Options_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_Options_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.Options.class, org.tensorflow.proto.data.Options.Builder.class); + } + + private int optionalDeterministicCase_ = 0; + private java.lang.Object optionalDeterministic_; + public enum OptionalDeterministicCase + implements com.google.protobuf.Internal.EnumLite { + DETERMINISTIC(1), + OPTIONALDETERMINISTIC_NOT_SET(0); + private final int value; + private OptionalDeterministicCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalDeterministicCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalDeterministicCase forNumber(int value) { + switch (value) { + case 1: return DETERMINISTIC; + case 0: return OPTIONALDETERMINISTIC_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalDeterministicCase + getOptionalDeterministicCase() { + return OptionalDeterministicCase.forNumber( + optionalDeterministicCase_); + } + + private int optionalSlackCase_ = 0; + private java.lang.Object optionalSlack_; + public enum OptionalSlackCase + implements com.google.protobuf.Internal.EnumLite { + SLACK(4), + OPTIONALSLACK_NOT_SET(0); + private final int value; + private OptionalSlackCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalSlackCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalSlackCase forNumber(int value) { + switch (value) { + case 4: return SLACK; + case 0: return OPTIONALSLACK_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalSlackCase + getOptionalSlackCase() { + return OptionalSlackCase.forNumber( + optionalSlackCase_); + } + + private int optionalExternalStatePolicyCase_ = 0; + private java.lang.Object optionalExternalStatePolicy_; + public enum OptionalExternalStatePolicyCase + implements com.google.protobuf.Internal.EnumLite { + EXTERNAL_STATE_POLICY(6), + OPTIONALEXTERNALSTATEPOLICY_NOT_SET(0); + private final int value; + private OptionalExternalStatePolicyCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalExternalStatePolicyCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalExternalStatePolicyCase forNumber(int value) { + switch (value) { + case 6: return EXTERNAL_STATE_POLICY; + case 0: return OPTIONALEXTERNALSTATEPOLICY_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalExternalStatePolicyCase + getOptionalExternalStatePolicyCase() { + return OptionalExternalStatePolicyCase.forNumber( + optionalExternalStatePolicyCase_); + } + + public static final int DETERMINISTIC_FIELD_NUMBER = 1; + /** + * bool deterministic = 1; + */ + public boolean getDeterministic() { + if (optionalDeterministicCase_ == 1) { + return (java.lang.Boolean) optionalDeterministic_; + } + return false; + } + + public static final int DISTRIBUTE_OPTIONS_FIELD_NUMBER = 2; + private org.tensorflow.proto.data.DistributeOptions distributeOptions_; + /** + *
      +   * The distribution strategy options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public boolean hasDistributeOptions() { + return distributeOptions_ != null; + } + /** + *
      +   * The distribution strategy options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public org.tensorflow.proto.data.DistributeOptions getDistributeOptions() { + return distributeOptions_ == null ? org.tensorflow.proto.data.DistributeOptions.getDefaultInstance() : distributeOptions_; + } + /** + *
      +   * The distribution strategy options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public org.tensorflow.proto.data.DistributeOptionsOrBuilder getDistributeOptionsOrBuilder() { + return getDistributeOptions(); + } + + public static final int OPTIMIZATION_OPTIONS_FIELD_NUMBER = 3; + private org.tensorflow.proto.data.OptimizationOptions optimizationOptions_; + /** + *
      +   * The optimization options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public boolean hasOptimizationOptions() { + return optimizationOptions_ != null; + } + /** + *
      +   * The optimization options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public org.tensorflow.proto.data.OptimizationOptions getOptimizationOptions() { + return optimizationOptions_ == null ? org.tensorflow.proto.data.OptimizationOptions.getDefaultInstance() : optimizationOptions_; + } + /** + *
      +   * The optimization options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public org.tensorflow.proto.data.OptimizationOptionsOrBuilder getOptimizationOptionsOrBuilder() { + return getOptimizationOptions(); + } + + public static final int SLACK_FIELD_NUMBER = 4; + /** + * bool slack = 4; + */ + public boolean getSlack() { + if (optionalSlackCase_ == 4) { + return (java.lang.Boolean) optionalSlack_; + } + return false; + } + + public static final int THREADING_OPTIONS_FIELD_NUMBER = 5; + private org.tensorflow.proto.data.ThreadingOptions threadingOptions_; + /** + *
      +   * The threading options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public boolean hasThreadingOptions() { + return threadingOptions_ != null; + } + /** + *
      +   * The threading options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public org.tensorflow.proto.data.ThreadingOptions getThreadingOptions() { + return threadingOptions_ == null ? org.tensorflow.proto.data.ThreadingOptions.getDefaultInstance() : threadingOptions_; + } + /** + *
      +   * The threading options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public org.tensorflow.proto.data.ThreadingOptionsOrBuilder getThreadingOptionsOrBuilder() { + return getThreadingOptions(); + } + + public static final int EXTERNAL_STATE_POLICY_FIELD_NUMBER = 6; + /** + * .tensorflow.data.ExternalStatePolicy external_state_policy = 6; + */ + public int getExternalStatePolicyValue() { + if (optionalExternalStatePolicyCase_ == 6) { + return (java.lang.Integer) optionalExternalStatePolicy_; + } + return 0; + } + /** + * .tensorflow.data.ExternalStatePolicy external_state_policy = 6; + */ + public org.tensorflow.proto.data.ExternalStatePolicy getExternalStatePolicy() { + if (optionalExternalStatePolicyCase_ == 6) { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.ExternalStatePolicy result = org.tensorflow.proto.data.ExternalStatePolicy.valueOf( + (java.lang.Integer) optionalExternalStatePolicy_); + return result == null ? org.tensorflow.proto.data.ExternalStatePolicy.UNRECOGNIZED : result; + } + return org.tensorflow.proto.data.ExternalStatePolicy.POLICY_WARN; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (optionalDeterministicCase_ == 1) { + output.writeBool( + 1, (boolean)((java.lang.Boolean) optionalDeterministic_)); + } + if (distributeOptions_ != null) { + output.writeMessage(2, getDistributeOptions()); + } + if (optimizationOptions_ != null) { + output.writeMessage(3, getOptimizationOptions()); + } + if (optionalSlackCase_ == 4) { + output.writeBool( + 4, (boolean)((java.lang.Boolean) optionalSlack_)); + } + if (threadingOptions_ != null) { + output.writeMessage(5, getThreadingOptions()); + } + if (optionalExternalStatePolicyCase_ == 6) { + output.writeEnum(6, ((java.lang.Integer) optionalExternalStatePolicy_)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (optionalDeterministicCase_ == 1) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 1, (boolean)((java.lang.Boolean) optionalDeterministic_)); + } + if (distributeOptions_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getDistributeOptions()); + } + if (optimizationOptions_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getOptimizationOptions()); + } + if (optionalSlackCase_ == 4) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 4, (boolean)((java.lang.Boolean) optionalSlack_)); + } + if (threadingOptions_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, getThreadingOptions()); + } + if (optionalExternalStatePolicyCase_ == 6) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(6, ((java.lang.Integer) optionalExternalStatePolicy_)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.Options)) { + return super.equals(obj); + } + 
org.tensorflow.proto.data.Options other = (org.tensorflow.proto.data.Options) obj; + + if (hasDistributeOptions() != other.hasDistributeOptions()) return false; + if (hasDistributeOptions()) { + if (!getDistributeOptions() + .equals(other.getDistributeOptions())) return false; + } + if (hasOptimizationOptions() != other.hasOptimizationOptions()) return false; + if (hasOptimizationOptions()) { + if (!getOptimizationOptions() + .equals(other.getOptimizationOptions())) return false; + } + if (hasThreadingOptions() != other.hasThreadingOptions()) return false; + if (hasThreadingOptions()) { + if (!getThreadingOptions() + .equals(other.getThreadingOptions())) return false; + } + if (!getOptionalDeterministicCase().equals(other.getOptionalDeterministicCase())) return false; + switch (optionalDeterministicCase_) { + case 1: + if (getDeterministic() + != other.getDeterministic()) return false; + break; + case 0: + default: + } + if (!getOptionalSlackCase().equals(other.getOptionalSlackCase())) return false; + switch (optionalSlackCase_) { + case 4: + if (getSlack() + != other.getSlack()) return false; + break; + case 0: + default: + } + if (!getOptionalExternalStatePolicyCase().equals(other.getOptionalExternalStatePolicyCase())) return false; + switch (optionalExternalStatePolicyCase_) { + case 6: + if (getExternalStatePolicyValue() + != other.getExternalStatePolicyValue()) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasDistributeOptions()) { + hash = (37 * hash) + DISTRIBUTE_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getDistributeOptions().hashCode(); + } + if (hasOptimizationOptions()) { + hash = (37 * hash) + OPTIMIZATION_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getOptimizationOptions().hashCode(); + } + if (hasThreadingOptions()) { + hash = (37 * hash) + THREADING_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getThreadingOptions().hashCode(); + } + switch (optionalDeterministicCase_) { + case 1: + hash = (37 * hash) + DETERMINISTIC_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getDeterministic()); + break; + case 0: + default: + } + switch (optionalSlackCase_) { + case 4: + hash = (37 * hash) + SLACK_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getSlack()); + break; + case 0: + default: + } + switch (optionalExternalStatePolicyCase_) { + case 6: + hash = (37 * hash) + EXTERNAL_STATE_POLICY_FIELD_NUMBER; + hash = (53 * hash) + getExternalStatePolicyValue(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.Options parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.Options parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.Options parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.tensorflow.proto.data.Options parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.Options parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.Options parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.Options parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.Options parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.Options parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.Options parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.Options parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.Options parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.Options prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +   * Message stored with Dataset objects to control how datasets are processed and
      +   * optimized.
      +   * 
      + * + * Protobuf type {@code tensorflow.data.Options} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.Options) + org.tensorflow.proto.data.OptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_Options_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_Options_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.Options.class, org.tensorflow.proto.data.Options.Builder.class); + } + + // Construct using org.tensorflow.proto.data.Options.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (distributeOptionsBuilder_ == null) { + distributeOptions_ = null; + } else { + distributeOptions_ = null; + distributeOptionsBuilder_ = null; + } + if (optimizationOptionsBuilder_ == null) { + optimizationOptions_ = null; + } else { + optimizationOptions_ = null; + optimizationOptionsBuilder_ = null; + } + if (threadingOptionsBuilder_ == null) { + threadingOptions_ = null; + } else { + threadingOptions_ = null; + threadingOptionsBuilder_ = null; + } + optionalDeterministicCase_ = 0; + optionalDeterministic_ = null; + optionalSlackCase_ = 0; + optionalSlack_ = null; + optionalExternalStatePolicyCase_ = 0; + optionalExternalStatePolicy_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_Options_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.Options getDefaultInstanceForType() { + return org.tensorflow.proto.data.Options.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.Options build() { + org.tensorflow.proto.data.Options result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.Options buildPartial() { + org.tensorflow.proto.data.Options result = new org.tensorflow.proto.data.Options(this); + if (optionalDeterministicCase_ == 1) { + result.optionalDeterministic_ = optionalDeterministic_; + } + if (distributeOptionsBuilder_ == null) { + result.distributeOptions_ = distributeOptions_; + } else { + result.distributeOptions_ = distributeOptionsBuilder_.build(); + } + if (optimizationOptionsBuilder_ == null) { + result.optimizationOptions_ = optimizationOptions_; + } else { + result.optimizationOptions_ = optimizationOptionsBuilder_.build(); + } + if (optionalSlackCase_ == 4) { + result.optionalSlack_ = optionalSlack_; + } + if (threadingOptionsBuilder_ == null) { + result.threadingOptions_ = threadingOptions_; + } else { + result.threadingOptions_ = threadingOptionsBuilder_.build(); + } + if 
(optionalExternalStatePolicyCase_ == 6) { + result.optionalExternalStatePolicy_ = optionalExternalStatePolicy_; + } + result.optionalDeterministicCase_ = optionalDeterministicCase_; + result.optionalSlackCase_ = optionalSlackCase_; + result.optionalExternalStatePolicyCase_ = optionalExternalStatePolicyCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.Options) { + return mergeFrom((org.tensorflow.proto.data.Options)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.Options other) { + if (other == org.tensorflow.proto.data.Options.getDefaultInstance()) return this; + if (other.hasDistributeOptions()) { + mergeDistributeOptions(other.getDistributeOptions()); + } + if (other.hasOptimizationOptions()) { + mergeOptimizationOptions(other.getOptimizationOptions()); + } + if (other.hasThreadingOptions()) { + mergeThreadingOptions(other.getThreadingOptions()); + } + switch (other.getOptionalDeterministicCase()) { + case DETERMINISTIC: { + setDeterministic(other.getDeterministic()); + break; + } + case OPTIONALDETERMINISTIC_NOT_SET: { + break; + } + } + switch (other.getOptionalSlackCase()) { + case SLACK: { + setSlack(other.getSlack()); + break; + } + case OPTIONALSLACK_NOT_SET: { + break; + } + } + switch (other.getOptionalExternalStatePolicyCase()) { + case EXTERNAL_STATE_POLICY: { + setExternalStatePolicyValue(other.getExternalStatePolicyValue()); + break; + } + case OPTIONALEXTERNALSTATEPOLICY_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.Options parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.Options) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int optionalDeterministicCase_ = 0; + private java.lang.Object optionalDeterministic_; + public OptionalDeterministicCase + getOptionalDeterministicCase() { + return 
OptionalDeterministicCase.forNumber( + optionalDeterministicCase_); + } + + public Builder clearOptionalDeterministic() { + optionalDeterministicCase_ = 0; + optionalDeterministic_ = null; + onChanged(); + return this; + } + + private int optionalSlackCase_ = 0; + private java.lang.Object optionalSlack_; + public OptionalSlackCase + getOptionalSlackCase() { + return OptionalSlackCase.forNumber( + optionalSlackCase_); + } + + public Builder clearOptionalSlack() { + optionalSlackCase_ = 0; + optionalSlack_ = null; + onChanged(); + return this; + } + + private int optionalExternalStatePolicyCase_ = 0; + private java.lang.Object optionalExternalStatePolicy_; + public OptionalExternalStatePolicyCase + getOptionalExternalStatePolicyCase() { + return OptionalExternalStatePolicyCase.forNumber( + optionalExternalStatePolicyCase_); + } + + public Builder clearOptionalExternalStatePolicy() { + optionalExternalStatePolicyCase_ = 0; + optionalExternalStatePolicy_ = null; + onChanged(); + return this; + } + + + /** + * bool deterministic = 1; + */ + public boolean getDeterministic() { + if (optionalDeterministicCase_ == 1) { + return (java.lang.Boolean) optionalDeterministic_; + } + return false; + } + /** + * bool deterministic = 1; + */ + public Builder setDeterministic(boolean value) { + optionalDeterministicCase_ = 1; + optionalDeterministic_ = value; + onChanged(); + return this; + } + /** + * bool deterministic = 1; + */ + public Builder clearDeterministic() { + if (optionalDeterministicCase_ == 1) { + optionalDeterministicCase_ = 0; + optionalDeterministic_ = null; + onChanged(); + } + return this; + } + + private org.tensorflow.proto.data.DistributeOptions distributeOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.DistributeOptions, org.tensorflow.proto.data.DistributeOptions.Builder, org.tensorflow.proto.data.DistributeOptionsOrBuilder> distributeOptionsBuilder_; + /** + *
      +     * The distribution strategy options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public boolean hasDistributeOptions() { + return distributeOptionsBuilder_ != null || distributeOptions_ != null; + } + /** + *
      +     * The distribution strategy options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public org.tensorflow.proto.data.DistributeOptions getDistributeOptions() { + if (distributeOptionsBuilder_ == null) { + return distributeOptions_ == null ? org.tensorflow.proto.data.DistributeOptions.getDefaultInstance() : distributeOptions_; + } else { + return distributeOptionsBuilder_.getMessage(); + } + } + /** + *
      +     * The distribution strategy options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public Builder setDistributeOptions(org.tensorflow.proto.data.DistributeOptions value) { + if (distributeOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + distributeOptions_ = value; + onChanged(); + } else { + distributeOptionsBuilder_.setMessage(value); + } + + return this; + } + /** + *
      +     * The distribution strategy options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public Builder setDistributeOptions( + org.tensorflow.proto.data.DistributeOptions.Builder builderForValue) { + if (distributeOptionsBuilder_ == null) { + distributeOptions_ = builderForValue.build(); + onChanged(); + } else { + distributeOptionsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
      +     * The distribution strategy options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public Builder mergeDistributeOptions(org.tensorflow.proto.data.DistributeOptions value) { + if (distributeOptionsBuilder_ == null) { + if (distributeOptions_ != null) { + distributeOptions_ = + org.tensorflow.proto.data.DistributeOptions.newBuilder(distributeOptions_).mergeFrom(value).buildPartial(); + } else { + distributeOptions_ = value; + } + onChanged(); + } else { + distributeOptionsBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
      +     * The distribution strategy options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public Builder clearDistributeOptions() { + if (distributeOptionsBuilder_ == null) { + distributeOptions_ = null; + onChanged(); + } else { + distributeOptions_ = null; + distributeOptionsBuilder_ = null; + } + + return this; + } + /** + *
      +     * The distribution strategy options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public org.tensorflow.proto.data.DistributeOptions.Builder getDistributeOptionsBuilder() { + + onChanged(); + return getDistributeOptionsFieldBuilder().getBuilder(); + } + /** + *
      +     * The distribution strategy options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + public org.tensorflow.proto.data.DistributeOptionsOrBuilder getDistributeOptionsOrBuilder() { + if (distributeOptionsBuilder_ != null) { + return distributeOptionsBuilder_.getMessageOrBuilder(); + } else { + return distributeOptions_ == null ? + org.tensorflow.proto.data.DistributeOptions.getDefaultInstance() : distributeOptions_; + } + } + /** + *
      +     * The distribution strategy options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.DistributeOptions, org.tensorflow.proto.data.DistributeOptions.Builder, org.tensorflow.proto.data.DistributeOptionsOrBuilder> + getDistributeOptionsFieldBuilder() { + if (distributeOptionsBuilder_ == null) { + distributeOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.DistributeOptions, org.tensorflow.proto.data.DistributeOptions.Builder, org.tensorflow.proto.data.DistributeOptionsOrBuilder>( + getDistributeOptions(), + getParentForChildren(), + isClean()); + distributeOptions_ = null; + } + return distributeOptionsBuilder_; + } + + private org.tensorflow.proto.data.OptimizationOptions optimizationOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.OptimizationOptions, org.tensorflow.proto.data.OptimizationOptions.Builder, org.tensorflow.proto.data.OptimizationOptionsOrBuilder> optimizationOptionsBuilder_; + /** + *
      +     * The optimization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public boolean hasOptimizationOptions() { + return optimizationOptionsBuilder_ != null || optimizationOptions_ != null; + } + /** + *
      +     * The optimization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public org.tensorflow.proto.data.OptimizationOptions getOptimizationOptions() { + if (optimizationOptionsBuilder_ == null) { + return optimizationOptions_ == null ? org.tensorflow.proto.data.OptimizationOptions.getDefaultInstance() : optimizationOptions_; + } else { + return optimizationOptionsBuilder_.getMessage(); + } + } + /** + *
      +     * The optimization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public Builder setOptimizationOptions(org.tensorflow.proto.data.OptimizationOptions value) { + if (optimizationOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + optimizationOptions_ = value; + onChanged(); + } else { + optimizationOptionsBuilder_.setMessage(value); + } + + return this; + } + /** + *
      +     * The optimization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public Builder setOptimizationOptions( + org.tensorflow.proto.data.OptimizationOptions.Builder builderForValue) { + if (optimizationOptionsBuilder_ == null) { + optimizationOptions_ = builderForValue.build(); + onChanged(); + } else { + optimizationOptionsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
      +     * The optimization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public Builder mergeOptimizationOptions(org.tensorflow.proto.data.OptimizationOptions value) { + if (optimizationOptionsBuilder_ == null) { + if (optimizationOptions_ != null) { + optimizationOptions_ = + org.tensorflow.proto.data.OptimizationOptions.newBuilder(optimizationOptions_).mergeFrom(value).buildPartial(); + } else { + optimizationOptions_ = value; + } + onChanged(); + } else { + optimizationOptionsBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
      +     * The optimization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public Builder clearOptimizationOptions() { + if (optimizationOptionsBuilder_ == null) { + optimizationOptions_ = null; + onChanged(); + } else { + optimizationOptions_ = null; + optimizationOptionsBuilder_ = null; + } + + return this; + } + /** + *
      +     * The optimization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public org.tensorflow.proto.data.OptimizationOptions.Builder getOptimizationOptionsBuilder() { + + onChanged(); + return getOptimizationOptionsFieldBuilder().getBuilder(); + } + /** + *
      +     * The optimization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + public org.tensorflow.proto.data.OptimizationOptionsOrBuilder getOptimizationOptionsOrBuilder() { + if (optimizationOptionsBuilder_ != null) { + return optimizationOptionsBuilder_.getMessageOrBuilder(); + } else { + return optimizationOptions_ == null ? + org.tensorflow.proto.data.OptimizationOptions.getDefaultInstance() : optimizationOptions_; + } + } + /** + *
      +     * The optimization options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.OptimizationOptions, org.tensorflow.proto.data.OptimizationOptions.Builder, org.tensorflow.proto.data.OptimizationOptionsOrBuilder> + getOptimizationOptionsFieldBuilder() { + if (optimizationOptionsBuilder_ == null) { + optimizationOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.OptimizationOptions, org.tensorflow.proto.data.OptimizationOptions.Builder, org.tensorflow.proto.data.OptimizationOptionsOrBuilder>( + getOptimizationOptions(), + getParentForChildren(), + isClean()); + optimizationOptions_ = null; + } + return optimizationOptionsBuilder_; + } + + /** + * bool slack = 4; + */ + public boolean getSlack() { + if (optionalSlackCase_ == 4) { + return (java.lang.Boolean) optionalSlack_; + } + return false; + } + /** + * bool slack = 4; + */ + public Builder setSlack(boolean value) { + optionalSlackCase_ = 4; + optionalSlack_ = value; + onChanged(); + return this; + } + /** + * bool slack = 4; + */ + public Builder clearSlack() { + if (optionalSlackCase_ == 4) { + optionalSlackCase_ = 0; + optionalSlack_ = null; + onChanged(); + } + return this; + } + + private org.tensorflow.proto.data.ThreadingOptions threadingOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.ThreadingOptions, org.tensorflow.proto.data.ThreadingOptions.Builder, org.tensorflow.proto.data.ThreadingOptionsOrBuilder> threadingOptionsBuilder_; + /** + *
      +     * The threading options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public boolean hasThreadingOptions() { + return threadingOptionsBuilder_ != null || threadingOptions_ != null; + } + /** + *
      +     * The threading options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public org.tensorflow.proto.data.ThreadingOptions getThreadingOptions() { + if (threadingOptionsBuilder_ == null) { + return threadingOptions_ == null ? org.tensorflow.proto.data.ThreadingOptions.getDefaultInstance() : threadingOptions_; + } else { + return threadingOptionsBuilder_.getMessage(); + } + } + /** + *
      +     * The threading options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public Builder setThreadingOptions(org.tensorflow.proto.data.ThreadingOptions value) { + if (threadingOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + threadingOptions_ = value; + onChanged(); + } else { + threadingOptionsBuilder_.setMessage(value); + } + + return this; + } + /** + *
      +     * The threading options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public Builder setThreadingOptions( + org.tensorflow.proto.data.ThreadingOptions.Builder builderForValue) { + if (threadingOptionsBuilder_ == null) { + threadingOptions_ = builderForValue.build(); + onChanged(); + } else { + threadingOptionsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
      +     * The threading options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public Builder mergeThreadingOptions(org.tensorflow.proto.data.ThreadingOptions value) { + if (threadingOptionsBuilder_ == null) { + if (threadingOptions_ != null) { + threadingOptions_ = + org.tensorflow.proto.data.ThreadingOptions.newBuilder(threadingOptions_).mergeFrom(value).buildPartial(); + } else { + threadingOptions_ = value; + } + onChanged(); + } else { + threadingOptionsBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
      +     * The threading options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public Builder clearThreadingOptions() { + if (threadingOptionsBuilder_ == null) { + threadingOptions_ = null; + onChanged(); + } else { + threadingOptions_ = null; + threadingOptionsBuilder_ = null; + } + + return this; + } + /** + *
      +     * The threading options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public org.tensorflow.proto.data.ThreadingOptions.Builder getThreadingOptionsBuilder() { + + onChanged(); + return getThreadingOptionsFieldBuilder().getBuilder(); + } + /** + *
      +     * The threading options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + public org.tensorflow.proto.data.ThreadingOptionsOrBuilder getThreadingOptionsOrBuilder() { + if (threadingOptionsBuilder_ != null) { + return threadingOptionsBuilder_.getMessageOrBuilder(); + } else { + return threadingOptions_ == null ? + org.tensorflow.proto.data.ThreadingOptions.getDefaultInstance() : threadingOptions_; + } + } + /** + *
      +     * The threading options associated with the dataset.
      +     * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.ThreadingOptions, org.tensorflow.proto.data.ThreadingOptions.Builder, org.tensorflow.proto.data.ThreadingOptionsOrBuilder> + getThreadingOptionsFieldBuilder() { + if (threadingOptionsBuilder_ == null) { + threadingOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.ThreadingOptions, org.tensorflow.proto.data.ThreadingOptions.Builder, org.tensorflow.proto.data.ThreadingOptionsOrBuilder>( + getThreadingOptions(), + getParentForChildren(), + isClean()); + threadingOptions_ = null; + } + return threadingOptionsBuilder_; + } + + /** + * .tensorflow.data.ExternalStatePolicy external_state_policy = 6; + */ + public int getExternalStatePolicyValue() { + if (optionalExternalStatePolicyCase_ == 6) { + return ((java.lang.Integer) optionalExternalStatePolicy_).intValue(); + } + return 0; + } + /** + * .tensorflow.data.ExternalStatePolicy external_state_policy = 6; + */ + public Builder setExternalStatePolicyValue(int value) { + optionalExternalStatePolicyCase_ = 6; + optionalExternalStatePolicy_ = value; + onChanged(); + return this; + } + /** + * .tensorflow.data.ExternalStatePolicy external_state_policy = 6; + */ + public org.tensorflow.proto.data.ExternalStatePolicy getExternalStatePolicy() { + if (optionalExternalStatePolicyCase_ == 6) { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.ExternalStatePolicy result = org.tensorflow.proto.data.ExternalStatePolicy.valueOf( + (java.lang.Integer) optionalExternalStatePolicy_); + return result == null ? org.tensorflow.proto.data.ExternalStatePolicy.UNRECOGNIZED : result; + } + return org.tensorflow.proto.data.ExternalStatePolicy.POLICY_WARN; + } + /** + * .tensorflow.data.ExternalStatePolicy external_state_policy = 6; + */ + public Builder setExternalStatePolicy(org.tensorflow.proto.data.ExternalStatePolicy value) { + if (value == null) { + throw new NullPointerException(); + } + optionalExternalStatePolicyCase_ = 6; + optionalExternalStatePolicy_ = value.getNumber(); + onChanged(); + return this; + } + /** + * .tensorflow.data.ExternalStatePolicy external_state_policy = 6; + */ + public Builder clearExternalStatePolicy() { + if (optionalExternalStatePolicyCase_ == 6) { + optionalExternalStatePolicyCase_ = 0; + optionalExternalStatePolicy_ = null; + onChanged(); + } + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.Options) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.Options) + private static final org.tensorflow.proto.data.Options DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.Options(); + } + + public static org.tensorflow.proto.data.Options getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Options parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return new Options(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.Options getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptionsOrBuilder.java new file mode 100644 index 00000000000..b4f2077d1ca --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptionsOrBuilder.java @@ -0,0 +1,109 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +public interface OptionsOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.Options) + com.google.protobuf.MessageOrBuilder { + + /** + * bool deterministic = 1; + */ + boolean getDeterministic(); + + /** + *
      +   * The distribution strategy options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + boolean hasDistributeOptions(); + /** + *
      +   * The distribution strategy options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + org.tensorflow.proto.data.DistributeOptions getDistributeOptions(); + /** + *
      +   * The distribution strategy options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.DistributeOptions distribute_options = 2; + */ + org.tensorflow.proto.data.DistributeOptionsOrBuilder getDistributeOptionsOrBuilder(); + + /** + *
      +   * The optimization options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + boolean hasOptimizationOptions(); + /** + *
      +   * The optimization options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + org.tensorflow.proto.data.OptimizationOptions getOptimizationOptions(); + /** + *
      +   * The optimization options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.OptimizationOptions optimization_options = 3; + */ + org.tensorflow.proto.data.OptimizationOptionsOrBuilder getOptimizationOptionsOrBuilder(); + + /** + * bool slack = 4; + */ + boolean getSlack(); + + /** + *
      +   * The threading options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + boolean hasThreadingOptions(); + /** + *
      +   * The threading options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + org.tensorflow.proto.data.ThreadingOptions getThreadingOptions(); + /** + *
      +   * The threading options associated with the dataset.
      +   * 
      + * + * .tensorflow.data.ThreadingOptions threading_options = 5; + */ + org.tensorflow.proto.data.ThreadingOptionsOrBuilder getThreadingOptionsOrBuilder(); + + /** + * .tensorflow.data.ExternalStatePolicy external_state_policy = 6; + */ + int getExternalStatePolicyValue(); + /** + * .tensorflow.data.ExternalStatePolicy external_state_policy = 6; + */ + org.tensorflow.proto.data.ExternalStatePolicy getExternalStatePolicy(); + + public org.tensorflow.proto.data.Options.OptionalDeterministicCase getOptionalDeterministicCase(); + + public org.tensorflow.proto.data.Options.OptionalSlackCase getOptionalSlackCase(); + + public org.tensorflow.proto.data.Options.OptionalExternalStatePolicyCase getOptionalExternalStatePolicyCase(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptions.java new file mode 100644 index 00000000000..eebc5aaf459 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptions.java @@ -0,0 +1,695 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +/** + * Protobuf type {@code tensorflow.data.ThreadingOptions} + */ +public final class ThreadingOptions extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.ThreadingOptions) + ThreadingOptionsOrBuilder { +private static final long serialVersionUID = 0L; + // Use ThreadingOptions.newBuilder() to construct. + private ThreadingOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ThreadingOptions() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new ThreadingOptions(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ThreadingOptions( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + optionalMaxIntraOpParallelismCase_ = 1; + optionalMaxIntraOpParallelism_ = input.readInt32(); + break; + } + case 16: { + optionalPrivateThreadpoolSizeCase_ = 2; + optionalPrivateThreadpoolSize_ = input.readInt32(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_ThreadingOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_ThreadingOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.ThreadingOptions.class, org.tensorflow.proto.data.ThreadingOptions.Builder.class); + } + + private int optionalMaxIntraOpParallelismCase_ = 0; + private java.lang.Object optionalMaxIntraOpParallelism_; + public enum OptionalMaxIntraOpParallelismCase + implements com.google.protobuf.Internal.EnumLite { + MAX_INTRA_OP_PARALLELISM(1), + OPTIONALMAXINTRAOPPARALLELISM_NOT_SET(0); + private final int value; + private OptionalMaxIntraOpParallelismCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalMaxIntraOpParallelismCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalMaxIntraOpParallelismCase forNumber(int value) { + switch (value) { + case 1: return MAX_INTRA_OP_PARALLELISM; + case 0: return OPTIONALMAXINTRAOPPARALLELISM_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalMaxIntraOpParallelismCase + getOptionalMaxIntraOpParallelismCase() { + return OptionalMaxIntraOpParallelismCase.forNumber( + optionalMaxIntraOpParallelismCase_); + } + + private int optionalPrivateThreadpoolSizeCase_ = 0; + private java.lang.Object optionalPrivateThreadpoolSize_; + public enum OptionalPrivateThreadpoolSizeCase + implements com.google.protobuf.Internal.EnumLite { + PRIVATE_THREADPOOL_SIZE(2), + OPTIONALPRIVATETHREADPOOLSIZE_NOT_SET(0); + private final int value; + private OptionalPrivateThreadpoolSizeCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalPrivateThreadpoolSizeCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalPrivateThreadpoolSizeCase forNumber(int value) { + switch (value) { + case 2: return PRIVATE_THREADPOOL_SIZE; + case 0: return OPTIONALPRIVATETHREADPOOLSIZE_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalPrivateThreadpoolSizeCase + getOptionalPrivateThreadpoolSizeCase() { + return OptionalPrivateThreadpoolSizeCase.forNumber( + optionalPrivateThreadpoolSizeCase_); + } + + public static final int MAX_INTRA_OP_PARALLELISM_FIELD_NUMBER = 1; + /** + * int32 max_intra_op_parallelism = 1; + */ + public int getMaxIntraOpParallelism() { + if (optionalMaxIntraOpParallelismCase_ == 1) { + return (java.lang.Integer) optionalMaxIntraOpParallelism_; + } + return 0; + } + + public static final int PRIVATE_THREADPOOL_SIZE_FIELD_NUMBER = 2; + /** + * int32 private_threadpool_size = 2; + */ + public int getPrivateThreadpoolSize() { + if (optionalPrivateThreadpoolSizeCase_ == 2) { + return (java.lang.Integer) optionalPrivateThreadpoolSize_; + } + return 0; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (optionalMaxIntraOpParallelismCase_ == 1) { + output.writeInt32( + 1, (int)((java.lang.Integer) optionalMaxIntraOpParallelism_)); + } + if (optionalPrivateThreadpoolSizeCase_ == 2) { + output.writeInt32( + 2, (int)((java.lang.Integer) optionalPrivateThreadpoolSize_)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (optionalMaxIntraOpParallelismCase_ == 1) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size( + 1, (int)((java.lang.Integer) optionalMaxIntraOpParallelism_)); + } + if (optionalPrivateThreadpoolSizeCase_ == 2) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size( + 2, (int)((java.lang.Integer) optionalPrivateThreadpoolSize_)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.ThreadingOptions)) { + return super.equals(obj); + } + org.tensorflow.proto.data.ThreadingOptions other = (org.tensorflow.proto.data.ThreadingOptions) obj; + + if (!getOptionalMaxIntraOpParallelismCase().equals(other.getOptionalMaxIntraOpParallelismCase())) return false; + switch (optionalMaxIntraOpParallelismCase_) { + case 1: + if (getMaxIntraOpParallelism() + != other.getMaxIntraOpParallelism()) return false; + break; + case 0: + default: + } + if (!getOptionalPrivateThreadpoolSizeCase().equals(other.getOptionalPrivateThreadpoolSizeCase())) return false; + switch (optionalPrivateThreadpoolSizeCase_) { + case 2: + if (getPrivateThreadpoolSize() + != other.getPrivateThreadpoolSize()) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if 
(memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (optionalMaxIntraOpParallelismCase_) { + case 1: + hash = (37 * hash) + MAX_INTRA_OP_PARALLELISM_FIELD_NUMBER; + hash = (53 * hash) + getMaxIntraOpParallelism(); + break; + case 0: + default: + } + switch (optionalPrivateThreadpoolSizeCase_) { + case 2: + hash = (37 * hash) + PRIVATE_THREADPOOL_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPrivateThreadpoolSize(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.ThreadingOptions parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.ThreadingOptions parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.ThreadingOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.ThreadingOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.ThreadingOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.ThreadingOptions parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.ThreadingOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.ThreadingOptions parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.ThreadingOptions parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.ThreadingOptions parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.ThreadingOptions parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.ThreadingOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.ThreadingOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code tensorflow.data.ThreadingOptions} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.ThreadingOptions) + org.tensorflow.proto.data.ThreadingOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_ThreadingOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_ThreadingOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.ThreadingOptions.class, org.tensorflow.proto.data.ThreadingOptions.Builder.class); + } + + // Construct using org.tensorflow.proto.data.ThreadingOptions.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + optionalMaxIntraOpParallelismCase_ = 0; + optionalMaxIntraOpParallelism_ = null; + optionalPrivateThreadpoolSizeCase_ = 0; + optionalPrivateThreadpoolSize_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_ThreadingOptions_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.ThreadingOptions getDefaultInstanceForType() { + return org.tensorflow.proto.data.ThreadingOptions.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.ThreadingOptions build() { + org.tensorflow.proto.data.ThreadingOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.ThreadingOptions buildPartial() { + org.tensorflow.proto.data.ThreadingOptions result = new org.tensorflow.proto.data.ThreadingOptions(this); + if (optionalMaxIntraOpParallelismCase_ == 1) { + result.optionalMaxIntraOpParallelism_ = optionalMaxIntraOpParallelism_; + } + if (optionalPrivateThreadpoolSizeCase_ == 2) { + result.optionalPrivateThreadpoolSize_ = optionalPrivateThreadpoolSize_; + } + 
result.optionalMaxIntraOpParallelismCase_ = optionalMaxIntraOpParallelismCase_; + result.optionalPrivateThreadpoolSizeCase_ = optionalPrivateThreadpoolSizeCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.ThreadingOptions) { + return mergeFrom((org.tensorflow.proto.data.ThreadingOptions)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.ThreadingOptions other) { + if (other == org.tensorflow.proto.data.ThreadingOptions.getDefaultInstance()) return this; + switch (other.getOptionalMaxIntraOpParallelismCase()) { + case MAX_INTRA_OP_PARALLELISM: { + setMaxIntraOpParallelism(other.getMaxIntraOpParallelism()); + break; + } + case OPTIONALMAXINTRAOPPARALLELISM_NOT_SET: { + break; + } + } + switch (other.getOptionalPrivateThreadpoolSizeCase()) { + case PRIVATE_THREADPOOL_SIZE: { + setPrivateThreadpoolSize(other.getPrivateThreadpoolSize()); + break; + } + case OPTIONALPRIVATETHREADPOOLSIZE_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.ThreadingOptions parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.ThreadingOptions) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int optionalMaxIntraOpParallelismCase_ = 0; + private java.lang.Object optionalMaxIntraOpParallelism_; + public OptionalMaxIntraOpParallelismCase + getOptionalMaxIntraOpParallelismCase() { + return OptionalMaxIntraOpParallelismCase.forNumber( + optionalMaxIntraOpParallelismCase_); + } + + public Builder clearOptionalMaxIntraOpParallelism() { + optionalMaxIntraOpParallelismCase_ = 0; + optionalMaxIntraOpParallelism_ = null; + onChanged(); + return this; + } + + private int optionalPrivateThreadpoolSizeCase_ = 0; + private java.lang.Object optionalPrivateThreadpoolSize_; + public OptionalPrivateThreadpoolSizeCase + getOptionalPrivateThreadpoolSizeCase() { + return 
OptionalPrivateThreadpoolSizeCase.forNumber( + optionalPrivateThreadpoolSizeCase_); + } + + public Builder clearOptionalPrivateThreadpoolSize() { + optionalPrivateThreadpoolSizeCase_ = 0; + optionalPrivateThreadpoolSize_ = null; + onChanged(); + return this; + } + + + /** + * int32 max_intra_op_parallelism = 1; + */ + public int getMaxIntraOpParallelism() { + if (optionalMaxIntraOpParallelismCase_ == 1) { + return (java.lang.Integer) optionalMaxIntraOpParallelism_; + } + return 0; + } + /** + * int32 max_intra_op_parallelism = 1; + */ + public Builder setMaxIntraOpParallelism(int value) { + optionalMaxIntraOpParallelismCase_ = 1; + optionalMaxIntraOpParallelism_ = value; + onChanged(); + return this; + } + /** + * int32 max_intra_op_parallelism = 1; + */ + public Builder clearMaxIntraOpParallelism() { + if (optionalMaxIntraOpParallelismCase_ == 1) { + optionalMaxIntraOpParallelismCase_ = 0; + optionalMaxIntraOpParallelism_ = null; + onChanged(); + } + return this; + } + + /** + * int32 private_threadpool_size = 2; + */ + public int getPrivateThreadpoolSize() { + if (optionalPrivateThreadpoolSizeCase_ == 2) { + return (java.lang.Integer) optionalPrivateThreadpoolSize_; + } + return 0; + } + /** + * int32 private_threadpool_size = 2; + */ + public Builder setPrivateThreadpoolSize(int value) { + optionalPrivateThreadpoolSizeCase_ = 2; + optionalPrivateThreadpoolSize_ = value; + onChanged(); + return this; + } + /** + * int32 private_threadpool_size = 2; + */ + public Builder clearPrivateThreadpoolSize() { + if (optionalPrivateThreadpoolSizeCase_ == 2) { + optionalPrivateThreadpoolSizeCase_ = 0; + optionalPrivateThreadpoolSize_ = null; + onChanged(); + } + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.ThreadingOptions) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.ThreadingOptions) + private static final org.tensorflow.proto.data.ThreadingOptions DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.ThreadingOptions(); + } + + public static org.tensorflow.proto.data.ThreadingOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ThreadingOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ThreadingOptions(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.ThreadingOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptionsOrBuilder.java new file mode 100644 index 00000000000..3a4d602db55 --- /dev/null +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptionsOrBuilder.java @@ -0,0 +1,23 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +public interface ThreadingOptionsOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.ThreadingOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 max_intra_op_parallelism = 1; + */ + int getMaxIntraOpParallelism(); + + /** + * int32 private_threadpool_size = 2; + */ + int getPrivateThreadpoolSize(); + + public org.tensorflow.proto.data.ThreadingOptions.OptionalMaxIntraOpParallelismCase getOptionalMaxIntraOpParallelismCase(); + + public org.tensorflow.proto.data.ThreadingOptions.OptionalPrivateThreadpoolSizeCase getOptionalPrivateThreadpoolSizeCase(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java index 33ea43d5005..94ae5448669 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/protobuf/data/experimental/service_config.proto +// source: tensorflow/core/protobuf/service_config.proto package org.tensorflow.proto.data.experimental; @@ -1232,6 +1232,35 @@ public interface WorkerConfigOrBuilder extends * int64 dispatcher_timeout_ms = 6; */ long getDispatcherTimeoutMs(); + + /** + *
      +     * The protocol for the worker to use when transferring data to clients.
      +     * 
      + * + * string data_transfer_protocol = 7; + */ + java.lang.String getDataTransferProtocol(); + /** + *
      +     * The protocol for the worker to use when transferring data to clients.
      +     * 
      + * + * string data_transfer_protocol = 7; + */ + com.google.protobuf.ByteString + getDataTransferProtocolBytes(); + + /** + *
      +     * When shutting down a worker, how long to wait for the gRPC server to
      +     * process the final requests. This is used to achieve clean shutdown in unit
      +     * tests.
      +     * 
      + * + * int64 shutdown_quiet_period_ms = 9; + */ + long getShutdownQuietPeriodMs(); } /** *
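
The three accessors above are the read-side surface of the two fields this change adds to the tf.data service worker configuration. As an illustrative sketch only (not part of the generated sources), the matching builder setters added later in this diff can be used like any other proto3 scalar setters; the "grpc" value below is a hypothetical placeholder:

    import org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig;

    public class WorkerConfigSketch {
      public static void main(String[] args) {
        // Build a WorkerConfig that exercises the newly added fields 7 and 9.
        WorkerConfig config = WorkerConfig.newBuilder()
            .setDataTransferProtocol("grpc")   // hypothetical protocol name
            .setShutdownQuietPeriodMs(100L)
            .build();
        // Read them back through the WorkerConfigOrBuilder accessors declared above.
        System.out.println(config.getDataTransferProtocol());  // grpc
        System.out.println(config.getShutdownQuietPeriodMs()); // 100
      }
    }
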
      @@ -1253,6 +1282,7 @@ private WorkerConfig() {
             protocol_ = "";
             dispatcherAddress_ = "";
             workerAddress_ = "";
      +      dataTransferProtocol_ = "";
           }
       
           @java.lang.Override
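
The constructor change above only pins the usual proto3 defaults, so a WorkerConfig serialized before this change should still parse, with the new fields reading back as their defaults. A minimal sketch (illustrative, not part of the patch):

    import org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig;

    class WorkerConfigDefaultsSketch {
      public static void main(String[] args) {
        WorkerConfig defaults = WorkerConfig.getDefaultInstance();
        System.out.println(defaults.getDataTransferProtocol().isEmpty()); // true: proto3 string default
        System.out.println(defaults.getShutdownQuietPeriodMs());          // 0: proto3 int64 default
      }
    }
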
      @@ -1318,6 +1348,17 @@ private WorkerConfig(
                     dispatcherTimeoutMs_ = input.readInt64();
                     break;
                   }
      +            case 58: {
      +              java.lang.String s = input.readStringRequireUtf8();
      +
      +              dataTransferProtocol_ = s;
      +              break;
      +            }
      +            case 72: {
      +
      +              shutdownQuietPeriodMs_ = input.readInt64();
      +              break;
      +            }
                   default: {
                     if (!parseUnknownField(
                         input, unknownFields, extensionRegistry, tag)) {
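
For readers checking the new parser cases: the case labels are standard protobuf wire tags, computed as (field_number << 3) | wire_type, so 58 and 72 follow directly from field numbers 7 and 9. Illustrative arithmetic only, not part of the generated code:

    class WireTagSketch {
      public static void main(String[] args) {
        // A protobuf wire tag is (field_number << 3) | wire_type.
        System.out.println((7 << 3) | 2); // field 7, length-delimited string -> 58 (case 58 above)
        System.out.println((9 << 3) | 0); // field 9, varint int64           -> 72 (case 72 above)
      }
    }
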
      @@ -1521,6 +1562,63 @@ public long getDispatcherTimeoutMs() {
             return dispatcherTimeoutMs_;
           }
       
      +    public static final int DATA_TRANSFER_PROTOCOL_FIELD_NUMBER = 7;
      +    private volatile java.lang.Object dataTransferProtocol_;
      +    /**
      +     * 
      +     * The protocol for the worker to use when transferring data to clients.
      +     * 
      + * + * string data_transfer_protocol = 7; + */ + public java.lang.String getDataTransferProtocol() { + java.lang.Object ref = dataTransferProtocol_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + dataTransferProtocol_ = s; + return s; + } + } + /** + *
      +     * The protocol for the worker to use when transferring data to clients.
      +     * 
      + * + * string data_transfer_protocol = 7; + */ + public com.google.protobuf.ByteString + getDataTransferProtocolBytes() { + java.lang.Object ref = dataTransferProtocol_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + dataTransferProtocol_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SHUTDOWN_QUIET_PERIOD_MS_FIELD_NUMBER = 9; + private long shutdownQuietPeriodMs_; + /** + *
      +     * When shutting down a worker, how long to wait for the gRPC server to
      +     * process the final requests. This is used to achieve clean shutdown in unit
      +     * tests.
      +     * 
      + * + * int64 shutdown_quiet_period_ms = 9; + */ + public long getShutdownQuietPeriodMs() { + return shutdownQuietPeriodMs_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -1553,6 +1651,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (dispatcherTimeoutMs_ != 0L) { output.writeInt64(6, dispatcherTimeoutMs_); } + if (!getDataTransferProtocolBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 7, dataTransferProtocol_); + } + if (shutdownQuietPeriodMs_ != 0L) { + output.writeInt64(9, shutdownQuietPeriodMs_); + } unknownFields.writeTo(output); } @@ -1583,6 +1687,13 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt64Size(6, dispatcherTimeoutMs_); } + if (!getDataTransferProtocolBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, dataTransferProtocol_); + } + if (shutdownQuietPeriodMs_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(9, shutdownQuietPeriodMs_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -1610,6 +1721,10 @@ public boolean equals(final java.lang.Object obj) { != other.getHeartbeatIntervalMs()) return false; if (getDispatcherTimeoutMs() != other.getDispatcherTimeoutMs()) return false; + if (!getDataTransferProtocol() + .equals(other.getDataTransferProtocol())) return false; + if (getShutdownQuietPeriodMs() + != other.getShutdownQuietPeriodMs()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -1636,6 +1751,11 @@ public int hashCode() { hash = (37 * hash) + DISPATCHER_TIMEOUT_MS_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getDispatcherTimeoutMs()); + hash = (37 * hash) + DATA_TRANSFER_PROTOCOL_FIELD_NUMBER; + hash = (53 * hash) + getDataTransferProtocol().hashCode(); + hash = (37 * hash) + SHUTDOWN_QUIET_PERIOD_MS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getShutdownQuietPeriodMs()); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1785,6 +1905,10 @@ public Builder clear() { dispatcherTimeoutMs_ = 0L; + dataTransferProtocol_ = ""; + + shutdownQuietPeriodMs_ = 0L; + return this; } @@ -1817,6 +1941,8 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig buildPa result.workerAddress_ = workerAddress_; result.heartbeatIntervalMs_ = heartbeatIntervalMs_; result.dispatcherTimeoutMs_ = dispatcherTimeoutMs_; + result.dataTransferProtocol_ = dataTransferProtocol_; + result.shutdownQuietPeriodMs_ = shutdownQuietPeriodMs_; onBuilt(); return result; } @@ -1886,6 +2012,13 @@ public Builder mergeFrom(org.tensorflow.proto.data.experimental.ServiceConfig.Wo if (other.getDispatcherTimeoutMs() != 0L) { setDispatcherTimeoutMs(other.getDispatcherTimeoutMs()); } + if (!other.getDataTransferProtocol().isEmpty()) { + dataTransferProtocol_ = other.dataTransferProtocol_; + onChanged(); + } + if (other.getShutdownQuietPeriodMs() != 0L) { + setShutdownQuietPeriodMs(other.getShutdownQuietPeriodMs()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -2311,6 +2444,139 @@ public Builder clearDispatcherTimeoutMs() { onChanged(); return this; } + + private java.lang.Object dataTransferProtocol_ = ""; + /** + *
      +       * The protocol for the worker to use when transferring data to clients.
      +       * 
      + * + * string data_transfer_protocol = 7; + */ + public java.lang.String getDataTransferProtocol() { + java.lang.Object ref = dataTransferProtocol_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + dataTransferProtocol_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
      +       * The protocol for the worker to use when transferring data to clients.
      +       * 
      + * + * string data_transfer_protocol = 7; + */ + public com.google.protobuf.ByteString + getDataTransferProtocolBytes() { + java.lang.Object ref = dataTransferProtocol_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + dataTransferProtocol_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
      +       * The protocol for the worker to use when transferring data to clients.
      +       * 
      + * + * string data_transfer_protocol = 7; + */ + public Builder setDataTransferProtocol( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + dataTransferProtocol_ = value; + onChanged(); + return this; + } + /** + *
      +       * The protocol for the worker to use when transferring data to clients.
      +       * 
      + * + * string data_transfer_protocol = 7; + */ + public Builder clearDataTransferProtocol() { + + dataTransferProtocol_ = getDefaultInstance().getDataTransferProtocol(); + onChanged(); + return this; + } + /** + *
      +       * The protocol for the worker to use when transferring data to clients.
      +       * 
      + * + * string data_transfer_protocol = 7; + */ + public Builder setDataTransferProtocolBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + dataTransferProtocol_ = value; + onChanged(); + return this; + } + + private long shutdownQuietPeriodMs_ ; + /** + *
      +       * When shutting down a worker, how long to wait for the gRPC server to
      +       * process the final requests. This is used to achieve clean shutdown in unit
      +       * tests.
      +       * 
      + * + * int64 shutdown_quiet_period_ms = 9; + */ + public long getShutdownQuietPeriodMs() { + return shutdownQuietPeriodMs_; + } + /** + *
      +       * When shutting down a worker, how long to wait for the gRPC server to
      +       * process the final requests. This is used to achieve clean shutdown in unit
      +       * tests.
      +       * 
      + * + * int64 shutdown_quiet_period_ms = 9; + */ + public Builder setShutdownQuietPeriodMs(long value) { + + shutdownQuietPeriodMs_ = value; + onChanged(); + return this; + } + /** + *
      +       * When shutting down a worker, how long to wait for the gRPC server to
      +       * process the final requests. This is used to achieve clean shutdown in unit
      +       * tests.
      +       * 
      + * + * int64 shutdown_quiet_period_ms = 9; + */ + public Builder clearShutdownQuietPeriodMs() { + + shutdownQuietPeriodMs_ = 0L; + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -2383,18 +2649,19 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig getDefa descriptor; static { java.lang.String[] descriptorData = { - "\n?tensorflow/core/protobuf/data/experime" + - "ntal/service_config.proto\022\034tensorflow.da" + - "ta.experimental\"\236\001\n\020DispatcherConfig\022\014\n\004" + - "port\030\001 \001(\003\022\020\n\010protocol\030\002 \001(\t\022\020\n\010work_dir" + - "\030\003 \001(\t\022\033\n\023fault_tolerant_mode\030\004 \001(\010\022 \n\030j" + - "ob_gc_check_interval_ms\030\005 \001(\003\022\031\n\021job_gc_" + - "timeout_ms\030\006 \001(\003\"\240\001\n\014WorkerConfig\022\014\n\004por" + - "t\030\001 \001(\003\022\020\n\010protocol\030\002 \001(\t\022\032\n\022dispatcher_" + - "address\030\003 \001(\t\022\026\n\016worker_address\030\004 \001(\t\022\035\n" + - "\025heartbeat_interval_ms\030\005 \001(\003\022\035\n\025dispatch" + - "er_timeout_ms\030\006 \001(\003B(\n&org.tensorflow.pr" + - "oto.data.experimentalb\006proto3" + "\n-tensorflow/core/protobuf/service_confi" + + "g.proto\022\034tensorflow.data.experimental\"\236\001" + + "\n\020DispatcherConfig\022\014\n\004port\030\001 \001(\003\022\020\n\010prot" + + "ocol\030\002 \001(\t\022\020\n\010work_dir\030\003 \001(\t\022\033\n\023fault_to" + + "lerant_mode\030\004 \001(\010\022 \n\030job_gc_check_interv" + + "al_ms\030\005 \001(\003\022\031\n\021job_gc_timeout_ms\030\006 \001(\003\"\342" + + "\001\n\014WorkerConfig\022\014\n\004port\030\001 \001(\003\022\020\n\010protoco" + + "l\030\002 \001(\t\022\032\n\022dispatcher_address\030\003 \001(\t\022\026\n\016w" + + "orker_address\030\004 \001(\t\022\035\n\025heartbeat_interva" + + "l_ms\030\005 \001(\003\022\035\n\025dispatcher_timeout_ms\030\006 \001(" + + "\003\022\036\n\026data_transfer_protocol\030\007 \001(\t\022 \n\030shu" + + "tdown_quiet_period_ms\030\t \001(\003B(\n&org.tenso" + + "rflow.proto.data.experimentalb\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -2411,7 +2678,7 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig getDefa internal_static_tensorflow_data_experimental_WorkerConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_experimental_WorkerConfig_descriptor, - new java.lang.String[] { "Port", "Protocol", "DispatcherAddress", "WorkerAddress", "HeartbeatIntervalMs", "DispatcherTimeoutMs", }); + new java.lang.String[] { "Port", "Protocol", "DispatcherAddress", "WorkerAddress", "HeartbeatIntervalMs", "DispatcherTimeoutMs", "DataTransferProtocol", "ShutdownQuietPeriodMs", }); } // @@protoc_insertion_point(outer_class_scope) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotMetadataRecord.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotMetadataRecord.java index 696b438c3c8..1ad5e1b6c74 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotMetadataRecord.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotMetadataRecord.java @@ -1,5 +1,5 @@ 
// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/protobuf/data/experimental/snapshot.proto +// source: tensorflow/core/protobuf/snapshot.proto package org.tensorflow.proto.data.experimental; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotMetadataRecordOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotMetadataRecordOrBuilder.java index 97e3dddd818..5533ed93eb3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotMetadataRecordOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotMetadataRecordOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/protobuf/data/experimental/snapshot.proto +// source: tensorflow/core/protobuf/snapshot.proto package org.tensorflow.proto.data.experimental; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotProtos.java index b4f4cbceaf4..289d5308ec9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotProtos.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/protobuf/data/experimental/snapshot.proto +// source: tensorflow/core/protobuf/snapshot.proto package org.tensorflow.proto.data.experimental; @@ -43,25 +43,24 @@ public static void registerAllExtensions( descriptor; static { java.lang.String[] descriptorData = { - "\n9tensorflow/core/protobuf/data/experime" + - "ntal/snapshot.proto\022\034tensorflow.data.exp" + - "erimental\032&tensorflow/core/framework/ten" + - "sor.proto\032,tensorflow/core/framework/ten" + - "sor_shape.proto\032%tensorflow/core/framewo" + - "rk/types.proto\"9\n\016SnapshotRecord\022\'\n\006tens" + - "or\030\001 \003(\0132\027.tensorflow.TensorProto\"\270\001\n\026Sn" + - "apshotMetadataRecord\022\022\n\ngraph_hash\030\001 \001(\t" + - "\022\016\n\006run_id\030\002 \001(\t\022\032\n\022creation_timestamp\030\003" + - " \001(\003\022\017\n\007version\030\004 \001(\003\022#\n\005dtype\030\005 \003(\0162\024.t" + - "ensorflow.DataType\022\024\n\014num_elements\030\006 \001(\003" + - "\022\022\n\tfinalized\030\350\007 \001(\010\"_\n\016TensorMetadata\0222" + - "\n\014tensor_shape\030\002 \001(\0132\034.tensorflow.Tensor" + - "ShapeProto\022\031\n\021tensor_size_bytes\030\003 \001(\003\"_\n" + - "\026SnapshotTensorMetadata\022E\n\017tensor_metada" + - "ta\030\001 \003(\0132,.tensorflow.data.experimental." 
+ - "TensorMetadataB:\n&org.tensorflow.proto.d" + - "ata.experimentalB\016SnapshotProtosP\001b\006prot" + - "o3" + "\n\'tensorflow/core/protobuf/snapshot.prot" + + "o\022\034tensorflow.data.experimental\032&tensorf" + + "low/core/framework/tensor.proto\032,tensorf" + + "low/core/framework/tensor_shape.proto\032%t" + + "ensorflow/core/framework/types.proto\"9\n\016" + + "SnapshotRecord\022\'\n\006tensor\030\001 \003(\0132\027.tensorf" + + "low.TensorProto\"\270\001\n\026SnapshotMetadataReco" + + "rd\022\022\n\ngraph_hash\030\001 \001(\t\022\016\n\006run_id\030\002 \001(\t\022\032" + + "\n\022creation_timestamp\030\003 \001(\003\022\017\n\007version\030\004 " + + "\001(\003\022#\n\005dtype\030\005 \003(\0162\024.tensorflow.DataType" + + "\022\024\n\014num_elements\030\006 \001(\003\022\022\n\tfinalized\030\350\007 \001" + + "(\010\"_\n\016TensorMetadata\0222\n\014tensor_shape\030\002 \001" + + "(\0132\034.tensorflow.TensorShapeProto\022\031\n\021tens" + + "or_size_bytes\030\003 \001(\003\"_\n\026SnapshotTensorMet" + + "adata\022E\n\017tensor_metadata\030\001 \003(\0132,.tensorf" + + "low.data.experimental.TensorMetadataB:\n&" + + "org.tensorflow.proto.data.experimentalB\016" + + "SnapshotProtosP\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotRecord.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotRecord.java index e6388b928fc..e81a8b745f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotRecord.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotRecord.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/protobuf/data/experimental/snapshot.proto +// source: tensorflow/core/protobuf/snapshot.proto package org.tensorflow.proto.data.experimental; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotRecordOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotRecordOrBuilder.java index 485e1656377..c87321a9074 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotRecordOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotRecordOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/protobuf/data/experimental/snapshot.proto +// source: tensorflow/core/protobuf/snapshot.proto package org.tensorflow.proto.data.experimental; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotTensorMetadata.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotTensorMetadata.java index 59b0fba153d..e2ac61e4dcf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotTensorMetadata.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotTensorMetadata.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: tensorflow/core/protobuf/data/experimental/snapshot.proto +// source: tensorflow/core/protobuf/snapshot.proto package org.tensorflow.proto.data.experimental; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotTensorMetadataOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotTensorMetadataOrBuilder.java index 73feb8671a6..d18a1149928 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotTensorMetadataOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/SnapshotTensorMetadataOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/protobuf/data/experimental/snapshot.proto +// source: tensorflow/core/protobuf/snapshot.proto package org.tensorflow.proto.data.experimental; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/TensorMetadata.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/TensorMetadata.java index 87dfbe8bf78..b8b44dff6ed 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/TensorMetadata.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/TensorMetadata.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/protobuf/data/experimental/snapshot.proto +// source: tensorflow/core/protobuf/snapshot.proto package org.tensorflow.proto.data.experimental; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/TensorMetadataOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/TensorMetadataOrBuilder.java index 0649aabc2ca..3aadfb8de72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/TensorMetadataOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/TensorMetadataOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/protobuf/data/experimental/snapshot.proto +// source: tensorflow/core/protobuf/snapshot.proto package org.tensorflow.proto.data.experimental; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/AutotuneAlgorithm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/AutotuneAlgorithm.java new file mode 100644 index 00000000000..773c76aff89 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/AutotuneAlgorithm.java @@ -0,0 +1,107 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/model.proto + +package org.tensorflow.proto.data.model; + +/** + *
      + * Algorithm used for model autotuning optimization.
      + * 
      + * + * Protobuf enum {@code tensorflow.data.model.AutotuneAlgorithm} + */ +public enum AutotuneAlgorithm + implements com.google.protobuf.ProtocolMessageEnum { + /** + * HILL_CLIMB = 0; + */ + HILL_CLIMB(0), + /** + * GRADIENT_DESCENT = 1; + */ + GRADIENT_DESCENT(1), + UNRECOGNIZED(-1), + ; + + /** + * HILL_CLIMB = 0; + */ + public static final int HILL_CLIMB_VALUE = 0; + /** + * GRADIENT_DESCENT = 1; + */ + public static final int GRADIENT_DESCENT_VALUE = 1; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static AutotuneAlgorithm valueOf(int value) { + return forNumber(value); + } + + public static AutotuneAlgorithm forNumber(int value) { + switch (value) { + case 0: return HILL_CLIMB; + case 1: return GRADIENT_DESCENT; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + AutotuneAlgorithm> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public AutotuneAlgorithm findValueByNumber(int number) { + return AutotuneAlgorithm.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.tensorflow.proto.data.model.ModelProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final AutotuneAlgorithm[] VALUES = values(); + + public static AutotuneAlgorithm valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private AutotuneAlgorithm(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:tensorflow.data.model.AutotuneAlgorithm) +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java new file mode 100644 index 00000000000..21b0beecfb2 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java @@ -0,0 +1,5592 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/model.proto + +package org.tensorflow.proto.data.model; + +/** + *
      + * Protocol buffer representing the data used by the autotuning modeling
      + * framework.
      + * 
      + * + * Protobuf type {@code tensorflow.data.model.ModelProto} + */ +public final class ModelProto extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.model.ModelProto) + ModelProtoOrBuilder { +private static final long serialVersionUID = 0L; + // Use ModelProto.newBuilder() to construct. + private ModelProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ModelProto() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new ModelProto(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ModelProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + org.tensorflow.proto.data.model.ModelProto.Node.Builder subBuilder = null; + if (output_ != null) { + subBuilder = output_.toBuilder(); + } + output_ = input.readMessage(org.tensorflow.proto.data.model.ModelProto.Node.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(output_); + output_ = subBuilder.buildPartial(); + } + + break; + } + case 16: { + + idCounter_ = input.readInt64(); + break; + } + case 24: { + + collectResourceUsage_ = input.readBool(); + break; + } + case 34: { + org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder subBuilder = null; + if (optimizationParams_ != null) { + subBuilder = optimizationParams_.toBuilder(); + } + optimizationParams_ = input.readMessage(org.tensorflow.proto.data.model.ModelProto.OptimizationParams.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(optimizationParams_); + optimizationParams_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.model.ModelProto.class, org.tensorflow.proto.data.model.ModelProto.Builder.class); + } + + public interface NodeOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.model.ModelProto.Node) + com.google.protobuf.MessageOrBuilder { + 
+    /**
+     * <pre>
+     * Unique node ID.
+     * </pre>
+     *
+     * <code>int64 id = 1;</code>
+     */
+    long getId();
+
+    /**
+     * <pre>
      +     * Human-readable name of the node.
      +     * 
      + * + * string name = 2; + */ + java.lang.String getName(); + /** + *
      +     * Human-readable name of the node.
      +     * 
      + * + * string name = 2; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + *
      +     * An indication whether autotuning is enabled for this node.
      +     * 
      + * + * bool autotune = 3; + */ + boolean getAutotune(); + + /** + *
      +     * The number of bytes stored in this node's buffer.
      +     * 
      + * + * int64 buffered_bytes = 4; + */ + long getBufferedBytes(); + + /** + *
      +     * The number of elements stored in this node's buffer.
      +     * 
      + * + * int64 buffered_elements = 5; + */ + long getBufferedElements(); + + /** + *
      +     * The number of bytes consumed by the node.
      +     * 
      + * + * int64 bytes_consumed = 6; + */ + long getBytesConsumed(); + + /** + *
      +     * The number of bytes produced by the node.
      +     * 
      + * + * int64 bytes_produced = 7; + */ + long getBytesProduced(); + + /** + *
      +     * The number of elements produced by the node.
      +     * 
      + * + * int64 num_elements = 8; + */ + long getNumElements(); + + /** + *
      +     * The aggregate processing time spent in this node.
      +     * 
      + * + * int64 processing_time = 9; + */ + long getProcessingTime(); + + /** + *
      +     * An indication whether this node records metrics about produced and
      +     * consumed elements.
      +     * 
      + * + * bool record_metrics = 10; + */ + boolean getRecordMetrics(); + + /** + *
      +     * Parameters of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + java.util.List + getParametersList(); + /** + *
      +     * Parameters of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + org.tensorflow.proto.data.model.ModelProto.Node.Parameter getParameters(int index); + /** + *
      +     * Parameters of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + int getParametersCount(); + /** + *
      +     * Parameters of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + java.util.List + getParametersOrBuilderList(); + /** + *
      +     * Parameters of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + org.tensorflow.proto.data.model.ModelProto.Node.ParameterOrBuilder getParametersOrBuilder( + int index); + + /** + *
      +     * Statistic of inputs processing time history.
      +     * 
      + * + * double input_processing_time_sum = 12; + */ + double getInputProcessingTimeSum(); + + /** + * int64 input_processing_time_count = 13; + */ + long getInputProcessingTimeCount(); + + /** + *
      +     * Inputs of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + java.util.List + getInputsList(); + /** + *
      +     * Inputs of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + org.tensorflow.proto.data.model.ModelProto.Node getInputs(int index); + /** + *
      +     * Inputs of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + int getInputsCount(); + /** + *
      +     * Inputs of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + java.util.List + getInputsOrBuilderList(); + /** + *
      +     * Inputs of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getInputsOrBuilder( + int index); + + /** + *
      +     * Class of this node.
      +     * 
      + * + * .tensorflow.data.model.NodeClass node_class = 15; + */ + int getNodeClassValue(); + /** + *
      +     * Class of this node.
      +     * 
      + * + * .tensorflow.data.model.NodeClass node_class = 15; + */ + org.tensorflow.proto.data.model.NodeClass getNodeClass(); + + /** + *
      +     * Ratio of input to output elements. This is only used by KNOWN_RATIO and
      +     * ASYNC_KNOWN_RATIO nodes.
      +     * 
      + * + * double ratio = 16; + */ + double getRatio(); + + /** + *
      +     * Ratio identifies how many parallelism calls are introduced by one
      +     * buffered element. This is only used by ASYNC_KNOWN_RATIO nodes.
      +     * 
      + * + * double memory_ratio = 17; + */ + double getMemoryRatio(); + } + /** + *
      +   * General representation of a node in the model.
      +   * 
      + * + * Protobuf type {@code tensorflow.data.model.ModelProto.Node} + */ + public static final class Node extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.model.ModelProto.Node) + NodeOrBuilder { + private static final long serialVersionUID = 0L; + // Use Node.newBuilder() to construct. + private Node(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Node() { + name_ = ""; + parameters_ = java.util.Collections.emptyList(); + inputs_ = java.util.Collections.emptyList(); + nodeClass_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new Node(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Node( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + + id_ = input.readInt64(); + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 24: { + + autotune_ = input.readBool(); + break; + } + case 32: { + + bufferedBytes_ = input.readInt64(); + break; + } + case 40: { + + bufferedElements_ = input.readInt64(); + break; + } + case 48: { + + bytesConsumed_ = input.readInt64(); + break; + } + case 56: { + + bytesProduced_ = input.readInt64(); + break; + } + case 64: { + + numElements_ = input.readInt64(); + break; + } + case 72: { + + processingTime_ = input.readInt64(); + break; + } + case 80: { + + recordMetrics_ = input.readBool(); + break; + } + case 90: { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + parameters_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + parameters_.add( + input.readMessage(org.tensorflow.proto.data.model.ModelProto.Node.Parameter.parser(), extensionRegistry)); + break; + } + case 97: { + + inputProcessingTimeSum_ = input.readDouble(); + break; + } + case 104: { + + inputProcessingTimeCount_ = input.readInt64(); + break; + } + case 114: { + if (!((mutable_bitField0_ & 0x00000002) != 0)) { + inputs_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + inputs_.add( + input.readMessage(org.tensorflow.proto.data.model.ModelProto.Node.parser(), extensionRegistry)); + break; + } + case 120: { + int rawValue = input.readEnum(); + + nodeClass_ = rawValue; + break; + } + case 129: { + + ratio_ = input.readDouble(); + break; + } + case 137: { + + memoryRatio_ = input.readDouble(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + parameters_ = 
java.util.Collections.unmodifiableList(parameters_); + } + if (((mutable_bitField0_ & 0x00000002) != 0)) { + inputs_ = java.util.Collections.unmodifiableList(inputs_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_Node_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_Node_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.model.ModelProto.Node.class, org.tensorflow.proto.data.model.ModelProto.Node.Builder.class); + } + + public interface ParameterOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.model.ModelProto.Node.Parameter) + com.google.protobuf.MessageOrBuilder { + + /** + *
      +       * Human-readable name of the parameter.
      +       * 
      + * + * string name = 1; + */ + java.lang.String getName(); + /** + *
      +       * Human-readable name of the parameter.
      +       * 
      + * + * string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + *
      +       * Identifies the model value of the parameter. This can be different from
      +       * the actual value (e.g. during optimization search).
      +       * 
      + * + * double value = 2; + */ + double getValue(); + + /** + *
      +       * The actual value of the parameter.
      +       * 
      + * + * double state_value = 3; + */ + double getStateValue(); + + /** + *
      +       * Minimum value of the parameter.
      +       * 
      + * + * double min = 4; + */ + double getMin(); + + /** + *
      +       * Maximum value of the parameter.
      +       * 
      + * + * double max = 5; + */ + double getMax(); + + /** + *
      +       * Identifies whether the parameter should participate in autotuning.
      +       * 
      + * + * bool tunable = 6; + */ + boolean getTunable(); + } + /** + *
      +     * Represents a node parameter.
      +     * 
      + * + * Protobuf type {@code tensorflow.data.model.ModelProto.Node.Parameter} + */ + public static final class Parameter extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.model.ModelProto.Node.Parameter) + ParameterOrBuilder { + private static final long serialVersionUID = 0L; + // Use Parameter.newBuilder() to construct. + private Parameter(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Parameter() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new Parameter(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Parameter( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 17: { + + value_ = input.readDouble(); + break; + } + case 25: { + + stateValue_ = input.readDouble(); + break; + } + case 33: { + + min_ = input.readDouble(); + break; + } + case 41: { + + max_ = input.readDouble(); + break; + } + case 48: { + + tunable_ = input.readBool(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_Node_Parameter_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_Node_Parameter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.model.ModelProto.Node.Parameter.class, org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + *
      +       * Human-readable name of the parameter.
      +       * 
      + * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + *
      +       * Human-readable name of the parameter.
      +       * 
      + * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VALUE_FIELD_NUMBER = 2; + private double value_; + /** + *
      +       * Identifies the model value of the parameter. This can be different from
      +       * the actual value (e.g. during optimization search).
      +       * 
      + * + * double value = 2; + */ + public double getValue() { + return value_; + } + + public static final int STATE_VALUE_FIELD_NUMBER = 3; + private double stateValue_; + /** + *
      +       * The actual value of the parameter.
      +       * 
      + * + * double state_value = 3; + */ + public double getStateValue() { + return stateValue_; + } + + public static final int MIN_FIELD_NUMBER = 4; + private double min_; + /** + *
      +       * Minimum value of the parameter.
      +       * 
      + * + * double min = 4; + */ + public double getMin() { + return min_; + } + + public static final int MAX_FIELD_NUMBER = 5; + private double max_; + /** + *
      +       * Maximum value of the parameter.
      +       * 
      + * + * double max = 5; + */ + public double getMax() { + return max_; + } + + public static final int TUNABLE_FIELD_NUMBER = 6; + private boolean tunable_; + /** + *
      +       * Identifies whether the parameter should participate in autotuning.
      +       * 
      + * + * bool tunable = 6; + */ + public boolean getTunable() { + return tunable_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (value_ != 0D) { + output.writeDouble(2, value_); + } + if (stateValue_ != 0D) { + output.writeDouble(3, stateValue_); + } + if (min_ != 0D) { + output.writeDouble(4, min_); + } + if (max_ != 0D) { + output.writeDouble(5, max_); + } + if (tunable_ != false) { + output.writeBool(6, tunable_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (value_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(2, value_); + } + if (stateValue_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(3, stateValue_); + } + if (min_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(4, min_); + } + if (max_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(5, max_); + } + if (tunable_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(6, tunable_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.model.ModelProto.Node.Parameter)) { + return super.equals(obj); + } + org.tensorflow.proto.data.model.ModelProto.Node.Parameter other = (org.tensorflow.proto.data.model.ModelProto.Node.Parameter) obj; + + if (!getName() + .equals(other.getName())) return false; + if (java.lang.Double.doubleToLongBits(getValue()) + != java.lang.Double.doubleToLongBits( + other.getValue())) return false; + if (java.lang.Double.doubleToLongBits(getStateValue()) + != java.lang.Double.doubleToLongBits( + other.getStateValue())) return false; + if (java.lang.Double.doubleToLongBits(getMin()) + != java.lang.Double.doubleToLongBits( + other.getMin())) return false; + if (java.lang.Double.doubleToLongBits(getMax()) + != java.lang.Double.doubleToLongBits( + other.getMax())) return false; + if (getTunable() + != other.getTunable()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getValue())); + hash = (37 * hash) + STATE_VALUE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getStateValue())); + hash = (37 * hash) + MIN_FIELD_NUMBER; + hash = (53 * hash) 
+ com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getMin())); + hash = (37 * hash) + MAX_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getMax())); + hash = (37 * hash) + TUNABLE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getTunable()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.model.ModelProto.Node.Parameter prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +       * Represents a node parameter.
      +       * 
      + * + * Protobuf type {@code tensorflow.data.model.ModelProto.Node.Parameter} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.model.ModelProto.Node.Parameter) + org.tensorflow.proto.data.model.ModelProto.Node.ParameterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_Node_Parameter_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_Node_Parameter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.model.ModelProto.Node.Parameter.class, org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder.class); + } + + // Construct using org.tensorflow.proto.data.model.ModelProto.Node.Parameter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + value_ = 0D; + + stateValue_ = 0D; + + min_ = 0D; + + max_ = 0D; + + tunable_ = false; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_Node_Parameter_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.Node.Parameter getDefaultInstanceForType() { + return org.tensorflow.proto.data.model.ModelProto.Node.Parameter.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.Node.Parameter build() { + org.tensorflow.proto.data.model.ModelProto.Node.Parameter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.Node.Parameter buildPartial() { + org.tensorflow.proto.data.model.ModelProto.Node.Parameter result = new org.tensorflow.proto.data.model.ModelProto.Node.Parameter(this); + result.name_ = name_; + result.value_ = value_; + result.stateValue_ = stateValue_; + result.min_ = min_; + result.max_ = max_; + result.tunable_ = tunable_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.model.ModelProto.Node.Parameter) { + return mergeFrom((org.tensorflow.proto.data.model.ModelProto.Node.Parameter)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.model.ModelProto.Node.Parameter other) { + if (other == org.tensorflow.proto.data.model.ModelProto.Node.Parameter.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.getValue() != 0D) { + setValue(other.getValue()); + } + if (other.getStateValue() != 0D) { + setStateValue(other.getStateValue()); + } + if (other.getMin() != 0D) { + setMin(other.getMin()); + } + if (other.getMax() != 0D) { + setMax(other.getMax()); + } + if (other.getTunable() != false) { + setTunable(other.getTunable()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.model.ModelProto.Node.Parameter parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.model.ModelProto.Node.Parameter) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + *
      +         * Human-readable name of the parameter.
      +         * 
      + * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
      +         * Human-readable name of the parameter.
      +         * 
      + * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
      +         * Human-readable name of the parameter.
      +         * 
      + * + * string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + *
      +         * Human-readable name of the parameter.
      +         * 
      + * + * string name = 1; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + *
      +         * Human-readable name of the parameter.
      +         * 
      + * + * string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private double value_ ; + /** + *
      +         * Identifies the model value of the parameter. This can be different from
      +         * the actual value (e.g. during optimization search).
      +         * 
      + * + * double value = 2; + */ + public double getValue() { + return value_; + } + /** + *
      +         * Identifies the model value of the parameter. This can be different from
      +         * the actual value (e.g. during optimization search).
      +         * 
      + * + * double value = 2; + */ + public Builder setValue(double value) { + + value_ = value; + onChanged(); + return this; + } + /** + *
      +         * Identifies the model value of the parameter. This can be different from
      +         * the actual value (e.g. during optimization search).
      +         * 
      + * + * double value = 2; + */ + public Builder clearValue() { + + value_ = 0D; + onChanged(); + return this; + } + + private double stateValue_ ; + /** + *
      +         * The actual value of the parameter.
      +         * 
      + * + * double state_value = 3; + */ + public double getStateValue() { + return stateValue_; + } + /** + *
      +         * The actual value of the parameter.
      +         * 
      + * + * double state_value = 3; + */ + public Builder setStateValue(double value) { + + stateValue_ = value; + onChanged(); + return this; + } + /** + *
      +         * The actual value of the parameter.
      +         * 
      + * + * double state_value = 3; + */ + public Builder clearStateValue() { + + stateValue_ = 0D; + onChanged(); + return this; + } + + private double min_ ; + /** + *
      +         * Minimum value of the parameter.
      +         * 
      + * + * double min = 4; + */ + public double getMin() { + return min_; + } + /** + *
      +         * Minimum value of the parameter.
      +         * 
      + * + * double min = 4; + */ + public Builder setMin(double value) { + + min_ = value; + onChanged(); + return this; + } + /** + *
      +         * Minimum value of the parameter.
      +         * 
      + * + * double min = 4; + */ + public Builder clearMin() { + + min_ = 0D; + onChanged(); + return this; + } + + private double max_ ; + /** + *
      +         * Maximum value of the parameter.
      +         * 
      + * + * double max = 5; + */ + public double getMax() { + return max_; + } + /** + *
      +         * Maximum value of the parameter.
      +         * 
      + * + * double max = 5; + */ + public Builder setMax(double value) { + + max_ = value; + onChanged(); + return this; + } + /** + *
      +         * Maximum value of the parameter.
      +         * 
      + * + * double max = 5; + */ + public Builder clearMax() { + + max_ = 0D; + onChanged(); + return this; + } + + private boolean tunable_ ; + /** + *
      +         * Identifies whether the parameter should participate in autotuning.
      +         * 
      + * + * bool tunable = 6; + */ + public boolean getTunable() { + return tunable_; + } + /** + *
      +         * Identifies whether the parameter should participate in autotuning.
      +         * 
      + * + * bool tunable = 6; + */ + public Builder setTunable(boolean value) { + + tunable_ = value; + onChanged(); + return this; + } + /** + *
      +         * Identifies whether the parameter should participate in autotuning.
      +         * 
      + * + * bool tunable = 6; + */ + public Builder clearTunable() { + + tunable_ = false; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.model.ModelProto.Node.Parameter) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.model.ModelProto.Node.Parameter) + private static final org.tensorflow.proto.data.model.ModelProto.Node.Parameter DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.model.ModelProto.Node.Parameter(); + } + + public static org.tensorflow.proto.data.model.ModelProto.Node.Parameter getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Parameter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Parameter(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.Node.Parameter getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public static final int ID_FIELD_NUMBER = 1; + private long id_; + /** + *
      +     * Unique node ID.
      +     * 
      + * + * int64 id = 1; + */ + public long getId() { + return id_; + } + + public static final int NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object name_; + /** + *
      +     * Human-readable name of the node.
      +     * 
      + * + * string name = 2; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + *
      +     * Human-readable name of the node.
      +     * 
      + * + * string name = 2; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int AUTOTUNE_FIELD_NUMBER = 3; + private boolean autotune_; + /** + *
      +     * An indication whether autotuning is enabled for this node.
      +     * 
      + * + * bool autotune = 3; + */ + public boolean getAutotune() { + return autotune_; + } + + public static final int BUFFERED_BYTES_FIELD_NUMBER = 4; + private long bufferedBytes_; + /** + *
      +     * The number of bytes stored in this node's buffer.
      +     * 
      + * + * int64 buffered_bytes = 4; + */ + public long getBufferedBytes() { + return bufferedBytes_; + } + + public static final int BUFFERED_ELEMENTS_FIELD_NUMBER = 5; + private long bufferedElements_; + /** + *
      +     * The number of elements stored in this node's buffer.
      +     * 
      + * + * int64 buffered_elements = 5; + */ + public long getBufferedElements() { + return bufferedElements_; + } + + public static final int BYTES_CONSUMED_FIELD_NUMBER = 6; + private long bytesConsumed_; + /** + *
      +     * The number of bytes consumed by the node.
      +     * 
      + * + * int64 bytes_consumed = 6; + */ + public long getBytesConsumed() { + return bytesConsumed_; + } + + public static final int BYTES_PRODUCED_FIELD_NUMBER = 7; + private long bytesProduced_; + /** + *
      +     * The number of bytes produced by the node.
      +     * 
      + * + * int64 bytes_produced = 7; + */ + public long getBytesProduced() { + return bytesProduced_; + } + + public static final int NUM_ELEMENTS_FIELD_NUMBER = 8; + private long numElements_; + /** + *
      +     * The number of elements produced by the node.
      +     * 
      + * + * int64 num_elements = 8; + */ + public long getNumElements() { + return numElements_; + } + + public static final int PROCESSING_TIME_FIELD_NUMBER = 9; + private long processingTime_; + /** + *
      +     * The aggregate processing time spent in this node.
      +     * 
      + * + * int64 processing_time = 9; + */ + public long getProcessingTime() { + return processingTime_; + } + + public static final int RECORD_METRICS_FIELD_NUMBER = 10; + private boolean recordMetrics_; + /** + *
      +     * An indication whether this node records metrics about produced and
      +     * consumed elements.
      +     * 
      + * + * bool record_metrics = 10; + */ + public boolean getRecordMetrics() { + return recordMetrics_; + } + + public static final int PARAMETERS_FIELD_NUMBER = 11; + private java.util.List parameters_; + /** + *
      +     * Parameters of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public java.util.List getParametersList() { + return parameters_; + } + /** + *
      +     * Parameters of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public java.util.List + getParametersOrBuilderList() { + return parameters_; + } + /** + *
      +     * Parameters of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public int getParametersCount() { + return parameters_.size(); + } + /** + *
      +     * Parameters of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.Parameter getParameters(int index) { + return parameters_.get(index); + } + /** + *
      +     * Parameters of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.ParameterOrBuilder getParametersOrBuilder( + int index) { + return parameters_.get(index); + } + + public static final int INPUT_PROCESSING_TIME_SUM_FIELD_NUMBER = 12; + private double inputProcessingTimeSum_; + /** + *
      +     * Statistic of inputs processing time history.
      +     * 
      + * + * double input_processing_time_sum = 12; + */ + public double getInputProcessingTimeSum() { + return inputProcessingTimeSum_; + } + + public static final int INPUT_PROCESSING_TIME_COUNT_FIELD_NUMBER = 13; + private long inputProcessingTimeCount_; + /** + * int64 input_processing_time_count = 13; + */ + public long getInputProcessingTimeCount() { + return inputProcessingTimeCount_; + } + + public static final int INPUTS_FIELD_NUMBER = 14; + private java.util.List inputs_; + /** + *
      +     * Inputs of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public java.util.List getInputsList() { + return inputs_; + } + /** + *
      +     * Inputs of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public java.util.List + getInputsOrBuilderList() { + return inputs_; + } + /** + *
      +     * Inputs of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public int getInputsCount() { + return inputs_.size(); + } + /** + *
      +     * Inputs of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public org.tensorflow.proto.data.model.ModelProto.Node getInputs(int index) { + return inputs_.get(index); + } + /** + *
      +     * Inputs of this node.
      +     * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getInputsOrBuilder( + int index) { + return inputs_.get(index); + } + + public static final int NODE_CLASS_FIELD_NUMBER = 15; + private int nodeClass_; + /** + *
      +     * Class of this node.
      +     * 
      + * + * .tensorflow.data.model.NodeClass node_class = 15; + */ + public int getNodeClassValue() { + return nodeClass_; + } + /** + *
      +     * Class of this node.
      +     * 
      + * + * .tensorflow.data.model.NodeClass node_class = 15; + */ + public org.tensorflow.proto.data.model.NodeClass getNodeClass() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.model.NodeClass result = org.tensorflow.proto.data.model.NodeClass.valueOf(nodeClass_); + return result == null ? org.tensorflow.proto.data.model.NodeClass.UNRECOGNIZED : result; + } + + public static final int RATIO_FIELD_NUMBER = 16; + private double ratio_; + /** + *
      +     * Ratio of input to output elements. This is only used by KNOWN_RATIO and
      +     * ASYNC_KNOWN_RATIO nodes.
      +     * 
      + * + * double ratio = 16; + */ + public double getRatio() { + return ratio_; + } + + public static final int MEMORY_RATIO_FIELD_NUMBER = 17; + private double memoryRatio_; + /** + *
      +     * Ratio identifies how many parallelism calls are introduced by one
      +     * buffered element. This is only used by ASYNC_KNOWN_RATIO nodes.
      +     * 
      + * + * double memory_ratio = 17; + */ + public double getMemoryRatio() { + return memoryRatio_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (id_ != 0L) { + output.writeInt64(1, id_); + } + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_); + } + if (autotune_ != false) { + output.writeBool(3, autotune_); + } + if (bufferedBytes_ != 0L) { + output.writeInt64(4, bufferedBytes_); + } + if (bufferedElements_ != 0L) { + output.writeInt64(5, bufferedElements_); + } + if (bytesConsumed_ != 0L) { + output.writeInt64(6, bytesConsumed_); + } + if (bytesProduced_ != 0L) { + output.writeInt64(7, bytesProduced_); + } + if (numElements_ != 0L) { + output.writeInt64(8, numElements_); + } + if (processingTime_ != 0L) { + output.writeInt64(9, processingTime_); + } + if (recordMetrics_ != false) { + output.writeBool(10, recordMetrics_); + } + for (int i = 0; i < parameters_.size(); i++) { + output.writeMessage(11, parameters_.get(i)); + } + if (inputProcessingTimeSum_ != 0D) { + output.writeDouble(12, inputProcessingTimeSum_); + } + if (inputProcessingTimeCount_ != 0L) { + output.writeInt64(13, inputProcessingTimeCount_); + } + for (int i = 0; i < inputs_.size(); i++) { + output.writeMessage(14, inputs_.get(i)); + } + if (nodeClass_ != org.tensorflow.proto.data.model.NodeClass.UNKNOWN.getNumber()) { + output.writeEnum(15, nodeClass_); + } + if (ratio_ != 0D) { + output.writeDouble(16, ratio_); + } + if (memoryRatio_ != 0D) { + output.writeDouble(17, memoryRatio_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (id_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, id_); + } + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_); + } + if (autotune_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, autotune_); + } + if (bufferedBytes_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, bufferedBytes_); + } + if (bufferedElements_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(5, bufferedElements_); + } + if (bytesConsumed_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(6, bytesConsumed_); + } + if (bytesProduced_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(7, bytesProduced_); + } + if (numElements_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(8, numElements_); + } + if (processingTime_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(9, processingTime_); + } + if (recordMetrics_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(10, recordMetrics_); + } + for (int i = 0; i < parameters_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(11, parameters_.get(i)); + } + if (inputProcessingTimeSum_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(12, inputProcessingTimeSum_); + 
} + if (inputProcessingTimeCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(13, inputProcessingTimeCount_); + } + for (int i = 0; i < inputs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(14, inputs_.get(i)); + } + if (nodeClass_ != org.tensorflow.proto.data.model.NodeClass.UNKNOWN.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(15, nodeClass_); + } + if (ratio_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(16, ratio_); + } + if (memoryRatio_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(17, memoryRatio_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.model.ModelProto.Node)) { + return super.equals(obj); + } + org.tensorflow.proto.data.model.ModelProto.Node other = (org.tensorflow.proto.data.model.ModelProto.Node) obj; + + if (getId() + != other.getId()) return false; + if (!getName() + .equals(other.getName())) return false; + if (getAutotune() + != other.getAutotune()) return false; + if (getBufferedBytes() + != other.getBufferedBytes()) return false; + if (getBufferedElements() + != other.getBufferedElements()) return false; + if (getBytesConsumed() + != other.getBytesConsumed()) return false; + if (getBytesProduced() + != other.getBytesProduced()) return false; + if (getNumElements() + != other.getNumElements()) return false; + if (getProcessingTime() + != other.getProcessingTime()) return false; + if (getRecordMetrics() + != other.getRecordMetrics()) return false; + if (!getParametersList() + .equals(other.getParametersList())) return false; + if (java.lang.Double.doubleToLongBits(getInputProcessingTimeSum()) + != java.lang.Double.doubleToLongBits( + other.getInputProcessingTimeSum())) return false; + if (getInputProcessingTimeCount() + != other.getInputProcessingTimeCount()) return false; + if (!getInputsList() + .equals(other.getInputsList())) return false; + if (nodeClass_ != other.nodeClass_) return false; + if (java.lang.Double.doubleToLongBits(getRatio()) + != java.lang.Double.doubleToLongBits( + other.getRatio())) return false; + if (java.lang.Double.doubleToLongBits(getMemoryRatio()) + != java.lang.Double.doubleToLongBits( + other.getMemoryRatio())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getId()); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + AUTOTUNE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getAutotune()); + hash = (37 * hash) + BUFFERED_BYTES_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getBufferedBytes()); + hash = (37 * hash) + BUFFERED_ELEMENTS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getBufferedElements()); + hash = (37 * hash) + BYTES_CONSUMED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getBytesConsumed()); + hash = (37 * hash) + 
BYTES_PRODUCED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getBytesProduced()); + hash = (37 * hash) + NUM_ELEMENTS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getNumElements()); + hash = (37 * hash) + PROCESSING_TIME_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getProcessingTime()); + hash = (37 * hash) + RECORD_METRICS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getRecordMetrics()); + if (getParametersCount() > 0) { + hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + getParametersList().hashCode(); + } + hash = (37 * hash) + INPUT_PROCESSING_TIME_SUM_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getInputProcessingTimeSum())); + hash = (37 * hash) + INPUT_PROCESSING_TIME_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getInputProcessingTimeCount()); + if (getInputsCount() > 0) { + hash = (37 * hash) + INPUTS_FIELD_NUMBER; + hash = (53 * hash) + getInputsList().hashCode(); + } + hash = (37 * hash) + NODE_CLASS_FIELD_NUMBER; + hash = (53 * hash) + nodeClass_; + hash = (37 * hash) + RATIO_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getRatio())); + hash = (37 * hash) + MEMORY_RATIO_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getMemoryRatio())); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.model.ModelProto.Node parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto.Node parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.model.ModelProto.Node prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +     * General representation of a node in the model.
      +     * 
      + * + * Protobuf type {@code tensorflow.data.model.ModelProto.Node} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.model.ModelProto.Node) + org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_Node_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_Node_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.model.ModelProto.Node.class, org.tensorflow.proto.data.model.ModelProto.Node.Builder.class); + } + + // Construct using org.tensorflow.proto.data.model.ModelProto.Node.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getParametersFieldBuilder(); + getInputsFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + id_ = 0L; + + name_ = ""; + + autotune_ = false; + + bufferedBytes_ = 0L; + + bufferedElements_ = 0L; + + bytesConsumed_ = 0L; + + bytesProduced_ = 0L; + + numElements_ = 0L; + + processingTime_ = 0L; + + recordMetrics_ = false; + + if (parametersBuilder_ == null) { + parameters_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + parametersBuilder_.clear(); + } + inputProcessingTimeSum_ = 0D; + + inputProcessingTimeCount_ = 0L; + + if (inputsBuilder_ == null) { + inputs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + inputsBuilder_.clear(); + } + nodeClass_ = 0; + + ratio_ = 0D; + + memoryRatio_ = 0D; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_Node_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.Node getDefaultInstanceForType() { + return org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.Node build() { + org.tensorflow.proto.data.model.ModelProto.Node result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.Node buildPartial() { + org.tensorflow.proto.data.model.ModelProto.Node result = new org.tensorflow.proto.data.model.ModelProto.Node(this); + int from_bitField0_ = bitField0_; + result.id_ = id_; + result.name_ = name_; + result.autotune_ = autotune_; + result.bufferedBytes_ = bufferedBytes_; + result.bufferedElements_ = bufferedElements_; + result.bytesConsumed_ = bytesConsumed_; + result.bytesProduced_ = bytesProduced_; + result.numElements_ = numElements_; + result.processingTime_ = processingTime_; + result.recordMetrics_ = recordMetrics_; + if 
(parametersBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + parameters_ = java.util.Collections.unmodifiableList(parameters_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.parameters_ = parameters_; + } else { + result.parameters_ = parametersBuilder_.build(); + } + result.inputProcessingTimeSum_ = inputProcessingTimeSum_; + result.inputProcessingTimeCount_ = inputProcessingTimeCount_; + if (inputsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + inputs_ = java.util.Collections.unmodifiableList(inputs_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.inputs_ = inputs_; + } else { + result.inputs_ = inputsBuilder_.build(); + } + result.nodeClass_ = nodeClass_; + result.ratio_ = ratio_; + result.memoryRatio_ = memoryRatio_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.model.ModelProto.Node) { + return mergeFrom((org.tensorflow.proto.data.model.ModelProto.Node)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.model.ModelProto.Node other) { + if (other == org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance()) return this; + if (other.getId() != 0L) { + setId(other.getId()); + } + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.getAutotune() != false) { + setAutotune(other.getAutotune()); + } + if (other.getBufferedBytes() != 0L) { + setBufferedBytes(other.getBufferedBytes()); + } + if (other.getBufferedElements() != 0L) { + setBufferedElements(other.getBufferedElements()); + } + if (other.getBytesConsumed() != 0L) { + setBytesConsumed(other.getBytesConsumed()); + } + if (other.getBytesProduced() != 0L) { + setBytesProduced(other.getBytesProduced()); + } + if (other.getNumElements() != 0L) { + setNumElements(other.getNumElements()); + } + if (other.getProcessingTime() != 0L) { + setProcessingTime(other.getProcessingTime()); + } + if (other.getRecordMetrics() != false) { + setRecordMetrics(other.getRecordMetrics()); + } + if (parametersBuilder_ == null) { + if (!other.parameters_.isEmpty()) { + if (parameters_.isEmpty()) { + parameters_ = other.parameters_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureParametersIsMutable(); + parameters_.addAll(other.parameters_); + } + onChanged(); + } + } else { + if (!other.parameters_.isEmpty()) { + if (parametersBuilder_.isEmpty()) { + parametersBuilder_.dispose(); + parametersBuilder_ = null; + parameters_ 
= other.parameters_; + bitField0_ = (bitField0_ & ~0x00000001); + parametersBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getParametersFieldBuilder() : null; + } else { + parametersBuilder_.addAllMessages(other.parameters_); + } + } + } + if (other.getInputProcessingTimeSum() != 0D) { + setInputProcessingTimeSum(other.getInputProcessingTimeSum()); + } + if (other.getInputProcessingTimeCount() != 0L) { + setInputProcessingTimeCount(other.getInputProcessingTimeCount()); + } + if (inputsBuilder_ == null) { + if (!other.inputs_.isEmpty()) { + if (inputs_.isEmpty()) { + inputs_ = other.inputs_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureInputsIsMutable(); + inputs_.addAll(other.inputs_); + } + onChanged(); + } + } else { + if (!other.inputs_.isEmpty()) { + if (inputsBuilder_.isEmpty()) { + inputsBuilder_.dispose(); + inputsBuilder_ = null; + inputs_ = other.inputs_; + bitField0_ = (bitField0_ & ~0x00000002); + inputsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getInputsFieldBuilder() : null; + } else { + inputsBuilder_.addAllMessages(other.inputs_); + } + } + } + if (other.nodeClass_ != 0) { + setNodeClassValue(other.getNodeClassValue()); + } + if (other.getRatio() != 0D) { + setRatio(other.getRatio()); + } + if (other.getMemoryRatio() != 0D) { + setMemoryRatio(other.getMemoryRatio()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.model.ModelProto.Node parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.model.ModelProto.Node) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private long id_ ; + /** + *
      +       * Unique node ID.
      +       * 
      + * + * int64 id = 1; + */ + public long getId() { + return id_; + } + /** + *
      +       * Unique node ID.
      +       * 
      + * + * int64 id = 1; + */ + public Builder setId(long value) { + + id_ = value; + onChanged(); + return this; + } + /** + *
      +       * Unique node ID.
      +       * 
      + * + * int64 id = 1; + */ + public Builder clearId() { + + id_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object name_ = ""; + /** + *
      +       * Human-readable name of the node.
      +       * 
      + * + * string name = 2; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
      +       * Human-readable name of the node.
      +       * 
      + * + * string name = 2; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
      +       * Human-readable name of the node.
      +       * 
      + * + * string name = 2; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + *
      +       * Human-readable name of the node.
      +       * 
      + * + * string name = 2; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + *
      +       * Human-readable name of the node.
      +       * 
      + * + * string name = 2; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private boolean autotune_ ; + /** + *
+       * An indication of whether autotuning is enabled for this node.
      +       * 
      + * + * bool autotune = 3; + */ + public boolean getAutotune() { + return autotune_; + } + /** + *
+       * An indication of whether autotuning is enabled for this node.
      +       * 
      + * + * bool autotune = 3; + */ + public Builder setAutotune(boolean value) { + + autotune_ = value; + onChanged(); + return this; + } + /** + *
+       * An indication of whether autotuning is enabled for this node.
      +       * 
      + * + * bool autotune = 3; + */ + public Builder clearAutotune() { + + autotune_ = false; + onChanged(); + return this; + } + + private long bufferedBytes_ ; + /** + *
      +       * The number of bytes stored in this node's buffer.
      +       * 
      + * + * int64 buffered_bytes = 4; + */ + public long getBufferedBytes() { + return bufferedBytes_; + } + /** + *
      +       * The number of bytes stored in this node's buffer.
      +       * 
      + * + * int64 buffered_bytes = 4; + */ + public Builder setBufferedBytes(long value) { + + bufferedBytes_ = value; + onChanged(); + return this; + } + /** + *
      +       * The number of bytes stored in this node's buffer.
      +       * 
      + * + * int64 buffered_bytes = 4; + */ + public Builder clearBufferedBytes() { + + bufferedBytes_ = 0L; + onChanged(); + return this; + } + + private long bufferedElements_ ; + /** + *
      +       * The number of elements stored in this node's buffer.
      +       * 
      + * + * int64 buffered_elements = 5; + */ + public long getBufferedElements() { + return bufferedElements_; + } + /** + *
      +       * The number of elements stored in this node's buffer.
      +       * 
      + * + * int64 buffered_elements = 5; + */ + public Builder setBufferedElements(long value) { + + bufferedElements_ = value; + onChanged(); + return this; + } + /** + *
      +       * The number of elements stored in this node's buffer.
      +       * 
      + * + * int64 buffered_elements = 5; + */ + public Builder clearBufferedElements() { + + bufferedElements_ = 0L; + onChanged(); + return this; + } + + private long bytesConsumed_ ; + /** + *
      +       * The number of bytes consumed by the node.
      +       * 
      + * + * int64 bytes_consumed = 6; + */ + public long getBytesConsumed() { + return bytesConsumed_; + } + /** + *
      +       * The number of bytes consumed by the node.
      +       * 
      + * + * int64 bytes_consumed = 6; + */ + public Builder setBytesConsumed(long value) { + + bytesConsumed_ = value; + onChanged(); + return this; + } + /** + *
      +       * The number of bytes consumed by the node.
      +       * 
      + * + * int64 bytes_consumed = 6; + */ + public Builder clearBytesConsumed() { + + bytesConsumed_ = 0L; + onChanged(); + return this; + } + + private long bytesProduced_ ; + /** + *
      +       * The number of bytes produced by the node.
      +       * 
      + * + * int64 bytes_produced = 7; + */ + public long getBytesProduced() { + return bytesProduced_; + } + /** + *
      +       * The number of bytes produced by the node.
      +       * 
      + * + * int64 bytes_produced = 7; + */ + public Builder setBytesProduced(long value) { + + bytesProduced_ = value; + onChanged(); + return this; + } + /** + *
      +       * The number of bytes produced by the node.
      +       * 
      + * + * int64 bytes_produced = 7; + */ + public Builder clearBytesProduced() { + + bytesProduced_ = 0L; + onChanged(); + return this; + } + + private long numElements_ ; + /** + *
      +       * The number of elements produced by the node.
      +       * 
      + * + * int64 num_elements = 8; + */ + public long getNumElements() { + return numElements_; + } + /** + *
      +       * The number of elements produced by the node.
      +       * 
      + * + * int64 num_elements = 8; + */ + public Builder setNumElements(long value) { + + numElements_ = value; + onChanged(); + return this; + } + /** + *
      +       * The number of elements produced by the node.
      +       * 
      + * + * int64 num_elements = 8; + */ + public Builder clearNumElements() { + + numElements_ = 0L; + onChanged(); + return this; + } + + private long processingTime_ ; + /** + *
      +       * The aggregate processing time spent in this node.
      +       * 
      + * + * int64 processing_time = 9; + */ + public long getProcessingTime() { + return processingTime_; + } + /** + *
      +       * The aggregate processing time spent in this node.
      +       * 
      + * + * int64 processing_time = 9; + */ + public Builder setProcessingTime(long value) { + + processingTime_ = value; + onChanged(); + return this; + } + /** + *
      +       * The aggregate processing time spent in this node.
      +       * 
      + * + * int64 processing_time = 9; + */ + public Builder clearProcessingTime() { + + processingTime_ = 0L; + onChanged(); + return this; + } + + private boolean recordMetrics_ ; + /** + *
+       * An indication of whether this node records metrics about produced and
+       * consumed elements.
      +       * 
      + * + * bool record_metrics = 10; + */ + public boolean getRecordMetrics() { + return recordMetrics_; + } + /** + *
+       * An indication of whether this node records metrics about produced and
+       * consumed elements.
      +       * 
      + * + * bool record_metrics = 10; + */ + public Builder setRecordMetrics(boolean value) { + + recordMetrics_ = value; + onChanged(); + return this; + } + /** + *
+       * An indication of whether this node records metrics about produced and
+       * consumed elements.
      +       * 
      + * + * bool record_metrics = 10; + */ + public Builder clearRecordMetrics() { + + recordMetrics_ = false; + onChanged(); + return this; + } + + private java.util.List parameters_ = + java.util.Collections.emptyList(); + private void ensureParametersIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + parameters_ = new java.util.ArrayList(parameters_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.Node.Parameter, org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder, org.tensorflow.proto.data.model.ModelProto.Node.ParameterOrBuilder> parametersBuilder_; + + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public java.util.List getParametersList() { + if (parametersBuilder_ == null) { + return java.util.Collections.unmodifiableList(parameters_); + } else { + return parametersBuilder_.getMessageList(); + } + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public int getParametersCount() { + if (parametersBuilder_ == null) { + return parameters_.size(); + } else { + return parametersBuilder_.getCount(); + } + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.Parameter getParameters(int index) { + if (parametersBuilder_ == null) { + return parameters_.get(index); + } else { + return parametersBuilder_.getMessage(index); + } + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public Builder setParameters( + int index, org.tensorflow.proto.data.model.ModelProto.Node.Parameter value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParametersIsMutable(); + parameters_.set(index, value); + onChanged(); + } else { + parametersBuilder_.setMessage(index, value); + } + return this; + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public Builder setParameters( + int index, org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder builderForValue) { + if (parametersBuilder_ == null) { + ensureParametersIsMutable(); + parameters_.set(index, builderForValue.build()); + onChanged(); + } else { + parametersBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public Builder addParameters(org.tensorflow.proto.data.model.ModelProto.Node.Parameter value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParametersIsMutable(); + parameters_.add(value); + onChanged(); + } else { + parametersBuilder_.addMessage(value); + } + return this; + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public Builder addParameters( + int index, org.tensorflow.proto.data.model.ModelProto.Node.Parameter value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParametersIsMutable(); + parameters_.add(index, value); + onChanged(); + } else { + parametersBuilder_.addMessage(index, value); + } + return this; + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public Builder addParameters( + org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder builderForValue) { + if (parametersBuilder_ == null) { + ensureParametersIsMutable(); + parameters_.add(builderForValue.build()); + onChanged(); + } else { + parametersBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public Builder addParameters( + int index, org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder builderForValue) { + if (parametersBuilder_ == null) { + ensureParametersIsMutable(); + parameters_.add(index, builderForValue.build()); + onChanged(); + } else { + parametersBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public Builder addAllParameters( + java.lang.Iterable values) { + if (parametersBuilder_ == null) { + ensureParametersIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, parameters_); + onChanged(); + } else { + parametersBuilder_.addAllMessages(values); + } + return this; + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public Builder clearParameters() { + if (parametersBuilder_ == null) { + parameters_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + parametersBuilder_.clear(); + } + return this; + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public Builder removeParameters(int index) { + if (parametersBuilder_ == null) { + ensureParametersIsMutable(); + parameters_.remove(index); + onChanged(); + } else { + parametersBuilder_.remove(index); + } + return this; + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder getParametersBuilder( + int index) { + return getParametersFieldBuilder().getBuilder(index); + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.ParameterOrBuilder getParametersOrBuilder( + int index) { + if (parametersBuilder_ == null) { + return parameters_.get(index); } else { + return parametersBuilder_.getMessageOrBuilder(index); + } + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public java.util.List + getParametersOrBuilderList() { + if (parametersBuilder_ != null) { + return parametersBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(parameters_); + } + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder addParametersBuilder() { + return getParametersFieldBuilder().addBuilder( + org.tensorflow.proto.data.model.ModelProto.Node.Parameter.getDefaultInstance()); + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder addParametersBuilder( + int index) { + return getParametersFieldBuilder().addBuilder( + index, org.tensorflow.proto.data.model.ModelProto.Node.Parameter.getDefaultInstance()); + } + /** + *
      +       * Parameters of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11; + */ + public java.util.List + getParametersBuilderList() { + return getParametersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.Node.Parameter, org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder, org.tensorflow.proto.data.model.ModelProto.Node.ParameterOrBuilder> + getParametersFieldBuilder() { + if (parametersBuilder_ == null) { + parametersBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.Node.Parameter, org.tensorflow.proto.data.model.ModelProto.Node.Parameter.Builder, org.tensorflow.proto.data.model.ModelProto.Node.ParameterOrBuilder>( + parameters_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + parameters_ = null; + } + return parametersBuilder_; + } + + private double inputProcessingTimeSum_ ; + /** + *
+       * Statistics of input processing time history.
      +       * 
      + * + * double input_processing_time_sum = 12; + */ + public double getInputProcessingTimeSum() { + return inputProcessingTimeSum_; + } + /** + *
+       * Statistics of input processing time history.
      +       * 
      + * + * double input_processing_time_sum = 12; + */ + public Builder setInputProcessingTimeSum(double value) { + + inputProcessingTimeSum_ = value; + onChanged(); + return this; + } + /** + *
+       * Statistics of input processing time history.
      +       * 
      + * + * double input_processing_time_sum = 12; + */ + public Builder clearInputProcessingTimeSum() { + + inputProcessingTimeSum_ = 0D; + onChanged(); + return this; + } + + private long inputProcessingTimeCount_ ; + /** + * int64 input_processing_time_count = 13; + */ + public long getInputProcessingTimeCount() { + return inputProcessingTimeCount_; + } + /** + * int64 input_processing_time_count = 13; + */ + public Builder setInputProcessingTimeCount(long value) { + + inputProcessingTimeCount_ = value; + onChanged(); + return this; + } + /** + * int64 input_processing_time_count = 13; + */ + public Builder clearInputProcessingTimeCount() { + + inputProcessingTimeCount_ = 0L; + onChanged(); + return this; + } + + private java.util.List inputs_ = + java.util.Collections.emptyList(); + private void ensureInputsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + inputs_ = new java.util.ArrayList(inputs_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder> inputsBuilder_; + + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public java.util.List getInputsList() { + if (inputsBuilder_ == null) { + return java.util.Collections.unmodifiableList(inputs_); + } else { + return inputsBuilder_.getMessageList(); + } + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public int getInputsCount() { + if (inputsBuilder_ == null) { + return inputs_.size(); + } else { + return inputsBuilder_.getCount(); + } + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public org.tensorflow.proto.data.model.ModelProto.Node getInputs(int index) { + if (inputsBuilder_ == null) { + return inputs_.get(index); + } else { + return inputsBuilder_.getMessage(index); + } + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public Builder setInputs( + int index, org.tensorflow.proto.data.model.ModelProto.Node value) { + if (inputsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInputsIsMutable(); + inputs_.set(index, value); + onChanged(); + } else { + inputsBuilder_.setMessage(index, value); + } + return this; + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public Builder setInputs( + int index, org.tensorflow.proto.data.model.ModelProto.Node.Builder builderForValue) { + if (inputsBuilder_ == null) { + ensureInputsIsMutable(); + inputs_.set(index, builderForValue.build()); + onChanged(); + } else { + inputsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public Builder addInputs(org.tensorflow.proto.data.model.ModelProto.Node value) { + if (inputsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInputsIsMutable(); + inputs_.add(value); + onChanged(); + } else { + inputsBuilder_.addMessage(value); + } + return this; + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public Builder addInputs( + int index, org.tensorflow.proto.data.model.ModelProto.Node value) { + if (inputsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInputsIsMutable(); + inputs_.add(index, value); + onChanged(); + } else { + inputsBuilder_.addMessage(index, value); + } + return this; + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public Builder addInputs( + org.tensorflow.proto.data.model.ModelProto.Node.Builder builderForValue) { + if (inputsBuilder_ == null) { + ensureInputsIsMutable(); + inputs_.add(builderForValue.build()); + onChanged(); + } else { + inputsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public Builder addInputs( + int index, org.tensorflow.proto.data.model.ModelProto.Node.Builder builderForValue) { + if (inputsBuilder_ == null) { + ensureInputsIsMutable(); + inputs_.add(index, builderForValue.build()); + onChanged(); + } else { + inputsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public Builder addAllInputs( + java.lang.Iterable values) { + if (inputsBuilder_ == null) { + ensureInputsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, inputs_); + onChanged(); + } else { + inputsBuilder_.addAllMessages(values); + } + return this; + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public Builder clearInputs() { + if (inputsBuilder_ == null) { + inputs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + inputsBuilder_.clear(); + } + return this; + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public Builder removeInputs(int index) { + if (inputsBuilder_ == null) { + ensureInputsIsMutable(); + inputs_.remove(index); + onChanged(); + } else { + inputsBuilder_.remove(index); + } + return this; + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.Builder getInputsBuilder( + int index) { + return getInputsFieldBuilder().getBuilder(index); + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getInputsOrBuilder( + int index) { + if (inputsBuilder_ == null) { + return inputs_.get(index); } else { + return inputsBuilder_.getMessageOrBuilder(index); + } + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public java.util.List + getInputsOrBuilderList() { + if (inputsBuilder_ != null) { + return inputsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(inputs_); + } + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.Builder addInputsBuilder() { + return getInputsFieldBuilder().addBuilder( + org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance()); + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.Builder addInputsBuilder( + int index) { + return getInputsFieldBuilder().addBuilder( + index, org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance()); + } + /** + *
      +       * Inputs of this node.
      +       * 
      + * + * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + */ + public java.util.List + getInputsBuilderList() { + return getInputsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder> + getInputsFieldBuilder() { + if (inputsBuilder_ == null) { + inputsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder>( + inputs_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + inputs_ = null; + } + return inputsBuilder_; + } + + private int nodeClass_ = 0; + /** + *
      +       * Class of this node.
      +       * 
      + * + * .tensorflow.data.model.NodeClass node_class = 15; + */ + public int getNodeClassValue() { + return nodeClass_; + } + /** + *
      +       * Class of this node.
      +       * 
      + * + * .tensorflow.data.model.NodeClass node_class = 15; + */ + public Builder setNodeClassValue(int value) { + nodeClass_ = value; + onChanged(); + return this; + } + /** + *
      +       * Class of this node.
      +       * 
      + * + * .tensorflow.data.model.NodeClass node_class = 15; + */ + public org.tensorflow.proto.data.model.NodeClass getNodeClass() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.model.NodeClass result = org.tensorflow.proto.data.model.NodeClass.valueOf(nodeClass_); + return result == null ? org.tensorflow.proto.data.model.NodeClass.UNRECOGNIZED : result; + } + /** + *
      +       * Class of this node.
      +       * 
      + * + * .tensorflow.data.model.NodeClass node_class = 15; + */ + public Builder setNodeClass(org.tensorflow.proto.data.model.NodeClass value) { + if (value == null) { + throw new NullPointerException(); + } + + nodeClass_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
      +       * Class of this node.
      +       * 
      + * + * .tensorflow.data.model.NodeClass node_class = 15; + */ + public Builder clearNodeClass() { + + nodeClass_ = 0; + onChanged(); + return this; + } + + private double ratio_ ; + /** + *
      +       * Ratio of input to output elements. This is only used by KNOWN_RATIO and
      +       * ASYNC_KNOWN_RATIO nodes.
      +       * 
      + * + * double ratio = 16; + */ + public double getRatio() { + return ratio_; + } + /** + *
      +       * Ratio of input to output elements. This is only used by KNOWN_RATIO and
      +       * ASYNC_KNOWN_RATIO nodes.
      +       * 
      + * + * double ratio = 16; + */ + public Builder setRatio(double value) { + + ratio_ = value; + onChanged(); + return this; + } + /** + *
      +       * Ratio of input to output elements. This is only used by KNOWN_RATIO and
      +       * ASYNC_KNOWN_RATIO nodes.
      +       * 
      + * + * double ratio = 16; + */ + public Builder clearRatio() { + + ratio_ = 0D; + onChanged(); + return this; + } + + private double memoryRatio_ ; + /** + *
      +       * Ratio identifies how many parallelism calls are introduced by one
      +       * buffered element. This is only used by ASYNC_KNOWN_RATIO nodes.
      +       * 
      + * + * double memory_ratio = 17; + */ + public double getMemoryRatio() { + return memoryRatio_; + } + /** + *
      +       * Ratio identifies how many parallelism calls are introduced by one
      +       * buffered element. This is only used by ASYNC_KNOWN_RATIO nodes.
      +       * 
      + * + * double memory_ratio = 17; + */ + public Builder setMemoryRatio(double value) { + + memoryRatio_ = value; + onChanged(); + return this; + } + /** + *
      +       * Ratio identifies how many parallelism calls are introduced by one
      +       * buffered element. This is only used by ASYNC_KNOWN_RATIO nodes.
      +       * 
      + * + * double memory_ratio = 17; + */ + public Builder clearMemoryRatio() { + + memoryRatio_ = 0D; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.model.ModelProto.Node) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.model.ModelProto.Node) + private static final org.tensorflow.proto.data.model.ModelProto.Node DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.model.ModelProto.Node(); + } + + public static org.tensorflow.proto.data.model.ModelProto.Node getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Node parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Node(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.Node getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface OptimizationParamsOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.model.ModelProto.OptimizationParams) + com.google.protobuf.MessageOrBuilder { + + /** + *
      +     * Algorithm used for autotuning optimization.
      +     * 
      + * + * .tensorflow.data.model.AutotuneAlgorithm algorithm = 1; + */ + int getAlgorithmValue(); + /** + *
      +     * Algorithm used for autotuning optimization.
      +     * 
      + * + * .tensorflow.data.model.AutotuneAlgorithm algorithm = 1; + */ + org.tensorflow.proto.data.model.AutotuneAlgorithm getAlgorithm(); + + /** + *
      +     * Number of available logical threads.
      +     * 
      + * + * int64 cpu_budget = 2; + */ + long getCpuBudget(); + + /** + *
      +     * Amount of available memory in bytes.
      +     * 
      + * + * int64 ram_budget = 3; + */ + long getRamBudget(); + + /** + *
      +     * Time between two consecutive `GetNext` calls to the iterator represented
      +     * by the output node.
      +     * 
      + * + * double model_input_time = 4; + */ + double getModelInputTime(); + } + /** + *
      +   * Contains parameters of the model autotuning optimization.
      +   * 
      + * + * Protobuf type {@code tensorflow.data.model.ModelProto.OptimizationParams} + */ + public static final class OptimizationParams extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.model.ModelProto.OptimizationParams) + OptimizationParamsOrBuilder { + private static final long serialVersionUID = 0L; + // Use OptimizationParams.newBuilder() to construct. + private OptimizationParams(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private OptimizationParams() { + algorithm_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new OptimizationParams(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private OptimizationParams( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + int rawValue = input.readEnum(); + + algorithm_ = rawValue; + break; + } + case 16: { + + cpuBudget_ = input.readInt64(); + break; + } + case 24: { + + ramBudget_ = input.readInt64(); + break; + } + case 33: { + + modelInputTime_ = input.readDouble(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_OptimizationParams_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_OptimizationParams_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.model.ModelProto.OptimizationParams.class, org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder.class); + } + + public static final int ALGORITHM_FIELD_NUMBER = 1; + private int algorithm_; + /** + *
      +     * Algorithm used for autotuning optimization.
      +     * 
      + * + * .tensorflow.data.model.AutotuneAlgorithm algorithm = 1; + */ + public int getAlgorithmValue() { + return algorithm_; + } + /** + *
      +     * Algorithm used for autotuning optimization.
      +     * 
      + * + * .tensorflow.data.model.AutotuneAlgorithm algorithm = 1; + */ + public org.tensorflow.proto.data.model.AutotuneAlgorithm getAlgorithm() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.model.AutotuneAlgorithm result = org.tensorflow.proto.data.model.AutotuneAlgorithm.valueOf(algorithm_); + return result == null ? org.tensorflow.proto.data.model.AutotuneAlgorithm.UNRECOGNIZED : result; + } + + public static final int CPU_BUDGET_FIELD_NUMBER = 2; + private long cpuBudget_; + /** + *
      +     * Number of available logical threads.
      +     * 
      + * + * int64 cpu_budget = 2; + */ + public long getCpuBudget() { + return cpuBudget_; + } + + public static final int RAM_BUDGET_FIELD_NUMBER = 3; + private long ramBudget_; + /** + *
      +     * Amount of available memory in bytes.
      +     * 
      + * + * int64 ram_budget = 3; + */ + public long getRamBudget() { + return ramBudget_; + } + + public static final int MODEL_INPUT_TIME_FIELD_NUMBER = 4; + private double modelInputTime_; + /** + *
      +     * Time between two consecutive `GetNext` calls to the iterator represented
      +     * by the output node.
      +     * 
      + * + * double model_input_time = 4; + */ + public double getModelInputTime() { + return modelInputTime_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (algorithm_ != org.tensorflow.proto.data.model.AutotuneAlgorithm.HILL_CLIMB.getNumber()) { + output.writeEnum(1, algorithm_); + } + if (cpuBudget_ != 0L) { + output.writeInt64(2, cpuBudget_); + } + if (ramBudget_ != 0L) { + output.writeInt64(3, ramBudget_); + } + if (modelInputTime_ != 0D) { + output.writeDouble(4, modelInputTime_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (algorithm_ != org.tensorflow.proto.data.model.AutotuneAlgorithm.HILL_CLIMB.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, algorithm_); + } + if (cpuBudget_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, cpuBudget_); + } + if (ramBudget_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, ramBudget_); + } + if (modelInputTime_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(4, modelInputTime_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.model.ModelProto.OptimizationParams)) { + return super.equals(obj); + } + org.tensorflow.proto.data.model.ModelProto.OptimizationParams other = (org.tensorflow.proto.data.model.ModelProto.OptimizationParams) obj; + + if (algorithm_ != other.algorithm_) return false; + if (getCpuBudget() + != other.getCpuBudget()) return false; + if (getRamBudget() + != other.getRamBudget()) return false; + if (java.lang.Double.doubleToLongBits(getModelInputTime()) + != java.lang.Double.doubleToLongBits( + other.getModelInputTime())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ALGORITHM_FIELD_NUMBER; + hash = (53 * hash) + algorithm_; + hash = (37 * hash) + CPU_BUDGET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getCpuBudget()); + hash = (37 * hash) + RAM_BUDGET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getRamBudget()); + hash = (37 * hash) + MODEL_INPUT_TIME_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getModelInputTime())); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseFrom( + 
java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.model.ModelProto.OptimizationParams prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +     * Contains parameters of the model autotuning optimization.
      +     * 
      + * + * Protobuf type {@code tensorflow.data.model.ModelProto.OptimizationParams} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.model.ModelProto.OptimizationParams) + org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_OptimizationParams_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_OptimizationParams_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.model.ModelProto.OptimizationParams.class, org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder.class); + } + + // Construct using org.tensorflow.proto.data.model.ModelProto.OptimizationParams.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + algorithm_ = 0; + + cpuBudget_ = 0L; + + ramBudget_ = 0L; + + modelInputTime_ = 0D; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_OptimizationParams_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.OptimizationParams getDefaultInstanceForType() { + return org.tensorflow.proto.data.model.ModelProto.OptimizationParams.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.OptimizationParams build() { + org.tensorflow.proto.data.model.ModelProto.OptimizationParams result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.OptimizationParams buildPartial() { + org.tensorflow.proto.data.model.ModelProto.OptimizationParams result = new org.tensorflow.proto.data.model.ModelProto.OptimizationParams(this); + result.algorithm_ = algorithm_; + result.cpuBudget_ = cpuBudget_; + result.ramBudget_ = ramBudget_; + result.modelInputTime_ = modelInputTime_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, 
java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.model.ModelProto.OptimizationParams) { + return mergeFrom((org.tensorflow.proto.data.model.ModelProto.OptimizationParams)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.model.ModelProto.OptimizationParams other) { + if (other == org.tensorflow.proto.data.model.ModelProto.OptimizationParams.getDefaultInstance()) return this; + if (other.algorithm_ != 0) { + setAlgorithmValue(other.getAlgorithmValue()); + } + if (other.getCpuBudget() != 0L) { + setCpuBudget(other.getCpuBudget()); + } + if (other.getRamBudget() != 0L) { + setRamBudget(other.getRamBudget()); + } + if (other.getModelInputTime() != 0D) { + setModelInputTime(other.getModelInputTime()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.model.ModelProto.OptimizationParams parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.model.ModelProto.OptimizationParams) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int algorithm_ = 0; + /** + *
      +       * Algorithm used for autotuning optimization.
      +       * 
      + * + * .tensorflow.data.model.AutotuneAlgorithm algorithm = 1; + */ + public int getAlgorithmValue() { + return algorithm_; + } + /** + *
      +       * Algorithm used for autotuning optimization.
      +       * 
      + * + * .tensorflow.data.model.AutotuneAlgorithm algorithm = 1; + */ + public Builder setAlgorithmValue(int value) { + algorithm_ = value; + onChanged(); + return this; + } + /** + *
      +       * Algorithm used for autotuning optimization.
      +       * 
      + * + * .tensorflow.data.model.AutotuneAlgorithm algorithm = 1; + */ + public org.tensorflow.proto.data.model.AutotuneAlgorithm getAlgorithm() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.model.AutotuneAlgorithm result = org.tensorflow.proto.data.model.AutotuneAlgorithm.valueOf(algorithm_); + return result == null ? org.tensorflow.proto.data.model.AutotuneAlgorithm.UNRECOGNIZED : result; + } + /** + *
      +       * Algorithm used for autotuning optimization.
      +       * 
      + * + * .tensorflow.data.model.AutotuneAlgorithm algorithm = 1; + */ + public Builder setAlgorithm(org.tensorflow.proto.data.model.AutotuneAlgorithm value) { + if (value == null) { + throw new NullPointerException(); + } + + algorithm_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
      +       * Algorithm used for autotuning optimization.
      +       * 
      + * + * .tensorflow.data.model.AutotuneAlgorithm algorithm = 1; + */ + public Builder clearAlgorithm() { + + algorithm_ = 0; + onChanged(); + return this; + } + + private long cpuBudget_ ; + /** + *
      +       * Number of available logical threads.
      +       * 
      + * + * int64 cpu_budget = 2; + */ + public long getCpuBudget() { + return cpuBudget_; + } + /** + *
      +       * Number of available logical threads.
      +       * 
      + * + * int64 cpu_budget = 2; + */ + public Builder setCpuBudget(long value) { + + cpuBudget_ = value; + onChanged(); + return this; + } + /** + *
      +       * Number of available logical threads.
      +       * 
      + * + * int64 cpu_budget = 2; + */ + public Builder clearCpuBudget() { + + cpuBudget_ = 0L; + onChanged(); + return this; + } + + private long ramBudget_ ; + /** + *
      +       * Amount of available memory in bytes.
      +       * 
      + * + * int64 ram_budget = 3; + */ + public long getRamBudget() { + return ramBudget_; + } + /** + *
      +       * Amount of available memory in bytes.
      +       * 
      + * + * int64 ram_budget = 3; + */ + public Builder setRamBudget(long value) { + + ramBudget_ = value; + onChanged(); + return this; + } + /** + *
      +       * Amount of available memory in bytes.
      +       * 
      + * + * int64 ram_budget = 3; + */ + public Builder clearRamBudget() { + + ramBudget_ = 0L; + onChanged(); + return this; + } + + private double modelInputTime_ ; + /** + *
      +       * Time between two consecutive `GetNext` calls to the iterator represented
      +       * by the output node.
      +       * 
      + * + * double model_input_time = 4; + */ + public double getModelInputTime() { + return modelInputTime_; + } + /** + *
      +       * Time between two consecutive `GetNext` calls to the iterator represented
      +       * by the output node.
      +       * 
      + * + * double model_input_time = 4; + */ + public Builder setModelInputTime(double value) { + + modelInputTime_ = value; + onChanged(); + return this; + } + /** + *
      +       * Time between two consecutive `GetNext` calls to the iterator represented
      +       * by the output node.
      +       * 
      + * + * double model_input_time = 4; + */ + public Builder clearModelInputTime() { + + modelInputTime_ = 0D; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.model.ModelProto.OptimizationParams) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.model.ModelProto.OptimizationParams) + private static final org.tensorflow.proto.data.model.ModelProto.OptimizationParams DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.model.ModelProto.OptimizationParams(); + } + + public static org.tensorflow.proto.data.model.ModelProto.OptimizationParams getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public OptimizationParams parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new OptimizationParams(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto.OptimizationParams getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public static final int OUTPUT_FIELD_NUMBER = 1; + private org.tensorflow.proto.data.model.ModelProto.Node output_; + /** + *
      +   * Output node of this model.
      +   * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public boolean hasOutput() { + return output_ != null; + } + /** + *
      +   * Output node of this model.
      +   * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public org.tensorflow.proto.data.model.ModelProto.Node getOutput() { + return output_ == null ? org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance() : output_; + } + /** + *
      +   * Output node of this model.
      +   * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getOutputOrBuilder() { + return getOutput(); + } + + public static final int ID_COUNTER_FIELD_NUMBER = 2; + private long idCounter_; + /** + *
      +   * Counter for node IDs of this model.
      +   * 
      + * + * int64 id_counter = 2; + */ + public long getIdCounter() { + return idCounter_; + } + + public static final int COLLECT_RESOURCE_USAGE_FIELD_NUMBER = 3; + private boolean collectResourceUsage_; + /** + *
      +   * Indicates whether the modeling framework should collect resource usage,
      +   * e.g. CPU, memory.
      +   * 
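A hedged sketch (not part of the generated sources in this patch): the top-level ModelProto fields described above — output, id_counter, collect_resource_usage, optimization_params — are ordinary protobuf fields, so they are assembled through the generated builders. The snippet below uses only setters that appear elsewhere in this diff; the class name and the concrete values are illustrative assumptions.

import org.tensorflow.proto.data.model.AutotuneAlgorithm;
import org.tensorflow.proto.data.model.ModelProto;

// Hedged sketch: assembling a ModelProto with the builders added by this patch.
public final class ModelProtoBuildSketch {
  public static void main(String[] args) {
    ModelProto model = ModelProto.newBuilder()
        .setIdCounter(42L)                       // next node ID to hand out
        .setCollectResourceUsage(true)           // ask the framework to track CPU/memory usage
        .setOptimizationParams(
            ModelProto.OptimizationParams.newBuilder()
                .setAlgorithm(AutotuneAlgorithm.HILL_CLIMB)
                .setCpuBudget(8L)                // number of available logical threads
                .setRamBudget(1L << 30)          // available memory in bytes (1 GiB)
                .setModelInputTime(0.5)          // time between GetNext calls (unit not specified here)
                .build())
        .build();
    System.out.println(model.getOptimizationParams().getCpuBudget()); // prints 8
  }
}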
      + * + * bool collect_resource_usage = 3; + */ + public boolean getCollectResourceUsage() { + return collectResourceUsage_; + } + + public static final int OPTIMIZATION_PARAMS_FIELD_NUMBER = 4; + private org.tensorflow.proto.data.model.ModelProto.OptimizationParams optimizationParams_; + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public boolean hasOptimizationParams() { + return optimizationParams_ != null; + } + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public org.tensorflow.proto.data.model.ModelProto.OptimizationParams getOptimizationParams() { + return optimizationParams_ == null ? org.tensorflow.proto.data.model.ModelProto.OptimizationParams.getDefaultInstance() : optimizationParams_; + } + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder getOptimizationParamsOrBuilder() { + return getOptimizationParams(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (output_ != null) { + output.writeMessage(1, getOutput()); + } + if (idCounter_ != 0L) { + output.writeInt64(2, idCounter_); + } + if (collectResourceUsage_ != false) { + output.writeBool(3, collectResourceUsage_); + } + if (optimizationParams_ != null) { + output.writeMessage(4, getOptimizationParams()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (output_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getOutput()); + } + if (idCounter_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, idCounter_); + } + if (collectResourceUsage_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, collectResourceUsage_); + } + if (optimizationParams_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getOptimizationParams()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.model.ModelProto)) { + return super.equals(obj); + } + org.tensorflow.proto.data.model.ModelProto other = (org.tensorflow.proto.data.model.ModelProto) obj; + + if (hasOutput() != other.hasOutput()) return false; + if (hasOutput()) { + if (!getOutput() + .equals(other.getOutput())) return false; + } + if (getIdCounter() + != other.getIdCounter()) return false; + if (getCollectResourceUsage() + != other.getCollectResourceUsage()) return false; + if (hasOptimizationParams() != other.hasOptimizationParams()) return false; + if (hasOptimizationParams()) { + if (!getOptimizationParams() + .equals(other.getOptimizationParams())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + 
int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasOutput()) { + hash = (37 * hash) + OUTPUT_FIELD_NUMBER; + hash = (53 * hash) + getOutput().hashCode(); + } + hash = (37 * hash) + ID_COUNTER_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getIdCounter()); + hash = (37 * hash) + COLLECT_RESOURCE_USAGE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getCollectResourceUsage()); + if (hasOptimizationParams()) { + hash = (37 * hash) + OPTIMIZATION_PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getOptimizationParams().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.model.ModelProto parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.model.ModelProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.model.ModelProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.model.ModelProto parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.model.ModelProto prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +   * Protocol buffer representing the data used by the autotuning modeling
      +   * framework.
      +   * 
      + * + * Protobuf type {@code tensorflow.data.model.ModelProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.model.ModelProto) + org.tensorflow.proto.data.model.ModelProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.model.ModelProto.class, org.tensorflow.proto.data.model.ModelProto.Builder.class); + } + + // Construct using org.tensorflow.proto.data.model.ModelProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (outputBuilder_ == null) { + output_ = null; + } else { + output_ = null; + outputBuilder_ = null; + } + idCounter_ = 0L; + + collectResourceUsage_ = false; + + if (optimizationParamsBuilder_ == null) { + optimizationParams_ = null; + } else { + optimizationParams_ = null; + optimizationParamsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto getDefaultInstanceForType() { + return org.tensorflow.proto.data.model.ModelProto.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto build() { + org.tensorflow.proto.data.model.ModelProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto buildPartial() { + org.tensorflow.proto.data.model.ModelProto result = new org.tensorflow.proto.data.model.ModelProto(this); + if (outputBuilder_ == null) { + result.output_ = output_; + } else { + result.output_ = outputBuilder_.build(); + } + result.idCounter_ = idCounter_; + result.collectResourceUsage_ = collectResourceUsage_; + if (optimizationParamsBuilder_ == null) { + result.optimizationParams_ = optimizationParams_; + } else { + result.optimizationParams_ = optimizationParamsBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + 
return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.model.ModelProto) { + return mergeFrom((org.tensorflow.proto.data.model.ModelProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.model.ModelProto other) { + if (other == org.tensorflow.proto.data.model.ModelProto.getDefaultInstance()) return this; + if (other.hasOutput()) { + mergeOutput(other.getOutput()); + } + if (other.getIdCounter() != 0L) { + setIdCounter(other.getIdCounter()); + } + if (other.getCollectResourceUsage() != false) { + setCollectResourceUsage(other.getCollectResourceUsage()); + } + if (other.hasOptimizationParams()) { + mergeOptimizationParams(other.getOptimizationParams()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.model.ModelProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.model.ModelProto) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private org.tensorflow.proto.data.model.ModelProto.Node output_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder> outputBuilder_; + /** + *
      +     * Output node of this model.
      +     * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public boolean hasOutput() { + return outputBuilder_ != null || output_ != null; + } + /** + *
      +     * Output node of this model.
      +     * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public org.tensorflow.proto.data.model.ModelProto.Node getOutput() { + if (outputBuilder_ == null) { + return output_ == null ? org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance() : output_; + } else { + return outputBuilder_.getMessage(); + } + } + /** + *
      +     * Output node of this model.
      +     * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public Builder setOutput(org.tensorflow.proto.data.model.ModelProto.Node value) { + if (outputBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + output_ = value; + onChanged(); + } else { + outputBuilder_.setMessage(value); + } + + return this; + } + /** + *
      +     * Output node of this model.
      +     * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public Builder setOutput( + org.tensorflow.proto.data.model.ModelProto.Node.Builder builderForValue) { + if (outputBuilder_ == null) { + output_ = builderForValue.build(); + onChanged(); + } else { + outputBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
      +     * Output node of this model.
      +     * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public Builder mergeOutput(org.tensorflow.proto.data.model.ModelProto.Node value) { + if (outputBuilder_ == null) { + if (output_ != null) { + output_ = + org.tensorflow.proto.data.model.ModelProto.Node.newBuilder(output_).mergeFrom(value).buildPartial(); + } else { + output_ = value; + } + onChanged(); + } else { + outputBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
      +     * Output node of this model.
      +     * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public Builder clearOutput() { + if (outputBuilder_ == null) { + output_ = null; + onChanged(); + } else { + output_ = null; + outputBuilder_ = null; + } + + return this; + } + /** + *
      +     * Output node of this model.
      +     * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public org.tensorflow.proto.data.model.ModelProto.Node.Builder getOutputBuilder() { + + onChanged(); + return getOutputFieldBuilder().getBuilder(); + } + /** + *
      +     * Output node of this model.
      +     * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + public org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getOutputOrBuilder() { + if (outputBuilder_ != null) { + return outputBuilder_.getMessageOrBuilder(); + } else { + return output_ == null ? + org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance() : output_; + } + } + /** + *
      +     * Output node of this model.
      +     * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder> + getOutputFieldBuilder() { + if (outputBuilder_ == null) { + outputBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder>( + getOutput(), + getParentForChildren(), + isClean()); + output_ = null; + } + return outputBuilder_; + } + + private long idCounter_ ; + /** + *
      +     * Counter for node IDs of this model.
      +     * 
      + * + * int64 id_counter = 2; + */ + public long getIdCounter() { + return idCounter_; + } + /** + *
      +     * Counter for node IDs of this model.
      +     * 
      + * + * int64 id_counter = 2; + */ + public Builder setIdCounter(long value) { + + idCounter_ = value; + onChanged(); + return this; + } + /** + *
      +     * Counter for node IDs of this model.
      +     * 
      + * + * int64 id_counter = 2; + */ + public Builder clearIdCounter() { + + idCounter_ = 0L; + onChanged(); + return this; + } + + private boolean collectResourceUsage_ ; + /** + *
      +     * Indicates whether the modeling framework should collect resource usage,
      +     * e.g. CPU, memory.
      +     * 
      + * + * bool collect_resource_usage = 3; + */ + public boolean getCollectResourceUsage() { + return collectResourceUsage_; + } + /** + *
      +     * Indicates whether the modeling framework should collect resource usage,
      +     * e.g. CPU, memory.
      +     * 
      + * + * bool collect_resource_usage = 3; + */ + public Builder setCollectResourceUsage(boolean value) { + + collectResourceUsage_ = value; + onChanged(); + return this; + } + /** + *
      +     * Indicates whether the modeling framework should collect resource usage,
      +     * e.g. CPU, memory.
      +     * 
      + * + * bool collect_resource_usage = 3; + */ + public Builder clearCollectResourceUsage() { + + collectResourceUsage_ = false; + onChanged(); + return this; + } + + private org.tensorflow.proto.data.model.ModelProto.OptimizationParams optimizationParams_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.OptimizationParams, org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder, org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder> optimizationParamsBuilder_; + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public boolean hasOptimizationParams() { + return optimizationParamsBuilder_ != null || optimizationParams_ != null; + } + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public org.tensorflow.proto.data.model.ModelProto.OptimizationParams getOptimizationParams() { + if (optimizationParamsBuilder_ == null) { + return optimizationParams_ == null ? org.tensorflow.proto.data.model.ModelProto.OptimizationParams.getDefaultInstance() : optimizationParams_; + } else { + return optimizationParamsBuilder_.getMessage(); + } + } + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public Builder setOptimizationParams(org.tensorflow.proto.data.model.ModelProto.OptimizationParams value) { + if (optimizationParamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + optimizationParams_ = value; + onChanged(); + } else { + optimizationParamsBuilder_.setMessage(value); + } + + return this; + } + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public Builder setOptimizationParams( + org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder builderForValue) { + if (optimizationParamsBuilder_ == null) { + optimizationParams_ = builderForValue.build(); + onChanged(); + } else { + optimizationParamsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public Builder mergeOptimizationParams(org.tensorflow.proto.data.model.ModelProto.OptimizationParams value) { + if (optimizationParamsBuilder_ == null) { + if (optimizationParams_ != null) { + optimizationParams_ = + org.tensorflow.proto.data.model.ModelProto.OptimizationParams.newBuilder(optimizationParams_).mergeFrom(value).buildPartial(); + } else { + optimizationParams_ = value; + } + onChanged(); + } else { + optimizationParamsBuilder_.mergeFrom(value); + } + + return this; + } + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public Builder clearOptimizationParams() { + if (optimizationParamsBuilder_ == null) { + optimizationParams_ = null; + onChanged(); + } else { + optimizationParams_ = null; + optimizationParamsBuilder_ = null; + } + + return this; + } + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder getOptimizationParamsBuilder() { + + onChanged(); + return getOptimizationParamsFieldBuilder().getBuilder(); + } + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + public org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder getOptimizationParamsOrBuilder() { + if (optimizationParamsBuilder_ != null) { + return 
optimizationParamsBuilder_.getMessageOrBuilder(); + } else { + return optimizationParams_ == null ? + org.tensorflow.proto.data.model.ModelProto.OptimizationParams.getDefaultInstance() : optimizationParams_; + } + } + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.OptimizationParams, org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder, org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder> + getOptimizationParamsFieldBuilder() { + if (optimizationParamsBuilder_ == null) { + optimizationParamsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.model.ModelProto.OptimizationParams, org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder, org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder>( + getOptimizationParams(), + getParentForChildren(), + isClean()); + optimizationParams_ = null; + } + return optimizationParamsBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.model.ModelProto) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.model.ModelProto) + private static final org.tensorflow.proto.data.model.ModelProto DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.model.ModelProto(); + } + + public static org.tensorflow.proto.data.model.ModelProto getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ModelProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ModelProto(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.model.ModelProto getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtoOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtoOrBuilder.java new file mode 100644 index 00000000000..956471c72a0 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtoOrBuilder.java @@ -0,0 +1,66 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/model.proto + +package org.tensorflow.proto.data.model; + +public interface ModelProtoOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.model.ModelProto) + com.google.protobuf.MessageOrBuilder { + + /** + *
      +   * Output node of this model.
      +   * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + boolean hasOutput(); + /** + *
      +   * Output node of this model.
      +   * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + org.tensorflow.proto.data.model.ModelProto.Node getOutput(); + /** + *
      +   * Output node of this model.
      +   * 
      + * + * .tensorflow.data.model.ModelProto.Node output = 1; + */ + org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getOutputOrBuilder(); + + /** + *
      +   * Counter for node IDs of this model.
      +   * 
      + * + * int64 id_counter = 2; + */ + long getIdCounter(); + + /** + *
      +   * Indicates whether the modeling framework should collect resource usage,
      +   * e.g. CPU, memory.
      +   * 
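The same fields are exposed read-only through this OrBuilder interface once a message has been parsed. As a hedged sketch (assuming a byte[] that was produced elsewhere, e.g. by ModelProto#toByteArray()), consuming a serialized model could look like the following; only getters declared in this diff are used.

import com.google.protobuf.InvalidProtocolBufferException;
import org.tensorflow.proto.data.model.ModelProto;

// Hedged sketch: reading a serialized ModelProto back through the generated getters.
public final class ModelProtoReadSketch {
  public static void readModel(byte[] serialized) throws InvalidProtocolBufferException {
    ModelProto model = ModelProto.parseFrom(serialized);
    if (model.hasOutput()) {
      System.out.println("output node: " + model.getOutput().getName());
    }
    System.out.println("id_counter: " + model.getIdCounter());
    System.out.println("collect_resource_usage: " + model.getCollectResourceUsage());
    if (model.hasOptimizationParams()) {
      System.out.println("cpu_budget: " + model.getOptimizationParams().getCpuBudget());
    }
  }
}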
      + * + * bool collect_resource_usage = 3; + */ + boolean getCollectResourceUsage(); + + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + boolean hasOptimizationParams(); + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + org.tensorflow.proto.data.model.ModelProto.OptimizationParams getOptimizationParams(); + /** + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + */ + org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder getOptimizationParamsOrBuilder(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java new file mode 100644 index 00000000000..61b8c103012 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java @@ -0,0 +1,111 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/model.proto + +package org.tensorflow.proto.data.model; + +public final class ModelProtos { + private ModelProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_model_ModelProto_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_model_ModelProto_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_model_ModelProto_Node_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_model_ModelProto_Node_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_model_ModelProto_Node_Parameter_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_model_ModelProto_Node_Parameter_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_model_ModelProto_OptimizationParams_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_model_ModelProto_OptimizationParams_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n%tensorflow/core/framework/model.proto\022" + + "\025tensorflow.data.model\"\313\007\n\nModelProto\0226\n" + + "\006output\030\001 \001(\0132&.tensorflow.data.model.Mo" + + "delProto.Node\022\022\n\nid_counter\030\002 \001(\003\022\036\n\026col" + + "lect_resource_usage\030\003 \001(\010\022Q\n\023optimizatio" + + "n_params\030\004 \001(\01324.tensorflow.data.model.M" + + "odelProto.OptimizationParams\032\347\004\n\004Node\022\n\n" + + "\002id\030\001 \001(\003\022\014\n\004name\030\002 \001(\t\022\020\n\010autotune\030\003 \001(" + + "\010\022\026\n\016buffered_bytes\030\004 \001(\003\022\031\n\021buffered_el" + + "ements\030\005 
\001(\003\022\026\n\016bytes_consumed\030\006 \001(\003\022\026\n\016" + + "bytes_produced\030\007 \001(\003\022\024\n\014num_elements\030\010 \001" + + "(\003\022\027\n\017processing_time\030\t \001(\003\022\026\n\016record_me" + + "trics\030\n \001(\010\022D\n\nparameters\030\013 \003(\01320.tensor" + + "flow.data.model.ModelProto.Node.Paramete" + + "r\022!\n\031input_processing_time_sum\030\014 \001(\001\022#\n\033" + + "input_processing_time_count\030\r \001(\003\0226\n\006inp" + + "uts\030\016 \003(\0132&.tensorflow.data.model.ModelP" + + "roto.Node\0224\n\nnode_class\030\017 \001(\0162 .tensorfl" + + "ow.data.model.NodeClass\022\r\n\005ratio\030\020 \001(\001\022\024" + + "\n\014memory_ratio\030\021 \001(\001\032h\n\tParameter\022\014\n\004nam" + + "e\030\001 \001(\t\022\r\n\005value\030\002 \001(\001\022\023\n\013state_value\030\003 " + + "\001(\001\022\013\n\003min\030\004 \001(\001\022\013\n\003max\030\005 \001(\001\022\017\n\007tunable" + + "\030\006 \001(\010\032\223\001\n\022OptimizationParams\022;\n\talgorit" + + "hm\030\001 \001(\0162(.tensorflow.data.model.Autotun" + + "eAlgorithm\022\022\n\ncpu_budget\030\002 \001(\003\022\022\n\nram_bu" + + "dget\030\003 \001(\003\022\030\n\020model_input_time\030\004 \001(\001*\203\001\n" + + "\tNodeClass\022\013\n\007UNKNOWN\020\000\022\023\n\017INTERLEAVE_MA" + + "NY\020\001\022\031\n\025ASYNC_INTERLEAVE_MANY\020\002\022\017\n\013KNOWN" + + "_RATIO\020\003\022\025\n\021ASYNC_KNOWN_RATIO\020\004\022\021\n\rUNKNO" + + "WN_RATIO\020\005*9\n\021AutotuneAlgorithm\022\016\n\nHILL_" + + "CLIMB\020\000\022\024\n\020GRADIENT_DESCENT\020\001B3\n\037org.ten" + + "sorflow.proto.data.modelB\013ModelProtosP\001\370" + + "\001\001b\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }); + internal_static_tensorflow_data_model_ModelProto_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_data_model_ModelProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_model_ModelProto_descriptor, + new java.lang.String[] { "Output", "IdCounter", "CollectResourceUsage", "OptimizationParams", }); + internal_static_tensorflow_data_model_ModelProto_Node_descriptor = + internal_static_tensorflow_data_model_ModelProto_descriptor.getNestedTypes().get(0); + internal_static_tensorflow_data_model_ModelProto_Node_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_model_ModelProto_Node_descriptor, + new java.lang.String[] { "Id", "Name", "Autotune", "BufferedBytes", "BufferedElements", "BytesConsumed", "BytesProduced", "NumElements", "ProcessingTime", "RecordMetrics", "Parameters", "InputProcessingTimeSum", "InputProcessingTimeCount", "Inputs", "NodeClass", "Ratio", "MemoryRatio", }); + internal_static_tensorflow_data_model_ModelProto_Node_Parameter_descriptor = + internal_static_tensorflow_data_model_ModelProto_Node_descriptor.getNestedTypes().get(0); + internal_static_tensorflow_data_model_ModelProto_Node_Parameter_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_model_ModelProto_Node_Parameter_descriptor, + new java.lang.String[] { "Name", "Value", "StateValue", "Min", "Max", "Tunable", }); + internal_static_tensorflow_data_model_ModelProto_OptimizationParams_descriptor = + 
internal_static_tensorflow_data_model_ModelProto_descriptor.getNestedTypes().get(1); + internal_static_tensorflow_data_model_ModelProto_OptimizationParams_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_model_ModelProto_OptimizationParams_descriptor, + new java.lang.String[] { "Algorithm", "CpuBudget", "RamBudget", "ModelInputTime", }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/NodeClass.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/NodeClass.java new file mode 100644 index 00000000000..951013a27a7 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/NodeClass.java @@ -0,0 +1,143 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/model.proto + +package org.tensorflow.proto.data.model; + +/** + *
      + * Class of a node in the performance model.
      + * 
      + * + * Protobuf enum {@code tensorflow.data.model.NodeClass} + */ +public enum NodeClass + implements com.google.protobuf.ProtocolMessageEnum { + /** + * UNKNOWN = 0; + */ + UNKNOWN(0), + /** + * INTERLEAVE_MANY = 1; + */ + INTERLEAVE_MANY(1), + /** + * ASYNC_INTERLEAVE_MANY = 2; + */ + ASYNC_INTERLEAVE_MANY(2), + /** + * KNOWN_RATIO = 3; + */ + KNOWN_RATIO(3), + /** + * ASYNC_KNOWN_RATIO = 4; + */ + ASYNC_KNOWN_RATIO(4), + /** + * UNKNOWN_RATIO = 5; + */ + UNKNOWN_RATIO(5), + UNRECOGNIZED(-1), + ; + + /** + * UNKNOWN = 0; + */ + public static final int UNKNOWN_VALUE = 0; + /** + * INTERLEAVE_MANY = 1; + */ + public static final int INTERLEAVE_MANY_VALUE = 1; + /** + * ASYNC_INTERLEAVE_MANY = 2; + */ + public static final int ASYNC_INTERLEAVE_MANY_VALUE = 2; + /** + * KNOWN_RATIO = 3; + */ + public static final int KNOWN_RATIO_VALUE = 3; + /** + * ASYNC_KNOWN_RATIO = 4; + */ + public static final int ASYNC_KNOWN_RATIO_VALUE = 4; + /** + * UNKNOWN_RATIO = 5; + */ + public static final int UNKNOWN_RATIO_VALUE = 5; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static NodeClass valueOf(int value) { + return forNumber(value); + } + + public static NodeClass forNumber(int value) { + switch (value) { + case 0: return UNKNOWN; + case 1: return INTERLEAVE_MANY; + case 2: return ASYNC_INTERLEAVE_MANY; + case 3: return KNOWN_RATIO; + case 4: return ASYNC_KNOWN_RATIO; + case 5: return UNKNOWN_RATIO; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + NodeClass> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public NodeClass findValueByNumber(int number) { + return NodeClass.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.tensorflow.proto.data.model.ModelProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final NodeClass[] VALUES = values(); + + public static NodeClass valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private NodeClass(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:tensorflow.data.model.NodeClass) +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/example/BytesList.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/example/BytesList.java index d8158b11f38..e172470df1c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/example/BytesList.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/example/BytesList.java @@ -5,6 +5,7 @@ /** *
      + * LINT.IfChange
        * Containers to hold repeated fundamental values.
        * 
      * @@ -283,6 +284,7 @@ protected Builder newBuilderForType( } /** *
      +   * LINT.IfChange
          * Containers to hold repeated fundamental values.
          * 
      * diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java index c756a6ef126..0688a5ad541 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java @@ -501,6 +501,15 @@ public interface ExperimentalOrBuilder extends * int64 xla_fusion_autotuner_thresh = 15; */ long getXlaFusionAutotunerThresh(); + + /** + *
      +     * Whether runtime execution uses TFRT.
      +     * 
      + * + * bool use_tfrt = 18; + */ + boolean getUseTfrt(); } /** *
      @@ -647,6 +656,11 @@ private Experimental(
                     mlirBridgeRollout_ = rawValue;
                     break;
                   }
      +            case 144: {
      +
      +              useTfrt_ = input.readBool();
      +              break;
      +            }
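The case added above parses field 18 (use_tfrt) from the wire. As a hedged, illustrative sketch of toggling the flag from client code (not part of the generated file): setUseTfrt and getUseTfrt come from this patch, while setExperimental is the standard generated accessor for the existing experimental field of ConfigProto; the class name is an assumption.

import org.tensorflow.proto.framework.ConfigProto;

// Hedged sketch: enabling the new use_tfrt flag through the generated builders.
public final class UseTfrtSketch {
  public static void main(String[] args) {
    ConfigProto config = ConfigProto.newBuilder()
        .setExperimental(
            ConfigProto.Experimental.newBuilder()
                .setUseTfrt(true)   // bool use_tfrt = 18
                .build())
        .build();
    System.out.println(config.getExperimental().getUseTfrt()); // prints true
  }
}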
                   default: {
                     if (!parseUnknownField(
                         input, unknownFields, extensionRegistry, tag)) {
      @@ -713,6 +727,30 @@ public enum MlirBridgeRollout
              * MLIR_BRIDGE_ROLLOUT_DISABLED = 2;
              */
             MLIR_BRIDGE_ROLLOUT_DISABLED(2),
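The hunk below adds two safe-mode rollout values to this enum. As a hedged sketch of how a client might select one of them (not part of the generated file): setMlirBridgeRollout is the setter protobuf generates for the existing mlir_bridge_rollout field (field 17) and is assumed here rather than shown in this excerpt; the class name is illustrative.

import org.tensorflow.proto.framework.ConfigProto;
import org.tensorflow.proto.framework.ConfigProto.Experimental.MlirBridgeRollout;

// Hedged sketch: selecting a rollout mode, including the new safe-mode values added below.
public final class MlirRolloutSketch {
  public static void main(String[] args) {
    ConfigProto.Experimental experimental = ConfigProto.Experimental.newBuilder()
        .setMlirBridgeRollout(MlirBridgeRollout.MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED)
        .build();
    System.out.println(experimental.getMlirBridgeRollout());
  }
}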
      +      /**
      +       * 
      +       * Enable the MLIR bridge on a per graph basis based on an analysis of
      +       * the features used in the graph. If the features used by the graph are
      +       * supported by the MLIR bridge, the MLIR bridge will be used to run the
      +       * graph.
      +       * 
      + * + * MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED = 3; + */ + MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED(3), + /** + *
      +       * Enable the MLIR bridge in a fallback mode on a per graph basis based
      +       * on an analysis of the features used in the graph.
+       * Running the MLIR bridge in fallback mode means that it is
+       * executed and that it commits all of its changes to the TF graph on
+       * success; on failure it commits nothing and lets the old bridge
+       * process the TF graph instead.
      +       * 
      + * + * MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED = 4; + */ + MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED(4), UNRECOGNIZED(-1), ; @@ -741,6 +779,30 @@ public enum MlirBridgeRollout * MLIR_BRIDGE_ROLLOUT_DISABLED = 2; */ public static final int MLIR_BRIDGE_ROLLOUT_DISABLED_VALUE = 2; + /** + *
      +       * Enable the MLIR bridge on a per graph basis based on an analysis of
      +       * the features used in the graph. If the features used by the graph are
      +       * supported by the MLIR bridge, the MLIR bridge will be used to run the
      +       * graph.
      +       * 
      + * + * MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED = 3; + */ + public static final int MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED_VALUE = 3; + /** + *
      +       * Enable the MLIR bridge in a fallback mode on a per graph basis based
      +       * on an analysis of the features used in the graph.
+       * Running the MLIR bridge in fallback mode means that it is
+       * executed and that it commits all of its changes to the TF graph on
+       * success; on failure it commits nothing and lets the old bridge
+       * process the TF graph instead.
      +       * 
      + * + * MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED = 4; + */ + public static final int MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED_VALUE = 4; public final int getNumber() { @@ -764,6 +826,8 @@ public static MlirBridgeRollout forNumber(int value) { case 0: return MLIR_BRIDGE_ROLLOUT_UNSPECIFIED; case 1: return MLIR_BRIDGE_ROLLOUT_ENABLED; case 2: return MLIR_BRIDGE_ROLLOUT_DISABLED; + case 3: return MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED; + case 4: return MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED; default: return null; } } @@ -1179,6 +1243,19 @@ public long getXlaFusionAutotunerThresh() { return xlaFusionAutotunerThresh_; } + public static final int USE_TFRT_FIELD_NUMBER = 18; + private boolean useTfrt_; + /** + *
      +     * Whether runtime execution uses TFRT.
      +     * 
      + * + * bool use_tfrt = 18; + */ + public boolean getUseTfrt() { + return useTfrt_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -1241,6 +1318,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (mlirBridgeRollout_ != org.tensorflow.proto.framework.ConfigProto.Experimental.MlirBridgeRollout.MLIR_BRIDGE_ROLLOUT_UNSPECIFIED.getNumber()) { output.writeEnum(17, mlirBridgeRollout_); } + if (useTfrt_ != false) { + output.writeBool(18, useTfrt_); + } unknownFields.writeTo(output); } @@ -1312,6 +1392,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeEnumSize(17, mlirBridgeRollout_); } + if (useTfrt_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(18, useTfrt_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -1361,6 +1445,8 @@ public boolean equals(final java.lang.Object obj) { != other.getDisableOutputPartitionGraphs()) return false; if (getXlaFusionAutotunerThresh() != other.getXlaFusionAutotunerThresh()) return false; + if (getUseTfrt() + != other.getUseTfrt()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -1417,6 +1503,9 @@ public int hashCode() { hash = (37 * hash) + XLA_FUSION_AUTOTUNER_THRESH_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getXlaFusionAutotunerThresh()); + hash = (37 * hash) + USE_TFRT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getUseTfrt()); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1592,6 +1681,8 @@ public Builder clear() { xlaFusionAutotunerThresh_ = 0L; + useTfrt_ = false; + return this; } @@ -1638,6 +1729,7 @@ public org.tensorflow.proto.framework.ConfigProto.Experimental buildPartial() { result.enableMlirGraphOptimization_ = enableMlirGraphOptimization_; result.disableOutputPartitionGraphs_ = disableOutputPartitionGraphs_; result.xlaFusionAutotunerThresh_ = xlaFusionAutotunerThresh_; + result.useTfrt_ = useTfrt_; onBuilt(); return result; } @@ -1736,6 +1828,9 @@ public Builder mergeFrom(org.tensorflow.proto.framework.ConfigProto.Experimental if (other.getXlaFusionAutotunerThresh() != 0L) { setXlaFusionAutotunerThresh(other.getXlaFusionAutotunerThresh()); } + if (other.getUseTfrt() != false) { + setUseTfrt(other.getUseTfrt()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -2808,6 +2903,44 @@ public Builder clearXlaFusionAutotunerThresh() { onChanged(); return this; } + + private boolean useTfrt_ ; + /** + *
      +       * Whether runtime execution uses TFRT.
      +       * 
      + * + * bool use_tfrt = 18; + */ + public boolean getUseTfrt() { + return useTfrt_; + } + /** + *
      +       * Whether runtime execution uses TFRT.
      +       * 
      + * + * bool use_tfrt = 18; + */ + public Builder setUseTfrt(boolean value) { + + useTfrt_ = value; + onChanged(); + return this; + } + /** + *
      +       * Whether runtime execution uses TFRT.
      +       * 
      + * + * bool use_tfrt = 18; + */ + public Builder clearUseTfrt() { + + useTfrt_ = false; + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java index 83eabfe7d8d..2479ebdc921 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java @@ -175,7 +175,7 @@ public static void registerAllExtensions( "\032\n\022cache_rpc_response\030\004 \001(\010\022*\n\"disable_s" + "ession_connection_sharing\030\005 \001(\010\"0\n\017Sessi" + "onMetadata\022\014\n\004name\030\001 \001(\t\022\017\n\007version\030\002 \001(" + - "\003\"\232\014\n\013ConfigProto\022>\n\014device_count\030\001 \003(\0132" + + "\003\"\214\r\n\013ConfigProto\022>\n\014device_count\030\001 \003(\0132" + "(.tensorflow.ConfigProto.DeviceCountEntr" + "y\022$\n\034intra_op_parallelism_threads\030\002 \001(\005\022" + "$\n\034inter_op_parallelism_threads\030\005 \001(\005\022\037\n" + @@ -194,7 +194,7 @@ public static void registerAllExtensions( "evices_in_session\030\021 \001(\010\022:\n\014experimental\030" + "\020 \001(\0132$.tensorflow.ConfigProto.Experimen" + "tal\0322\n\020DeviceCountEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005" + - "value\030\002 \001(\005:\0028\001\032\224\006\n\014Experimental\022\037\n\027coll" + + "value\030\002 \001(\005:\0028\001\032\206\007\n\014Experimental\022\037\n\027coll" + "ective_group_leader\030\001 \001(\t\022\025\n\rexecutor_ty" + "pe\030\003 \001(\t\022\032\n\022recv_buf_max_chunk\030\004 \001(\005\022\031\n\021" + "use_numa_affinity\030\005 \001(\010\0225\n-collective_de" + @@ -210,52 +210,55 @@ public static void registerAllExtensions( "gProto.Experimental.MlirBridgeRollout\022&\n" + "\036enable_mlir_graph_optimization\030\020 \001(\010\022\'\n" + "\037disable_output_partition_graphs\030\016 \001(\010\022#" + - "\n\033xla_fusion_autotuner_thresh\030\017 \001(\003\"{\n\021M" + - "lirBridgeRollout\022#\n\037MLIR_BRIDGE_ROLLOUT_" + - "UNSPECIFIED\020\000\022\037\n\033MLIR_BRIDGE_ROLLOUT_ENA" + - "BLED\020\001\022 \n\034MLIR_BRIDGE_ROLLOUT_DISABLED\020\002" + - "J\004\010\002\020\003\"\341\004\n\nRunOptions\0226\n\013trace_level\030\001 \001" + - "(\0162!.tensorflow.RunOptions.TraceLevel\022\025\n" + - "\rtimeout_in_ms\030\002 \001(\003\022\034\n\024inter_op_thread_" + - "pool\030\003 \001(\005\022\037\n\027output_partition_graphs\030\005 " + - "\001(\010\022/\n\rdebug_options\030\006 \001(\0132\030.tensorflow." + - "DebugOptions\022*\n\"report_tensor_allocation" + - "s_upon_oom\030\007 \001(\010\0229\n\014experimental\030\010 \001(\0132#" + - ".tensorflow.RunOptions.Experimental\032\322\001\n\014" + - "Experimental\022\034\n\024collective_graph_key\030\001 \001" + - "(\003\022\034\n\024use_run_handler_pool\030\002 \001(\010\022[\n\030run_" + - "handler_pool_options\030\003 \001(\01329.tensorflow." 
+ - "RunOptions.Experimental.RunHandlerPoolOp" + - "tions\032)\n\025RunHandlerPoolOptions\022\020\n\010priori" + - "ty\030\001 \001(\003\"R\n\nTraceLevel\022\014\n\010NO_TRACE\020\000\022\022\n\016" + - "SOFTWARE_TRACE\020\001\022\022\n\016HARDWARE_TRACE\020\002\022\016\n\n" + - "FULL_TRACE\020\003J\004\010\004\020\005\"\207\003\n\013RunMetadata\022)\n\nst" + - "ep_stats\030\001 \001(\0132\025.tensorflow.StepStats\022,\n" + - "\ncost_graph\030\002 \001(\0132\030.tensorflow.CostGraph" + - "Def\022.\n\020partition_graphs\030\003 \003(\0132\024.tensorfl" + - "ow.GraphDef\022?\n\017function_graphs\030\004 \003(\0132&.t" + - "ensorflow.RunMetadata.FunctionGraphs\032\255\001\n" + - "\016FunctionGraphs\022.\n\020partition_graphs\030\001 \003(" + - "\0132\024.tensorflow.GraphDef\0224\n\026pre_optimizat" + - "ion_graph\030\002 \001(\0132\024.tensorflow.GraphDef\0225\n" + - "\027post_optimization_graph\030\003 \001(\0132\024.tensorf" + - "low.GraphDef\":\n\020TensorConnection\022\023\n\013from" + - "_tensor\030\001 \001(\t\022\021\n\tto_tensor\030\002 \001(\t\"\260\003\n\017Cal" + - "lableOptions\022\014\n\004feed\030\001 \003(\t\022\r\n\005fetch\030\002 \003(" + - "\t\022\016\n\006target\030\003 \003(\t\022+\n\013run_options\030\004 \001(\0132\026" + - ".tensorflow.RunOptions\0227\n\021tensor_connect" + - "ion\030\005 \003(\0132\034.tensorflow.TensorConnection\022" + - "B\n\014feed_devices\030\006 \003(\0132,.tensorflow.Calla" + - "bleOptions.FeedDevicesEntry\022D\n\rfetch_dev" + - "ices\030\007 \003(\0132-.tensorflow.CallableOptions." + - "FetchDevicesEntry\022\027\n\017fetch_skip_sync\030\010 \001" + - "(\010\0322\n\020FeedDevicesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005v" + - "alue\030\002 \001(\t:\0028\001\0323\n\021FetchDevicesEntry\022\013\n\003k" + - "ey\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\212\001\n\036org.tens" + - "orflow.proto.frameworkB\014ConfigProtosP\001ZU" + - "github.com/tensorflow/tensorflow/tensorf" + - "low/go/core/protobuf/for_core_protos_go_" + - "proto\370\001\001b\006proto3" + "\n\033xla_fusion_autotuner_thresh\030\017 \001(\003\022\020\n\010u" + + "se_tfrt\030\022 \001(\010\"\332\001\n\021MlirBridgeRollout\022#\n\037M" + + "LIR_BRIDGE_ROLLOUT_UNSPECIFIED\020\000\022\037\n\033MLIR" + + "_BRIDGE_ROLLOUT_ENABLED\020\001\022 \n\034MLIR_BRIDGE" + + "_ROLLOUT_DISABLED\020\002\022)\n%MLIR_BRIDGE_ROLLO" + + "UT_SAFE_MODE_ENABLED\020\003\0222\n.MLIR_BRIDGE_RO" + + "LLOUT_SAFE_MODE_FALLBACK_ENABLED\020\004J\004\010\002\020\003" + + "\"\341\004\n\nRunOptions\0226\n\013trace_level\030\001 \001(\0162!.t" + + "ensorflow.RunOptions.TraceLevel\022\025\n\rtimeo" + + "ut_in_ms\030\002 \001(\003\022\034\n\024inter_op_thread_pool\030\003" + + " \001(\005\022\037\n\027output_partition_graphs\030\005 \001(\010\022/\n" + + "\rdebug_options\030\006 \001(\0132\030.tensorflow.DebugO" + + "ptions\022*\n\"report_tensor_allocations_upon" + + "_oom\030\007 \001(\010\0229\n\014experimental\030\010 \001(\0132#.tenso" + + "rflow.RunOptions.Experimental\032\322\001\n\014Experi" + + "mental\022\034\n\024collective_graph_key\030\001 \001(\003\022\034\n\024" + + "use_run_handler_pool\030\002 \001(\010\022[\n\030run_handle" + + "r_pool_options\030\003 \001(\01329.tensorflow.RunOpt" + + "ions.Experimental.RunHandlerPoolOptions\032" + + ")\n\025RunHandlerPoolOptions\022\020\n\010priority\030\001 \001" + + "(\003\"R\n\nTraceLevel\022\014\n\010NO_TRACE\020\000\022\022\n\016SOFTWA" + + "RE_TRACE\020\001\022\022\n\016HARDWARE_TRACE\020\002\022\016\n\nFULL_T" + + 
"RACE\020\003J\004\010\004\020\005\"\207\003\n\013RunMetadata\022)\n\nstep_sta" + + "ts\030\001 \001(\0132\025.tensorflow.StepStats\022,\n\ncost_" + + "graph\030\002 \001(\0132\030.tensorflow.CostGraphDef\022.\n" + + "\020partition_graphs\030\003 \003(\0132\024.tensorflow.Gra" + + "phDef\022?\n\017function_graphs\030\004 \003(\0132&.tensorf" + + "low.RunMetadata.FunctionGraphs\032\255\001\n\016Funct" + + "ionGraphs\022.\n\020partition_graphs\030\001 \003(\0132\024.te" + + "nsorflow.GraphDef\0224\n\026pre_optimization_gr" + + "aph\030\002 \001(\0132\024.tensorflow.GraphDef\0225\n\027post_" + + "optimization_graph\030\003 \001(\0132\024.tensorflow.Gr" + + "aphDef\":\n\020TensorConnection\022\023\n\013from_tenso" + + "r\030\001 \001(\t\022\021\n\tto_tensor\030\002 \001(\t\"\260\003\n\017CallableO" + + "ptions\022\014\n\004feed\030\001 \003(\t\022\r\n\005fetch\030\002 \003(\t\022\016\n\006t" + + "arget\030\003 \003(\t\022+\n\013run_options\030\004 \001(\0132\026.tenso" + + "rflow.RunOptions\0227\n\021tensor_connection\030\005 " + + "\003(\0132\034.tensorflow.TensorConnection\022B\n\014fee" + + "d_devices\030\006 \003(\0132,.tensorflow.CallableOpt" + + "ions.FeedDevicesEntry\022D\n\rfetch_devices\030\007" + + " \003(\0132-.tensorflow.CallableOptions.FetchD" + + "evicesEntry\022\027\n\017fetch_skip_sync\030\010 \001(\010\0322\n\020" + + "FeedDevicesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002" + + " \001(\t:\0028\001\0323\n\021FetchDevicesEntry\022\013\n\003key\030\001 \001" + + "(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\212\001\n\036org.tensorflow" + + ".proto.frameworkB\014ConfigProtosP\001ZUgithub" + + ".com/tensorflow/tensorflow/tensorflow/go" + + "/core/protobuf/for_core_protos_go_proto\370" + + "\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -332,7 +335,7 @@ public static void registerAllExtensions( internal_static_tensorflow_ConfigProto_Experimental_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_ConfigProto_Experimental_descriptor, - new java.lang.String[] { "CollectiveGroupLeader", "ExecutorType", "RecvBufMaxChunk", "UseNumaAffinity", "CollectiveDeterministicSequentialExecution", "CollectiveNccl", "ShareSessionStateInClusterspecPropagation", "DisableThreadSpinning", "ShareClusterDevicesInSession", "SessionMetadata", "OptimizeForStaticGraph", "EnableMlirBridge", "MlirBridgeRollout", "EnableMlirGraphOptimization", "DisableOutputPartitionGraphs", "XlaFusionAutotunerThresh", }); + new java.lang.String[] { "CollectiveGroupLeader", "ExecutorType", "RecvBufMaxChunk", "UseNumaAffinity", "CollectiveDeterministicSequentialExecution", "CollectiveNccl", "ShareSessionStateInClusterspecPropagation", "DisableThreadSpinning", "ShareClusterDevicesInSession", "SessionMetadata", "OptimizeForStaticGraph", "EnableMlirBridge", "MlirBridgeRollout", "EnableMlirGraphOptimization", "DisableOutputPartitionGraphs", "XlaFusionAutotunerThresh", "UseTfrt", }); internal_static_tensorflow_RunOptions_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_tensorflow_RunOptions_fieldAccessorTable = new diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DataClass.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DataClass.java index 895366e817c..eb7123c795d 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DataClass.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DataClass.java @@ -20,8 +20,7 @@ public enum DataClass /** *
          * Scalar time series. Each `Value` for the corresponding tag must have
      -   * `tensor` set to a rank-0 tensor of floating-point dtype, which will be
      -   * converted to float64.
      +   * `tensor` set to a rank-0 tensor of type `DT_FLOAT` (float32).
          * 
      * * DATA_CLASS_SCALAR = 1; @@ -62,8 +61,7 @@ public enum DataClass /** *
          * Scalar time series. Each `Value` for the corresponding tag must have
      -   * `tensor` set to a rank-0 tensor of floating-point dtype, which will be
      -   * converted to float64.
      +   * `tensor` set to a rank-0 tensor of type `DT_FLOAT` (float32).
          * 
      * * DATA_CLASS_SCALAR = 1; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ExtensionTypeVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ExtensionTypeVariant.java new file mode 100644 index 00000000000..1afaca12ca1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ExtensionTypeVariant.java @@ -0,0 +1,679 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/protobuf/extension_type_variant.proto + +package org.tensorflow.proto.framework; + +public final class ExtensionTypeVariant { + private ExtensionTypeVariant() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + public interface ExtensionTypeVariantMetadataOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.ExtensionTypeVariantMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + boolean hasTypeSpecProto(); + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + org.tensorflow.proto.framework.TypeSpecProto getTypeSpecProto(); + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + org.tensorflow.proto.framework.TypeSpecProtoOrBuilder getTypeSpecProtoOrBuilder(); + } + /** + *
      +   * Metadata for ExtensionTypeVariant, used when serializing as Variant.
      +   * We define a new message here (rather than directly using TypeSpecProto for
      +   * the metadata string) to retain flexibility to change the metadata encoding
      +   * to support additional features.
      +   * 
      + * + * Protobuf type {@code tensorflow.ExtensionTypeVariantMetadata} + */ + public static final class ExtensionTypeVariantMetadata extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.ExtensionTypeVariantMetadata) + ExtensionTypeVariantMetadataOrBuilder { + private static final long serialVersionUID = 0L; + // Use ExtensionTypeVariantMetadata.newBuilder() to construct. + private ExtensionTypeVariantMetadata(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ExtensionTypeVariantMetadata() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new ExtensionTypeVariantMetadata(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ExtensionTypeVariantMetadata( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + org.tensorflow.proto.framework.TypeSpecProto.Builder subBuilder = null; + if (typeSpecProto_ != null) { + subBuilder = typeSpecProto_.toBuilder(); + } + typeSpecProto_ = input.readMessage(org.tensorflow.proto.framework.TypeSpecProto.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(typeSpecProto_); + typeSpecProto_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.ExtensionTypeVariant.internal_static_tensorflow_ExtensionTypeVariantMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.ExtensionTypeVariant.internal_static_tensorflow_ExtensionTypeVariantMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata.class, org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata.Builder.class); + } + + public static final int TYPE_SPEC_PROTO_FIELD_NUMBER = 1; + private org.tensorflow.proto.framework.TypeSpecProto typeSpecProto_; + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public boolean hasTypeSpecProto() { + return typeSpecProto_ != null; + } + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public org.tensorflow.proto.framework.TypeSpecProto getTypeSpecProto() { + return typeSpecProto_ == null ? 
org.tensorflow.proto.framework.TypeSpecProto.getDefaultInstance() : typeSpecProto_; + } + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public org.tensorflow.proto.framework.TypeSpecProtoOrBuilder getTypeSpecProtoOrBuilder() { + return getTypeSpecProto(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (typeSpecProto_ != null) { + output.writeMessage(1, getTypeSpecProto()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (typeSpecProto_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getTypeSpecProto()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata)) { + return super.equals(obj); + } + org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata other = (org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata) obj; + + if (hasTypeSpecProto() != other.hasTypeSpecProto()) return false; + if (hasTypeSpecProto()) { + if (!getTypeSpecProto() + .equals(other.getTypeSpecProto())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasTypeSpecProto()) { + hash = (37 * hash) + TYPE_SPEC_PROTO_FIELD_NUMBER; + hash = (53 * hash) + getTypeSpecProto().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +     * Metadata for ExtensionTypeVariant, used when serializing as Variant.
      +     * We define a new message here (rather than directly using TypeSpecProto for
      +     * the metadata string) to retain flexibility to change the metadata encoding
      +     * to support additional features.
      +     * 
      + * + * Protobuf type {@code tensorflow.ExtensionTypeVariantMetadata} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.ExtensionTypeVariantMetadata) + org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.ExtensionTypeVariant.internal_static_tensorflow_ExtensionTypeVariantMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.ExtensionTypeVariant.internal_static_tensorflow_ExtensionTypeVariantMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata.class, org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata.Builder.class); + } + + // Construct using org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (typeSpecProtoBuilder_ == null) { + typeSpecProto_ = null; + } else { + typeSpecProto_ = null; + typeSpecProtoBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.framework.ExtensionTypeVariant.internal_static_tensorflow_ExtensionTypeVariantMetadata_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata getDefaultInstanceForType() { + return org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata build() { + org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata buildPartial() { + org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata result = new org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata(this); + if (typeSpecProtoBuilder_ == null) { + result.typeSpecProto_ = typeSpecProto_; + } else { + result.typeSpecProto_ = typeSpecProtoBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + 
com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata) { + return mergeFrom((org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata other) { + if (other == org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata.getDefaultInstance()) return this; + if (other.hasTypeSpecProto()) { + mergeTypeSpecProto(other.getTypeSpecProto()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private org.tensorflow.proto.framework.TypeSpecProto typeSpecProto_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.TypeSpecProto, org.tensorflow.proto.framework.TypeSpecProto.Builder, org.tensorflow.proto.framework.TypeSpecProtoOrBuilder> typeSpecProtoBuilder_; + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public boolean hasTypeSpecProto() { + return typeSpecProtoBuilder_ != null || typeSpecProto_ != null; + } + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public org.tensorflow.proto.framework.TypeSpecProto getTypeSpecProto() { + if (typeSpecProtoBuilder_ == null) { + return typeSpecProto_ == null ? 
org.tensorflow.proto.framework.TypeSpecProto.getDefaultInstance() : typeSpecProto_; + } else { + return typeSpecProtoBuilder_.getMessage(); + } + } + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public Builder setTypeSpecProto(org.tensorflow.proto.framework.TypeSpecProto value) { + if (typeSpecProtoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + typeSpecProto_ = value; + onChanged(); + } else { + typeSpecProtoBuilder_.setMessage(value); + } + + return this; + } + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public Builder setTypeSpecProto( + org.tensorflow.proto.framework.TypeSpecProto.Builder builderForValue) { + if (typeSpecProtoBuilder_ == null) { + typeSpecProto_ = builderForValue.build(); + onChanged(); + } else { + typeSpecProtoBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public Builder mergeTypeSpecProto(org.tensorflow.proto.framework.TypeSpecProto value) { + if (typeSpecProtoBuilder_ == null) { + if (typeSpecProto_ != null) { + typeSpecProto_ = + org.tensorflow.proto.framework.TypeSpecProto.newBuilder(typeSpecProto_).mergeFrom(value).buildPartial(); + } else { + typeSpecProto_ = value; + } + onChanged(); + } else { + typeSpecProtoBuilder_.mergeFrom(value); + } + + return this; + } + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public Builder clearTypeSpecProto() { + if (typeSpecProtoBuilder_ == null) { + typeSpecProto_ = null; + onChanged(); + } else { + typeSpecProto_ = null; + typeSpecProtoBuilder_ = null; + } + + return this; + } + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public org.tensorflow.proto.framework.TypeSpecProto.Builder getTypeSpecProtoBuilder() { + + onChanged(); + return getTypeSpecProtoFieldBuilder().getBuilder(); + } + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + public org.tensorflow.proto.framework.TypeSpecProtoOrBuilder getTypeSpecProtoOrBuilder() { + if (typeSpecProtoBuilder_ != null) { + return typeSpecProtoBuilder_.getMessageOrBuilder(); + } else { + return typeSpecProto_ == null ? 
+ org.tensorflow.proto.framework.TypeSpecProto.getDefaultInstance() : typeSpecProto_; + } + } + /** + * .tensorflow.TypeSpecProto type_spec_proto = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.TypeSpecProto, org.tensorflow.proto.framework.TypeSpecProto.Builder, org.tensorflow.proto.framework.TypeSpecProtoOrBuilder> + getTypeSpecProtoFieldBuilder() { + if (typeSpecProtoBuilder_ == null) { + typeSpecProtoBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.TypeSpecProto, org.tensorflow.proto.framework.TypeSpecProto.Builder, org.tensorflow.proto.framework.TypeSpecProtoOrBuilder>( + getTypeSpecProto(), + getParentForChildren(), + isClean()); + typeSpecProto_ = null; + } + return typeSpecProtoBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.ExtensionTypeVariantMetadata) + } + + // @@protoc_insertion_point(class_scope:tensorflow.ExtensionTypeVariantMetadata) + private static final org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata(); + } + + public static org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ExtensionTypeVariantMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ExtensionTypeVariantMetadata(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_ExtensionTypeVariantMetadata_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_ExtensionTypeVariantMetadata_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n5tensorflow/core/protobuf/extension_typ" + + "e_variant.proto\022\ntensorflow\032%tensorflow/" + + "core/protobuf/struct.proto\"R\n\034ExtensionT" + + "ypeVariantMetadata\0222\n\017type_spec_proto\030\001 " + + "\001(\0132\031.tensorflow.TypeSpecProtoB \n\036org.te" + + "nsorflow.proto.frameworkb\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + 
org.tensorflow.proto.framework.StructProtos.getDescriptor(), + }); + internal_static_tensorflow_ExtensionTypeVariantMetadata_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_ExtensionTypeVariantMetadata_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_ExtensionTypeVariantMetadata_descriptor, + new java.lang.String[] { "TypeSpecProto", }); + org.tensorflow.proto.framework.StructProtos.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FunctionSpec.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FunctionSpec.java index 08549e9715c..d71706dad58 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FunctionSpec.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FunctionSpec.java @@ -21,7 +21,7 @@ private FunctionSpec(com.google.protobuf.GeneratedMessageV3.Builder builder) super(builder); } private FunctionSpec() { - experimentalCompile_ = 0; + jitCompile_ = 0; } @java.lang.Override @@ -88,7 +88,7 @@ private FunctionSpec( case 48: { int rawValue = input.readEnum(); - experimentalCompile_ = rawValue; + jitCompile_ = rawValue; break; } default: { @@ -133,9 +133,9 @@ private FunctionSpec( * See `tf.function` for details. *
      * - * Protobuf enum {@code tensorflow.FunctionSpec.ExperimentalCompile} + * Protobuf enum {@code tensorflow.FunctionSpec.JitCompile} */ - public enum ExperimentalCompile + public enum JitCompile implements com.google.protobuf.ProtocolMessageEnum { /** * DEFAULT = 0; @@ -178,11 +178,11 @@ public final int getNumber() { * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated - public static ExperimentalCompile valueOf(int value) { + public static JitCompile valueOf(int value) { return forNumber(value); } - public static ExperimentalCompile forNumber(int value) { + public static JitCompile forNumber(int value) { switch (value) { case 0: return DEFAULT; case 1: return ON; @@ -191,15 +191,15 @@ public static ExperimentalCompile forNumber(int value) { } } - public static com.google.protobuf.Internal.EnumLiteMap + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final com.google.protobuf.Internal.EnumLiteMap< - ExperimentalCompile> internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public ExperimentalCompile findValueByNumber(int number) { - return ExperimentalCompile.forNumber(number); + JitCompile> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public JitCompile findValueByNumber(int number) { + return JitCompile.forNumber(number); } }; @@ -216,9 +216,9 @@ public ExperimentalCompile findValueByNumber(int number) { return org.tensorflow.proto.framework.FunctionSpec.getDescriptor().getEnumTypes().get(0); } - private static final ExperimentalCompile[] VALUES = values(); + private static final JitCompile[] VALUES = values(); - public static ExperimentalCompile valueOf( + public static JitCompile valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( @@ -232,11 +232,11 @@ public static ExperimentalCompile valueOf( private final int value; - private ExperimentalCompile(int value) { + private JitCompile(int value) { this.value = value; } - // @@protoc_insertion_point(enum_scope:tensorflow.FunctionSpec.ExperimentalCompile) + // @@protoc_insertion_point(enum_scope:tensorflow.FunctionSpec.JitCompile) } public static final int FULLARGSPEC_FIELD_NUMBER = 1; @@ -318,21 +318,21 @@ public org.tensorflow.proto.framework.StructuredValueOrBuilder getInputSignature return getInputSignature(); } - public static final int EXPERIMENTAL_COMPILE_FIELD_NUMBER = 6; - private int experimentalCompile_; + public static final int JIT_COMPILE_FIELD_NUMBER = 6; + private int jitCompile_; /** - * .tensorflow.FunctionSpec.ExperimentalCompile experimental_compile = 6; + * .tensorflow.FunctionSpec.JitCompile jit_compile = 6; */ - public int getExperimentalCompileValue() { - return experimentalCompile_; + public int getJitCompileValue() { + return jitCompile_; } /** - * .tensorflow.FunctionSpec.ExperimentalCompile experimental_compile = 6; + * .tensorflow.FunctionSpec.JitCompile jit_compile = 6; */ - public org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile getExperimentalCompile() { + public org.tensorflow.proto.framework.FunctionSpec.JitCompile getJitCompile() { @SuppressWarnings("deprecation") - org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile result = org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile.valueOf(experimentalCompile_); - return result == null ? 
org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile.UNRECOGNIZED : result; + org.tensorflow.proto.framework.FunctionSpec.JitCompile result = org.tensorflow.proto.framework.FunctionSpec.JitCompile.valueOf(jitCompile_); + return result == null ? org.tensorflow.proto.framework.FunctionSpec.JitCompile.UNRECOGNIZED : result; } private byte memoizedIsInitialized = -1; @@ -358,8 +358,8 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (inputSignature_ != null) { output.writeMessage(5, getInputSignature()); } - if (experimentalCompile_ != org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile.DEFAULT.getNumber()) { - output.writeEnum(6, experimentalCompile_); + if (jitCompile_ != org.tensorflow.proto.framework.FunctionSpec.JitCompile.DEFAULT.getNumber()) { + output.writeEnum(6, jitCompile_); } unknownFields.writeTo(output); } @@ -382,9 +382,9 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(5, getInputSignature()); } - if (experimentalCompile_ != org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile.DEFAULT.getNumber()) { + if (jitCompile_ != org.tensorflow.proto.framework.FunctionSpec.JitCompile.DEFAULT.getNumber()) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(6, experimentalCompile_); + .computeEnumSize(6, jitCompile_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -413,7 +413,7 @@ public boolean equals(final java.lang.Object obj) { if (!getInputSignature() .equals(other.getInputSignature())) return false; } - if (experimentalCompile_ != other.experimentalCompile_) return false; + if (jitCompile_ != other.jitCompile_) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -436,8 +436,8 @@ public int hashCode() { hash = (37 * hash) + INPUT_SIGNATURE_FIELD_NUMBER; hash = (53 * hash) + getInputSignature().hashCode(); } - hash = (37 * hash) + EXPERIMENTAL_COMPILE_FIELD_NUMBER; - hash = (53 * hash) + experimentalCompile_; + hash = (37 * hash) + JIT_COMPILE_FIELD_NUMBER; + hash = (53 * hash) + jitCompile_; hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -590,7 +590,7 @@ public Builder clear() { inputSignature_ = null; inputSignatureBuilder_ = null; } - experimentalCompile_ = 0; + jitCompile_ = 0; return this; } @@ -629,7 +629,7 @@ public org.tensorflow.proto.framework.FunctionSpec buildPartial() { } else { result.inputSignature_ = inputSignatureBuilder_.build(); } - result.experimentalCompile_ = experimentalCompile_; + result.jitCompile_ = jitCompile_; onBuilt(); return result; } @@ -687,8 +687,8 @@ public Builder mergeFrom(org.tensorflow.proto.framework.FunctionSpec other) { if (other.hasInputSignature()) { mergeInputSignature(other.getInputSignature()); } - if (other.experimentalCompile_ != 0) { - setExperimentalCompileValue(other.getExperimentalCompileValue()); + if (other.jitCompile_ != 0) { + setJitCompileValue(other.getJitCompileValue()); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -1063,47 +1063,47 @@ public org.tensorflow.proto.framework.StructuredValueOrBuilder getInputSignature return inputSignatureBuilder_; } - private int experimentalCompile_ = 0; + private int jitCompile_ = 0; /** - * .tensorflow.FunctionSpec.ExperimentalCompile experimental_compile = 6; + * .tensorflow.FunctionSpec.JitCompile jit_compile = 6; */ - public int getExperimentalCompileValue() { - return experimentalCompile_; + public int getJitCompileValue() { + return 
jitCompile_; } /** - * .tensorflow.FunctionSpec.ExperimentalCompile experimental_compile = 6; + * .tensorflow.FunctionSpec.JitCompile jit_compile = 6; */ - public Builder setExperimentalCompileValue(int value) { - experimentalCompile_ = value; + public Builder setJitCompileValue(int value) { + jitCompile_ = value; onChanged(); return this; } /** - * .tensorflow.FunctionSpec.ExperimentalCompile experimental_compile = 6; + * .tensorflow.FunctionSpec.JitCompile jit_compile = 6; */ - public org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile getExperimentalCompile() { + public org.tensorflow.proto.framework.FunctionSpec.JitCompile getJitCompile() { @SuppressWarnings("deprecation") - org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile result = org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile.valueOf(experimentalCompile_); - return result == null ? org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile.UNRECOGNIZED : result; + org.tensorflow.proto.framework.FunctionSpec.JitCompile result = org.tensorflow.proto.framework.FunctionSpec.JitCompile.valueOf(jitCompile_); + return result == null ? org.tensorflow.proto.framework.FunctionSpec.JitCompile.UNRECOGNIZED : result; } /** - * .tensorflow.FunctionSpec.ExperimentalCompile experimental_compile = 6; + * .tensorflow.FunctionSpec.JitCompile jit_compile = 6; */ - public Builder setExperimentalCompile(org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile value) { + public Builder setJitCompile(org.tensorflow.proto.framework.FunctionSpec.JitCompile value) { if (value == null) { throw new NullPointerException(); } - experimentalCompile_ = value.getNumber(); + jitCompile_ = value.getNumber(); onChanged(); return this; } /** - * .tensorflow.FunctionSpec.ExperimentalCompile experimental_compile = 6; + * .tensorflow.FunctionSpec.JitCompile jit_compile = 6; */ - public Builder clearExperimentalCompile() { + public Builder clearJitCompile() { - experimentalCompile_ = 0; + jitCompile_ = 0; onChanged(); return this; } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FunctionSpecOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FunctionSpecOrBuilder.java index 09ebc013bdc..8f2536c86b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FunctionSpecOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FunctionSpecOrBuilder.java @@ -67,11 +67,11 @@ public interface FunctionSpecOrBuilder extends org.tensorflow.proto.framework.StructuredValueOrBuilder getInputSignatureOrBuilder(); /** - * .tensorflow.FunctionSpec.ExperimentalCompile experimental_compile = 6; + * .tensorflow.FunctionSpec.JitCompile jit_compile = 6; */ - int getExperimentalCompileValue(); + int getJitCompileValue(); /** - * .tensorflow.FunctionSpec.ExperimentalCompile experimental_compile = 6; + * .tensorflow.FunctionSpec.JitCompile jit_compile = 6; */ - org.tensorflow.proto.framework.FunctionSpec.ExperimentalCompile getExperimentalCompile(); + org.tensorflow.proto.framework.FunctionSpec.JitCompile getJitCompile(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/GraphDef.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/GraphDef.java index 18e75b5e768..0d67353ab03 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/GraphDef.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/GraphDef.java @@ -222,7 +222,6 @@ public org.tensorflow.proto.framework.VersionDefOrBuilder getVersionsOrBuilder() private org.tensorflow.proto.framework.FunctionDefLibrary library_; /** *
      -   * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
          * "library" provides user-defined functions.
          * Naming:
          *   * library.function.name are in a flat namespace.
      @@ -253,7 +252,6 @@ public boolean hasLibrary() {
         }
         /**
          * 
      -   * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
          * "library" provides user-defined functions.
          * Naming:
          *   * library.function.name are in a flat namespace.
      @@ -284,7 +282,6 @@ public org.tensorflow.proto.framework.FunctionDefLibrary getLibrary() {
         }
         /**
          * 
      -   * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
          * "library" provides user-defined functions.
          * Naming:
          *   * library.function.name are in a flat namespace.
      @@ -1197,7 +1194,6 @@ public org.tensorflow.proto.framework.VersionDefOrBuilder getVersionsOrBuilder()
               org.tensorflow.proto.framework.FunctionDefLibrary, org.tensorflow.proto.framework.FunctionDefLibrary.Builder, org.tensorflow.proto.framework.FunctionDefLibraryOrBuilder> libraryBuilder_;
           /**
            * 
      -     * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
            * "library" provides user-defined functions.
            * Naming:
            *   * library.function.name are in a flat namespace.
      @@ -1228,7 +1224,6 @@ public boolean hasLibrary() {
           }
           /**
            * 
      -     * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
            * "library" provides user-defined functions.
            * Naming:
            *   * library.function.name are in a flat namespace.
      @@ -1263,7 +1258,6 @@ public org.tensorflow.proto.framework.FunctionDefLibrary getLibrary() {
           }
           /**
            * 
      -     * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
            * "library" provides user-defined functions.
            * Naming:
            *   * library.function.name are in a flat namespace.
      @@ -1304,7 +1298,6 @@ public Builder setLibrary(org.tensorflow.proto.framework.FunctionDefLibrary valu
           }
           /**
            * 
      -     * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
            * "library" provides user-defined functions.
            * Naming:
            *   * library.function.name are in a flat namespace.
      @@ -1343,7 +1336,6 @@ public Builder setLibrary(
           }
           /**
            * 
      -     * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
            * "library" provides user-defined functions.
            * Naming:
            *   * library.function.name are in a flat namespace.
      @@ -1386,7 +1378,6 @@ public Builder mergeLibrary(org.tensorflow.proto.framework.FunctionDefLibrary va
           }
           /**
            * 
      -     * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
            * "library" provides user-defined functions.
            * Naming:
            *   * library.function.name are in a flat namespace.
      @@ -1425,7 +1416,6 @@ public Builder clearLibrary() {
           }
           /**
            * 
      -     * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
            * "library" provides user-defined functions.
            * Naming:
            *   * library.function.name are in a flat namespace.
      @@ -1458,7 +1448,6 @@ public org.tensorflow.proto.framework.FunctionDefLibrary.Builder getLibraryBuild
           }
           /**
            * 
      -     * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
            * "library" provides user-defined functions.
            * Naming:
            *   * library.function.name are in a flat namespace.
      @@ -1494,7 +1483,6 @@ public org.tensorflow.proto.framework.FunctionDefLibraryOrBuilder getLibraryOrBu
           }
           /**
            * 
      -     * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
            * "library" provides user-defined functions.
            * Naming:
            *   * library.function.name are in a flat namespace.
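
For context, here is a minimal sketch (not part of this patch or of the generated sources) of how the regenerated accessors above would be used; it exercises the new ConfigProto.Experimental use_tfrt flag and the experimental_compile -> jit_compile rename on FunctionSpec. The main() harness and printed values are illustrative assumptions only.

import org.tensorflow.proto.framework.ConfigProto;
import org.tensorflow.proto.framework.FunctionSpec;

public class RegeneratedProtoSketch {
  public static void main(String[] args) {
    // Flip the new experimental TFRT flag (field 18) through the regenerated builder.
    ConfigProto config = ConfigProto.newBuilder()
        .setExperimental(ConfigProto.Experimental.newBuilder()
            .setUseTfrt(true))
        .build();
    System.out.println(config.getExperimental().getUseTfrt());  // true

    // The former experimental_compile enum is now exposed as jit_compile (field 6).
    FunctionSpec spec = FunctionSpec.newBuilder()
        .setJitCompile(FunctionSpec.JitCompile.ON)
        .build();
    System.out.println(spec.getJitCompile());  // ON
  }
}
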
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/GraphDefOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/GraphDefOrBuilder.java
      index 0e4ac4dca6e..9aa0a0deb22 100644
      --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/GraphDefOrBuilder.java
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/GraphDefOrBuilder.java
      @@ -75,7 +75,6 @@ org.tensorflow.proto.framework.NodeDefOrBuilder getNodeOrBuilder(
       
         /**
          * 
      -   * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
          * "library" provides user-defined functions.
          * Naming:
          *   * library.function.name are in a flat namespace.
      @@ -104,7 +103,6 @@ org.tensorflow.proto.framework.NodeDefOrBuilder getNodeOrBuilder(
         boolean hasLibrary();
         /**
          * 
      -   * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
          * "library" provides user-defined functions.
          * Naming:
          *   * library.function.name are in a flat namespace.
      @@ -133,7 +131,6 @@ org.tensorflow.proto.framework.NodeDefOrBuilder getNodeOrBuilder(
         org.tensorflow.proto.framework.FunctionDefLibrary getLibrary();
         /**
          * 
      -   * EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
          * "library" provides user-defined functions.
          * Naming:
          *   * library.function.name are in a flat namespace.
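
Similarly, a small sketch (again outside the patch) of round-tripping the new ExtensionTypeVariantMetadata message introduced above; the default TypeSpecProto payload is a placeholder chosen purely for illustration.

import org.tensorflow.proto.framework.ExtensionTypeVariant.ExtensionTypeVariantMetadata;
import org.tensorflow.proto.framework.TypeSpecProto;

public class ExtensionTypeVariantSketch {
  public static void main(String[] args) throws Exception {
    // Wrap a (default) TypeSpecProto in the new metadata message and round-trip it.
    ExtensionTypeVariantMetadata metadata = ExtensionTypeVariantMetadata.newBuilder()
        .setTypeSpecProto(TypeSpecProto.getDefaultInstance())
        .build();

    byte[] bytes = metadata.toByteArray();
    ExtensionTypeVariantMetadata parsed = ExtensionTypeVariantMetadata.parseFrom(bytes);
    System.out.println(parsed.hasTypeSpecProto());  // true
  }
}
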
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/LogNormalDistribution.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/LogNormalDistribution.java
      new file mode 100644
      index 00000000000..3164a04e0b7
      --- /dev/null
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/LogNormalDistribution.java
      @@ -0,0 +1,537 @@
      +// Generated by the protocol buffer compiler.  DO NOT EDIT!
      +// source: tensorflow/core/grappler/costs/op_performance_data.proto
      +
      +package org.tensorflow.proto.framework;
      +
      +/**
      + * Protobuf type {@code tensorflow.LogNormalDistribution}
      + */
      +public  final class LogNormalDistribution extends
      +    com.google.protobuf.GeneratedMessageV3 implements
      +    // @@protoc_insertion_point(message_implements:tensorflow.LogNormalDistribution)
      +    LogNormalDistributionOrBuilder {
      +private static final long serialVersionUID = 0L;
      +  // Use LogNormalDistribution.newBuilder() to construct.
      +  private LogNormalDistribution(com.google.protobuf.GeneratedMessageV3.Builder builder) {
      +    super(builder);
      +  }
      +  private LogNormalDistribution() {
      +  }
      +
      +  @java.lang.Override
      +  @SuppressWarnings({"unused"})
      +  protected java.lang.Object newInstance(
      +      UnusedPrivateParameter unused) {
      +    return new LogNormalDistribution();
      +  }
      +
      +  @java.lang.Override
      +  public final com.google.protobuf.UnknownFieldSet
      +  getUnknownFields() {
      +    return this.unknownFields;
      +  }
      +  private LogNormalDistribution(
      +      com.google.protobuf.CodedInputStream input,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    this();
      +    if (extensionRegistry == null) {
      +      throw new java.lang.NullPointerException();
      +    }
      +    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      +        com.google.protobuf.UnknownFieldSet.newBuilder();
      +    try {
      +      boolean done = false;
      +      while (!done) {
      +        int tag = input.readTag();
      +        switch (tag) {
      +          case 0:
      +            done = true;
      +            break;
      +          case 9: {
      +
      +            mu_ = input.readDouble();
      +            break;
      +          }
      +          case 17: {
      +
      +            sigma_ = input.readDouble();
      +            break;
      +          }
      +          default: {
      +            if (!parseUnknownField(
      +                input, unknownFields, extensionRegistry, tag)) {
      +              done = true;
      +            }
      +            break;
      +          }
      +        }
      +      }
      +    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      +      throw e.setUnfinishedMessage(this);
      +    } catch (java.io.IOException e) {
      +      throw new com.google.protobuf.InvalidProtocolBufferException(
      +          e).setUnfinishedMessage(this);
      +    } finally {
      +      this.unknownFields = unknownFields.build();
      +      makeExtensionsImmutable();
      +    }
      +  }
      +  public static final com.google.protobuf.Descriptors.Descriptor
      +      getDescriptor() {
      +    return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_LogNormalDistribution_descriptor;
      +  }
      +
      +  @java.lang.Override
      +  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      +      internalGetFieldAccessorTable() {
      +    return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_LogNormalDistribution_fieldAccessorTable
      +        .ensureFieldAccessorsInitialized(
      +            org.tensorflow.proto.framework.LogNormalDistribution.class, org.tensorflow.proto.framework.LogNormalDistribution.Builder.class);
      +  }
      +
      +  public static final int MU_FIELD_NUMBER = 1;
      +  private double mu_;
      +  /**
      +   * double mu = 1;
      +   */
      +  public double getMu() {
      +    return mu_;
      +  }
      +
      +  public static final int SIGMA_FIELD_NUMBER = 2;
      +  private double sigma_;
      +  /**
      +   * double sigma = 2;
      +   */
      +  public double getSigma() {
      +    return sigma_;
      +  }
      +
      +  private byte memoizedIsInitialized = -1;
      +  @java.lang.Override
      +  public final boolean isInitialized() {
      +    byte isInitialized = memoizedIsInitialized;
      +    if (isInitialized == 1) return true;
      +    if (isInitialized == 0) return false;
      +
      +    memoizedIsInitialized = 1;
      +    return true;
      +  }
      +
      +  @java.lang.Override
      +  public void writeTo(com.google.protobuf.CodedOutputStream output)
      +                      throws java.io.IOException {
      +    if (mu_ != 0D) {
      +      output.writeDouble(1, mu_);
      +    }
      +    if (sigma_ != 0D) {
      +      output.writeDouble(2, sigma_);
      +    }
      +    unknownFields.writeTo(output);
      +  }
      +
      +  @java.lang.Override
      +  public int getSerializedSize() {
      +    int size = memoizedSize;
      +    if (size != -1) return size;
      +
      +    size = 0;
      +    if (mu_ != 0D) {
      +      size += com.google.protobuf.CodedOutputStream
      +        .computeDoubleSize(1, mu_);
      +    }
      +    if (sigma_ != 0D) {
      +      size += com.google.protobuf.CodedOutputStream
      +        .computeDoubleSize(2, sigma_);
      +    }
      +    size += unknownFields.getSerializedSize();
      +    memoizedSize = size;
      +    return size;
      +  }
      +
      +  @java.lang.Override
      +  public boolean equals(final java.lang.Object obj) {
      +    if (obj == this) {
      +     return true;
      +    }
      +    if (!(obj instanceof org.tensorflow.proto.framework.LogNormalDistribution)) {
      +      return super.equals(obj);
      +    }
      +    org.tensorflow.proto.framework.LogNormalDistribution other = (org.tensorflow.proto.framework.LogNormalDistribution) obj;
      +
      +    if (java.lang.Double.doubleToLongBits(getMu())
      +        != java.lang.Double.doubleToLongBits(
      +            other.getMu())) return false;
      +    if (java.lang.Double.doubleToLongBits(getSigma())
      +        != java.lang.Double.doubleToLongBits(
      +            other.getSigma())) return false;
      +    if (!unknownFields.equals(other.unknownFields)) return false;
      +    return true;
      +  }
      +
      +  @java.lang.Override
      +  public int hashCode() {
      +    if (memoizedHashCode != 0) {
      +      return memoizedHashCode;
      +    }
      +    int hash = 41;
      +    hash = (19 * hash) + getDescriptor().hashCode();
      +    hash = (37 * hash) + MU_FIELD_NUMBER;
      +    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
      +        java.lang.Double.doubleToLongBits(getMu()));
      +    hash = (37 * hash) + SIGMA_FIELD_NUMBER;
      +    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
      +        java.lang.Double.doubleToLongBits(getSigma()));
      +    hash = (29 * hash) + unknownFields.hashCode();
      +    memoizedHashCode = hash;
      +    return hash;
      +  }
      +
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseFrom(
      +      java.nio.ByteBuffer data)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseFrom(
      +      java.nio.ByteBuffer data,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data, extensionRegistry);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseFrom(
      +      com.google.protobuf.ByteString data)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseFrom(
      +      com.google.protobuf.ByteString data,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data, extensionRegistry);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseFrom(byte[] data)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseFrom(
      +      byte[] data,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data, extensionRegistry);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseFrom(java.io.InputStream input)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseWithIOException(PARSER, input);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseFrom(
      +      java.io.InputStream input,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseWithIOException(PARSER, input, extensionRegistry);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseDelimitedFrom(java.io.InputStream input)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseDelimitedWithIOException(PARSER, input);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseDelimitedFrom(
      +      java.io.InputStream input,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseFrom(
      +      com.google.protobuf.CodedInputStream input)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseWithIOException(PARSER, input);
      +  }
      +  public static org.tensorflow.proto.framework.LogNormalDistribution parseFrom(
      +      com.google.protobuf.CodedInputStream input,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseWithIOException(PARSER, input, extensionRegistry);
      +  }
      +
      +  @java.lang.Override
      +  public Builder newBuilderForType() { return newBuilder(); }
      +  public static Builder newBuilder() {
      +    return DEFAULT_INSTANCE.toBuilder();
      +  }
      +  public static Builder newBuilder(org.tensorflow.proto.framework.LogNormalDistribution prototype) {
      +    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      +  }
      +  @java.lang.Override
      +  public Builder toBuilder() {
      +    return this == DEFAULT_INSTANCE
      +        ? new Builder() : new Builder().mergeFrom(this);
      +  }
      +
      +  @java.lang.Override
      +  protected Builder newBuilderForType(
      +      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      +    Builder builder = new Builder(parent);
      +    return builder;
      +  }
      +  /**
      +   * Protobuf type {@code tensorflow.LogNormalDistribution}
      +   */
+  public static final class Builder extends
+      com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
      +      // @@protoc_insertion_point(builder_implements:tensorflow.LogNormalDistribution)
      +      org.tensorflow.proto.framework.LogNormalDistributionOrBuilder {
      +    public static final com.google.protobuf.Descriptors.Descriptor
      +        getDescriptor() {
      +      return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_LogNormalDistribution_descriptor;
      +    }
      +
      +    @java.lang.Override
      +    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      +        internalGetFieldAccessorTable() {
      +      return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_LogNormalDistribution_fieldAccessorTable
      +          .ensureFieldAccessorsInitialized(
      +              org.tensorflow.proto.framework.LogNormalDistribution.class, org.tensorflow.proto.framework.LogNormalDistribution.Builder.class);
      +    }
      +
      +    // Construct using org.tensorflow.proto.framework.LogNormalDistribution.newBuilder()
      +    private Builder() {
      +      maybeForceBuilderInitialization();
      +    }
      +
      +    private Builder(
      +        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      +      super(parent);
      +      maybeForceBuilderInitialization();
      +    }
      +    private void maybeForceBuilderInitialization() {
      +      if (com.google.protobuf.GeneratedMessageV3
      +              .alwaysUseFieldBuilders) {
      +      }
      +    }
      +    @java.lang.Override
      +    public Builder clear() {
      +      super.clear();
      +      mu_ = 0D;
      +
      +      sigma_ = 0D;
      +
      +      return this;
      +    }
      +
      +    @java.lang.Override
      +    public com.google.protobuf.Descriptors.Descriptor
      +        getDescriptorForType() {
      +      return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_LogNormalDistribution_descriptor;
      +    }
      +
      +    @java.lang.Override
      +    public org.tensorflow.proto.framework.LogNormalDistribution getDefaultInstanceForType() {
      +      return org.tensorflow.proto.framework.LogNormalDistribution.getDefaultInstance();
      +    }
      +
      +    @java.lang.Override
      +    public org.tensorflow.proto.framework.LogNormalDistribution build() {
      +      org.tensorflow.proto.framework.LogNormalDistribution result = buildPartial();
      +      if (!result.isInitialized()) {
      +        throw newUninitializedMessageException(result);
      +      }
      +      return result;
      +    }
      +
      +    @java.lang.Override
      +    public org.tensorflow.proto.framework.LogNormalDistribution buildPartial() {
      +      org.tensorflow.proto.framework.LogNormalDistribution result = new org.tensorflow.proto.framework.LogNormalDistribution(this);
      +      result.mu_ = mu_;
      +      result.sigma_ = sigma_;
      +      onBuilt();
      +      return result;
      +    }
      +
      +    @java.lang.Override
      +    public Builder clone() {
      +      return super.clone();
      +    }
      +    @java.lang.Override
      +    public Builder setField(
      +        com.google.protobuf.Descriptors.FieldDescriptor field,
      +        java.lang.Object value) {
      +      return super.setField(field, value);
      +    }
      +    @java.lang.Override
      +    public Builder clearField(
      +        com.google.protobuf.Descriptors.FieldDescriptor field) {
      +      return super.clearField(field);
      +    }
      +    @java.lang.Override
      +    public Builder clearOneof(
      +        com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      +      return super.clearOneof(oneof);
      +    }
      +    @java.lang.Override
      +    public Builder setRepeatedField(
      +        com.google.protobuf.Descriptors.FieldDescriptor field,
      +        int index, java.lang.Object value) {
      +      return super.setRepeatedField(field, index, value);
      +    }
      +    @java.lang.Override
      +    public Builder addRepeatedField(
      +        com.google.protobuf.Descriptors.FieldDescriptor field,
      +        java.lang.Object value) {
      +      return super.addRepeatedField(field, value);
      +    }
      +    @java.lang.Override
      +    public Builder mergeFrom(com.google.protobuf.Message other) {
      +      if (other instanceof org.tensorflow.proto.framework.LogNormalDistribution) {
      +        return mergeFrom((org.tensorflow.proto.framework.LogNormalDistribution)other);
      +      } else {
      +        super.mergeFrom(other);
      +        return this;
      +      }
      +    }
      +
      +    public Builder mergeFrom(org.tensorflow.proto.framework.LogNormalDistribution other) {
      +      if (other == org.tensorflow.proto.framework.LogNormalDistribution.getDefaultInstance()) return this;
      +      if (other.getMu() != 0D) {
      +        setMu(other.getMu());
      +      }
      +      if (other.getSigma() != 0D) {
      +        setSigma(other.getSigma());
      +      }
      +      this.mergeUnknownFields(other.unknownFields);
      +      onChanged();
      +      return this;
      +    }
      +
      +    @java.lang.Override
      +    public final boolean isInitialized() {
      +      return true;
      +    }
      +
      +    @java.lang.Override
      +    public Builder mergeFrom(
      +        com.google.protobuf.CodedInputStream input,
      +        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +        throws java.io.IOException {
      +      org.tensorflow.proto.framework.LogNormalDistribution parsedMessage = null;
      +      try {
      +        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      +      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      +        parsedMessage = (org.tensorflow.proto.framework.LogNormalDistribution) e.getUnfinishedMessage();
      +        throw e.unwrapIOException();
      +      } finally {
      +        if (parsedMessage != null) {
      +          mergeFrom(parsedMessage);
      +        }
      +      }
      +      return this;
      +    }
      +
      +    private double mu_ ;
      +    /**
      +     * double mu = 1;
      +     */
      +    public double getMu() {
      +      return mu_;
      +    }
      +    /**
      +     * double mu = 1;
      +     */
      +    public Builder setMu(double value) {
      +      
      +      mu_ = value;
      +      onChanged();
      +      return this;
      +    }
      +    /**
      +     * double mu = 1;
      +     */
      +    public Builder clearMu() {
      +      
      +      mu_ = 0D;
      +      onChanged();
      +      return this;
      +    }
      +
      +    private double sigma_ ;
      +    /**
      +     * double sigma = 2;
      +     */
      +    public double getSigma() {
      +      return sigma_;
      +    }
      +    /**
      +     * double sigma = 2;
      +     */
      +    public Builder setSigma(double value) {
      +      
      +      sigma_ = value;
      +      onChanged();
      +      return this;
      +    }
      +    /**
      +     * double sigma = 2;
      +     */
      +    public Builder clearSigma() {
      +      
      +      sigma_ = 0D;
      +      onChanged();
      +      return this;
      +    }
      +    @java.lang.Override
      +    public final Builder setUnknownFields(
      +        final com.google.protobuf.UnknownFieldSet unknownFields) {
      +      return super.setUnknownFields(unknownFields);
      +    }
      +
      +    @java.lang.Override
      +    public final Builder mergeUnknownFields(
      +        final com.google.protobuf.UnknownFieldSet unknownFields) {
      +      return super.mergeUnknownFields(unknownFields);
      +    }
      +
      +
      +    // @@protoc_insertion_point(builder_scope:tensorflow.LogNormalDistribution)
      +  }
      +
      +  // @@protoc_insertion_point(class_scope:tensorflow.LogNormalDistribution)
      +  private static final org.tensorflow.proto.framework.LogNormalDistribution DEFAULT_INSTANCE;
      +  static {
      +    DEFAULT_INSTANCE = new org.tensorflow.proto.framework.LogNormalDistribution();
      +  }
      +
      +  public static org.tensorflow.proto.framework.LogNormalDistribution getDefaultInstance() {
      +    return DEFAULT_INSTANCE;
      +  }
      +
+  private static final com.google.protobuf.Parser<LogNormalDistribution>
+      PARSER = new com.google.protobuf.AbstractParser<LogNormalDistribution>() {
      +    @java.lang.Override
      +    public LogNormalDistribution parsePartialFrom(
      +        com.google.protobuf.CodedInputStream input,
      +        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +        throws com.google.protobuf.InvalidProtocolBufferException {
      +      return new LogNormalDistribution(input, extensionRegistry);
      +    }
      +  };
      +
+  public static com.google.protobuf.Parser<LogNormalDistribution> parser() {
      +    return PARSER;
      +  }
      +
      +  @java.lang.Override
+  public com.google.protobuf.Parser<LogNormalDistribution> getParserForType() {
      +    return PARSER;
      +  }
      +
      +  @java.lang.Override
      +  public org.tensorflow.proto.framework.LogNormalDistribution getDefaultInstanceForType() {
      +    return DEFAULT_INSTANCE;
      +  }
      +
      +}
      +
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/LogNormalDistributionOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/LogNormalDistributionOrBuilder.java
      new file mode 100644
      index 00000000000..367b5827180
      --- /dev/null
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/LogNormalDistributionOrBuilder.java
      @@ -0,0 +1,19 @@
      +// Generated by the protocol buffer compiler.  DO NOT EDIT!
      +// source: tensorflow/core/grappler/costs/op_performance_data.proto
      +
      +package org.tensorflow.proto.framework;
      +
      +public interface LogNormalDistributionOrBuilder extends
      +    // @@protoc_insertion_point(interface_extends:tensorflow.LogNormalDistribution)
      +    com.google.protobuf.MessageOrBuilder {
      +
      +  /**
      +   * double mu = 1;
      +   */
      +  double getMu();
      +
      +  /**
      +   * double sigma = 2;
      +   */
      +  double getSigma();
      +}
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NormalDistribution.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NormalDistribution.java
      new file mode 100644
      index 00000000000..976cb57df20
      --- /dev/null
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NormalDistribution.java
      @@ -0,0 +1,537 @@
      +// Generated by the protocol buffer compiler.  DO NOT EDIT!
      +// source: tensorflow/core/grappler/costs/op_performance_data.proto
      +
      +package org.tensorflow.proto.framework;
      +
      +/**
      + * Protobuf type {@code tensorflow.NormalDistribution}
      + */
      +public  final class NormalDistribution extends
      +    com.google.protobuf.GeneratedMessageV3 implements
      +    // @@protoc_insertion_point(message_implements:tensorflow.NormalDistribution)
      +    NormalDistributionOrBuilder {
      +private static final long serialVersionUID = 0L;
      +  // Use NormalDistribution.newBuilder() to construct.
+  private NormalDistribution(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
      +    super(builder);
      +  }
      +  private NormalDistribution() {
      +  }
      +
      +  @java.lang.Override
      +  @SuppressWarnings({"unused"})
      +  protected java.lang.Object newInstance(
      +      UnusedPrivateParameter unused) {
      +    return new NormalDistribution();
      +  }
      +
      +  @java.lang.Override
      +  public final com.google.protobuf.UnknownFieldSet
      +  getUnknownFields() {
      +    return this.unknownFields;
      +  }
      +  private NormalDistribution(
      +      com.google.protobuf.CodedInputStream input,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    this();
      +    if (extensionRegistry == null) {
      +      throw new java.lang.NullPointerException();
      +    }
      +    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      +        com.google.protobuf.UnknownFieldSet.newBuilder();
      +    try {
      +      boolean done = false;
      +      while (!done) {
      +        int tag = input.readTag();
      +        switch (tag) {
      +          case 0:
      +            done = true;
      +            break;
      +          case 9: {
      +
      +            mu_ = input.readDouble();
      +            break;
      +          }
      +          case 17: {
      +
      +            sigma_ = input.readDouble();
      +            break;
      +          }
      +          default: {
      +            if (!parseUnknownField(
      +                input, unknownFields, extensionRegistry, tag)) {
      +              done = true;
      +            }
      +            break;
      +          }
      +        }
      +      }
      +    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      +      throw e.setUnfinishedMessage(this);
      +    } catch (java.io.IOException e) {
      +      throw new com.google.protobuf.InvalidProtocolBufferException(
      +          e).setUnfinishedMessage(this);
      +    } finally {
      +      this.unknownFields = unknownFields.build();
      +      makeExtensionsImmutable();
      +    }
      +  }
      +  public static final com.google.protobuf.Descriptors.Descriptor
      +      getDescriptor() {
      +    return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_NormalDistribution_descriptor;
      +  }
      +
      +  @java.lang.Override
      +  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      +      internalGetFieldAccessorTable() {
      +    return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_NormalDistribution_fieldAccessorTable
      +        .ensureFieldAccessorsInitialized(
      +            org.tensorflow.proto.framework.NormalDistribution.class, org.tensorflow.proto.framework.NormalDistribution.Builder.class);
      +  }
      +
      +  public static final int MU_FIELD_NUMBER = 1;
      +  private double mu_;
      +  /**
      +   * double mu = 1;
      +   */
      +  public double getMu() {
      +    return mu_;
      +  }
      +
      +  public static final int SIGMA_FIELD_NUMBER = 2;
      +  private double sigma_;
      +  /**
      +   * double sigma = 2;
      +   */
      +  public double getSigma() {
      +    return sigma_;
      +  }
      +
      +  private byte memoizedIsInitialized = -1;
      +  @java.lang.Override
      +  public final boolean isInitialized() {
      +    byte isInitialized = memoizedIsInitialized;
      +    if (isInitialized == 1) return true;
      +    if (isInitialized == 0) return false;
      +
      +    memoizedIsInitialized = 1;
      +    return true;
      +  }
      +
      +  @java.lang.Override
      +  public void writeTo(com.google.protobuf.CodedOutputStream output)
      +                      throws java.io.IOException {
      +    if (mu_ != 0D) {
      +      output.writeDouble(1, mu_);
      +    }
      +    if (sigma_ != 0D) {
      +      output.writeDouble(2, sigma_);
      +    }
      +    unknownFields.writeTo(output);
      +  }
      +
      +  @java.lang.Override
      +  public int getSerializedSize() {
      +    int size = memoizedSize;
      +    if (size != -1) return size;
      +
      +    size = 0;
      +    if (mu_ != 0D) {
      +      size += com.google.protobuf.CodedOutputStream
      +        .computeDoubleSize(1, mu_);
      +    }
      +    if (sigma_ != 0D) {
      +      size += com.google.protobuf.CodedOutputStream
      +        .computeDoubleSize(2, sigma_);
      +    }
      +    size += unknownFields.getSerializedSize();
      +    memoizedSize = size;
      +    return size;
      +  }
      +
      +  @java.lang.Override
      +  public boolean equals(final java.lang.Object obj) {
      +    if (obj == this) {
      +     return true;
      +    }
      +    if (!(obj instanceof org.tensorflow.proto.framework.NormalDistribution)) {
      +      return super.equals(obj);
      +    }
      +    org.tensorflow.proto.framework.NormalDistribution other = (org.tensorflow.proto.framework.NormalDistribution) obj;
      +
      +    if (java.lang.Double.doubleToLongBits(getMu())
      +        != java.lang.Double.doubleToLongBits(
      +            other.getMu())) return false;
      +    if (java.lang.Double.doubleToLongBits(getSigma())
      +        != java.lang.Double.doubleToLongBits(
      +            other.getSigma())) return false;
      +    if (!unknownFields.equals(other.unknownFields)) return false;
      +    return true;
      +  }
      +
      +  @java.lang.Override
      +  public int hashCode() {
      +    if (memoizedHashCode != 0) {
      +      return memoizedHashCode;
      +    }
      +    int hash = 41;
      +    hash = (19 * hash) + getDescriptor().hashCode();
      +    hash = (37 * hash) + MU_FIELD_NUMBER;
      +    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
      +        java.lang.Double.doubleToLongBits(getMu()));
      +    hash = (37 * hash) + SIGMA_FIELD_NUMBER;
      +    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
      +        java.lang.Double.doubleToLongBits(getSigma()));
      +    hash = (29 * hash) + unknownFields.hashCode();
      +    memoizedHashCode = hash;
      +    return hash;
      +  }
      +
      +  public static org.tensorflow.proto.framework.NormalDistribution parseFrom(
      +      java.nio.ByteBuffer data)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseFrom(
      +      java.nio.ByteBuffer data,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data, extensionRegistry);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseFrom(
      +      com.google.protobuf.ByteString data)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseFrom(
      +      com.google.protobuf.ByteString data,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data, extensionRegistry);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseFrom(byte[] data)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseFrom(
      +      byte[] data,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws com.google.protobuf.InvalidProtocolBufferException {
      +    return PARSER.parseFrom(data, extensionRegistry);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseFrom(java.io.InputStream input)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseWithIOException(PARSER, input);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseFrom(
      +      java.io.InputStream input,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseWithIOException(PARSER, input, extensionRegistry);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseDelimitedFrom(java.io.InputStream input)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseDelimitedWithIOException(PARSER, input);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseDelimitedFrom(
      +      java.io.InputStream input,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseFrom(
      +      com.google.protobuf.CodedInputStream input)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseWithIOException(PARSER, input);
      +  }
      +  public static org.tensorflow.proto.framework.NormalDistribution parseFrom(
      +      com.google.protobuf.CodedInputStream input,
      +      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +      throws java.io.IOException {
      +    return com.google.protobuf.GeneratedMessageV3
      +        .parseWithIOException(PARSER, input, extensionRegistry);
      +  }
      +
      +  @java.lang.Override
      +  public Builder newBuilderForType() { return newBuilder(); }
      +  public static Builder newBuilder() {
      +    return DEFAULT_INSTANCE.toBuilder();
      +  }
      +  public static Builder newBuilder(org.tensorflow.proto.framework.NormalDistribution prototype) {
      +    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      +  }
      +  @java.lang.Override
      +  public Builder toBuilder() {
      +    return this == DEFAULT_INSTANCE
      +        ? new Builder() : new Builder().mergeFrom(this);
      +  }
      +
      +  @java.lang.Override
      +  protected Builder newBuilderForType(
      +      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      +    Builder builder = new Builder(parent);
      +    return builder;
      +  }
      +  /**
      +   * Protobuf type {@code tensorflow.NormalDistribution}
      +   */
+  public static final class Builder extends
+      com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
      +      // @@protoc_insertion_point(builder_implements:tensorflow.NormalDistribution)
      +      org.tensorflow.proto.framework.NormalDistributionOrBuilder {
      +    public static final com.google.protobuf.Descriptors.Descriptor
      +        getDescriptor() {
      +      return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_NormalDistribution_descriptor;
      +    }
      +
      +    @java.lang.Override
      +    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      +        internalGetFieldAccessorTable() {
      +      return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_NormalDistribution_fieldAccessorTable
      +          .ensureFieldAccessorsInitialized(
      +              org.tensorflow.proto.framework.NormalDistribution.class, org.tensorflow.proto.framework.NormalDistribution.Builder.class);
      +    }
      +
      +    // Construct using org.tensorflow.proto.framework.NormalDistribution.newBuilder()
      +    private Builder() {
      +      maybeForceBuilderInitialization();
      +    }
      +
      +    private Builder(
      +        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      +      super(parent);
      +      maybeForceBuilderInitialization();
      +    }
      +    private void maybeForceBuilderInitialization() {
      +      if (com.google.protobuf.GeneratedMessageV3
      +              .alwaysUseFieldBuilders) {
      +      }
      +    }
      +    @java.lang.Override
      +    public Builder clear() {
      +      super.clear();
      +      mu_ = 0D;
      +
      +      sigma_ = 0D;
      +
      +      return this;
      +    }
      +
      +    @java.lang.Override
      +    public com.google.protobuf.Descriptors.Descriptor
      +        getDescriptorForType() {
      +      return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_NormalDistribution_descriptor;
      +    }
      +
      +    @java.lang.Override
      +    public org.tensorflow.proto.framework.NormalDistribution getDefaultInstanceForType() {
      +      return org.tensorflow.proto.framework.NormalDistribution.getDefaultInstance();
      +    }
      +
      +    @java.lang.Override
      +    public org.tensorflow.proto.framework.NormalDistribution build() {
      +      org.tensorflow.proto.framework.NormalDistribution result = buildPartial();
      +      if (!result.isInitialized()) {
      +        throw newUninitializedMessageException(result);
      +      }
      +      return result;
      +    }
      +
      +    @java.lang.Override
      +    public org.tensorflow.proto.framework.NormalDistribution buildPartial() {
      +      org.tensorflow.proto.framework.NormalDistribution result = new org.tensorflow.proto.framework.NormalDistribution(this);
      +      result.mu_ = mu_;
      +      result.sigma_ = sigma_;
      +      onBuilt();
      +      return result;
      +    }
      +
      +    @java.lang.Override
      +    public Builder clone() {
      +      return super.clone();
      +    }
      +    @java.lang.Override
      +    public Builder setField(
      +        com.google.protobuf.Descriptors.FieldDescriptor field,
      +        java.lang.Object value) {
      +      return super.setField(field, value);
      +    }
      +    @java.lang.Override
      +    public Builder clearField(
      +        com.google.protobuf.Descriptors.FieldDescriptor field) {
      +      return super.clearField(field);
      +    }
      +    @java.lang.Override
      +    public Builder clearOneof(
      +        com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      +      return super.clearOneof(oneof);
      +    }
      +    @java.lang.Override
      +    public Builder setRepeatedField(
      +        com.google.protobuf.Descriptors.FieldDescriptor field,
      +        int index, java.lang.Object value) {
      +      return super.setRepeatedField(field, index, value);
      +    }
      +    @java.lang.Override
      +    public Builder addRepeatedField(
      +        com.google.protobuf.Descriptors.FieldDescriptor field,
      +        java.lang.Object value) {
      +      return super.addRepeatedField(field, value);
      +    }
      +    @java.lang.Override
      +    public Builder mergeFrom(com.google.protobuf.Message other) {
      +      if (other instanceof org.tensorflow.proto.framework.NormalDistribution) {
      +        return mergeFrom((org.tensorflow.proto.framework.NormalDistribution)other);
      +      } else {
      +        super.mergeFrom(other);
      +        return this;
      +      }
      +    }
      +
      +    public Builder mergeFrom(org.tensorflow.proto.framework.NormalDistribution other) {
      +      if (other == org.tensorflow.proto.framework.NormalDistribution.getDefaultInstance()) return this;
      +      if (other.getMu() != 0D) {
      +        setMu(other.getMu());
      +      }
      +      if (other.getSigma() != 0D) {
      +        setSigma(other.getSigma());
      +      }
      +      this.mergeUnknownFields(other.unknownFields);
      +      onChanged();
      +      return this;
      +    }
      +
      +    @java.lang.Override
      +    public final boolean isInitialized() {
      +      return true;
      +    }
      +
      +    @java.lang.Override
      +    public Builder mergeFrom(
      +        com.google.protobuf.CodedInputStream input,
      +        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +        throws java.io.IOException {
      +      org.tensorflow.proto.framework.NormalDistribution parsedMessage = null;
      +      try {
      +        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      +      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      +        parsedMessage = (org.tensorflow.proto.framework.NormalDistribution) e.getUnfinishedMessage();
      +        throw e.unwrapIOException();
      +      } finally {
      +        if (parsedMessage != null) {
      +          mergeFrom(parsedMessage);
      +        }
      +      }
      +      return this;
      +    }
      +
      +    private double mu_ ;
      +    /**
      +     * double mu = 1;
      +     */
      +    public double getMu() {
      +      return mu_;
      +    }
      +    /**
      +     * double mu = 1;
      +     */
      +    public Builder setMu(double value) {
      +      
      +      mu_ = value;
      +      onChanged();
      +      return this;
      +    }
      +    /**
      +     * double mu = 1;
      +     */
      +    public Builder clearMu() {
      +      
      +      mu_ = 0D;
      +      onChanged();
      +      return this;
      +    }
      +
      +    private double sigma_ ;
      +    /**
      +     * double sigma = 2;
      +     */
      +    public double getSigma() {
      +      return sigma_;
      +    }
      +    /**
      +     * double sigma = 2;
      +     */
      +    public Builder setSigma(double value) {
      +      
      +      sigma_ = value;
      +      onChanged();
      +      return this;
      +    }
      +    /**
      +     * double sigma = 2;
      +     */
      +    public Builder clearSigma() {
      +      
      +      sigma_ = 0D;
      +      onChanged();
      +      return this;
      +    }
      +    @java.lang.Override
      +    public final Builder setUnknownFields(
      +        final com.google.protobuf.UnknownFieldSet unknownFields) {
      +      return super.setUnknownFields(unknownFields);
      +    }
      +
      +    @java.lang.Override
      +    public final Builder mergeUnknownFields(
      +        final com.google.protobuf.UnknownFieldSet unknownFields) {
      +      return super.mergeUnknownFields(unknownFields);
      +    }
      +
      +
      +    // @@protoc_insertion_point(builder_scope:tensorflow.NormalDistribution)
      +  }
      +
      +  // @@protoc_insertion_point(class_scope:tensorflow.NormalDistribution)
      +  private static final org.tensorflow.proto.framework.NormalDistribution DEFAULT_INSTANCE;
      +  static {
      +    DEFAULT_INSTANCE = new org.tensorflow.proto.framework.NormalDistribution();
      +  }
      +
      +  public static org.tensorflow.proto.framework.NormalDistribution getDefaultInstance() {
      +    return DEFAULT_INSTANCE;
      +  }
      +
+  private static final com.google.protobuf.Parser<NormalDistribution>
+      PARSER = new com.google.protobuf.AbstractParser<NormalDistribution>() {
      +    @java.lang.Override
      +    public NormalDistribution parsePartialFrom(
      +        com.google.protobuf.CodedInputStream input,
      +        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      +        throws com.google.protobuf.InvalidProtocolBufferException {
      +      return new NormalDistribution(input, extensionRegistry);
      +    }
      +  };
      +
+  public static com.google.protobuf.Parser<NormalDistribution> parser() {
      +    return PARSER;
      +  }
      +
      +  @java.lang.Override
+  public com.google.protobuf.Parser<NormalDistribution> getParserForType() {
      +    return PARSER;
      +  }
      +
      +  @java.lang.Override
      +  public org.tensorflow.proto.framework.NormalDistribution getDefaultInstanceForType() {
      +    return DEFAULT_INSTANCE;
      +  }
      +
      +}
      +
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NormalDistributionOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NormalDistributionOrBuilder.java
      new file mode 100644
      index 00000000000..4b2c75b1f24
      --- /dev/null
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NormalDistributionOrBuilder.java
      @@ -0,0 +1,19 @@
      +// Generated by the protocol buffer compiler.  DO NOT EDIT!
      +// source: tensorflow/core/grappler/costs/op_performance_data.proto
      +
      +package org.tensorflow.proto.framework;
      +
      +public interface NormalDistributionOrBuilder extends
      +    // @@protoc_insertion_point(interface_extends:tensorflow.NormalDistribution)
      +    com.google.protobuf.MessageOrBuilder {
      +
      +  /**
      +   * double mu = 1;
      +   */
      +  double getMu();
      +
      +  /**
      +   * double sigma = 2;
      +   */
      +  double getSigma();
      +}
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpDef.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpDef.java
      index 80839f48dcc..958471427f3 100644
      --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpDef.java
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpDef.java
      @@ -324,6 +324,50 @@ public interface ArgDefOrBuilder extends
           com.google.protobuf.ByteString
               getTypeListAttrBytes();
       
+    /**
+     * <pre>
+     * The handle data for resource inputs.
+     * </pre>
+     *
+     * <code>repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7;</code>
+     */
+    java.util.List<org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape>
+        getHandleDataList();
+    /**
+     * <pre>
+     * The handle data for resource inputs.
+     * </pre>
+     *
+     * <code>repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7;</code>
+     */
+    org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape getHandleData(int index);
+    /**
+     * <pre>
+     * The handle data for resource inputs.
+     * </pre>
+     *
+     * <code>repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7;</code>
+     */
+    int getHandleDataCount();
+    /**
+     * <pre>
+     * The handle data for resource inputs.
+     * </pre>
+     *
+     * <code>repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7;</code>
+     */
+    java.util.List<? extends org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShapeOrBuilder>
+        getHandleDataOrBuilderList();
+    /**
+     * <pre>
+     * The handle data for resource inputs.
+     * </pre>
+     *
+     * <code>repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7;</code>
+     */
+    org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShapeOrBuilder getHandleDataOrBuilder(
+        int index);
+
     /**
      * <pre>
            * For inputs: if true, the inputs are required to be refs.
      @@ -358,6 +402,7 @@ private ArgDef() {
             typeAttr_ = "";
             numberAttr_ = "";
             typeListAttr_ = "";
      +      handleData_ = java.util.Collections.emptyList();
           }
       
           @java.lang.Override
      @@ -380,6 +425,7 @@ private ArgDef(
             if (extensionRegistry == null) {
               throw new java.lang.NullPointerException();
             }
      +      int mutable_bitField0_ = 0;
             com.google.protobuf.UnknownFieldSet.Builder unknownFields =
                 com.google.protobuf.UnknownFieldSet.newBuilder();
             try {
      @@ -426,6 +472,15 @@ private ArgDef(
                     typeListAttr_ = s;
                     break;
                   }
      +            case 58: {
      +              if (!((mutable_bitField0_ & 0x00000001) != 0)) {
+                handleData_ = new java.util.ArrayList<org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape>();
      +                mutable_bitField0_ |= 0x00000001;
      +              }
      +              handleData_.add(
      +                  input.readMessage(org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.parser(), extensionRegistry));
      +              break;
      +            }
                   case 128: {
       
                     isRef_ = input.readBool();
      @@ -446,6 +501,9 @@ private ArgDef(
               throw new com.google.protobuf.InvalidProtocolBufferException(
                   e).setUnfinishedMessage(this);
             } finally {
      +        if (((mutable_bitField0_ & 0x00000001) != 0)) {
      +          handleData_ = java.util.Collections.unmodifiableList(handleData_);
      +        }
               this.unknownFields = unknownFields.build();
               makeExtensionsImmutable();
             }
      @@ -718,6 +776,61 @@ public java.lang.String getTypeListAttr() {
             }
           }
       
      +    public static final int HANDLE_DATA_FIELD_NUMBER = 7;
+    private java.util.List<org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape> handleData_;
      +    /**
      +     * 
      +     * The handle data for resource inputs.
      +     * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public java.util.List getHandleDataList() { + return handleData_; + } + /** + *
      +     * The handle data for resource inputs.
      +     * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public java.util.List + getHandleDataOrBuilderList() { + return handleData_; + } + /** + *
      +     * The handle data for resource inputs.
      +     * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public int getHandleDataCount() { + return handleData_.size(); + } + /** + *
      +     * The handle data for resource inputs.
      +     * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape getHandleData(int index) { + return handleData_.get(index); + } + /** + *
      +     * The handle data for resource inputs.
      +     * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShapeOrBuilder getHandleDataOrBuilder( + int index) { + return handleData_.get(index); + } + public static final int IS_REF_FIELD_NUMBER = 16; private boolean isRef_; /** @@ -765,6 +878,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (!getTypeListAttrBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 6, typeListAttr_); } + for (int i = 0; i < handleData_.size(); i++) { + output.writeMessage(7, handleData_.get(i)); + } if (isRef_ != false) { output.writeBool(16, isRef_); } @@ -796,6 +912,10 @@ public int getSerializedSize() { if (!getTypeListAttrBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, typeListAttr_); } + for (int i = 0; i < handleData_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, handleData_.get(i)); + } if (isRef_ != false) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(16, isRef_); @@ -826,6 +946,8 @@ public boolean equals(final java.lang.Object obj) { .equals(other.getNumberAttr())) return false; if (!getTypeListAttr() .equals(other.getTypeListAttr())) return false; + if (!getHandleDataList() + .equals(other.getHandleDataList())) return false; if (getIsRef() != other.getIsRef()) return false; if (!unknownFields.equals(other.unknownFields)) return false; @@ -851,6 +973,10 @@ public int hashCode() { hash = (53 * hash) + getNumberAttr().hashCode(); hash = (37 * hash) + TYPE_LIST_ATTR_FIELD_NUMBER; hash = (53 * hash) + getTypeListAttr().hashCode(); + if (getHandleDataCount() > 0) { + hash = (37 * hash) + HANDLE_DATA_FIELD_NUMBER; + hash = (53 * hash) + getHandleDataList().hashCode(); + } hash = (37 * hash) + IS_REF_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getIsRef()); @@ -986,6 +1112,7 @@ private Builder( private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { + getHandleDataFieldBuilder(); } } @java.lang.Override @@ -1003,6 +1130,12 @@ public Builder clear() { typeListAttr_ = ""; + if (handleDataBuilder_ == null) { + handleData_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + handleDataBuilder_.clear(); + } isRef_ = false; return this; @@ -1031,12 +1164,22 @@ public org.tensorflow.proto.framework.OpDef.ArgDef build() { @java.lang.Override public org.tensorflow.proto.framework.OpDef.ArgDef buildPartial() { org.tensorflow.proto.framework.OpDef.ArgDef result = new org.tensorflow.proto.framework.OpDef.ArgDef(this); + int from_bitField0_ = bitField0_; result.name_ = name_; result.description_ = description_; result.type_ = type_; result.typeAttr_ = typeAttr_; result.numberAttr_ = numberAttr_; result.typeListAttr_ = typeListAttr_; + if (handleDataBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + handleData_ = java.util.Collections.unmodifiableList(handleData_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.handleData_ = handleData_; + } else { + result.handleData_ = handleDataBuilder_.build(); + } result.isRef_ = isRef_; onBuilt(); return result; @@ -1109,6 +1252,32 @@ public Builder mergeFrom(org.tensorflow.proto.framework.OpDef.ArgDef other) { typeListAttr_ = other.typeListAttr_; onChanged(); } + if (handleDataBuilder_ == null) { + if (!other.handleData_.isEmpty()) { + if (handleData_.isEmpty()) { + handleData_ 
= other.handleData_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureHandleDataIsMutable(); + handleData_.addAll(other.handleData_); + } + onChanged(); + } + } else { + if (!other.handleData_.isEmpty()) { + if (handleDataBuilder_.isEmpty()) { + handleDataBuilder_.dispose(); + handleDataBuilder_ = null; + handleData_ = other.handleData_; + bitField0_ = (bitField0_ & ~0x00000001); + handleDataBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getHandleDataFieldBuilder() : null; + } else { + handleDataBuilder_.addAllMessages(other.handleData_); + } + } + } if (other.getIsRef() != false) { setIsRef(other.getIsRef()); } @@ -1140,6 +1309,7 @@ public Builder mergeFrom( } return this; } + private int bitField0_; private java.lang.Object name_ = ""; /** @@ -1701,6 +1871,318 @@ public Builder setTypeListAttrBytes( return this; } + private java.util.List handleData_ = + java.util.Collections.emptyList(); + private void ensureHandleDataIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + handleData_ = new java.util.ArrayList(handleData_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.Builder, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShapeOrBuilder> handleDataBuilder_; + + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public java.util.List getHandleDataList() { + if (handleDataBuilder_ == null) { + return java.util.Collections.unmodifiableList(handleData_); + } else { + return handleDataBuilder_.getMessageList(); + } + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public int getHandleDataCount() { + if (handleDataBuilder_ == null) { + return handleData_.size(); + } else { + return handleDataBuilder_.getCount(); + } + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape getHandleData(int index) { + if (handleDataBuilder_ == null) { + return handleData_.get(index); + } else { + return handleDataBuilder_.getMessage(index); + } + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public Builder setHandleData( + int index, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape value) { + if (handleDataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureHandleDataIsMutable(); + handleData_.set(index, value); + onChanged(); + } else { + handleDataBuilder_.setMessage(index, value); + } + return this; + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public Builder setHandleData( + int index, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.Builder builderForValue) { + if (handleDataBuilder_ == null) { + ensureHandleDataIsMutable(); + handleData_.set(index, builderForValue.build()); + onChanged(); + } else { + handleDataBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public Builder addHandleData(org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape value) { + if (handleDataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureHandleDataIsMutable(); + handleData_.add(value); + onChanged(); + } else { + handleDataBuilder_.addMessage(value); + } + return this; + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public Builder addHandleData( + int index, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape value) { + if (handleDataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureHandleDataIsMutable(); + handleData_.add(index, value); + onChanged(); + } else { + handleDataBuilder_.addMessage(index, value); + } + return this; + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public Builder addHandleData( + org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.Builder builderForValue) { + if (handleDataBuilder_ == null) { + ensureHandleDataIsMutable(); + handleData_.add(builderForValue.build()); + onChanged(); + } else { + handleDataBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public Builder addHandleData( + int index, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.Builder builderForValue) { + if (handleDataBuilder_ == null) { + ensureHandleDataIsMutable(); + handleData_.add(index, builderForValue.build()); + onChanged(); + } else { + handleDataBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public Builder addAllHandleData( + java.lang.Iterable values) { + if (handleDataBuilder_ == null) { + ensureHandleDataIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, handleData_); + onChanged(); + } else { + handleDataBuilder_.addAllMessages(values); + } + return this; + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public Builder clearHandleData() { + if (handleDataBuilder_ == null) { + handleData_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + handleDataBuilder_.clear(); + } + return this; + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public Builder removeHandleData(int index) { + if (handleDataBuilder_ == null) { + ensureHandleDataIsMutable(); + handleData_.remove(index); + onChanged(); + } else { + handleDataBuilder_.remove(index); + } + return this; + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.Builder getHandleDataBuilder( + int index) { + return getHandleDataFieldBuilder().getBuilder(index); + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShapeOrBuilder getHandleDataOrBuilder( + int index) { + if (handleDataBuilder_ == null) { + return handleData_.get(index); } else { + return handleDataBuilder_.getMessageOrBuilder(index); + } + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public java.util.List + getHandleDataOrBuilderList() { + if (handleDataBuilder_ != null) { + return handleDataBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(handleData_); + } + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.Builder addHandleDataBuilder() { + return getHandleDataFieldBuilder().addBuilder( + org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.getDefaultInstance()); + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.Builder addHandleDataBuilder( + int index) { + return getHandleDataFieldBuilder().addBuilder( + index, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.getDefaultInstance()); + } + /** + *
      +       * The handle data for resource inputs.
      +       * 
      + * + * repeated .tensorflow.ResourceHandleProto.DtypeAndShape handle_data = 7; + */ + public java.util.List + getHandleDataBuilderList() { + return getHandleDataFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.Builder, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShapeOrBuilder> + getHandleDataFieldBuilder() { + if (handleDataBuilder_ == null) { + handleDataBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShape.Builder, org.tensorflow.proto.framework.ResourceHandleProto.DtypeAndShapeOrBuilder>( + handleData_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + handleData_ = null; + } + return handleDataBuilder_; + } + private boolean isRef_ ; /** *
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpDefProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpDefProtos.java
      index 8beef161f83..e56b212a6a6 100644
      --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpDefProtos.java
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpDefProtos.java
      @@ -51,37 +51,41 @@ public static void registerAllExtensions(
             "\n&tensorflow/core/framework/op_def.proto" +
             "\022\ntensorflow\032*tensorflow/core/framework/" +
             "attr_value.proto\032%tensorflow/core/framew" +
      -      "ork/types.proto\"\320\005\n\005OpDef\022\014\n\004name\030\001 \001(\t\022" +
      -      "+\n\tinput_arg\030\002 \003(\0132\030.tensorflow.OpDef.Ar" +
      -      "gDef\022,\n\noutput_arg\030\003 \003(\0132\030.tensorflow.Op" +
      -      "Def.ArgDef\022\026\n\016control_output\030\024 \003(\t\022\'\n\004at" +
      -      "tr\030\004 \003(\0132\031.tensorflow.OpDef.AttrDef\022.\n\013d" +
      -      "eprecation\030\010 \001(\0132\031.tensorflow.OpDeprecat" +
      -      "ion\022\017\n\007summary\030\005 \001(\t\022\023\n\013description\030\006 \001(" +
      -      "\t\022\026\n\016is_commutative\030\022 \001(\010\022\024\n\014is_aggregat" +
      -      "e\030\020 \001(\010\022\023\n\013is_stateful\030\021 \001(\010\022\"\n\032allows_u" +
      -      "ninitialized_input\030\023 \001(\010\032\237\001\n\006ArgDef\022\014\n\004n" +
      -      "ame\030\001 \001(\t\022\023\n\013description\030\002 \001(\t\022\"\n\004type\030\003" +
      -      " \001(\0162\024.tensorflow.DataType\022\021\n\ttype_attr\030" +
      -      "\004 \001(\t\022\023\n\013number_attr\030\005 \001(\t\022\026\n\016type_list_" +
      -      "attr\030\006 \001(\t\022\016\n\006is_ref\030\020 \001(\010\032\275\001\n\007AttrDef\022\014" +
      -      "\n\004name\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022,\n\rdefault_va" +
      -      "lue\030\003 \001(\0132\025.tensorflow.AttrValue\022\023\n\013desc" +
      -      "ription\030\004 \001(\t\022\023\n\013has_minimum\030\005 \001(\010\022\017\n\007mi" +
      -      "nimum\030\006 \001(\003\022-\n\016allowed_values\030\007 \001(\0132\025.te" +
      -      "nsorflow.AttrValue\"5\n\rOpDeprecation\022\017\n\007v" +
      -      "ersion\030\001 \001(\005\022\023\n\013explanation\030\002 \001(\t\"\'\n\006OpL" +
      -      "ist\022\035\n\002op\030\001 \003(\0132\021.tensorflow.OpDefB\201\001\n\036o" +
      -      "rg.tensorflow.proto.frameworkB\013OpDefProt" +
      -      "osP\001ZMgithub.com/tensorflow/tensorflow/t" +
      -      "ensorflow/go/core/framework/op_def_go_pr" +
      -      "oto\370\001\001b\006proto3"
      +      "ork/types.proto\032/tensorflow/core/framewo" +
      +      "rk/resource_handle.proto\"\224\006\n\005OpDef\022\014\n\004na" +
      +      "me\030\001 \001(\t\022+\n\tinput_arg\030\002 \003(\0132\030.tensorflow" +
      +      ".OpDef.ArgDef\022,\n\noutput_arg\030\003 \003(\0132\030.tens" +
      +      "orflow.OpDef.ArgDef\022\026\n\016control_output\030\024 " +
      +      "\003(\t\022\'\n\004attr\030\004 \003(\0132\031.tensorflow.OpDef.Att" +
      +      "rDef\022.\n\013deprecation\030\010 \001(\0132\031.tensorflow.O" +
      +      "pDeprecation\022\017\n\007summary\030\005 \001(\t\022\023\n\013descrip" +
      +      "tion\030\006 \001(\t\022\026\n\016is_commutative\030\022 \001(\010\022\024\n\014is" +
      +      "_aggregate\030\020 \001(\010\022\023\n\013is_stateful\030\021 \001(\010\022\"\n" +
      +      "\032allows_uninitialized_input\030\023 \001(\010\032\343\001\n\006Ar" +
      +      "gDef\022\014\n\004name\030\001 \001(\t\022\023\n\013description\030\002 \001(\t\022" +
      +      "\"\n\004type\030\003 \001(\0162\024.tensorflow.DataType\022\021\n\tt" +
      +      "ype_attr\030\004 \001(\t\022\023\n\013number_attr\030\005 \001(\t\022\026\n\016t" +
      +      "ype_list_attr\030\006 \001(\t\022B\n\013handle_data\030\007 \003(\013" +
      +      "2-.tensorflow.ResourceHandleProto.DtypeA" +
      +      "ndShape\022\016\n\006is_ref\030\020 \001(\010\032\275\001\n\007AttrDef\022\014\n\004n" +
      +      "ame\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022,\n\rdefault_value" +
      +      "\030\003 \001(\0132\025.tensorflow.AttrValue\022\023\n\013descrip" +
      +      "tion\030\004 \001(\t\022\023\n\013has_minimum\030\005 \001(\010\022\017\n\007minim" +
      +      "um\030\006 \001(\003\022-\n\016allowed_values\030\007 \001(\0132\025.tenso" +
      +      "rflow.AttrValue\"5\n\rOpDeprecation\022\017\n\007vers" +
      +      "ion\030\001 \001(\005\022\023\n\013explanation\030\002 \001(\t\"\'\n\006OpList" +
      +      "\022\035\n\002op\030\001 \003(\0132\021.tensorflow.OpDefB\201\001\n\036org." +
      +      "tensorflow.proto.frameworkB\013OpDefProtosP" +
      +      "\001ZMgithub.com/tensorflow/tensorflow/tens" +
      +      "orflow/go/core/framework/op_def_go_proto" +
      +      "\370\001\001b\006proto3"
           };
           descriptor = com.google.protobuf.Descriptors.FileDescriptor
             .internalBuildGeneratedFileFrom(descriptorData,
               new com.google.protobuf.Descriptors.FileDescriptor[] {
                 org.tensorflow.proto.framework.AttrValueProtos.getDescriptor(),
                 org.tensorflow.proto.framework.TypesProtos.getDescriptor(),
      +          org.tensorflow.proto.framework.ResourceHandle.getDescriptor(),
               });
           internal_static_tensorflow_OpDef_descriptor =
             getDescriptor().getMessageTypes().get(0);
      @@ -94,7 +98,7 @@ public static void registerAllExtensions(
           internal_static_tensorflow_OpDef_ArgDef_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
               internal_static_tensorflow_OpDef_ArgDef_descriptor,
      -        new java.lang.String[] { "Name", "Description", "Type", "TypeAttr", "NumberAttr", "TypeListAttr", "IsRef", });
      +        new java.lang.String[] { "Name", "Description", "Type", "TypeAttr", "NumberAttr", "TypeListAttr", "HandleData", "IsRef", });
           internal_static_tensorflow_OpDef_AttrDef_descriptor =
             internal_static_tensorflow_OpDef_descriptor.getNestedTypes().get(1);
           internal_static_tensorflow_OpDef_AttrDef_fieldAccessorTable = new
      @@ -115,6 +119,7 @@ public static void registerAllExtensions(
               new java.lang.String[] { "Op", });
           org.tensorflow.proto.framework.AttrValueProtos.getDescriptor();
           org.tensorflow.proto.framework.TypesProtos.getDescriptor();
      +    org.tensorflow.proto.framework.ResourceHandle.getDescriptor();
         }
       
         // @@protoc_insertion_point(outer_class_scope)
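
The descriptor change above adds resource_handle.proto as a dependency of op_def.proto and exposes handle_data (field 7) on OpDef.ArgDef. A minimal sketch of reading that field through the regenerated accessors, assuming an OpDef obtained elsewhere (for example parsed from an OpList); the class and helper below are illustrative, not part of this patch:

import org.tensorflow.proto.framework.OpDef;
import org.tensorflow.proto.framework.ResourceHandleProto;

public final class HandleDataDump {
  // Prints the dtype/shape handle data recorded for each input argument.
  public static void printHandleData(OpDef opDef) {
    for (OpDef.ArgDef arg : opDef.getInputArgList()) {
      for (ResourceHandleProto.DtypeAndShape dtypeAndShape : arg.getHandleDataList()) {
        System.out.println(arg.getName()
            + ": dtype=" + dtypeAndShape.getDtype()
            + ", shape=" + dtypeAndShape.getShape());
      }
    }
  }

  private HandleDataDump() {}
}
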
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpInfo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpInfo.java
      new file mode 100644
      index 00000000000..860edaad590
      --- /dev/null
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpInfo.java
      @@ -0,0 +1,3048 @@
      +// Generated by the protocol buffer compiler.  DO NOT EDIT!
      +// source: tensorflow/core/grappler/costs/op_performance_data.proto
      +
      +package org.tensorflow.proto.framework;
      +
      +/**
      + * 
      + * Description of an operation as well as the parameters expected to impact its
      + * performance.
      + * 
      + * + * Protobuf type {@code tensorflow.OpInfo} + */ +public final class OpInfo extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.OpInfo) + OpInfoOrBuilder { +private static final long serialVersionUID = 0L; + // Use OpInfo.newBuilder() to construct. + private OpInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private OpInfo() { + op_ = ""; + inputs_ = java.util.Collections.emptyList(); + outputs_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new OpInfo(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private OpInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + op_ = s; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + attr_ = com.google.protobuf.MapField.newMapField( + AttrDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000001; + } + com.google.protobuf.MapEntry + attr__ = input.readMessage( + AttrDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + attr_.getMutableMap().put( + attr__.getKey(), attr__.getValue()); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000002) != 0)) { + inputs_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + inputs_.add( + input.readMessage(org.tensorflow.proto.framework.OpInfo.TensorProperties.parser(), extensionRegistry)); + break; + } + case 34: { + org.tensorflow.proto.framework.DeviceProperties.Builder subBuilder = null; + if (device_ != null) { + subBuilder = device_.toBuilder(); + } + device_ = input.readMessage(org.tensorflow.proto.framework.DeviceProperties.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(device_); + device_ = subBuilder.buildPartial(); + } + + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000004) != 0)) { + outputs_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + outputs_.add( + input.readMessage(org.tensorflow.proto.framework.OpInfo.TensorProperties.parser(), extensionRegistry)); + break; + } + case 50: { + org.tensorflow.proto.framework.SessionInfo.Builder subBuilder = null; + if (sessionInfo_ != null) { + subBuilder = sessionInfo_.toBuilder(); + } + sessionInfo_ = input.readMessage(org.tensorflow.proto.framework.SessionInfo.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(sessionInfo_); + sessionInfo_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) != 0)) { + inputs_ = java.util.Collections.unmodifiableList(inputs_); + } + if (((mutable_bitField0_ & 0x00000004) != 0)) { + outputs_ = java.util.Collections.unmodifiableList(outputs_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 2: + return internalGetAttr(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.OpInfo.class, org.tensorflow.proto.framework.OpInfo.Builder.class); + } + + public interface TensorPropertiesOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.OpInfo.TensorProperties) + com.google.protobuf.MessageOrBuilder { + + /** + * .tensorflow.DataType dtype = 1; + */ + int getDtypeValue(); + /** + * .tensorflow.DataType dtype = 1; + */ + org.tensorflow.proto.framework.DataType getDtype(); + + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + boolean hasShape(); + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + org.tensorflow.proto.framework.TensorShapeProto getShape(); + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + org.tensorflow.proto.framework.TensorShapeProtoOrBuilder getShapeOrBuilder(); + + /** + * .tensorflow.TensorProto value = 3; + */ + boolean hasValue(); + /** + * .tensorflow.TensorProto value = 3; + */ + org.tensorflow.proto.framework.TensorProto getValue(); + /** + * .tensorflow.TensorProto value = 3; + */ + org.tensorflow.proto.framework.TensorProtoOrBuilder getValueOrBuilder(); + } + /** + *
      +   * Input data types, shapes and values if known.
      +   * 
      + * + * Protobuf type {@code tensorflow.OpInfo.TensorProperties} + */ + public static final class TensorProperties extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.OpInfo.TensorProperties) + TensorPropertiesOrBuilder { + private static final long serialVersionUID = 0L; + // Use TensorProperties.newBuilder() to construct. + private TensorProperties(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private TensorProperties() { + dtype_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new TensorProperties(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TensorProperties( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + int rawValue = input.readEnum(); + + dtype_ = rawValue; + break; + } + case 18: { + org.tensorflow.proto.framework.TensorShapeProto.Builder subBuilder = null; + if (shape_ != null) { + subBuilder = shape_.toBuilder(); + } + shape_ = input.readMessage(org.tensorflow.proto.framework.TensorShapeProto.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(shape_); + shape_ = subBuilder.buildPartial(); + } + + break; + } + case 26: { + org.tensorflow.proto.framework.TensorProto.Builder subBuilder = null; + if (value_ != null) { + subBuilder = value_.toBuilder(); + } + value_ = input.readMessage(org.tensorflow.proto.framework.TensorProto.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(value_); + value_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_TensorProperties_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_TensorProperties_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.OpInfo.TensorProperties.class, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder.class); + } + + public static final int DTYPE_FIELD_NUMBER = 1; + private int dtype_; + /** + * .tensorflow.DataType dtype = 1; + */ + public int getDtypeValue() { + return dtype_; + } + /** + * .tensorflow.DataType dtype = 
1; + */ + public org.tensorflow.proto.framework.DataType getDtype() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.framework.DataType result = org.tensorflow.proto.framework.DataType.valueOf(dtype_); + return result == null ? org.tensorflow.proto.framework.DataType.UNRECOGNIZED : result; + } + + public static final int SHAPE_FIELD_NUMBER = 2; + private org.tensorflow.proto.framework.TensorShapeProto shape_; + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public boolean hasShape() { + return shape_ != null; + } + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public org.tensorflow.proto.framework.TensorShapeProto getShape() { + return shape_ == null ? org.tensorflow.proto.framework.TensorShapeProto.getDefaultInstance() : shape_; + } + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public org.tensorflow.proto.framework.TensorShapeProtoOrBuilder getShapeOrBuilder() { + return getShape(); + } + + public static final int VALUE_FIELD_NUMBER = 3; + private org.tensorflow.proto.framework.TensorProto value_; + /** + * .tensorflow.TensorProto value = 3; + */ + public boolean hasValue() { + return value_ != null; + } + /** + * .tensorflow.TensorProto value = 3; + */ + public org.tensorflow.proto.framework.TensorProto getValue() { + return value_ == null ? org.tensorflow.proto.framework.TensorProto.getDefaultInstance() : value_; + } + /** + * .tensorflow.TensorProto value = 3; + */ + public org.tensorflow.proto.framework.TensorProtoOrBuilder getValueOrBuilder() { + return getValue(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (dtype_ != org.tensorflow.proto.framework.DataType.DT_INVALID.getNumber()) { + output.writeEnum(1, dtype_); + } + if (shape_ != null) { + output.writeMessage(2, getShape()); + } + if (value_ != null) { + output.writeMessage(3, getValue()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (dtype_ != org.tensorflow.proto.framework.DataType.DT_INVALID.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, dtype_); + } + if (shape_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getShape()); + } + if (value_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getValue()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.framework.OpInfo.TensorProperties)) { + return super.equals(obj); + } + org.tensorflow.proto.framework.OpInfo.TensorProperties other = (org.tensorflow.proto.framework.OpInfo.TensorProperties) obj; + + if (dtype_ != other.dtype_) return false; + if (hasShape() != other.hasShape()) return false; + if (hasShape()) { + if (!getShape() + .equals(other.getShape())) return false; + } + if (hasValue() != other.hasValue()) return false; + if (hasValue()) { + if (!getValue() + .equals(other.getValue())) return false; + } + if 
(!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DTYPE_FIELD_NUMBER; + hash = (53 * hash) + dtype_; + if (hasShape()) { + hash = (37 * hash) + SHAPE_FIELD_NUMBER; + hash = (53 * hash) + getShape().hashCode(); + } + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpInfo.TensorProperties parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static 
org.tensorflow.proto.framework.OpInfo.TensorProperties parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.framework.OpInfo.TensorProperties prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +     * Input data types, shapes and values if known.
      +     * 
      + * + * Protobuf type {@code tensorflow.OpInfo.TensorProperties} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.OpInfo.TensorProperties) + org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_TensorProperties_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_TensorProperties_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.OpInfo.TensorProperties.class, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder.class); + } + + // Construct using org.tensorflow.proto.framework.OpInfo.TensorProperties.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + dtype_ = 0; + + if (shapeBuilder_ == null) { + shape_ = null; + } else { + shape_ = null; + shapeBuilder_ = null; + } + if (valueBuilder_ == null) { + value_ = null; + } else { + value_ = null; + valueBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_TensorProperties_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpInfo.TensorProperties getDefaultInstanceForType() { + return org.tensorflow.proto.framework.OpInfo.TensorProperties.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpInfo.TensorProperties build() { + org.tensorflow.proto.framework.OpInfo.TensorProperties result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpInfo.TensorProperties buildPartial() { + org.tensorflow.proto.framework.OpInfo.TensorProperties result = new org.tensorflow.proto.framework.OpInfo.TensorProperties(this); + result.dtype_ = dtype_; + if (shapeBuilder_ == null) { + result.shape_ = shape_; + } else { + result.shape_ = shapeBuilder_.build(); + } + if (valueBuilder_ == null) { + result.value_ = value_; + } else { + result.value_ = valueBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override 
+ public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.framework.OpInfo.TensorProperties) { + return mergeFrom((org.tensorflow.proto.framework.OpInfo.TensorProperties)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.framework.OpInfo.TensorProperties other) { + if (other == org.tensorflow.proto.framework.OpInfo.TensorProperties.getDefaultInstance()) return this; + if (other.dtype_ != 0) { + setDtypeValue(other.getDtypeValue()); + } + if (other.hasShape()) { + mergeShape(other.getShape()); + } + if (other.hasValue()) { + mergeValue(other.getValue()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.framework.OpInfo.TensorProperties parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.framework.OpInfo.TensorProperties) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int dtype_ = 0; + /** + * .tensorflow.DataType dtype = 1; + */ + public int getDtypeValue() { + return dtype_; + } + /** + * .tensorflow.DataType dtype = 1; + */ + public Builder setDtypeValue(int value) { + dtype_ = value; + onChanged(); + return this; + } + /** + * .tensorflow.DataType dtype = 1; + */ + public org.tensorflow.proto.framework.DataType getDtype() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.framework.DataType result = org.tensorflow.proto.framework.DataType.valueOf(dtype_); + return result == null ? org.tensorflow.proto.framework.DataType.UNRECOGNIZED : result; + } + /** + * .tensorflow.DataType dtype = 1; + */ + public Builder setDtype(org.tensorflow.proto.framework.DataType value) { + if (value == null) { + throw new NullPointerException(); + } + + dtype_ = value.getNumber(); + onChanged(); + return this; + } + /** + * .tensorflow.DataType dtype = 1; + */ + public Builder clearDtype() { + + dtype_ = 0; + onChanged(); + return this; + } + + private org.tensorflow.proto.framework.TensorShapeProto shape_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.TensorShapeProto, org.tensorflow.proto.framework.TensorShapeProto.Builder, org.tensorflow.proto.framework.TensorShapeProtoOrBuilder> shapeBuilder_; + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public boolean hasShape() { + return shapeBuilder_ != null || shape_ != null; + } + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public org.tensorflow.proto.framework.TensorShapeProto getShape() { + if (shapeBuilder_ == null) { + return shape_ == null ? 
org.tensorflow.proto.framework.TensorShapeProto.getDefaultInstance() : shape_; + } else { + return shapeBuilder_.getMessage(); + } + } + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public Builder setShape(org.tensorflow.proto.framework.TensorShapeProto value) { + if (shapeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + shape_ = value; + onChanged(); + } else { + shapeBuilder_.setMessage(value); + } + + return this; + } + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public Builder setShape( + org.tensorflow.proto.framework.TensorShapeProto.Builder builderForValue) { + if (shapeBuilder_ == null) { + shape_ = builderForValue.build(); + onChanged(); + } else { + shapeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public Builder mergeShape(org.tensorflow.proto.framework.TensorShapeProto value) { + if (shapeBuilder_ == null) { + if (shape_ != null) { + shape_ = + org.tensorflow.proto.framework.TensorShapeProto.newBuilder(shape_).mergeFrom(value).buildPartial(); + } else { + shape_ = value; + } + onChanged(); + } else { + shapeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public Builder clearShape() { + if (shapeBuilder_ == null) { + shape_ = null; + onChanged(); + } else { + shape_ = null; + shapeBuilder_ = null; + } + + return this; + } + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public org.tensorflow.proto.framework.TensorShapeProto.Builder getShapeBuilder() { + + onChanged(); + return getShapeFieldBuilder().getBuilder(); + } + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + public org.tensorflow.proto.framework.TensorShapeProtoOrBuilder getShapeOrBuilder() { + if (shapeBuilder_ != null) { + return shapeBuilder_.getMessageOrBuilder(); + } else { + return shape_ == null ? + org.tensorflow.proto.framework.TensorShapeProto.getDefaultInstance() : shape_; + } + } + /** + * .tensorflow.TensorShapeProto shape = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.TensorShapeProto, org.tensorflow.proto.framework.TensorShapeProto.Builder, org.tensorflow.proto.framework.TensorShapeProtoOrBuilder> + getShapeFieldBuilder() { + if (shapeBuilder_ == null) { + shapeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.TensorShapeProto, org.tensorflow.proto.framework.TensorShapeProto.Builder, org.tensorflow.proto.framework.TensorShapeProtoOrBuilder>( + getShape(), + getParentForChildren(), + isClean()); + shape_ = null; + } + return shapeBuilder_; + } + + private org.tensorflow.proto.framework.TensorProto value_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.TensorProto, org.tensorflow.proto.framework.TensorProto.Builder, org.tensorflow.proto.framework.TensorProtoOrBuilder> valueBuilder_; + /** + * .tensorflow.TensorProto value = 3; + */ + public boolean hasValue() { + return valueBuilder_ != null || value_ != null; + } + /** + * .tensorflow.TensorProto value = 3; + */ + public org.tensorflow.proto.framework.TensorProto getValue() { + if (valueBuilder_ == null) { + return value_ == null ? 
org.tensorflow.proto.framework.TensorProto.getDefaultInstance() : value_; + } else { + return valueBuilder_.getMessage(); + } + } + /** + * .tensorflow.TensorProto value = 3; + */ + public Builder setValue(org.tensorflow.proto.framework.TensorProto value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + value_ = value; + onChanged(); + } else { + valueBuilder_.setMessage(value); + } + + return this; + } + /** + * .tensorflow.TensorProto value = 3; + */ + public Builder setValue( + org.tensorflow.proto.framework.TensorProto.Builder builderForValue) { + if (valueBuilder_ == null) { + value_ = builderForValue.build(); + onChanged(); + } else { + valueBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * .tensorflow.TensorProto value = 3; + */ + public Builder mergeValue(org.tensorflow.proto.framework.TensorProto value) { + if (valueBuilder_ == null) { + if (value_ != null) { + value_ = + org.tensorflow.proto.framework.TensorProto.newBuilder(value_).mergeFrom(value).buildPartial(); + } else { + value_ = value; + } + onChanged(); + } else { + valueBuilder_.mergeFrom(value); + } + + return this; + } + /** + * .tensorflow.TensorProto value = 3; + */ + public Builder clearValue() { + if (valueBuilder_ == null) { + value_ = null; + onChanged(); + } else { + value_ = null; + valueBuilder_ = null; + } + + return this; + } + /** + * .tensorflow.TensorProto value = 3; + */ + public org.tensorflow.proto.framework.TensorProto.Builder getValueBuilder() { + + onChanged(); + return getValueFieldBuilder().getBuilder(); + } + /** + * .tensorflow.TensorProto value = 3; + */ + public org.tensorflow.proto.framework.TensorProtoOrBuilder getValueOrBuilder() { + if (valueBuilder_ != null) { + return valueBuilder_.getMessageOrBuilder(); + } else { + return value_ == null ? 
+ org.tensorflow.proto.framework.TensorProto.getDefaultInstance() : value_; + } + } + /** + * .tensorflow.TensorProto value = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.TensorProto, org.tensorflow.proto.framework.TensorProto.Builder, org.tensorflow.proto.framework.TensorProtoOrBuilder> + getValueFieldBuilder() { + if (valueBuilder_ == null) { + valueBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.TensorProto, org.tensorflow.proto.framework.TensorProto.Builder, org.tensorflow.proto.framework.TensorProtoOrBuilder>( + getValue(), + getParentForChildren(), + isClean()); + value_ = null; + } + return valueBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.OpInfo.TensorProperties) + } + + // @@protoc_insertion_point(class_scope:tensorflow.OpInfo.TensorProperties) + private static final org.tensorflow.proto.framework.OpInfo.TensorProperties DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.framework.OpInfo.TensorProperties(); + } + + public static org.tensorflow.proto.framework.OpInfo.TensorProperties getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TensorProperties parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TensorProperties(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpInfo.TensorProperties getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public static final int OP_FIELD_NUMBER = 1; + private volatile java.lang.Object op_; + /** + *
      +   * The operation name.  There may be custom parameters in attrs.
      +   * 
      + * + * string op = 1; + */ + public java.lang.String getOp() { + java.lang.Object ref = op_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + op_ = s; + return s; + } + } + /** + *
      +   * The operation name.  There may be custom parameters in attrs.
      +   * 
      + * + * string op = 1; + */ + public com.google.protobuf.ByteString + getOpBytes() { + java.lang.Object ref = op_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + op_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ATTR_FIELD_NUMBER = 2; + private static final class AttrDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, org.tensorflow.proto.framework.AttrValue> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_AttrEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.MESSAGE, + org.tensorflow.proto.framework.AttrValue.getDefaultInstance()); + } + private com.google.protobuf.MapField< + java.lang.String, org.tensorflow.proto.framework.AttrValue> attr_; + private com.google.protobuf.MapField + internalGetAttr() { + if (attr_ == null) { + return com.google.protobuf.MapField.emptyMapField( + AttrDefaultEntryHolder.defaultEntry); + } + return attr_; + } + + public int getAttrCount() { + return internalGetAttr().getMap().size(); + } + /** + *
      +   * Custom parameters impacting the behavior of the op.
      +   * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + public boolean containsAttr( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetAttr().getMap().containsKey(key); + } + /** + * Use {@link #getAttrMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getAttr() { + return getAttrMap(); + } + /** + *
      +   * Custom parameters impacting the behavior of the op.
      +   * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + public java.util.Map getAttrMap() { + return internalGetAttr().getMap(); + } + /** + *
      +   * Custom parameters impacting the behavior of the op.
      +   * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + public org.tensorflow.proto.framework.AttrValue getAttrOrDefault( + java.lang.String key, + org.tensorflow.proto.framework.AttrValue defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetAttr().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
      +   * Custom parameters impacting the behavior of the op.
      +   * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + public org.tensorflow.proto.framework.AttrValue getAttrOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetAttr().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int INPUTS_FIELD_NUMBER = 3; + private java.util.List inputs_; + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public java.util.List getInputsList() { + return inputs_; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public java.util.List + getInputsOrBuilderList() { + return inputs_; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public int getInputsCount() { + return inputs_.size(); + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public org.tensorflow.proto.framework.OpInfo.TensorProperties getInputs(int index) { + return inputs_.get(index); + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder getInputsOrBuilder( + int index) { + return inputs_.get(index); + } + + public static final int OUTPUTS_FIELD_NUMBER = 5; + private java.util.List outputs_; + /** + *
      +   * Optional description of the op outputs
      +   * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public java.util.List getOutputsList() { + return outputs_; + } + /** + *
      +   * Optional description of the op outputs
      +   * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public java.util.List + getOutputsOrBuilderList() { + return outputs_; + } + /** + *
      +   * Optional description of the op outputs
      +   * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public int getOutputsCount() { + return outputs_.size(); + } + /** + *
      +   * Optional description of the op outputs
      +   * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public org.tensorflow.proto.framework.OpInfo.TensorProperties getOutputs(int index) { + return outputs_.get(index); + } + /** + *
      +   * Optional description of the op outputs
      +   * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder getOutputsOrBuilder( + int index) { + return outputs_.get(index); + } + + public static final int DEVICE_FIELD_NUMBER = 4; + private org.tensorflow.proto.framework.DeviceProperties device_; + /** + *
      +   * Device on which the operation is run.
      +   * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public boolean hasDevice() { + return device_ != null; + } + /** + *
      +   * Device on which the operation is run.
      +   * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public org.tensorflow.proto.framework.DeviceProperties getDevice() { + return device_ == null ? org.tensorflow.proto.framework.DeviceProperties.getDefaultInstance() : device_; + } + /** + *
      +   * Device on which the operation is run.
      +   * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public org.tensorflow.proto.framework.DevicePropertiesOrBuilder getDeviceOrBuilder() { + return getDevice(); + } + + public static final int SESSION_INFO_FIELD_NUMBER = 6; + private org.tensorflow.proto.framework.SessionInfo sessionInfo_; + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public boolean hasSessionInfo() { + return sessionInfo_ != null; + } + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public org.tensorflow.proto.framework.SessionInfo getSessionInfo() { + return sessionInfo_ == null ? org.tensorflow.proto.framework.SessionInfo.getDefaultInstance() : sessionInfo_; + } + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public org.tensorflow.proto.framework.SessionInfoOrBuilder getSessionInfoOrBuilder() { + return getSessionInfo(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getOpBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, op_); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetAttr(), + AttrDefaultEntryHolder.defaultEntry, + 2); + for (int i = 0; i < inputs_.size(); i++) { + output.writeMessage(3, inputs_.get(i)); + } + if (device_ != null) { + output.writeMessage(4, getDevice()); + } + for (int i = 0; i < outputs_.size(); i++) { + output.writeMessage(5, outputs_.get(i)); + } + if (sessionInfo_ != null) { + output.writeMessage(6, getSessionInfo()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getOpBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, op_); + } + for (java.util.Map.Entry entry + : internalGetAttr().getMap().entrySet()) { + com.google.protobuf.MapEntry + attr__ = AttrDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, attr__); + } + for (int i = 0; i < inputs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, inputs_.get(i)); + } + if (device_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getDevice()); + } + for (int i = 0; i < outputs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, outputs_.get(i)); + } + if (sessionInfo_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, getSessionInfo()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.framework.OpInfo)) { + return super.equals(obj); + } + org.tensorflow.proto.framework.OpInfo other = (org.tensorflow.proto.framework.OpInfo) obj; + + if (!getOp() + .equals(other.getOp())) return false; + if (!internalGetAttr().equals( + other.internalGetAttr())) return false; + if (!getInputsList() + .equals(other.getInputsList())) return false; + if (!getOutputsList() + .equals(other.getOutputsList())) return false; + if (hasDevice() != other.hasDevice()) return false; + if (hasDevice()) { + if (!getDevice() + .equals(other.getDevice())) return false; + } + if (hasSessionInfo() != other.hasSessionInfo()) return false; + if (hasSessionInfo()) { + if (!getSessionInfo() + .equals(other.getSessionInfo())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 
* hash) + OP_FIELD_NUMBER; + hash = (53 * hash) + getOp().hashCode(); + if (!internalGetAttr().getMap().isEmpty()) { + hash = (37 * hash) + ATTR_FIELD_NUMBER; + hash = (53 * hash) + internalGetAttr().hashCode(); + } + if (getInputsCount() > 0) { + hash = (37 * hash) + INPUTS_FIELD_NUMBER; + hash = (53 * hash) + getInputsList().hashCode(); + } + if (getOutputsCount() > 0) { + hash = (37 * hash) + OUTPUTS_FIELD_NUMBER; + hash = (53 * hash) + getOutputsList().hashCode(); + } + if (hasDevice()) { + hash = (37 * hash) + DEVICE_FIELD_NUMBER; + hash = (53 * hash) + getDevice().hashCode(); + } + if (hasSessionInfo()) { + hash = (37 * hash) + SESSION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getSessionInfo().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.framework.OpInfo parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpInfo parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static 
org.tensorflow.proto.framework.OpInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.framework.OpInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +   * Description of an operation as well as the parameters expected to impact its
      +   * performance.
      +   * 
      + * + * Protobuf type {@code tensorflow.OpInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.OpInfo) + org.tensorflow.proto.framework.OpInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 2: + return internalGetAttr(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 2: + return internalGetMutableAttr(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.OpInfo.class, org.tensorflow.proto.framework.OpInfo.Builder.class); + } + + // Construct using org.tensorflow.proto.framework.OpInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getInputsFieldBuilder(); + getOutputsFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + op_ = ""; + + internalGetMutableAttr().clear(); + if (inputsBuilder_ == null) { + inputs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + inputsBuilder_.clear(); + } + if (outputsBuilder_ == null) { + outputs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + outputsBuilder_.clear(); + } + if (deviceBuilder_ == null) { + device_ = null; + } else { + device_ = null; + deviceBuilder_ = null; + } + if (sessionInfoBuilder_ == null) { + sessionInfo_ = null; + } else { + sessionInfo_ = null; + sessionInfoBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpInfo_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpInfo getDefaultInstanceForType() { + return org.tensorflow.proto.framework.OpInfo.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpInfo build() { + org.tensorflow.proto.framework.OpInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpInfo buildPartial() { + org.tensorflow.proto.framework.OpInfo result = new org.tensorflow.proto.framework.OpInfo(this); + int from_bitField0_ = bitField0_; + result.op_ = op_; + result.attr_ = internalGetAttr(); + result.attr_.makeImmutable(); + if (inputsBuilder_ == 
null) { + if (((bitField0_ & 0x00000002) != 0)) { + inputs_ = java.util.Collections.unmodifiableList(inputs_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.inputs_ = inputs_; + } else { + result.inputs_ = inputsBuilder_.build(); + } + if (outputsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + outputs_ = java.util.Collections.unmodifiableList(outputs_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.outputs_ = outputs_; + } else { + result.outputs_ = outputsBuilder_.build(); + } + if (deviceBuilder_ == null) { + result.device_ = device_; + } else { + result.device_ = deviceBuilder_.build(); + } + if (sessionInfoBuilder_ == null) { + result.sessionInfo_ = sessionInfo_; + } else { + result.sessionInfo_ = sessionInfoBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.framework.OpInfo) { + return mergeFrom((org.tensorflow.proto.framework.OpInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.framework.OpInfo other) { + if (other == org.tensorflow.proto.framework.OpInfo.getDefaultInstance()) return this; + if (!other.getOp().isEmpty()) { + op_ = other.op_; + onChanged(); + } + internalGetMutableAttr().mergeFrom( + other.internalGetAttr()); + if (inputsBuilder_ == null) { + if (!other.inputs_.isEmpty()) { + if (inputs_.isEmpty()) { + inputs_ = other.inputs_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureInputsIsMutable(); + inputs_.addAll(other.inputs_); + } + onChanged(); + } + } else { + if (!other.inputs_.isEmpty()) { + if (inputsBuilder_.isEmpty()) { + inputsBuilder_.dispose(); + inputsBuilder_ = null; + inputs_ = other.inputs_; + bitField0_ = (bitField0_ & ~0x00000002); + inputsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getInputsFieldBuilder() : null; + } else { + inputsBuilder_.addAllMessages(other.inputs_); + } + } + } + if (outputsBuilder_ == null) { + if (!other.outputs_.isEmpty()) { + if (outputs_.isEmpty()) { + outputs_ = other.outputs_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureOutputsIsMutable(); + outputs_.addAll(other.outputs_); + } + onChanged(); + } + } else { + if (!other.outputs_.isEmpty()) { + if (outputsBuilder_.isEmpty()) { + outputsBuilder_.dispose(); + outputsBuilder_ = null; + outputs_ = other.outputs_; + bitField0_ = (bitField0_ & ~0x00000004); + outputsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getOutputsFieldBuilder() : null; + } else { + outputsBuilder_.addAllMessages(other.outputs_); + } + } + } + if (other.hasDevice()) { + mergeDevice(other.getDevice()); + } + if (other.hasSessionInfo()) { + mergeSessionInfo(other.getSessionInfo()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.framework.OpInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.framework.OpInfo) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object op_ = ""; + /** + *
      +     * The operation name.  There may be custom parameters in attrs.
      +     * 
      + * + * string op = 1; + */ + public java.lang.String getOp() { + java.lang.Object ref = op_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + op_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
      +     * The operation name.  There may be custom parameters in attrs.
      +     * 
      + * + * string op = 1; + */ + public com.google.protobuf.ByteString + getOpBytes() { + java.lang.Object ref = op_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + op_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
      +     * The operation name.  There may be custom parameters in attrs.
      +     * 
      + * + * string op = 1; + */ + public Builder setOp( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + op_ = value; + onChanged(); + return this; + } + /** + *
      +     * The operation name.  There may be custom parameters in attrs.
      +     * 
      + * + * string op = 1; + */ + public Builder clearOp() { + + op_ = getDefaultInstance().getOp(); + onChanged(); + return this; + } + /** + *
      +     * The operation name.  There may be custom parameters in attrs.
      +     * 
      + * + * string op = 1; + */ + public Builder setOpBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + op_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, org.tensorflow.proto.framework.AttrValue> attr_; + private com.google.protobuf.MapField + internalGetAttr() { + if (attr_ == null) { + return com.google.protobuf.MapField.emptyMapField( + AttrDefaultEntryHolder.defaultEntry); + } + return attr_; + } + private com.google.protobuf.MapField + internalGetMutableAttr() { + onChanged();; + if (attr_ == null) { + attr_ = com.google.protobuf.MapField.newMapField( + AttrDefaultEntryHolder.defaultEntry); + } + if (!attr_.isMutable()) { + attr_ = attr_.copy(); + } + return attr_; + } + + public int getAttrCount() { + return internalGetAttr().getMap().size(); + } + /** + *
      +     * Custom parameters impacting the behavior of the op.
      +     * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + public boolean containsAttr( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetAttr().getMap().containsKey(key); + } + /** + * Use {@link #getAttrMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getAttr() { + return getAttrMap(); + } + /** + *
      +     * Custom parameters impacting the behavior of the op.
      +     * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + public java.util.Map getAttrMap() { + return internalGetAttr().getMap(); + } + /** + *
      +     * Custom parameters impacting the behavior of the op.
      +     * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + public org.tensorflow.proto.framework.AttrValue getAttrOrDefault( + java.lang.String key, + org.tensorflow.proto.framework.AttrValue defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetAttr().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
      +     * Custom parameters impacting the behavior of the op.
      +     * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + public org.tensorflow.proto.framework.AttrValue getAttrOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetAttr().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearAttr() { + internalGetMutableAttr().getMutableMap() + .clear(); + return this; + } + /** + *
      +     * Custom parameters impacting the behavior of the op.
      +     * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + public Builder removeAttr( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableAttr().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableAttr() { + return internalGetMutableAttr().getMutableMap(); + } + /** + *
      +     * Custom parameters impacting the behavior of the op.
      +     * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + public Builder putAttr( + java.lang.String key, + org.tensorflow.proto.framework.AttrValue value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableAttr().getMutableMap() + .put(key, value); + return this; + } + /** + *
      +     * Custom parameters impacting the behavior of the op.
      +     * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + public Builder putAllAttr( + java.util.Map values) { + internalGetMutableAttr().getMutableMap() + .putAll(values); + return this; + } + + private java.util.List inputs_ = + java.util.Collections.emptyList(); + private void ensureInputsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + inputs_ = new java.util.ArrayList(inputs_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.OpInfo.TensorProperties, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder, org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder> inputsBuilder_; + + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public java.util.List getInputsList() { + if (inputsBuilder_ == null) { + return java.util.Collections.unmodifiableList(inputs_); + } else { + return inputsBuilder_.getMessageList(); + } + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public int getInputsCount() { + if (inputsBuilder_ == null) { + return inputs_.size(); + } else { + return inputsBuilder_.getCount(); + } + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public org.tensorflow.proto.framework.OpInfo.TensorProperties getInputs(int index) { + if (inputsBuilder_ == null) { + return inputs_.get(index); + } else { + return inputsBuilder_.getMessage(index); + } + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public Builder setInputs( + int index, org.tensorflow.proto.framework.OpInfo.TensorProperties value) { + if (inputsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInputsIsMutable(); + inputs_.set(index, value); + onChanged(); + } else { + inputsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public Builder setInputs( + int index, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder builderForValue) { + if (inputsBuilder_ == null) { + ensureInputsIsMutable(); + inputs_.set(index, builderForValue.build()); + onChanged(); + } else { + inputsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public Builder addInputs(org.tensorflow.proto.framework.OpInfo.TensorProperties value) { + if (inputsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInputsIsMutable(); + inputs_.add(value); + onChanged(); + } else { + inputsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public Builder addInputs( + int index, org.tensorflow.proto.framework.OpInfo.TensorProperties value) { + if (inputsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInputsIsMutable(); + inputs_.add(index, value); + onChanged(); + } else { + inputsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public Builder addInputs( + org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder builderForValue) { + if (inputsBuilder_ == null) { + ensureInputsIsMutable(); + inputs_.add(builderForValue.build()); + onChanged(); + } else { + inputsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties 
inputs = 3; + */ + public Builder addInputs( + int index, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder builderForValue) { + if (inputsBuilder_ == null) { + ensureInputsIsMutable(); + inputs_.add(index, builderForValue.build()); + onChanged(); + } else { + inputsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public Builder addAllInputs( + java.lang.Iterable values) { + if (inputsBuilder_ == null) { + ensureInputsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, inputs_); + onChanged(); + } else { + inputsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public Builder clearInputs() { + if (inputsBuilder_ == null) { + inputs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + inputsBuilder_.clear(); + } + return this; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public Builder removeInputs(int index) { + if (inputsBuilder_ == null) { + ensureInputsIsMutable(); + inputs_.remove(index); + onChanged(); + } else { + inputsBuilder_.remove(index); + } + return this; + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder getInputsBuilder( + int index) { + return getInputsFieldBuilder().getBuilder(index); + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder getInputsOrBuilder( + int index) { + if (inputsBuilder_ == null) { + return inputs_.get(index); } else { + return inputsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public java.util.List + getInputsOrBuilderList() { + if (inputsBuilder_ != null) { + return inputsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(inputs_); + } + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder addInputsBuilder() { + return getInputsFieldBuilder().addBuilder( + org.tensorflow.proto.framework.OpInfo.TensorProperties.getDefaultInstance()); + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder addInputsBuilder( + int index) { + return getInputsFieldBuilder().addBuilder( + index, org.tensorflow.proto.framework.OpInfo.TensorProperties.getDefaultInstance()); + } + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + public java.util.List + getInputsBuilderList() { + return getInputsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.OpInfo.TensorProperties, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder, org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder> + getInputsFieldBuilder() { + if (inputsBuilder_ == null) { + inputsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.OpInfo.TensorProperties, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder, org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder>( + inputs_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); 
+ inputs_ = null; + } + return inputsBuilder_; + } + + private java.util.List outputs_ = + java.util.Collections.emptyList(); + private void ensureOutputsIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + outputs_ = new java.util.ArrayList(outputs_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.OpInfo.TensorProperties, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder, org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder> outputsBuilder_; + + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public java.util.List getOutputsList() { + if (outputsBuilder_ == null) { + return java.util.Collections.unmodifiableList(outputs_); + } else { + return outputsBuilder_.getMessageList(); + } + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public int getOutputsCount() { + if (outputsBuilder_ == null) { + return outputs_.size(); + } else { + return outputsBuilder_.getCount(); + } + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public org.tensorflow.proto.framework.OpInfo.TensorProperties getOutputs(int index) { + if (outputsBuilder_ == null) { + return outputs_.get(index); + } else { + return outputsBuilder_.getMessage(index); + } + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public Builder setOutputs( + int index, org.tensorflow.proto.framework.OpInfo.TensorProperties value) { + if (outputsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOutputsIsMutable(); + outputs_.set(index, value); + onChanged(); + } else { + outputsBuilder_.setMessage(index, value); + } + return this; + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public Builder setOutputs( + int index, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder builderForValue) { + if (outputsBuilder_ == null) { + ensureOutputsIsMutable(); + outputs_.set(index, builderForValue.build()); + onChanged(); + } else { + outputsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public Builder addOutputs(org.tensorflow.proto.framework.OpInfo.TensorProperties value) { + if (outputsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOutputsIsMutable(); + outputs_.add(value); + onChanged(); + } else { + outputsBuilder_.addMessage(value); + } + return this; + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public Builder addOutputs( + int index, org.tensorflow.proto.framework.OpInfo.TensorProperties value) { + if (outputsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOutputsIsMutable(); + outputs_.add(index, value); + onChanged(); + } else { + outputsBuilder_.addMessage(index, value); + } + return this; + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public Builder addOutputs( + org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder builderForValue) { + if (outputsBuilder_ == null) { + ensureOutputsIsMutable(); + outputs_.add(builderForValue.build()); + onChanged(); + } else { + outputsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public Builder addOutputs( + int index, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder builderForValue) { + if (outputsBuilder_ == null) { + ensureOutputsIsMutable(); + outputs_.add(index, builderForValue.build()); + onChanged(); + } else { + outputsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public Builder addAllOutputs( + java.lang.Iterable values) { + if (outputsBuilder_ == null) { + ensureOutputsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, outputs_); + onChanged(); + } else { + outputsBuilder_.addAllMessages(values); + } + return this; + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public Builder clearOutputs() { + if (outputsBuilder_ == null) { + outputs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + outputsBuilder_.clear(); + } + return this; + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public Builder removeOutputs(int index) { + if (outputsBuilder_ == null) { + ensureOutputsIsMutable(); + outputs_.remove(index); + onChanged(); + } else { + outputsBuilder_.remove(index); + } + return this; + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder getOutputsBuilder( + int index) { + return getOutputsFieldBuilder().getBuilder(index); + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder getOutputsOrBuilder( + int index) { + if (outputsBuilder_ == null) { + return outputs_.get(index); } else { + return outputsBuilder_.getMessageOrBuilder(index); + } + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public java.util.List + getOutputsOrBuilderList() { + if (outputsBuilder_ != null) { + return outputsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(outputs_); + } + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder addOutputsBuilder() { + return getOutputsFieldBuilder().addBuilder( + org.tensorflow.proto.framework.OpInfo.TensorProperties.getDefaultInstance()); + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder addOutputsBuilder( + int index) { + return getOutputsFieldBuilder().addBuilder( + index, org.tensorflow.proto.framework.OpInfo.TensorProperties.getDefaultInstance()); + } + /** + *
      +     * Optional description of the op outputs
      +     * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + public java.util.List + getOutputsBuilderList() { + return getOutputsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.OpInfo.TensorProperties, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder, org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder> + getOutputsFieldBuilder() { + if (outputsBuilder_ == null) { + outputsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.OpInfo.TensorProperties, org.tensorflow.proto.framework.OpInfo.TensorProperties.Builder, org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder>( + outputs_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + outputs_ = null; + } + return outputsBuilder_; + } + + private org.tensorflow.proto.framework.DeviceProperties device_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.DeviceProperties, org.tensorflow.proto.framework.DeviceProperties.Builder, org.tensorflow.proto.framework.DevicePropertiesOrBuilder> deviceBuilder_; + /** + *
      +     * Device on which the operation is run.
      +     * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public boolean hasDevice() { + return deviceBuilder_ != null || device_ != null; + } + /** + *
      +     * Device on which the operation is run.
      +     * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public org.tensorflow.proto.framework.DeviceProperties getDevice() { + if (deviceBuilder_ == null) { + return device_ == null ? org.tensorflow.proto.framework.DeviceProperties.getDefaultInstance() : device_; + } else { + return deviceBuilder_.getMessage(); + } + } + /** + *
      +     * Device on which the operation is run.
      +     * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public Builder setDevice(org.tensorflow.proto.framework.DeviceProperties value) { + if (deviceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + device_ = value; + onChanged(); + } else { + deviceBuilder_.setMessage(value); + } + + return this; + } + /** + *
      +     * Device on which the operation is run.
      +     * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public Builder setDevice( + org.tensorflow.proto.framework.DeviceProperties.Builder builderForValue) { + if (deviceBuilder_ == null) { + device_ = builderForValue.build(); + onChanged(); + } else { + deviceBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
      +     * Device on which the operation is run.
      +     * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public Builder mergeDevice(org.tensorflow.proto.framework.DeviceProperties value) { + if (deviceBuilder_ == null) { + if (device_ != null) { + device_ = + org.tensorflow.proto.framework.DeviceProperties.newBuilder(device_).mergeFrom(value).buildPartial(); + } else { + device_ = value; + } + onChanged(); + } else { + deviceBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
      +     * Device on which the operation is run.
      +     * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public Builder clearDevice() { + if (deviceBuilder_ == null) { + device_ = null; + onChanged(); + } else { + device_ = null; + deviceBuilder_ = null; + } + + return this; + } + /** + *
      +     * Device on which the operation is run.
      +     * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public org.tensorflow.proto.framework.DeviceProperties.Builder getDeviceBuilder() { + + onChanged(); + return getDeviceFieldBuilder().getBuilder(); + } + /** + *
      +     * Device on which the operation is run.
      +     * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + public org.tensorflow.proto.framework.DevicePropertiesOrBuilder getDeviceOrBuilder() { + if (deviceBuilder_ != null) { + return deviceBuilder_.getMessageOrBuilder(); + } else { + return device_ == null ? + org.tensorflow.proto.framework.DeviceProperties.getDefaultInstance() : device_; + } + } + /** + *
      +     * Device on which the operation is run.
      +     * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.DeviceProperties, org.tensorflow.proto.framework.DeviceProperties.Builder, org.tensorflow.proto.framework.DevicePropertiesOrBuilder> + getDeviceFieldBuilder() { + if (deviceBuilder_ == null) { + deviceBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.DeviceProperties, org.tensorflow.proto.framework.DeviceProperties.Builder, org.tensorflow.proto.framework.DevicePropertiesOrBuilder>( + getDevice(), + getParentForChildren(), + isClean()); + device_ = null; + } + return deviceBuilder_; + } + + private org.tensorflow.proto.framework.SessionInfo sessionInfo_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.SessionInfo, org.tensorflow.proto.framework.SessionInfo.Builder, org.tensorflow.proto.framework.SessionInfoOrBuilder> sessionInfoBuilder_; + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public boolean hasSessionInfo() { + return sessionInfoBuilder_ != null || sessionInfo_ != null; + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public org.tensorflow.proto.framework.SessionInfo getSessionInfo() { + if (sessionInfoBuilder_ == null) { + return sessionInfo_ == null ? org.tensorflow.proto.framework.SessionInfo.getDefaultInstance() : sessionInfo_; + } else { + return sessionInfoBuilder_.getMessage(); + } + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public Builder setSessionInfo(org.tensorflow.proto.framework.SessionInfo value) { + if (sessionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sessionInfo_ = value; + onChanged(); + } else { + sessionInfoBuilder_.setMessage(value); + } + + return this; + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public Builder setSessionInfo( + org.tensorflow.proto.framework.SessionInfo.Builder builderForValue) { + if (sessionInfoBuilder_ == null) { + sessionInfo_ = builderForValue.build(); + onChanged(); + } else { + sessionInfoBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public Builder mergeSessionInfo(org.tensorflow.proto.framework.SessionInfo value) { + if (sessionInfoBuilder_ == null) { + if (sessionInfo_ != null) { + sessionInfo_ = + org.tensorflow.proto.framework.SessionInfo.newBuilder(sessionInfo_).mergeFrom(value).buildPartial(); + } else { + sessionInfo_ = value; + } + onChanged(); + } else { + sessionInfoBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public Builder clearSessionInfo() { + if (sessionInfoBuilder_ == null) { + sessionInfo_ = null; + onChanged(); + } else { + sessionInfo_ = null; + sessionInfoBuilder_ = null; + } + + return this; + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public org.tensorflow.proto.framework.SessionInfo.Builder getSessionInfoBuilder() { + + onChanged(); + return getSessionInfoFieldBuilder().getBuilder(); + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + public org.tensorflow.proto.framework.SessionInfoOrBuilder getSessionInfoOrBuilder() { + if (sessionInfoBuilder_ != null) { + return sessionInfoBuilder_.getMessageOrBuilder(); + } else { + return sessionInfo_ == null ? + org.tensorflow.proto.framework.SessionInfo.getDefaultInstance() : sessionInfo_; + } + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.SessionInfo, org.tensorflow.proto.framework.SessionInfo.Builder, org.tensorflow.proto.framework.SessionInfoOrBuilder> + getSessionInfoFieldBuilder() { + if (sessionInfoBuilder_ == null) { + sessionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.SessionInfo, org.tensorflow.proto.framework.SessionInfo.Builder, org.tensorflow.proto.framework.SessionInfoOrBuilder>( + getSessionInfo(), + getParentForChildren(), + isClean()); + sessionInfo_ = null; + } + return sessionInfoBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.OpInfo) + } + + // @@protoc_insertion_point(class_scope:tensorflow.OpInfo) + private static final org.tensorflow.proto.framework.OpInfo DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.framework.OpInfo(); + } + + public static org.tensorflow.proto.framework.OpInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public OpInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new OpInfo(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpInfoOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpInfoOrBuilder.java new file mode 100644 index 00000000000..675799794a4 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpInfoOrBuilder.java @@ -0,0 +1,199 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/grappler/costs/op_performance_data.proto + +package org.tensorflow.proto.framework; + +public interface OpInfoOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.OpInfo) + com.google.protobuf.MessageOrBuilder { + + /** + *
      +   * The operation name.  There may be custom parameters in attrs.
      +   * 
      + * + * string op = 1; + */ + java.lang.String getOp(); + /** + *
      +   * The operation name.  There may be custom parameters in attrs.
      +   * 
      + * + * string op = 1; + */ + com.google.protobuf.ByteString + getOpBytes(); + + /** + *
      +   * Custom parameters impacting the behavior of the op.
      +   * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + int getAttrCount(); + /** + *
      +   * Custom parameters impacting the behavior of the op.
      +   * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + boolean containsAttr( + java.lang.String key); + /** + * Use {@link #getAttrMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getAttr(); + /** + *
      +   * Custom parameters impacting the behavior of the op.
      +   * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + java.util.Map + getAttrMap(); + /** + *
      +   * Custom parameters impacting the behavior of the op.
      +   * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + org.tensorflow.proto.framework.AttrValue getAttrOrDefault( + java.lang.String key, + org.tensorflow.proto.framework.AttrValue defaultValue); + /** + *
      +   * Custom parameters impacting the behavior of the op.
      +   * 
      + * + * map<string, .tensorflow.AttrValue> attr = 2; + */ + + org.tensorflow.proto.framework.AttrValue getAttrOrThrow( + java.lang.String key); + + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + java.util.List + getInputsList(); + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + org.tensorflow.proto.framework.OpInfo.TensorProperties getInputs(int index); + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + int getInputsCount(); + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + java.util.List + getInputsOrBuilderList(); + /** + * repeated .tensorflow.OpInfo.TensorProperties inputs = 3; + */ + org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder getInputsOrBuilder( + int index); + + /** + *
      +   * Optional description of the op outputs
      +   * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + java.util.List + getOutputsList(); + /** + *
      +   * Optional description of the op outputs
      +   * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + org.tensorflow.proto.framework.OpInfo.TensorProperties getOutputs(int index); + /** + *
      +   * Optional description of the op outputs
      +   * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + int getOutputsCount(); + /** + *
      +   * Optional description of the op outputs
      +   * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + java.util.List + getOutputsOrBuilderList(); + /** + *
      +   * Optional description of the op outputs
      +   * 
      + * + * repeated .tensorflow.OpInfo.TensorProperties outputs = 5; + */ + org.tensorflow.proto.framework.OpInfo.TensorPropertiesOrBuilder getOutputsOrBuilder( + int index); + + /** + *
      +   * Device on which the operation is run.
      +   * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + boolean hasDevice(); + /** + *
      +   * Device on which the operation is run.
      +   * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + org.tensorflow.proto.framework.DeviceProperties getDevice(); + /** + *
      +   * Device on which the operation is run.
      +   * 
      + * + * .tensorflow.DeviceProperties device = 4; + */ + org.tensorflow.proto.framework.DevicePropertiesOrBuilder getDeviceOrBuilder(); + + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + boolean hasSessionInfo(); + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + org.tensorflow.proto.framework.SessionInfo getSessionInfo(); + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 6; + */ + org.tensorflow.proto.framework.SessionInfoOrBuilder getSessionInfoOrBuilder(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformance.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformance.java new file mode 100644 index 00000000000..0f25f95dff9 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformance.java @@ -0,0 +1,3074 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/grappler/costs/op_performance_data.proto + +package org.tensorflow.proto.framework; + +/** + *
      + * Performance data for tensorflow operations
      + * 
      + * + * Protobuf type {@code tensorflow.OpPerformance} + */ +public final class OpPerformance extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.OpPerformance) + OpPerformanceOrBuilder { +private static final long serialVersionUID = 0L; + // Use OpPerformance.newBuilder() to construct. + private OpPerformance(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private OpPerformance() { + node_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new OpPerformance(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private OpPerformance( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + org.tensorflow.proto.framework.OpInfo.Builder subBuilder = null; + if (op_ != null) { + subBuilder = op_.toBuilder(); + } + op_ = input.readMessage(org.tensorflow.proto.framework.OpInfo.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(op_); + op_ = subBuilder.buildPartial(); + } + + break; + } + case 16: { + + temporaryMemorySize_ = input.readInt64(); + break; + } + case 24: { + + computeCost_ = input.readInt64(); + break; + } + case 33: { + + computeEfficiency_ = input.readDouble(); + break; + } + case 42: { + java.lang.String s = input.readStringRequireUtf8(); + + node_ = s; + break; + } + case 48: { + + computeTime_ = input.readInt64(); + break; + } + case 56: { + + memoryTime_ = input.readInt64(); + break; + } + case 65: { + + memoryEfficiency_ = input.readDouble(); + break; + } + case 74: { + org.tensorflow.proto.framework.OpPerformance.OpMemory.Builder subBuilder = null; + if (opMemory_ != null) { + subBuilder = opMemory_.toBuilder(); + } + opMemory_ = input.readMessage(org.tensorflow.proto.framework.OpPerformance.OpMemory.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(opMemory_); + opMemory_ = subBuilder.buildPartial(); + } + + break; + } + case 82: { + org.tensorflow.proto.framework.NormalDistribution.Builder subBuilder = null; + if (executionTimeCase_ == 10) { + subBuilder = ((org.tensorflow.proto.framework.NormalDistribution) executionTime_).toBuilder(); + } + executionTime_ = + input.readMessage(org.tensorflow.proto.framework.NormalDistribution.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((org.tensorflow.proto.framework.NormalDistribution) executionTime_); + executionTime_ = subBuilder.buildPartial(); + } + executionTimeCase_ = 10; + break; + } + case 90: { + org.tensorflow.proto.framework.LogNormalDistribution.Builder subBuilder = null; + if (executionTimeCase_ == 11) { + subBuilder = ((org.tensorflow.proto.framework.LogNormalDistribution) executionTime_).toBuilder(); + } + executionTime_ = + input.readMessage(org.tensorflow.proto.framework.LogNormalDistribution.parser(), extensionRegistry); + if (subBuilder != null) { + 
subBuilder.mergeFrom((org.tensorflow.proto.framework.LogNormalDistribution) executionTime_); + executionTime_ = subBuilder.buildPartial(); + } + executionTimeCase_ = 11; + break; + } + case 98: { + org.tensorflow.proto.framework.SessionInfo.Builder subBuilder = null; + if (sessionInfo_ != null) { + subBuilder = sessionInfo_.toBuilder(); + } + sessionInfo_ = input.readMessage(org.tensorflow.proto.framework.SessionInfo.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(sessionInfo_); + sessionInfo_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformance_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformance_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.OpPerformance.class, org.tensorflow.proto.framework.OpPerformance.Builder.class); + } + + public interface OpMemoryOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.OpPerformance.OpMemory) + com.google.protobuf.MessageOrBuilder { + + /** + *
      +     * The output information may have memory usage and output shapes.
      +     * 
      + * + * repeated int64 output_memory = 1; + */ + java.util.List getOutputMemoryList(); + /** + *
      +     * The output information may have memory usage and output shapes.
      +     * 
      + * + * repeated int64 output_memory = 1; + */ + int getOutputMemoryCount(); + /** + *
      +     * The output information may have memory usage and output shapes.
      +     * 
      + * + * repeated int64 output_memory = 1; + */ + long getOutputMemory(int index); + + /** + *
      +     * Temp and persistent memory allocated by this node.
      +     * 
      + * + * int64 temp_memory = 2; + */ + long getTempMemory(); + + /** + * int64 persistent_memory = 4; + */ + long getPersistentMemory(); + + /** + * int64 device_temp_memory = 3 [deprecated = true]; + */ + @java.lang.Deprecated long getDeviceTempMemory(); + + /** + * int64 device_persistent_memory = 5 [deprecated = true]; + */ + @java.lang.Deprecated long getDevicePersistentMemory(); + } + /** + *
      +   * Memory usage data for a tensorflow operation.
      +   * 
      + * + * Protobuf type {@code tensorflow.OpPerformance.OpMemory} + */ + public static final class OpMemory extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.OpPerformance.OpMemory) + OpMemoryOrBuilder { + private static final long serialVersionUID = 0L; + // Use OpMemory.newBuilder() to construct. + private OpMemory(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private OpMemory() { + outputMemory_ = emptyLongList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new OpMemory(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private OpMemory( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + outputMemory_ = newLongList(); + mutable_bitField0_ |= 0x00000001; + } + outputMemory_.addLong(input.readInt64()); + break; + } + case 10: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000001) != 0) && input.getBytesUntilLimit() > 0) { + outputMemory_ = newLongList(); + mutable_bitField0_ |= 0x00000001; + } + while (input.getBytesUntilLimit() > 0) { + outputMemory_.addLong(input.readInt64()); + } + input.popLimit(limit); + break; + } + case 16: { + + tempMemory_ = input.readInt64(); + break; + } + case 24: { + + deviceTempMemory_ = input.readInt64(); + break; + } + case 32: { + + persistentMemory_ = input.readInt64(); + break; + } + case 40: { + + devicePersistentMemory_ = input.readInt64(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + outputMemory_.makeImmutable(); // C + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformance_OpMemory_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformance_OpMemory_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.OpPerformance.OpMemory.class, org.tensorflow.proto.framework.OpPerformance.OpMemory.Builder.class); + } + + public static final int OUTPUT_MEMORY_FIELD_NUMBER = 1; + private com.google.protobuf.Internal.LongList 
outputMemory_; + /** + *
      +     * The output information may have memory usage and output shapes.
      +     * 
      + * + * repeated int64 output_memory = 1; + */ + public java.util.List + getOutputMemoryList() { + return outputMemory_; + } + /** + *
      +     * The output information may have memory usage and output shapes.
      +     * 
      + * + * repeated int64 output_memory = 1; + */ + public int getOutputMemoryCount() { + return outputMemory_.size(); + } + /** + *
      +     * The output information may have memory usage and output shapes.
      +     * 
      + * + * repeated int64 output_memory = 1; + */ + public long getOutputMemory(int index) { + return outputMemory_.getLong(index); + } + private int outputMemoryMemoizedSerializedSize = -1; + + public static final int TEMP_MEMORY_FIELD_NUMBER = 2; + private long tempMemory_; + /** + *
      +     * Temp and persistent memory allocated by this node.
      +     * 
      + * + * int64 temp_memory = 2; + */ + public long getTempMemory() { + return tempMemory_; + } + + public static final int PERSISTENT_MEMORY_FIELD_NUMBER = 4; + private long persistentMemory_; + /** + * int64 persistent_memory = 4; + */ + public long getPersistentMemory() { + return persistentMemory_; + } + + public static final int DEVICE_TEMP_MEMORY_FIELD_NUMBER = 3; + private long deviceTempMemory_; + /** + * int64 device_temp_memory = 3 [deprecated = true]; + */ + @java.lang.Deprecated public long getDeviceTempMemory() { + return deviceTempMemory_; + } + + public static final int DEVICE_PERSISTENT_MEMORY_FIELD_NUMBER = 5; + private long devicePersistentMemory_; + /** + * int64 device_persistent_memory = 5 [deprecated = true]; + */ + @java.lang.Deprecated public long getDevicePersistentMemory() { + return devicePersistentMemory_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (getOutputMemoryList().size() > 0) { + output.writeUInt32NoTag(10); + output.writeUInt32NoTag(outputMemoryMemoizedSerializedSize); + } + for (int i = 0; i < outputMemory_.size(); i++) { + output.writeInt64NoTag(outputMemory_.getLong(i)); + } + if (tempMemory_ != 0L) { + output.writeInt64(2, tempMemory_); + } + if (deviceTempMemory_ != 0L) { + output.writeInt64(3, deviceTempMemory_); + } + if (persistentMemory_ != 0L) { + output.writeInt64(4, persistentMemory_); + } + if (devicePersistentMemory_ != 0L) { + output.writeInt64(5, devicePersistentMemory_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < outputMemory_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeInt64SizeNoTag(outputMemory_.getLong(i)); + } + size += dataSize; + if (!getOutputMemoryList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream + .computeInt32SizeNoTag(dataSize); + } + outputMemoryMemoizedSerializedSize = dataSize; + } + if (tempMemory_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, tempMemory_); + } + if (deviceTempMemory_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, deviceTempMemory_); + } + if (persistentMemory_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, persistentMemory_); + } + if (devicePersistentMemory_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(5, devicePersistentMemory_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.framework.OpPerformance.OpMemory)) { + return super.equals(obj); + } + org.tensorflow.proto.framework.OpPerformance.OpMemory other = (org.tensorflow.proto.framework.OpPerformance.OpMemory) obj; + + if (!getOutputMemoryList() + .equals(other.getOutputMemoryList())) return false; + if (getTempMemory() + != other.getTempMemory()) return false; + if (getPersistentMemory() + != 
other.getPersistentMemory()) return false; + if (getDeviceTempMemory() + != other.getDeviceTempMemory()) return false; + if (getDevicePersistentMemory() + != other.getDevicePersistentMemory()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getOutputMemoryCount() > 0) { + hash = (37 * hash) + OUTPUT_MEMORY_FIELD_NUMBER; + hash = (53 * hash) + getOutputMemoryList().hashCode(); + } + hash = (37 * hash) + TEMP_MEMORY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getTempMemory()); + hash = (37 * hash) + PERSISTENT_MEMORY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getPersistentMemory()); + hash = (37 * hash) + DEVICE_TEMP_MEMORY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getDeviceTempMemory()); + hash = (37 * hash) + DEVICE_PERSISTENT_MEMORY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getDevicePersistentMemory()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + 
.parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpPerformance.OpMemory parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.framework.OpPerformance.OpMemory prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +     * Memory usage data for a tensorflow operation.
      +     * 
      + * + * Protobuf type {@code tensorflow.OpPerformance.OpMemory} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.OpPerformance.OpMemory) + org.tensorflow.proto.framework.OpPerformance.OpMemoryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformance_OpMemory_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformance_OpMemory_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.OpPerformance.OpMemory.class, org.tensorflow.proto.framework.OpPerformance.OpMemory.Builder.class); + } + + // Construct using org.tensorflow.proto.framework.OpPerformance.OpMemory.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + outputMemory_ = emptyLongList(); + bitField0_ = (bitField0_ & ~0x00000001); + tempMemory_ = 0L; + + persistentMemory_ = 0L; + + deviceTempMemory_ = 0L; + + devicePersistentMemory_ = 0L; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformance_OpMemory_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformance.OpMemory getDefaultInstanceForType() { + return org.tensorflow.proto.framework.OpPerformance.OpMemory.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformance.OpMemory build() { + org.tensorflow.proto.framework.OpPerformance.OpMemory result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformance.OpMemory buildPartial() { + org.tensorflow.proto.framework.OpPerformance.OpMemory result = new org.tensorflow.proto.framework.OpPerformance.OpMemory(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) != 0)) { + outputMemory_.makeImmutable(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.outputMemory_ = outputMemory_; + result.tempMemory_ = tempMemory_; + result.persistentMemory_ = persistentMemory_; + result.deviceTempMemory_ = deviceTempMemory_; + result.devicePersistentMemory_ = devicePersistentMemory_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + 
com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.framework.OpPerformance.OpMemory) { + return mergeFrom((org.tensorflow.proto.framework.OpPerformance.OpMemory)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.framework.OpPerformance.OpMemory other) { + if (other == org.tensorflow.proto.framework.OpPerformance.OpMemory.getDefaultInstance()) return this; + if (!other.outputMemory_.isEmpty()) { + if (outputMemory_.isEmpty()) { + outputMemory_ = other.outputMemory_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureOutputMemoryIsMutable(); + outputMemory_.addAll(other.outputMemory_); + } + onChanged(); + } + if (other.getTempMemory() != 0L) { + setTempMemory(other.getTempMemory()); + } + if (other.getPersistentMemory() != 0L) { + setPersistentMemory(other.getPersistentMemory()); + } + if (other.getDeviceTempMemory() != 0L) { + setDeviceTempMemory(other.getDeviceTempMemory()); + } + if (other.getDevicePersistentMemory() != 0L) { + setDevicePersistentMemory(other.getDevicePersistentMemory()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.framework.OpPerformance.OpMemory parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.framework.OpPerformance.OpMemory) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private com.google.protobuf.Internal.LongList outputMemory_ = emptyLongList(); + private void ensureOutputMemoryIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + outputMemory_ = mutableCopy(outputMemory_); + bitField0_ |= 0x00000001; + } + } + /** + *
      +       * The output information may have memory usage and output shapes.
      +       * 
      + * + * repeated int64 output_memory = 1; + */ + public java.util.List + getOutputMemoryList() { + return ((bitField0_ & 0x00000001) != 0) ? + java.util.Collections.unmodifiableList(outputMemory_) : outputMemory_; + } + /** + *
      +       * The output information may have memory usage and output shapes.
      +       * 
      + * + * repeated int64 output_memory = 1; + */ + public int getOutputMemoryCount() { + return outputMemory_.size(); + } + /** + *
      +       * The output information may have memory usage and output shapes.
      +       * 
      + * + * repeated int64 output_memory = 1; + */ + public long getOutputMemory(int index) { + return outputMemory_.getLong(index); + } + /** + *
      +       * The output information may have memory usage and output shapes.
      +       * 
      + * + * repeated int64 output_memory = 1; + */ + public Builder setOutputMemory( + int index, long value) { + ensureOutputMemoryIsMutable(); + outputMemory_.setLong(index, value); + onChanged(); + return this; + } + /** + *
      +       * The output information may have memory usage and output shapes.
      +       * 
      + * + * repeated int64 output_memory = 1; + */ + public Builder addOutputMemory(long value) { + ensureOutputMemoryIsMutable(); + outputMemory_.addLong(value); + onChanged(); + return this; + } + /** + *
      +       * The output information may have memory usage and output shapes.
      +       * 
      + * + * repeated int64 output_memory = 1; + */ + public Builder addAllOutputMemory( + java.lang.Iterable values) { + ensureOutputMemoryIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, outputMemory_); + onChanged(); + return this; + } + /** + *
      +       * The output information may have memory usage and output shapes.
      +       * 
      + * + * repeated int64 output_memory = 1; + */ + public Builder clearOutputMemory() { + outputMemory_ = emptyLongList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + private long tempMemory_ ; + /** + *
      +       * Temp and persistent memory allocated by this node.
      +       * 
      + * + * int64 temp_memory = 2; + */ + public long getTempMemory() { + return tempMemory_; + } + /** + *
      +       * Temp and persistent memory allocated by this node.
      +       * 
      + * + * int64 temp_memory = 2; + */ + public Builder setTempMemory(long value) { + + tempMemory_ = value; + onChanged(); + return this; + } + /** + *
      +       * Temp and persistent memory allocated by this node.
      +       * 
      + * + * int64 temp_memory = 2; + */ + public Builder clearTempMemory() { + + tempMemory_ = 0L; + onChanged(); + return this; + } + + private long persistentMemory_ ; + /** + * int64 persistent_memory = 4; + */ + public long getPersistentMemory() { + return persistentMemory_; + } + /** + * int64 persistent_memory = 4; + */ + public Builder setPersistentMemory(long value) { + + persistentMemory_ = value; + onChanged(); + return this; + } + /** + * int64 persistent_memory = 4; + */ + public Builder clearPersistentMemory() { + + persistentMemory_ = 0L; + onChanged(); + return this; + } + + private long deviceTempMemory_ ; + /** + * int64 device_temp_memory = 3 [deprecated = true]; + */ + @java.lang.Deprecated public long getDeviceTempMemory() { + return deviceTempMemory_; + } + /** + * int64 device_temp_memory = 3 [deprecated = true]; + */ + @java.lang.Deprecated public Builder setDeviceTempMemory(long value) { + + deviceTempMemory_ = value; + onChanged(); + return this; + } + /** + * int64 device_temp_memory = 3 [deprecated = true]; + */ + @java.lang.Deprecated public Builder clearDeviceTempMemory() { + + deviceTempMemory_ = 0L; + onChanged(); + return this; + } + + private long devicePersistentMemory_ ; + /** + * int64 device_persistent_memory = 5 [deprecated = true]; + */ + @java.lang.Deprecated public long getDevicePersistentMemory() { + return devicePersistentMemory_; + } + /** + * int64 device_persistent_memory = 5 [deprecated = true]; + */ + @java.lang.Deprecated public Builder setDevicePersistentMemory(long value) { + + devicePersistentMemory_ = value; + onChanged(); + return this; + } + /** + * int64 device_persistent_memory = 5 [deprecated = true]; + */ + @java.lang.Deprecated public Builder clearDevicePersistentMemory() { + + devicePersistentMemory_ = 0L; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.OpPerformance.OpMemory) + } + + // @@protoc_insertion_point(class_scope:tensorflow.OpPerformance.OpMemory) + private static final org.tensorflow.proto.framework.OpPerformance.OpMemory DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.framework.OpPerformance.OpMemory(); + } + + public static org.tensorflow.proto.framework.OpPerformance.OpMemory getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public OpMemory parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new OpMemory(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformance.OpMemory getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private int executionTimeCase_ = 0; + private java.lang.Object executionTime_; + public enum ExecutionTimeCase + implements 
com.google.protobuf.Internal.EnumLite { + EXECUTION_TIME_NORMAL(10), + EXECUTION_TIME_LOG_NORMAL(11), + EXECUTIONTIME_NOT_SET(0); + private final int value; + private ExecutionTimeCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ExecutionTimeCase valueOf(int value) { + return forNumber(value); + } + + public static ExecutionTimeCase forNumber(int value) { + switch (value) { + case 10: return EXECUTION_TIME_NORMAL; + case 11: return EXECUTION_TIME_LOG_NORMAL; + case 0: return EXECUTIONTIME_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public ExecutionTimeCase + getExecutionTimeCase() { + return ExecutionTimeCase.forNumber( + executionTimeCase_); + } + + public static final int OP_FIELD_NUMBER = 1; + private org.tensorflow.proto.framework.OpInfo op_; + /** + *
      +   * The op
      +   * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public boolean hasOp() { + return op_ != null; + } + /** + *
      +   * The op
      +   * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public org.tensorflow.proto.framework.OpInfo getOp() { + return op_ == null ? org.tensorflow.proto.framework.OpInfo.getDefaultInstance() : op_; + } + /** + *
      +   * The op
      +   * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public org.tensorflow.proto.framework.OpInfoOrBuilder getOpOrBuilder() { + return getOp(); + } + + public static final int SESSION_INFO_FIELD_NUMBER = 12; + private org.tensorflow.proto.framework.SessionInfo sessionInfo_; + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public boolean hasSessionInfo() { + return sessionInfo_ != null; + } + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public org.tensorflow.proto.framework.SessionInfo getSessionInfo() { + return sessionInfo_ == null ? org.tensorflow.proto.framework.SessionInfo.getDefaultInstance() : sessionInfo_; + } + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public org.tensorflow.proto.framework.SessionInfoOrBuilder getSessionInfoOrBuilder() { + return getSessionInfo(); + } + + public static final int NODE_FIELD_NUMBER = 5; + private volatile java.lang.Object node_; + /** + *
      +   * The node name (optional). Makes it easier to associate the performance data
      +   * with a specific graph node.
      +   * 
      + * + * string node = 5; + */ + public java.lang.String getNode() { + java.lang.Object ref = node_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + node_ = s; + return s; + } + } + /** + *
      +   * The node name (optional). Makes it easier to associate the performance data
      +   * with a specific graph node.
      +   * 
      + * + * string node = 5; + */ + public com.google.protobuf.ByteString + getNodeBytes() { + java.lang.Object ref = node_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + node_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TEMPORARY_MEMORY_SIZE_FIELD_NUMBER = 2; + private long temporaryMemorySize_; + /** + *
      +   * Temporary memory used by this node (in bytes).
      +   * 
      + * + * int64 temporary_memory_size = 2; + */ + public long getTemporaryMemorySize() { + return temporaryMemorySize_; + } + + public static final int COMPUTE_COST_FIELD_NUMBER = 3; + private long computeCost_; + /** + *
      +   * Time it takes to run the op (in nanoseconds).
      +   * 
      + * + * int64 compute_cost = 3; + */ + public long getComputeCost() { + return computeCost_; + } + + public static final int COMPUTE_TIME_FIELD_NUMBER = 6; + private long computeTime_; + /** + *
      +   * Analytical compute cost (in nanoseconds).
      +   * 
      + * + * int64 compute_time = 6; + */ + public long getComputeTime() { + return computeTime_; + } + + public static final int MEMORY_TIME_FIELD_NUMBER = 7; + private long memoryTime_; + /** + *
      +   * Analytical memory access cost (in nanoseconds).
      +   * 
      + * + * int64 memory_time = 7; + */ + public long getMemoryTime() { + return memoryTime_; + } + + public static final int COMPUTE_EFFICIENCY_FIELD_NUMBER = 4; + private double computeEfficiency_; + /** + *
      +   * Percentage of theoretical compute performance.
      +   * 
      + * + * double compute_efficiency = 4; + */ + public double getComputeEfficiency() { + return computeEfficiency_; + } + + public static final int MEMORY_EFFICIENCY_FIELD_NUMBER = 8; + private double memoryEfficiency_; + /** + *
      +   * Percentage of theoretical memory performance.
      +   * 
      + * + * double memory_efficiency = 8; + */ + public double getMemoryEfficiency() { + return memoryEfficiency_; + } + + public static final int EXECUTION_TIME_NORMAL_FIELD_NUMBER = 10; + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public boolean hasExecutionTimeNormal() { + return executionTimeCase_ == 10; + } + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public org.tensorflow.proto.framework.NormalDistribution getExecutionTimeNormal() { + if (executionTimeCase_ == 10) { + return (org.tensorflow.proto.framework.NormalDistribution) executionTime_; + } + return org.tensorflow.proto.framework.NormalDistribution.getDefaultInstance(); + } + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public org.tensorflow.proto.framework.NormalDistributionOrBuilder getExecutionTimeNormalOrBuilder() { + if (executionTimeCase_ == 10) { + return (org.tensorflow.proto.framework.NormalDistribution) executionTime_; + } + return org.tensorflow.proto.framework.NormalDistribution.getDefaultInstance(); + } + + public static final int EXECUTION_TIME_LOG_NORMAL_FIELD_NUMBER = 11; + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public boolean hasExecutionTimeLogNormal() { + return executionTimeCase_ == 11; + } + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public org.tensorflow.proto.framework.LogNormalDistribution getExecutionTimeLogNormal() { + if (executionTimeCase_ == 11) { + return (org.tensorflow.proto.framework.LogNormalDistribution) executionTime_; + } + return org.tensorflow.proto.framework.LogNormalDistribution.getDefaultInstance(); + } + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public org.tensorflow.proto.framework.LogNormalDistributionOrBuilder getExecutionTimeLogNormalOrBuilder() { + if (executionTimeCase_ == 11) { + return (org.tensorflow.proto.framework.LogNormalDistribution) executionTime_; + } + return org.tensorflow.proto.framework.LogNormalDistribution.getDefaultInstance(); + } + + public static final int OP_MEMORY_FIELD_NUMBER = 9; + private org.tensorflow.proto.framework.OpPerformance.OpMemory opMemory_; + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + public boolean hasOpMemory() { + return opMemory_ != null; + } + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + public org.tensorflow.proto.framework.OpPerformance.OpMemory getOpMemory() { + return opMemory_ == null ? 
org.tensorflow.proto.framework.OpPerformance.OpMemory.getDefaultInstance() : opMemory_; + } + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + public org.tensorflow.proto.framework.OpPerformance.OpMemoryOrBuilder getOpMemoryOrBuilder() { + return getOpMemory(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (op_ != null) { + output.writeMessage(1, getOp()); + } + if (temporaryMemorySize_ != 0L) { + output.writeInt64(2, temporaryMemorySize_); + } + if (computeCost_ != 0L) { + output.writeInt64(3, computeCost_); + } + if (computeEfficiency_ != 0D) { + output.writeDouble(4, computeEfficiency_); + } + if (!getNodeBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, node_); + } + if (computeTime_ != 0L) { + output.writeInt64(6, computeTime_); + } + if (memoryTime_ != 0L) { + output.writeInt64(7, memoryTime_); + } + if (memoryEfficiency_ != 0D) { + output.writeDouble(8, memoryEfficiency_); + } + if (opMemory_ != null) { + output.writeMessage(9, getOpMemory()); + } + if (executionTimeCase_ == 10) { + output.writeMessage(10, (org.tensorflow.proto.framework.NormalDistribution) executionTime_); + } + if (executionTimeCase_ == 11) { + output.writeMessage(11, (org.tensorflow.proto.framework.LogNormalDistribution) executionTime_); + } + if (sessionInfo_ != null) { + output.writeMessage(12, getSessionInfo()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (op_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getOp()); + } + if (temporaryMemorySize_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, temporaryMemorySize_); + } + if (computeCost_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, computeCost_); + } + if (computeEfficiency_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(4, computeEfficiency_); + } + if (!getNodeBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, node_); + } + if (computeTime_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(6, computeTime_); + } + if (memoryTime_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(7, memoryTime_); + } + if (memoryEfficiency_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(8, memoryEfficiency_); + } + if (opMemory_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, getOpMemory()); + } + if (executionTimeCase_ == 10) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, (org.tensorflow.proto.framework.NormalDistribution) executionTime_); + } + if (executionTimeCase_ == 11) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(11, (org.tensorflow.proto.framework.LogNormalDistribution) executionTime_); + } + if (sessionInfo_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(12, getSessionInfo()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = 
size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.framework.OpPerformance)) { + return super.equals(obj); + } + org.tensorflow.proto.framework.OpPerformance other = (org.tensorflow.proto.framework.OpPerformance) obj; + + if (hasOp() != other.hasOp()) return false; + if (hasOp()) { + if (!getOp() + .equals(other.getOp())) return false; + } + if (hasSessionInfo() != other.hasSessionInfo()) return false; + if (hasSessionInfo()) { + if (!getSessionInfo() + .equals(other.getSessionInfo())) return false; + } + if (!getNode() + .equals(other.getNode())) return false; + if (getTemporaryMemorySize() + != other.getTemporaryMemorySize()) return false; + if (getComputeCost() + != other.getComputeCost()) return false; + if (getComputeTime() + != other.getComputeTime()) return false; + if (getMemoryTime() + != other.getMemoryTime()) return false; + if (java.lang.Double.doubleToLongBits(getComputeEfficiency()) + != java.lang.Double.doubleToLongBits( + other.getComputeEfficiency())) return false; + if (java.lang.Double.doubleToLongBits(getMemoryEfficiency()) + != java.lang.Double.doubleToLongBits( + other.getMemoryEfficiency())) return false; + if (hasOpMemory() != other.hasOpMemory()) return false; + if (hasOpMemory()) { + if (!getOpMemory() + .equals(other.getOpMemory())) return false; + } + if (!getExecutionTimeCase().equals(other.getExecutionTimeCase())) return false; + switch (executionTimeCase_) { + case 10: + if (!getExecutionTimeNormal() + .equals(other.getExecutionTimeNormal())) return false; + break; + case 11: + if (!getExecutionTimeLogNormal() + .equals(other.getExecutionTimeLogNormal())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasOp()) { + hash = (37 * hash) + OP_FIELD_NUMBER; + hash = (53 * hash) + getOp().hashCode(); + } + if (hasSessionInfo()) { + hash = (37 * hash) + SESSION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getSessionInfo().hashCode(); + } + hash = (37 * hash) + NODE_FIELD_NUMBER; + hash = (53 * hash) + getNode().hashCode(); + hash = (37 * hash) + TEMPORARY_MEMORY_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getTemporaryMemorySize()); + hash = (37 * hash) + COMPUTE_COST_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getComputeCost()); + hash = (37 * hash) + COMPUTE_TIME_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getComputeTime()); + hash = (37 * hash) + MEMORY_TIME_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getMemoryTime()); + hash = (37 * hash) + COMPUTE_EFFICIENCY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getComputeEfficiency())); + hash = (37 * hash) + MEMORY_EFFICIENCY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getMemoryEfficiency())); + if (hasOpMemory()) { + hash = (37 * hash) + OP_MEMORY_FIELD_NUMBER; + hash = (53 * hash) + getOpMemory().hashCode(); + } + switch (executionTimeCase_) { + case 10: + hash = (37 * hash) + EXECUTION_TIME_NORMAL_FIELD_NUMBER; + hash = (53 * hash) + 
getExecutionTimeNormal().hashCode(); + break; + case 11: + hash = (37 * hash) + EXECUTION_TIME_LOG_NORMAL_FIELD_NUMBER; + hash = (53 * hash) + getExecutionTimeLogNormal().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.framework.OpPerformance parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpPerformance parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformance parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpPerformance parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformance parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpPerformance parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformance parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpPerformance parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformance parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpPerformance parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformance parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpPerformance parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return 
DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.framework.OpPerformance prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +   * Performance data for tensorflow operations
      +   * 
      + * + * Protobuf type {@code tensorflow.OpPerformance} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.OpPerformance) + org.tensorflow.proto.framework.OpPerformanceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformance_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformance_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.OpPerformance.class, org.tensorflow.proto.framework.OpPerformance.Builder.class); + } + + // Construct using org.tensorflow.proto.framework.OpPerformance.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (opBuilder_ == null) { + op_ = null; + } else { + op_ = null; + opBuilder_ = null; + } + if (sessionInfoBuilder_ == null) { + sessionInfo_ = null; + } else { + sessionInfo_ = null; + sessionInfoBuilder_ = null; + } + node_ = ""; + + temporaryMemorySize_ = 0L; + + computeCost_ = 0L; + + computeTime_ = 0L; + + memoryTime_ = 0L; + + computeEfficiency_ = 0D; + + memoryEfficiency_ = 0D; + + if (opMemoryBuilder_ == null) { + opMemory_ = null; + } else { + opMemory_ = null; + opMemoryBuilder_ = null; + } + executionTimeCase_ = 0; + executionTime_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformance_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformance getDefaultInstanceForType() { + return org.tensorflow.proto.framework.OpPerformance.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformance build() { + org.tensorflow.proto.framework.OpPerformance result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformance buildPartial() { + org.tensorflow.proto.framework.OpPerformance result = new org.tensorflow.proto.framework.OpPerformance(this); + if (opBuilder_ == null) { + result.op_ = op_; + } else { + result.op_ = opBuilder_.build(); + } + if (sessionInfoBuilder_ == null) { + result.sessionInfo_ = sessionInfo_; + } else { + result.sessionInfo_ = sessionInfoBuilder_.build(); + } + result.node_ = node_; + result.temporaryMemorySize_ = temporaryMemorySize_; + result.computeCost_ = computeCost_; + result.computeTime_ = computeTime_; + result.memoryTime_ = memoryTime_; + result.computeEfficiency_ = computeEfficiency_; + result.memoryEfficiency_ = memoryEfficiency_; + if (executionTimeCase_ == 10) { + if (executionTimeNormalBuilder_ == null) { + result.executionTime_ = executionTime_; + } else { + result.executionTime_ 
= executionTimeNormalBuilder_.build(); + } + } + if (executionTimeCase_ == 11) { + if (executionTimeLogNormalBuilder_ == null) { + result.executionTime_ = executionTime_; + } else { + result.executionTime_ = executionTimeLogNormalBuilder_.build(); + } + } + if (opMemoryBuilder_ == null) { + result.opMemory_ = opMemory_; + } else { + result.opMemory_ = opMemoryBuilder_.build(); + } + result.executionTimeCase_ = executionTimeCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.framework.OpPerformance) { + return mergeFrom((org.tensorflow.proto.framework.OpPerformance)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.framework.OpPerformance other) { + if (other == org.tensorflow.proto.framework.OpPerformance.getDefaultInstance()) return this; + if (other.hasOp()) { + mergeOp(other.getOp()); + } + if (other.hasSessionInfo()) { + mergeSessionInfo(other.getSessionInfo()); + } + if (!other.getNode().isEmpty()) { + node_ = other.node_; + onChanged(); + } + if (other.getTemporaryMemorySize() != 0L) { + setTemporaryMemorySize(other.getTemporaryMemorySize()); + } + if (other.getComputeCost() != 0L) { + setComputeCost(other.getComputeCost()); + } + if (other.getComputeTime() != 0L) { + setComputeTime(other.getComputeTime()); + } + if (other.getMemoryTime() != 0L) { + setMemoryTime(other.getMemoryTime()); + } + if (other.getComputeEfficiency() != 0D) { + setComputeEfficiency(other.getComputeEfficiency()); + } + if (other.getMemoryEfficiency() != 0D) { + setMemoryEfficiency(other.getMemoryEfficiency()); + } + if (other.hasOpMemory()) { + mergeOpMemory(other.getOpMemory()); + } + switch (other.getExecutionTimeCase()) { + case EXECUTION_TIME_NORMAL: { + mergeExecutionTimeNormal(other.getExecutionTimeNormal()); + break; + } + case EXECUTION_TIME_LOG_NORMAL: { + mergeExecutionTimeLogNormal(other.getExecutionTimeLogNormal()); + break; + } + case EXECUTIONTIME_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.framework.OpPerformance parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } 
catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.framework.OpPerformance) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int executionTimeCase_ = 0; + private java.lang.Object executionTime_; + public ExecutionTimeCase + getExecutionTimeCase() { + return ExecutionTimeCase.forNumber( + executionTimeCase_); + } + + public Builder clearExecutionTime() { + executionTimeCase_ = 0; + executionTime_ = null; + onChanged(); + return this; + } + + + private org.tensorflow.proto.framework.OpInfo op_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.OpInfo, org.tensorflow.proto.framework.OpInfo.Builder, org.tensorflow.proto.framework.OpInfoOrBuilder> opBuilder_; + /** + *
      +     * The op
      +     * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public boolean hasOp() { + return opBuilder_ != null || op_ != null; + } + /** + *
      +     * The op
      +     * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public org.tensorflow.proto.framework.OpInfo getOp() { + if (opBuilder_ == null) { + return op_ == null ? org.tensorflow.proto.framework.OpInfo.getDefaultInstance() : op_; + } else { + return opBuilder_.getMessage(); + } + } + /** + *
      +     * The op
      +     * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public Builder setOp(org.tensorflow.proto.framework.OpInfo value) { + if (opBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + op_ = value; + onChanged(); + } else { + opBuilder_.setMessage(value); + } + + return this; + } + /** + *
      +     * The op
      +     * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public Builder setOp( + org.tensorflow.proto.framework.OpInfo.Builder builderForValue) { + if (opBuilder_ == null) { + op_ = builderForValue.build(); + onChanged(); + } else { + opBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
      +     * The op
      +     * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public Builder mergeOp(org.tensorflow.proto.framework.OpInfo value) { + if (opBuilder_ == null) { + if (op_ != null) { + op_ = + org.tensorflow.proto.framework.OpInfo.newBuilder(op_).mergeFrom(value).buildPartial(); + } else { + op_ = value; + } + onChanged(); + } else { + opBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
      +     * The op
      +     * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public Builder clearOp() { + if (opBuilder_ == null) { + op_ = null; + onChanged(); + } else { + op_ = null; + opBuilder_ = null; + } + + return this; + } + /** + *
      +     * The op
      +     * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public org.tensorflow.proto.framework.OpInfo.Builder getOpBuilder() { + + onChanged(); + return getOpFieldBuilder().getBuilder(); + } + /** + *
      +     * The op
      +     * 
      + * + * .tensorflow.OpInfo op = 1; + */ + public org.tensorflow.proto.framework.OpInfoOrBuilder getOpOrBuilder() { + if (opBuilder_ != null) { + return opBuilder_.getMessageOrBuilder(); + } else { + return op_ == null ? + org.tensorflow.proto.framework.OpInfo.getDefaultInstance() : op_; + } + } + /** + *
      +     * The op
      +     * 
      + * + * .tensorflow.OpInfo op = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.OpInfo, org.tensorflow.proto.framework.OpInfo.Builder, org.tensorflow.proto.framework.OpInfoOrBuilder> + getOpFieldBuilder() { + if (opBuilder_ == null) { + opBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.OpInfo, org.tensorflow.proto.framework.OpInfo.Builder, org.tensorflow.proto.framework.OpInfoOrBuilder>( + getOp(), + getParentForChildren(), + isClean()); + op_ = null; + } + return opBuilder_; + } + + private org.tensorflow.proto.framework.SessionInfo sessionInfo_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.SessionInfo, org.tensorflow.proto.framework.SessionInfo.Builder, org.tensorflow.proto.framework.SessionInfoOrBuilder> sessionInfoBuilder_; + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public boolean hasSessionInfo() { + return sessionInfoBuilder_ != null || sessionInfo_ != null; + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public org.tensorflow.proto.framework.SessionInfo getSessionInfo() { + if (sessionInfoBuilder_ == null) { + return sessionInfo_ == null ? org.tensorflow.proto.framework.SessionInfo.getDefaultInstance() : sessionInfo_; + } else { + return sessionInfoBuilder_.getMessage(); + } + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public Builder setSessionInfo(org.tensorflow.proto.framework.SessionInfo value) { + if (sessionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sessionInfo_ = value; + onChanged(); + } else { + sessionInfoBuilder_.setMessage(value); + } + + return this; + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public Builder setSessionInfo( + org.tensorflow.proto.framework.SessionInfo.Builder builderForValue) { + if (sessionInfoBuilder_ == null) { + sessionInfo_ = builderForValue.build(); + onChanged(); + } else { + sessionInfoBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public Builder mergeSessionInfo(org.tensorflow.proto.framework.SessionInfo value) { + if (sessionInfoBuilder_ == null) { + if (sessionInfo_ != null) { + sessionInfo_ = + org.tensorflow.proto.framework.SessionInfo.newBuilder(sessionInfo_).mergeFrom(value).buildPartial(); + } else { + sessionInfo_ = value; + } + onChanged(); + } else { + sessionInfoBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public Builder clearSessionInfo() { + if (sessionInfoBuilder_ == null) { + sessionInfo_ = null; + onChanged(); + } else { + sessionInfo_ = null; + sessionInfoBuilder_ = null; + } + + return this; + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public org.tensorflow.proto.framework.SessionInfo.Builder getSessionInfoBuilder() { + + onChanged(); + return getSessionInfoFieldBuilder().getBuilder(); + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated public org.tensorflow.proto.framework.SessionInfoOrBuilder getSessionInfoOrBuilder() { + if (sessionInfoBuilder_ != null) { + return sessionInfoBuilder_.getMessageOrBuilder(); + } else { + return sessionInfo_ == null ? + org.tensorflow.proto.framework.SessionInfo.getDefaultInstance() : sessionInfo_; + } + } + /** + *
      +     * Information about the session configs.
      +     * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.SessionInfo, org.tensorflow.proto.framework.SessionInfo.Builder, org.tensorflow.proto.framework.SessionInfoOrBuilder> + getSessionInfoFieldBuilder() { + if (sessionInfoBuilder_ == null) { + sessionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.SessionInfo, org.tensorflow.proto.framework.SessionInfo.Builder, org.tensorflow.proto.framework.SessionInfoOrBuilder>( + getSessionInfo(), + getParentForChildren(), + isClean()); + sessionInfo_ = null; + } + return sessionInfoBuilder_; + } + + private java.lang.Object node_ = ""; + /** + *
      +     * The node name (optional). Makes it easier to associate the performance data
      +     * with a specific graph node.
      +     * 
      + * + * string node = 5; + */ + public java.lang.String getNode() { + java.lang.Object ref = node_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + node_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
      +     * The node name (optional). Makes it easier to associate the performance data
      +     * with a specific graph node.
      +     * 
      + * + * string node = 5; + */ + public com.google.protobuf.ByteString + getNodeBytes() { + java.lang.Object ref = node_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + node_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
      +     * The node name (optional). Makes it easier to associate the performance data
      +     * with a specific graph node.
      +     * 
      + * + * string node = 5; + */ + public Builder setNode( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + node_ = value; + onChanged(); + return this; + } + /** + *
      +     * The node name (optional). Makes it easier to associate the performance data
      +     * with a specific graph node.
      +     * 
      + * + * string node = 5; + */ + public Builder clearNode() { + + node_ = getDefaultInstance().getNode(); + onChanged(); + return this; + } + /** + *
      +     * The node name (optional). Makes it easier to associate the performance data
      +     * with a specific graph node.
      +     * 
      + * + * string node = 5; + */ + public Builder setNodeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + node_ = value; + onChanged(); + return this; + } + + private long temporaryMemorySize_ ; + /** + *
      +     * Temporary memory used by this node (in bytes).
      +     * 
      + * + * int64 temporary_memory_size = 2; + */ + public long getTemporaryMemorySize() { + return temporaryMemorySize_; + } + /** + *
      +     * Temporary memory used by this node (in bytes).
      +     * 
      + * + * int64 temporary_memory_size = 2; + */ + public Builder setTemporaryMemorySize(long value) { + + temporaryMemorySize_ = value; + onChanged(); + return this; + } + /** + *
      +     * Temporary memory used by this node (in bytes).
      +     * 
      + * + * int64 temporary_memory_size = 2; + */ + public Builder clearTemporaryMemorySize() { + + temporaryMemorySize_ = 0L; + onChanged(); + return this; + } + + private long computeCost_ ; + /** + *
      +     * Time it takes to run the op (in nanoseconds).
      +     * 
      + * + * int64 compute_cost = 3; + */ + public long getComputeCost() { + return computeCost_; + } + /** + *
      +     * Time it takes to run the op (in nanoseconds).
      +     * 
      + * + * int64 compute_cost = 3; + */ + public Builder setComputeCost(long value) { + + computeCost_ = value; + onChanged(); + return this; + } + /** + *
      +     * Time it takes to run the op (in nanoseconds).
      +     * 
      + * + * int64 compute_cost = 3; + */ + public Builder clearComputeCost() { + + computeCost_ = 0L; + onChanged(); + return this; + } + + private long computeTime_ ; + /** + *
      +     * Analytical compute cost (in nanoseconds).
      +     * 
      + * + * int64 compute_time = 6; + */ + public long getComputeTime() { + return computeTime_; + } + /** + *
      +     * Analytical compute cost (in nanoseconds).
      +     * 
      + * + * int64 compute_time = 6; + */ + public Builder setComputeTime(long value) { + + computeTime_ = value; + onChanged(); + return this; + } + /** + *
      +     * Analytical compute cost (in nanoseconds).
      +     * 
      + * + * int64 compute_time = 6; + */ + public Builder clearComputeTime() { + + computeTime_ = 0L; + onChanged(); + return this; + } + + private long memoryTime_ ; + /** + *
      +     * Analytical memory access cost (in nanoseconds).
      +     * 
      + * + * int64 memory_time = 7; + */ + public long getMemoryTime() { + return memoryTime_; + } + /** + *
      +     * Analytical memory access cost (in nanoseconds).
      +     * 
      + * + * int64 memory_time = 7; + */ + public Builder setMemoryTime(long value) { + + memoryTime_ = value; + onChanged(); + return this; + } + /** + *
      +     * Analytical memory access cost (in nanoseconds).
      +     * 
      + * + * int64 memory_time = 7; + */ + public Builder clearMemoryTime() { + + memoryTime_ = 0L; + onChanged(); + return this; + } + + private double computeEfficiency_ ; + /** + *
      +     * Percentage of theoretical compute performance.
      +     * 
      + * + * double compute_efficiency = 4; + */ + public double getComputeEfficiency() { + return computeEfficiency_; + } + /** + *
      +     * Percentage of theoretical compute performance.
      +     * 
      + * + * double compute_efficiency = 4; + */ + public Builder setComputeEfficiency(double value) { + + computeEfficiency_ = value; + onChanged(); + return this; + } + /** + *
      +     * Percentage of theoretical compute performance.
      +     * 
      + * + * double compute_efficiency = 4; + */ + public Builder clearComputeEfficiency() { + + computeEfficiency_ = 0D; + onChanged(); + return this; + } + + private double memoryEfficiency_ ; + /** + *
      +     * Percentage of theoretical memory performance.
      +     * 
      + * + * double memory_efficiency = 8; + */ + public double getMemoryEfficiency() { + return memoryEfficiency_; + } + /** + *
      +     * Percentage of theoretical memory performance.
      +     * 
      + * + * double memory_efficiency = 8; + */ + public Builder setMemoryEfficiency(double value) { + + memoryEfficiency_ = value; + onChanged(); + return this; + } + /** + *
      +     * Percentage of theoretical memory performance.
      +     * 
      + * + * double memory_efficiency = 8; + */ + public Builder clearMemoryEfficiency() { + + memoryEfficiency_ = 0D; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.NormalDistribution, org.tensorflow.proto.framework.NormalDistribution.Builder, org.tensorflow.proto.framework.NormalDistributionOrBuilder> executionTimeNormalBuilder_; + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public boolean hasExecutionTimeNormal() { + return executionTimeCase_ == 10; + } + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public org.tensorflow.proto.framework.NormalDistribution getExecutionTimeNormal() { + if (executionTimeNormalBuilder_ == null) { + if (executionTimeCase_ == 10) { + return (org.tensorflow.proto.framework.NormalDistribution) executionTime_; + } + return org.tensorflow.proto.framework.NormalDistribution.getDefaultInstance(); + } else { + if (executionTimeCase_ == 10) { + return executionTimeNormalBuilder_.getMessage(); + } + return org.tensorflow.proto.framework.NormalDistribution.getDefaultInstance(); + } + } + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public Builder setExecutionTimeNormal(org.tensorflow.proto.framework.NormalDistribution value) { + if (executionTimeNormalBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + executionTime_ = value; + onChanged(); + } else { + executionTimeNormalBuilder_.setMessage(value); + } + executionTimeCase_ = 10; + return this; + } + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public Builder setExecutionTimeNormal( + org.tensorflow.proto.framework.NormalDistribution.Builder builderForValue) { + if (executionTimeNormalBuilder_ == null) { + executionTime_ = builderForValue.build(); + onChanged(); + } else { + executionTimeNormalBuilder_.setMessage(builderForValue.build()); + } + executionTimeCase_ = 10; + return this; + } + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public Builder mergeExecutionTimeNormal(org.tensorflow.proto.framework.NormalDistribution value) { + if (executionTimeNormalBuilder_ == null) { + if (executionTimeCase_ == 10 && + executionTime_ != org.tensorflow.proto.framework.NormalDistribution.getDefaultInstance()) { + executionTime_ = org.tensorflow.proto.framework.NormalDistribution.newBuilder((org.tensorflow.proto.framework.NormalDistribution) executionTime_) + .mergeFrom(value).buildPartial(); + } else { + executionTime_ = value; + } + onChanged(); + } else { + if (executionTimeCase_ == 10) { + executionTimeNormalBuilder_.mergeFrom(value); + } + executionTimeNormalBuilder_.setMessage(value); + } + executionTimeCase_ = 10; + return this; + } + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public Builder clearExecutionTimeNormal() { + if (executionTimeNormalBuilder_ == null) { + if (executionTimeCase_ == 10) { + executionTimeCase_ = 0; + executionTime_ = null; + onChanged(); + } + } else { + if (executionTimeCase_ == 10) { + executionTimeCase_ = 0; + executionTime_ = null; + } + executionTimeNormalBuilder_.clear(); + } + return this; + } + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public org.tensorflow.proto.framework.NormalDistribution.Builder getExecutionTimeNormalBuilder() { + return getExecutionTimeNormalFieldBuilder().getBuilder(); + } + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + public 
org.tensorflow.proto.framework.NormalDistributionOrBuilder getExecutionTimeNormalOrBuilder() { + if ((executionTimeCase_ == 10) && (executionTimeNormalBuilder_ != null)) { + return executionTimeNormalBuilder_.getMessageOrBuilder(); + } else { + if (executionTimeCase_ == 10) { + return (org.tensorflow.proto.framework.NormalDistribution) executionTime_; + } + return org.tensorflow.proto.framework.NormalDistribution.getDefaultInstance(); + } + } + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.NormalDistribution, org.tensorflow.proto.framework.NormalDistribution.Builder, org.tensorflow.proto.framework.NormalDistributionOrBuilder> + getExecutionTimeNormalFieldBuilder() { + if (executionTimeNormalBuilder_ == null) { + if (!(executionTimeCase_ == 10)) { + executionTime_ = org.tensorflow.proto.framework.NormalDistribution.getDefaultInstance(); + } + executionTimeNormalBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.NormalDistribution, org.tensorflow.proto.framework.NormalDistribution.Builder, org.tensorflow.proto.framework.NormalDistributionOrBuilder>( + (org.tensorflow.proto.framework.NormalDistribution) executionTime_, + getParentForChildren(), + isClean()); + executionTime_ = null; + } + executionTimeCase_ = 10; + onChanged();; + return executionTimeNormalBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.LogNormalDistribution, org.tensorflow.proto.framework.LogNormalDistribution.Builder, org.tensorflow.proto.framework.LogNormalDistributionOrBuilder> executionTimeLogNormalBuilder_; + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public boolean hasExecutionTimeLogNormal() { + return executionTimeCase_ == 11; + } + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public org.tensorflow.proto.framework.LogNormalDistribution getExecutionTimeLogNormal() { + if (executionTimeLogNormalBuilder_ == null) { + if (executionTimeCase_ == 11) { + return (org.tensorflow.proto.framework.LogNormalDistribution) executionTime_; + } + return org.tensorflow.proto.framework.LogNormalDistribution.getDefaultInstance(); + } else { + if (executionTimeCase_ == 11) { + return executionTimeLogNormalBuilder_.getMessage(); + } + return org.tensorflow.proto.framework.LogNormalDistribution.getDefaultInstance(); + } + } + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public Builder setExecutionTimeLogNormal(org.tensorflow.proto.framework.LogNormalDistribution value) { + if (executionTimeLogNormalBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + executionTime_ = value; + onChanged(); + } else { + executionTimeLogNormalBuilder_.setMessage(value); + } + executionTimeCase_ = 11; + return this; + } + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public Builder setExecutionTimeLogNormal( + org.tensorflow.proto.framework.LogNormalDistribution.Builder builderForValue) { + if (executionTimeLogNormalBuilder_ == null) { + executionTime_ = builderForValue.build(); + onChanged(); + } else { + executionTimeLogNormalBuilder_.setMessage(builderForValue.build()); + } + executionTimeCase_ = 11; + return this; + } + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public Builder 
mergeExecutionTimeLogNormal(org.tensorflow.proto.framework.LogNormalDistribution value) { + if (executionTimeLogNormalBuilder_ == null) { + if (executionTimeCase_ == 11 && + executionTime_ != org.tensorflow.proto.framework.LogNormalDistribution.getDefaultInstance()) { + executionTime_ = org.tensorflow.proto.framework.LogNormalDistribution.newBuilder((org.tensorflow.proto.framework.LogNormalDistribution) executionTime_) + .mergeFrom(value).buildPartial(); + } else { + executionTime_ = value; + } + onChanged(); + } else { + if (executionTimeCase_ == 11) { + executionTimeLogNormalBuilder_.mergeFrom(value); + } + executionTimeLogNormalBuilder_.setMessage(value); + } + executionTimeCase_ = 11; + return this; + } + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public Builder clearExecutionTimeLogNormal() { + if (executionTimeLogNormalBuilder_ == null) { + if (executionTimeCase_ == 11) { + executionTimeCase_ = 0; + executionTime_ = null; + onChanged(); + } + } else { + if (executionTimeCase_ == 11) { + executionTimeCase_ = 0; + executionTime_ = null; + } + executionTimeLogNormalBuilder_.clear(); + } + return this; + } + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public org.tensorflow.proto.framework.LogNormalDistribution.Builder getExecutionTimeLogNormalBuilder() { + return getExecutionTimeLogNormalFieldBuilder().getBuilder(); + } + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + public org.tensorflow.proto.framework.LogNormalDistributionOrBuilder getExecutionTimeLogNormalOrBuilder() { + if ((executionTimeCase_ == 11) && (executionTimeLogNormalBuilder_ != null)) { + return executionTimeLogNormalBuilder_.getMessageOrBuilder(); + } else { + if (executionTimeCase_ == 11) { + return (org.tensorflow.proto.framework.LogNormalDistribution) executionTime_; + } + return org.tensorflow.proto.framework.LogNormalDistribution.getDefaultInstance(); + } + } + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.LogNormalDistribution, org.tensorflow.proto.framework.LogNormalDistribution.Builder, org.tensorflow.proto.framework.LogNormalDistributionOrBuilder> + getExecutionTimeLogNormalFieldBuilder() { + if (executionTimeLogNormalBuilder_ == null) { + if (!(executionTimeCase_ == 11)) { + executionTime_ = org.tensorflow.proto.framework.LogNormalDistribution.getDefaultInstance(); + } + executionTimeLogNormalBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.LogNormalDistribution, org.tensorflow.proto.framework.LogNormalDistribution.Builder, org.tensorflow.proto.framework.LogNormalDistributionOrBuilder>( + (org.tensorflow.proto.framework.LogNormalDistribution) executionTime_, + getParentForChildren(), + isClean()); + executionTime_ = null; + } + executionTimeCase_ = 11; + onChanged();; + return executionTimeLogNormalBuilder_; + } + + private org.tensorflow.proto.framework.OpPerformance.OpMemory opMemory_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.OpPerformance.OpMemory, org.tensorflow.proto.framework.OpPerformance.OpMemory.Builder, org.tensorflow.proto.framework.OpPerformance.OpMemoryOrBuilder> opMemoryBuilder_; + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + public boolean hasOpMemory() { + return opMemoryBuilder_ != null || opMemory_ != null; + } + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 
9; + */ + public org.tensorflow.proto.framework.OpPerformance.OpMemory getOpMemory() { + if (opMemoryBuilder_ == null) { + return opMemory_ == null ? org.tensorflow.proto.framework.OpPerformance.OpMemory.getDefaultInstance() : opMemory_; + } else { + return opMemoryBuilder_.getMessage(); + } + } + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + public Builder setOpMemory(org.tensorflow.proto.framework.OpPerformance.OpMemory value) { + if (opMemoryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + opMemory_ = value; + onChanged(); + } else { + opMemoryBuilder_.setMessage(value); + } + + return this; + } + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + public Builder setOpMemory( + org.tensorflow.proto.framework.OpPerformance.OpMemory.Builder builderForValue) { + if (opMemoryBuilder_ == null) { + opMemory_ = builderForValue.build(); + onChanged(); + } else { + opMemoryBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + public Builder mergeOpMemory(org.tensorflow.proto.framework.OpPerformance.OpMemory value) { + if (opMemoryBuilder_ == null) { + if (opMemory_ != null) { + opMemory_ = + org.tensorflow.proto.framework.OpPerformance.OpMemory.newBuilder(opMemory_).mergeFrom(value).buildPartial(); + } else { + opMemory_ = value; + } + onChanged(); + } else { + opMemoryBuilder_.mergeFrom(value); + } + + return this; + } + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + public Builder clearOpMemory() { + if (opMemoryBuilder_ == null) { + opMemory_ = null; + onChanged(); + } else { + opMemory_ = null; + opMemoryBuilder_ = null; + } + + return this; + } + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + public org.tensorflow.proto.framework.OpPerformance.OpMemory.Builder getOpMemoryBuilder() { + + onChanged(); + return getOpMemoryFieldBuilder().getBuilder(); + } + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + public org.tensorflow.proto.framework.OpPerformance.OpMemoryOrBuilder getOpMemoryOrBuilder() { + if (opMemoryBuilder_ != null) { + return opMemoryBuilder_.getMessageOrBuilder(); + } else { + return opMemory_ == null ? 
+ org.tensorflow.proto.framework.OpPerformance.OpMemory.getDefaultInstance() : opMemory_; + } + } + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.OpPerformance.OpMemory, org.tensorflow.proto.framework.OpPerformance.OpMemory.Builder, org.tensorflow.proto.framework.OpPerformance.OpMemoryOrBuilder> + getOpMemoryFieldBuilder() { + if (opMemoryBuilder_ == null) { + opMemoryBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.OpPerformance.OpMemory, org.tensorflow.proto.framework.OpPerformance.OpMemory.Builder, org.tensorflow.proto.framework.OpPerformance.OpMemoryOrBuilder>( + getOpMemory(), + getParentForChildren(), + isClean()); + opMemory_ = null; + } + return opMemoryBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.OpPerformance) + } + + // @@protoc_insertion_point(class_scope:tensorflow.OpPerformance) + private static final org.tensorflow.proto.framework.OpPerformance DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.framework.OpPerformance(); + } + + public static org.tensorflow.proto.framework.OpPerformance getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public OpPerformance parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new OpPerformance(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformance getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceDataProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceDataProtos.java new file mode 100644 index 00000000000..4c3fcec5afa --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceDataProtos.java @@ -0,0 +1,186 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: tensorflow/core/grappler/costs/op_performance_data.proto + +package org.tensorflow.proto.framework; + +public final class OpPerformanceDataProtos { + private OpPerformanceDataProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_SessionInfo_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_SessionInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_OpInfo_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_OpInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_OpInfo_AttrEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_OpInfo_AttrEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_OpInfo_TensorProperties_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_OpInfo_TensorProperties_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_NormalDistribution_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_NormalDistribution_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_LogNormalDistribution_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_LogNormalDistribution_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_OpPerformance_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_OpPerformance_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_OpPerformance_OpMemory_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_OpPerformance_OpMemory_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_OpPerformanceList_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_OpPerformanceList_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n8tensorflow/core/grappler/costs/op_perf" + + "ormance_data.proto\022\ntensorflow\032&tensorfl" + + "ow/core/framework/tensor.proto\032,tensorfl" + + "ow/core/framework/tensor_shape.proto\032%te" + + "nsorflow/core/framework/types.proto\032*ten" + + "sorflow/core/framework/attr_value.proto\032" + + "0tensorflow/core/protobuf/device_propert" + + "ies.proto\"+\n\013SessionInfo\022\034\n\024intra_op_par" + + "allelism\030\001 \001(\003\"\333\003\n\006OpInfo\022\n\n\002op\030\001 \001(\t\022*\n" + + 
"\004attr\030\002 \003(\0132\034.tensorflow.OpInfo.AttrEntr" + + "y\0223\n\006inputs\030\003 \003(\0132#.tensorflow.OpInfo.Te" + + "nsorProperties\0224\n\007outputs\030\005 \003(\0132#.tensor" + + "flow.OpInfo.TensorProperties\022,\n\006device\030\004" + + " \001(\0132\034.tensorflow.DeviceProperties\022-\n\014se" + + "ssion_info\030\006 \001(\0132\027.tensorflow.SessionInf" + + "o\032B\n\tAttrEntry\022\013\n\003key\030\001 \001(\t\022$\n\005value\030\002 \001" + + "(\0132\025.tensorflow.AttrValue:\0028\001\032\214\001\n\020Tensor" + + "Properties\022#\n\005dtype\030\001 \001(\0162\024.tensorflow.D" + + "ataType\022+\n\005shape\030\002 \001(\0132\034.tensorflow.Tens" + + "orShapeProto\022&\n\005value\030\003 \001(\0132\027.tensorflow" + + ".TensorProto\"/\n\022NormalDistribution\022\n\n\002mu" + + "\030\001 \001(\001\022\r\n\005sigma\030\002 \001(\001\"2\n\025LogNormalDistri" + + "bution\022\n\n\002mu\030\001 \001(\001\022\r\n\005sigma\030\002 \001(\001\"\363\004\n\rOp" + + "Performance\022\036\n\002op\030\001 \001(\0132\022.tensorflow.OpI" + + "nfo\0221\n\014session_info\030\014 \001(\0132\027.tensorflow.S" + + "essionInfoB\002\030\001\022\014\n\004node\030\005 \001(\t\022\035\n\025temporar" + + "y_memory_size\030\002 \001(\003\022\024\n\014compute_cost\030\003 \001(" + + "\003\022\024\n\014compute_time\030\006 \001(\003\022\023\n\013memory_time\030\007" + + " \001(\003\022\032\n\022compute_efficiency\030\004 \001(\001\022\031\n\021memo" + + "ry_efficiency\030\010 \001(\001\022?\n\025execution_time_no" + + "rmal\030\n \001(\0132\036.tensorflow.NormalDistributi" + + "onH\000\022F\n\031execution_time_log_normal\030\013 \001(\0132" + + "!.tensorflow.LogNormalDistributionH\000\0225\n\t" + + "op_memory\030\t \001(\0132\".tensorflow.OpPerforman" + + "ce.OpMemory\032\227\001\n\010OpMemory\022\025\n\routput_memor" + + "y\030\001 \003(\003\022\023\n\013temp_memory\030\002 \001(\003\022\031\n\021persiste" + + "nt_memory\030\004 \001(\003\022\036\n\022device_temp_memory\030\003 " + + "\001(\003B\002\030\001\022$\n\030device_persistent_memory\030\005 \001(" + + "\003B\002\030\001B\020\n\016execution_time\"F\n\021OpPerformance" + + "List\0221\n\016op_performance\030\001 \003(\0132\031.tensorflo" + + "w.OpPerformanceB>\n\036org.tensorflow.proto." 
+ + "frameworkB\027OpPerformanceDataProtosP\001\370\001\001b" + + "\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.tensorflow.proto.framework.TensorProtos.getDescriptor(), + org.tensorflow.proto.framework.TensorShapeProtos.getDescriptor(), + org.tensorflow.proto.framework.TypesProtos.getDescriptor(), + org.tensorflow.proto.framework.AttrValueProtos.getDescriptor(), + org.tensorflow.proto.framework.DevicePropertiesProtos.getDescriptor(), + }); + internal_static_tensorflow_SessionInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_SessionInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_SessionInfo_descriptor, + new java.lang.String[] { "IntraOpParallelism", }); + internal_static_tensorflow_OpInfo_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_tensorflow_OpInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_OpInfo_descriptor, + new java.lang.String[] { "Op", "Attr", "Inputs", "Outputs", "Device", "SessionInfo", }); + internal_static_tensorflow_OpInfo_AttrEntry_descriptor = + internal_static_tensorflow_OpInfo_descriptor.getNestedTypes().get(0); + internal_static_tensorflow_OpInfo_AttrEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_OpInfo_AttrEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_tensorflow_OpInfo_TensorProperties_descriptor = + internal_static_tensorflow_OpInfo_descriptor.getNestedTypes().get(1); + internal_static_tensorflow_OpInfo_TensorProperties_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_OpInfo_TensorProperties_descriptor, + new java.lang.String[] { "Dtype", "Shape", "Value", }); + internal_static_tensorflow_NormalDistribution_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_tensorflow_NormalDistribution_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_NormalDistribution_descriptor, + new java.lang.String[] { "Mu", "Sigma", }); + internal_static_tensorflow_LogNormalDistribution_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_tensorflow_LogNormalDistribution_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_LogNormalDistribution_descriptor, + new java.lang.String[] { "Mu", "Sigma", }); + internal_static_tensorflow_OpPerformance_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_tensorflow_OpPerformance_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_OpPerformance_descriptor, + new java.lang.String[] { "Op", "SessionInfo", "Node", "TemporaryMemorySize", "ComputeCost", "ComputeTime", "MemoryTime", "ComputeEfficiency", "MemoryEfficiency", "ExecutionTimeNormal", "ExecutionTimeLogNormal", "OpMemory", "ExecutionTime", }); + internal_static_tensorflow_OpPerformance_OpMemory_descriptor = + internal_static_tensorflow_OpPerformance_descriptor.getNestedTypes().get(0); + internal_static_tensorflow_OpPerformance_OpMemory_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
+ internal_static_tensorflow_OpPerformance_OpMemory_descriptor, + new java.lang.String[] { "OutputMemory", "TempMemory", "PersistentMemory", "DeviceTempMemory", "DevicePersistentMemory", }); + internal_static_tensorflow_OpPerformanceList_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_tensorflow_OpPerformanceList_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_OpPerformanceList_descriptor, + new java.lang.String[] { "OpPerformance", }); + org.tensorflow.proto.framework.TensorProtos.getDescriptor(); + org.tensorflow.proto.framework.TensorShapeProtos.getDescriptor(); + org.tensorflow.proto.framework.TypesProtos.getDescriptor(); + org.tensorflow.proto.framework.AttrValueProtos.getDescriptor(); + org.tensorflow.proto.framework.DevicePropertiesProtos.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceList.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceList.java new file mode 100644 index 00000000000..0b09d450f2d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceList.java @@ -0,0 +1,773 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/grappler/costs/op_performance_data.proto + +package org.tensorflow.proto.framework; + +/** + *
      + * A collection of OpPerformance data points.
      + * 
      + * + * Protobuf type {@code tensorflow.OpPerformanceList} + */ +public final class OpPerformanceList extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.OpPerformanceList) + OpPerformanceListOrBuilder { +private static final long serialVersionUID = 0L; + // Use OpPerformanceList.newBuilder() to construct. + private OpPerformanceList(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private OpPerformanceList() { + opPerformance_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new OpPerformanceList(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private OpPerformanceList( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + opPerformance_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + opPerformance_.add( + input.readMessage(org.tensorflow.proto.framework.OpPerformance.parser(), extensionRegistry)); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + opPerformance_ = java.util.Collections.unmodifiableList(opPerformance_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformanceList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformanceList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.OpPerformanceList.class, org.tensorflow.proto.framework.OpPerformanceList.Builder.class); + } + + public static final int OP_PERFORMANCE_FIELD_NUMBER = 1; + private java.util.List opPerformance_; + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public java.util.List getOpPerformanceList() { + return opPerformance_; + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public java.util.List + getOpPerformanceOrBuilderList() { + return opPerformance_; + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public int getOpPerformanceCount() { + return opPerformance_.size(); + } + /** + * repeated 
.tensorflow.OpPerformance op_performance = 1; + */ + public org.tensorflow.proto.framework.OpPerformance getOpPerformance(int index) { + return opPerformance_.get(index); + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public org.tensorflow.proto.framework.OpPerformanceOrBuilder getOpPerformanceOrBuilder( + int index) { + return opPerformance_.get(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < opPerformance_.size(); i++) { + output.writeMessage(1, opPerformance_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < opPerformance_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, opPerformance_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.framework.OpPerformanceList)) { + return super.equals(obj); + } + org.tensorflow.proto.framework.OpPerformanceList other = (org.tensorflow.proto.framework.OpPerformanceList) obj; + + if (!getOpPerformanceList() + .equals(other.getOpPerformanceList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getOpPerformanceCount() > 0) { + hash = (37 * hash) + OP_PERFORMANCE_FIELD_NUMBER; + hash = (53 * hash) + getOpPerformanceList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.framework.OpPerformanceList parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseFrom( + byte[] data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.OpPerformanceList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.framework.OpPerformanceList prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +   * A collection of OpPerformance data points.
      +   * 
      + * + * Protobuf type {@code tensorflow.OpPerformanceList} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.OpPerformanceList) + org.tensorflow.proto.framework.OpPerformanceListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformanceList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformanceList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.OpPerformanceList.class, org.tensorflow.proto.framework.OpPerformanceList.Builder.class); + } + + // Construct using org.tensorflow.proto.framework.OpPerformanceList.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getOpPerformanceFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (opPerformanceBuilder_ == null) { + opPerformance_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + opPerformanceBuilder_.clear(); + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_OpPerformanceList_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformanceList getDefaultInstanceForType() { + return org.tensorflow.proto.framework.OpPerformanceList.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformanceList build() { + org.tensorflow.proto.framework.OpPerformanceList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformanceList buildPartial() { + org.tensorflow.proto.framework.OpPerformanceList result = new org.tensorflow.proto.framework.OpPerformanceList(this); + int from_bitField0_ = bitField0_; + if (opPerformanceBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + opPerformance_ = java.util.Collections.unmodifiableList(opPerformance_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.opPerformance_ = opPerformance_; + } else { + result.opPerformance_ = opPerformanceBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override 
+ public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.framework.OpPerformanceList) { + return mergeFrom((org.tensorflow.proto.framework.OpPerformanceList)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.framework.OpPerformanceList other) { + if (other == org.tensorflow.proto.framework.OpPerformanceList.getDefaultInstance()) return this; + if (opPerformanceBuilder_ == null) { + if (!other.opPerformance_.isEmpty()) { + if (opPerformance_.isEmpty()) { + opPerformance_ = other.opPerformance_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureOpPerformanceIsMutable(); + opPerformance_.addAll(other.opPerformance_); + } + onChanged(); + } + } else { + if (!other.opPerformance_.isEmpty()) { + if (opPerformanceBuilder_.isEmpty()) { + opPerformanceBuilder_.dispose(); + opPerformanceBuilder_ = null; + opPerformance_ = other.opPerformance_; + bitField0_ = (bitField0_ & ~0x00000001); + opPerformanceBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getOpPerformanceFieldBuilder() : null; + } else { + opPerformanceBuilder_.addAllMessages(other.opPerformance_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.framework.OpPerformanceList parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.framework.OpPerformanceList) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List opPerformance_ = + java.util.Collections.emptyList(); + private void ensureOpPerformanceIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + opPerformance_ = new java.util.ArrayList(opPerformance_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.OpPerformance, org.tensorflow.proto.framework.OpPerformance.Builder, org.tensorflow.proto.framework.OpPerformanceOrBuilder> opPerformanceBuilder_; + + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public java.util.List getOpPerformanceList() { + if (opPerformanceBuilder_ == null) { + return java.util.Collections.unmodifiableList(opPerformance_); + } else { + return opPerformanceBuilder_.getMessageList(); + } + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public int getOpPerformanceCount() { + if (opPerformanceBuilder_ == null) { + return opPerformance_.size(); + } else { + return opPerformanceBuilder_.getCount(); + } + } + /** + * repeated .tensorflow.OpPerformance 
op_performance = 1; + */ + public org.tensorflow.proto.framework.OpPerformance getOpPerformance(int index) { + if (opPerformanceBuilder_ == null) { + return opPerformance_.get(index); + } else { + return opPerformanceBuilder_.getMessage(index); + } + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public Builder setOpPerformance( + int index, org.tensorflow.proto.framework.OpPerformance value) { + if (opPerformanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpPerformanceIsMutable(); + opPerformance_.set(index, value); + onChanged(); + } else { + opPerformanceBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public Builder setOpPerformance( + int index, org.tensorflow.proto.framework.OpPerformance.Builder builderForValue) { + if (opPerformanceBuilder_ == null) { + ensureOpPerformanceIsMutable(); + opPerformance_.set(index, builderForValue.build()); + onChanged(); + } else { + opPerformanceBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public Builder addOpPerformance(org.tensorflow.proto.framework.OpPerformance value) { + if (opPerformanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpPerformanceIsMutable(); + opPerformance_.add(value); + onChanged(); + } else { + opPerformanceBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public Builder addOpPerformance( + int index, org.tensorflow.proto.framework.OpPerformance value) { + if (opPerformanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpPerformanceIsMutable(); + opPerformance_.add(index, value); + onChanged(); + } else { + opPerformanceBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public Builder addOpPerformance( + org.tensorflow.proto.framework.OpPerformance.Builder builderForValue) { + if (opPerformanceBuilder_ == null) { + ensureOpPerformanceIsMutable(); + opPerformance_.add(builderForValue.build()); + onChanged(); + } else { + opPerformanceBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public Builder addOpPerformance( + int index, org.tensorflow.proto.framework.OpPerformance.Builder builderForValue) { + if (opPerformanceBuilder_ == null) { + ensureOpPerformanceIsMutable(); + opPerformance_.add(index, builderForValue.build()); + onChanged(); + } else { + opPerformanceBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public Builder addAllOpPerformance( + java.lang.Iterable values) { + if (opPerformanceBuilder_ == null) { + ensureOpPerformanceIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, opPerformance_); + onChanged(); + } else { + opPerformanceBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public Builder clearOpPerformance() { + if (opPerformanceBuilder_ == null) { + opPerformance_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + opPerformanceBuilder_.clear(); + } + return this; + } + 
/** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public Builder removeOpPerformance(int index) { + if (opPerformanceBuilder_ == null) { + ensureOpPerformanceIsMutable(); + opPerformance_.remove(index); + onChanged(); + } else { + opPerformanceBuilder_.remove(index); + } + return this; + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public org.tensorflow.proto.framework.OpPerformance.Builder getOpPerformanceBuilder( + int index) { + return getOpPerformanceFieldBuilder().getBuilder(index); + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public org.tensorflow.proto.framework.OpPerformanceOrBuilder getOpPerformanceOrBuilder( + int index) { + if (opPerformanceBuilder_ == null) { + return opPerformance_.get(index); } else { + return opPerformanceBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public java.util.List + getOpPerformanceOrBuilderList() { + if (opPerformanceBuilder_ != null) { + return opPerformanceBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(opPerformance_); + } + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public org.tensorflow.proto.framework.OpPerformance.Builder addOpPerformanceBuilder() { + return getOpPerformanceFieldBuilder().addBuilder( + org.tensorflow.proto.framework.OpPerformance.getDefaultInstance()); + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public org.tensorflow.proto.framework.OpPerformance.Builder addOpPerformanceBuilder( + int index) { + return getOpPerformanceFieldBuilder().addBuilder( + index, org.tensorflow.proto.framework.OpPerformance.getDefaultInstance()); + } + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + public java.util.List + getOpPerformanceBuilderList() { + return getOpPerformanceFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.OpPerformance, org.tensorflow.proto.framework.OpPerformance.Builder, org.tensorflow.proto.framework.OpPerformanceOrBuilder> + getOpPerformanceFieldBuilder() { + if (opPerformanceBuilder_ == null) { + opPerformanceBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.OpPerformance, org.tensorflow.proto.framework.OpPerformance.Builder, org.tensorflow.proto.framework.OpPerformanceOrBuilder>( + opPerformance_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + opPerformance_ = null; + } + return opPerformanceBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.OpPerformanceList) + } + + // @@protoc_insertion_point(class_scope:tensorflow.OpPerformanceList) + private static final org.tensorflow.proto.framework.OpPerformanceList DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.framework.OpPerformanceList(); + } + + public static org.tensorflow.proto.framework.OpPerformanceList getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new 
com.google.protobuf.AbstractParser() { + @java.lang.Override + public OpPerformanceList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new OpPerformanceList(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.framework.OpPerformanceList getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceListOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceListOrBuilder.java new file mode 100644 index 00000000000..9944ba70599 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceListOrBuilder.java @@ -0,0 +1,33 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/grappler/costs/op_performance_data.proto + +package org.tensorflow.proto.framework; + +public interface OpPerformanceListOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.OpPerformanceList) + com.google.protobuf.MessageOrBuilder { + + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + java.util.List + getOpPerformanceList(); + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + org.tensorflow.proto.framework.OpPerformance getOpPerformance(int index); + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + int getOpPerformanceCount(); + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + java.util.List + getOpPerformanceOrBuilderList(); + /** + * repeated .tensorflow.OpPerformance op_performance = 1; + */ + org.tensorflow.proto.framework.OpPerformanceOrBuilder getOpPerformanceOrBuilder( + int index); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceOrBuilder.java new file mode 100644 index 00000000000..513d2706c18 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OpPerformanceOrBuilder.java @@ -0,0 +1,174 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/grappler/costs/op_performance_data.proto + +package org.tensorflow.proto.framework; + +public interface OpPerformanceOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.OpPerformance) + com.google.protobuf.MessageOrBuilder { + + /** + *
      +   * The op
      +   * 
      + * + * .tensorflow.OpInfo op = 1; + */ + boolean hasOp(); + /** + *
      +   * The op
      +   * 
      + * + * .tensorflow.OpInfo op = 1; + */ + org.tensorflow.proto.framework.OpInfo getOp(); + /** + *
      +   * The op
      +   * 
      + * + * .tensorflow.OpInfo op = 1; + */ + org.tensorflow.proto.framework.OpInfoOrBuilder getOpOrBuilder(); + + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated boolean hasSessionInfo(); + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated org.tensorflow.proto.framework.SessionInfo getSessionInfo(); + /** + *
      +   * Information about the session configs.
      +   * 
      + * + * .tensorflow.SessionInfo session_info = 12 [deprecated = true]; + */ + @java.lang.Deprecated org.tensorflow.proto.framework.SessionInfoOrBuilder getSessionInfoOrBuilder(); + + /** + *
      +   * The node name (optional). Makes it easier to associate the performance data
      +   * with a specific graph node.
      +   * 
      + * + * string node = 5; + */ + java.lang.String getNode(); + /** + *
      +   * The node name (optional). Makes it easier to associate the performance data
      +   * with a specific graph node.
      +   * 
      + * + * string node = 5; + */ + com.google.protobuf.ByteString + getNodeBytes(); + + /** + *
      +   * Temporary memory used by this node (in bytes).
      +   * 
      + * + * int64 temporary_memory_size = 2; + */ + long getTemporaryMemorySize(); + + /** + *
      +   * Time it takes to run the op (in nanoseconds).
      +   * 
      + * + * int64 compute_cost = 3; + */ + long getComputeCost(); + + /** + *
      +   * Analytical compute cost (in nanoseconds).
      +   * 
      + * + * int64 compute_time = 6; + */ + long getComputeTime(); + + /** + *
      +   * Analytical memory access cost (in nanoseconds).
      +   * 
      + * + * int64 memory_time = 7; + */ + long getMemoryTime(); + + /** + *
      +   * Percentage of theoretical compute performance.
      +   * 
      + * + * double compute_efficiency = 4; + */ + double getComputeEfficiency(); + + /** + *
      +   * Percentage of theoretical memory performance.
      +   * 
      + * + * double memory_efficiency = 8; + */ + double getMemoryEfficiency(); + + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + boolean hasExecutionTimeNormal(); + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + org.tensorflow.proto.framework.NormalDistribution getExecutionTimeNormal(); + /** + * .tensorflow.NormalDistribution execution_time_normal = 10; + */ + org.tensorflow.proto.framework.NormalDistributionOrBuilder getExecutionTimeNormalOrBuilder(); + + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + boolean hasExecutionTimeLogNormal(); + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + org.tensorflow.proto.framework.LogNormalDistribution getExecutionTimeLogNormal(); + /** + * .tensorflow.LogNormalDistribution execution_time_log_normal = 11; + */ + org.tensorflow.proto.framework.LogNormalDistributionOrBuilder getExecutionTimeLogNormalOrBuilder(); + + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + boolean hasOpMemory(); + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + org.tensorflow.proto.framework.OpPerformance.OpMemory getOpMemory(); + /** + * .tensorflow.OpPerformance.OpMemory op_memory = 9; + */ + org.tensorflow.proto.framework.OpPerformance.OpMemoryOrBuilder getOpMemoryOrBuilder(); + + public org.tensorflow.proto.framework.OpPerformance.ExecutionTimeCase getExecutionTimeCase(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfo.java deleted file mode 100644 index bc2e6840de2..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfo.java +++ /dev/null @@ -1,3007 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/framework/remote_fused_graph_execute_info.proto - -package org.tensorflow.proto.framework; - -/** - *
      - * Protocol buffer representing a handle to a tensorflow resource. Handles are
      - * not valid across executions, but can be serialized back and forth from within
      - * a single run.
      - * 
      - * - * Protobuf type {@code tensorflow.RemoteFusedGraphExecuteInfo} - */ -public final class RemoteFusedGraphExecuteInfo extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:tensorflow.RemoteFusedGraphExecuteInfo) - RemoteFusedGraphExecuteInfoOrBuilder { -private static final long serialVersionUID = 0L; - // Use RemoteFusedGraphExecuteInfo.newBuilder() to construct. - private RemoteFusedGraphExecuteInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private RemoteFusedGraphExecuteInfo() { - graphInputNodeName_ = com.google.protobuf.LazyStringArrayList.EMPTY; - graphOutputNodeName_ = com.google.protobuf.LazyStringArrayList.EMPTY; - executorName_ = ""; - serializedExecutorParameters_ = com.google.protobuf.ByteString.EMPTY; - defaultGraphInputTensorShape_ = java.util.Collections.emptyList(); - defaultGraphOutputTensorShape_ = java.util.Collections.emptyList(); - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new RemoteFusedGraphExecuteInfo(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private RemoteFusedGraphExecuteInfo( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: { - org.tensorflow.proto.framework.GraphDef.Builder subBuilder = null; - if (remoteGraph_ != null) { - subBuilder = remoteGraph_.toBuilder(); - } - remoteGraph_ = input.readMessage(org.tensorflow.proto.framework.GraphDef.parser(), extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(remoteGraph_); - remoteGraph_ = subBuilder.buildPartial(); - } - - break; - } - case 18: { - java.lang.String s = input.readStringRequireUtf8(); - if (!((mutable_bitField0_ & 0x00000001) != 0)) { - graphInputNodeName_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000001; - } - graphInputNodeName_.add(s); - break; - } - case 26: { - java.lang.String s = input.readStringRequireUtf8(); - if (!((mutable_bitField0_ & 0x00000002) != 0)) { - graphOutputNodeName_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000002; - } - graphOutputNodeName_.add(s); - break; - } - case 34: { - java.lang.String s = input.readStringRequireUtf8(); - - executorName_ = s; - break; - } - case 42: { - - serializedExecutorParameters_ = input.readBytes(); - break; - } - case 50: { - if (!((mutable_bitField0_ & 0x00000004) != 0)) { - defaultGraphInputTensorShape_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - defaultGraphInputTensorShape_.add( - input.readMessage(org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.parser(), extensionRegistry)); - break; - } - case 58: { - if (!((mutable_bitField0_ & 0x00000008) != 0)) { - defaultGraphOutputTensorShape_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000008; - } - defaultGraphOutputTensorShape_.add( - 
input.readMessage(org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.parser(), extensionRegistry)); - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) != 0)) { - graphInputNodeName_ = graphInputNodeName_.getUnmodifiableView(); - } - if (((mutable_bitField0_ & 0x00000002) != 0)) { - graphOutputNodeName_ = graphOutputNodeName_.getUnmodifiableView(); - } - if (((mutable_bitField0_ & 0x00000004) != 0)) { - defaultGraphInputTensorShape_ = java.util.Collections.unmodifiableList(defaultGraphInputTensorShape_); - } - if (((mutable_bitField0_ & 0x00000008) != 0)) { - defaultGraphOutputTensorShape_ = java.util.Collections.unmodifiableList(defaultGraphOutputTensorShape_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoProto.internal_static_tensorflow_RemoteFusedGraphExecuteInfo_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoProto.internal_static_tensorflow_RemoteFusedGraphExecuteInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.class, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.Builder.class); - } - - public interface TensorShapeTypeProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto) - com.google.protobuf.MessageOrBuilder { - - /** - * .tensorflow.DataType dtype = 1; - */ - int getDtypeValue(); - /** - * .tensorflow.DataType dtype = 1; - */ - org.tensorflow.proto.framework.DataType getDtype(); - - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - boolean hasShape(); - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - org.tensorflow.proto.framework.TensorShapeProto getShape(); - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - org.tensorflow.proto.framework.TensorShapeProtoOrBuilder getShapeOrBuilder(); - } - /** - * Protobuf type {@code tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto} - */ - public static final class TensorShapeTypeProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto) - TensorShapeTypeProtoOrBuilder { - private static final long serialVersionUID = 0L; - // Use TensorShapeTypeProto.newBuilder() to construct. 
- private TensorShapeTypeProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private TensorShapeTypeProto() { - dtype_ = 0; - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new TensorShapeTypeProto(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private TensorShapeTypeProto( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 8: { - int rawValue = input.readEnum(); - - dtype_ = rawValue; - break; - } - case 18: { - org.tensorflow.proto.framework.TensorShapeProto.Builder subBuilder = null; - if (shape_ != null) { - subBuilder = shape_.toBuilder(); - } - shape_ = input.readMessage(org.tensorflow.proto.framework.TensorShapeProto.parser(), extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(shape_); - shape_ = subBuilder.buildPartial(); - } - - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoProto.internal_static_tensorflow_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoProto.internal_static_tensorflow_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.class, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder.class); - } - - public static final int DTYPE_FIELD_NUMBER = 1; - private int dtype_; - /** - * .tensorflow.DataType dtype = 1; - */ - public int getDtypeValue() { - return dtype_; - } - /** - * .tensorflow.DataType dtype = 1; - */ - public org.tensorflow.proto.framework.DataType getDtype() { - @SuppressWarnings("deprecation") - org.tensorflow.proto.framework.DataType result = org.tensorflow.proto.framework.DataType.valueOf(dtype_); - return result == null ? 
org.tensorflow.proto.framework.DataType.UNRECOGNIZED : result; - } - - public static final int SHAPE_FIELD_NUMBER = 2; - private org.tensorflow.proto.framework.TensorShapeProto shape_; - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public boolean hasShape() { - return shape_ != null; - } - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public org.tensorflow.proto.framework.TensorShapeProto getShape() { - return shape_ == null ? org.tensorflow.proto.framework.TensorShapeProto.getDefaultInstance() : shape_; - } - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public org.tensorflow.proto.framework.TensorShapeProtoOrBuilder getShapeOrBuilder() { - return getShape(); - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (dtype_ != org.tensorflow.proto.framework.DataType.DT_INVALID.getNumber()) { - output.writeEnum(1, dtype_); - } - if (shape_ != null) { - output.writeMessage(2, getShape()); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (dtype_ != org.tensorflow.proto.framework.DataType.DT_INVALID.getNumber()) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, dtype_); - } - if (shape_ != null) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, getShape()); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto)) { - return super.equals(obj); - } - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto other = (org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto) obj; - - if (dtype_ != other.dtype_) return false; - if (hasShape() != other.hasShape()) return false; - if (hasShape()) { - if (!getShape() - .equals(other.getShape())) return false; - } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + DTYPE_FIELD_NUMBER; - hash = (53 * hash) + dtype_; - if (hasShape()) { - hash = (37 * hash) + SHAPE_FIELD_NUMBER; - hash = (53 * hash) + getShape().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static 
org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto) - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoProto.internal_static_tensorflow_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoProto.internal_static_tensorflow_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.class, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder.class); - } - - // Construct using org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - dtype_ = 0; - - if (shapeBuilder_ == null) { - shape_ = null; - } else { - shape_ = null; - shapeBuilder_ = null; - } - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoProto.internal_static_tensorflow_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto_descriptor; - } - - @java.lang.Override - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto getDefaultInstanceForType() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.getDefaultInstance(); - } - - @java.lang.Override - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto build() { - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto buildPartial() { - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto result = new org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto(this); - result.dtype_ = dtype_; - if (shapeBuilder_ == null) { - result.shape_ = shape_; - } else { - result.shape_ = shapeBuilder_.build(); - } - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder 
setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto) { - return mergeFrom((org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto other) { - if (other == org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.getDefaultInstance()) return this; - if (other.dtype_ != 0) { - setDtypeValue(other.getDtypeValue()); - } - if (other.hasShape()) { - mergeShape(other.getShape()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private int dtype_ = 0; - /** - * .tensorflow.DataType dtype = 1; - */ - public int getDtypeValue() { - return dtype_; - } - /** - * .tensorflow.DataType dtype = 1; - */ - public Builder setDtypeValue(int value) { - dtype_ = value; - onChanged(); - return this; - } - /** - * .tensorflow.DataType dtype = 1; - */ - public org.tensorflow.proto.framework.DataType getDtype() { - @SuppressWarnings("deprecation") - org.tensorflow.proto.framework.DataType result = org.tensorflow.proto.framework.DataType.valueOf(dtype_); - return result == null ? 
org.tensorflow.proto.framework.DataType.UNRECOGNIZED : result; - } - /** - * .tensorflow.DataType dtype = 1; - */ - public Builder setDtype(org.tensorflow.proto.framework.DataType value) { - if (value == null) { - throw new NullPointerException(); - } - - dtype_ = value.getNumber(); - onChanged(); - return this; - } - /** - * .tensorflow.DataType dtype = 1; - */ - public Builder clearDtype() { - - dtype_ = 0; - onChanged(); - return this; - } - - private org.tensorflow.proto.framework.TensorShapeProto shape_; - private com.google.protobuf.SingleFieldBuilderV3< - org.tensorflow.proto.framework.TensorShapeProto, org.tensorflow.proto.framework.TensorShapeProto.Builder, org.tensorflow.proto.framework.TensorShapeProtoOrBuilder> shapeBuilder_; - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public boolean hasShape() { - return shapeBuilder_ != null || shape_ != null; - } - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public org.tensorflow.proto.framework.TensorShapeProto getShape() { - if (shapeBuilder_ == null) { - return shape_ == null ? org.tensorflow.proto.framework.TensorShapeProto.getDefaultInstance() : shape_; - } else { - return shapeBuilder_.getMessage(); - } - } - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public Builder setShape(org.tensorflow.proto.framework.TensorShapeProto value) { - if (shapeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - shape_ = value; - onChanged(); - } else { - shapeBuilder_.setMessage(value); - } - - return this; - } - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public Builder setShape( - org.tensorflow.proto.framework.TensorShapeProto.Builder builderForValue) { - if (shapeBuilder_ == null) { - shape_ = builderForValue.build(); - onChanged(); - } else { - shapeBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public Builder mergeShape(org.tensorflow.proto.framework.TensorShapeProto value) { - if (shapeBuilder_ == null) { - if (shape_ != null) { - shape_ = - org.tensorflow.proto.framework.TensorShapeProto.newBuilder(shape_).mergeFrom(value).buildPartial(); - } else { - shape_ = value; - } - onChanged(); - } else { - shapeBuilder_.mergeFrom(value); - } - - return this; - } - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public Builder clearShape() { - if (shapeBuilder_ == null) { - shape_ = null; - onChanged(); - } else { - shape_ = null; - shapeBuilder_ = null; - } - - return this; - } - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public org.tensorflow.proto.framework.TensorShapeProto.Builder getShapeBuilder() { - - onChanged(); - return getShapeFieldBuilder().getBuilder(); - } - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - public org.tensorflow.proto.framework.TensorShapeProtoOrBuilder getShapeOrBuilder() { - if (shapeBuilder_ != null) { - return shapeBuilder_.getMessageOrBuilder(); - } else { - return shape_ == null ? 
- org.tensorflow.proto.framework.TensorShapeProto.getDefaultInstance() : shape_; - } - } - /** - * .tensorflow.TensorShapeProto shape = 2; - */ - private com.google.protobuf.SingleFieldBuilderV3< - org.tensorflow.proto.framework.TensorShapeProto, org.tensorflow.proto.framework.TensorShapeProto.Builder, org.tensorflow.proto.framework.TensorShapeProtoOrBuilder> - getShapeFieldBuilder() { - if (shapeBuilder_ == null) { - shapeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.tensorflow.proto.framework.TensorShapeProto, org.tensorflow.proto.framework.TensorShapeProto.Builder, org.tensorflow.proto.framework.TensorShapeProtoOrBuilder>( - getShape(), - getParentForChildren(), - isClean()); - shape_ = null; - } - return shapeBuilder_; - } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto) - } - - // @@protoc_insertion_point(class_scope:tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto) - private static final org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto(); - } - - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public TensorShapeTypeProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new TensorShapeTypeProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - public static final int REMOTE_GRAPH_FIELD_NUMBER = 1; - private org.tensorflow.proto.framework.GraphDef remoteGraph_; - /** - *
      -   * Definition of remote graph
      -   * 
-   *
-   * .tensorflow.GraphDef remote_graph = 1;
-   */
-  public boolean hasRemoteGraph() {
-    return remoteGraph_ != null;
-  }
-  /**
-   *
      -   * Definition of remote graph
      -   * 
-   *
-   * .tensorflow.GraphDef remote_graph = 1;
-   */
-  public org.tensorflow.proto.framework.GraphDef getRemoteGraph() {
-    return remoteGraph_ == null ? org.tensorflow.proto.framework.GraphDef.getDefaultInstance() : remoteGraph_;
-  }
-  /**
-   *
      -   * Definition of remote graph
      -   * 
-   *
-   * .tensorflow.GraphDef remote_graph = 1;
-   */
-  public org.tensorflow.proto.framework.GraphDefOrBuilder getRemoteGraphOrBuilder() {
-    return getRemoteGraph();
-  }
-
-  public static final int GRAPH_INPUT_NODE_NAME_FIELD_NUMBER = 2;
-  private com.google.protobuf.LazyStringList graphInputNodeName_;
-  /**
-   *
      -   * Remote fused graph input node name
      -   * 
-   *
-   * repeated string graph_input_node_name = 2;
-   */
-  public com.google.protobuf.ProtocolStringList
-      getGraphInputNodeNameList() {
-    return graphInputNodeName_;
-  }
-  /**
-   *
      -   * Remote fused graph input node name
      -   * 
-   *
-   * repeated string graph_input_node_name = 2;
-   */
-  public int getGraphInputNodeNameCount() {
-    return graphInputNodeName_.size();
-  }
-  /**
-   *
      -   * Remote fused graph input node name
      -   * 
-   *
-   * repeated string graph_input_node_name = 2;
-   */
-  public java.lang.String getGraphInputNodeName(int index) {
-    return graphInputNodeName_.get(index);
-  }
-  /**
-   *
      -   * Remote fused graph input node name
      -   * 
-   *
-   * repeated string graph_input_node_name = 2;
-   */
-  public com.google.protobuf.ByteString
-      getGraphInputNodeNameBytes(int index) {
-    return graphInputNodeName_.getByteString(index);
-  }
-
-  public static final int GRAPH_OUTPUT_NODE_NAME_FIELD_NUMBER = 3;
-  private com.google.protobuf.LazyStringList graphOutputNodeName_;
-  /**
-   *
      -   * Remote fused graph output node name
      -   * 
-   *
-   * repeated string graph_output_node_name = 3;
-   */
-  public com.google.protobuf.ProtocolStringList
-      getGraphOutputNodeNameList() {
-    return graphOutputNodeName_;
-  }
-  /**
-   *
      -   * Remote fused graph output node name
      -   * 
-   *
-   * repeated string graph_output_node_name = 3;
-   */
-  public int getGraphOutputNodeNameCount() {
-    return graphOutputNodeName_.size();
-  }
-  /**
-   *
      -   * Remote fused graph output node name
      -   * 
-   *
-   * repeated string graph_output_node_name = 3;
-   */
-  public java.lang.String getGraphOutputNodeName(int index) {
-    return graphOutputNodeName_.get(index);
-  }
-  /**
-   *
      -   * Remote fused graph output node name
      -   * 
-   *
-   * repeated string graph_output_node_name = 3;
-   */
-  public com.google.protobuf.ByteString
-      getGraphOutputNodeNameBytes(int index) {
-    return graphOutputNodeName_.getByteString(index);
-  }
-
-  public static final int EXECUTOR_NAME_FIELD_NUMBER = 4;
-  private volatile java.lang.Object executorName_;
-  /**
-   *
      -   * Executor's name
      -   * 
-   *
-   * string executor_name = 4;
-   */
-  public java.lang.String getExecutorName() {
-    java.lang.Object ref = executorName_;
-    if (ref instanceof java.lang.String) {
-      return (java.lang.String) ref;
-    } else {
-      com.google.protobuf.ByteString bs =
-          (com.google.protobuf.ByteString) ref;
-      java.lang.String s = bs.toStringUtf8();
-      executorName_ = s;
-      return s;
-    }
-  }
-  /**
-   *
      -   * Executor's name
      -   * 
-   *
-   * string executor_name = 4;
-   */
-  public com.google.protobuf.ByteString
-      getExecutorNameBytes() {
-    java.lang.Object ref = executorName_;
-    if (ref instanceof java.lang.String) {
-      com.google.protobuf.ByteString b =
-          com.google.protobuf.ByteString.copyFromUtf8(
-              (java.lang.String) ref);
-      executorName_ = b;
-      return b;
-    } else {
-      return (com.google.protobuf.ByteString) ref;
-    }
-  }
-
-  public static final int SERIALIZED_EXECUTOR_PARAMETERS_FIELD_NUMBER = 5;
-  private com.google.protobuf.ByteString serializedExecutorParameters_;
-  /**
-   *
      -   * Optional: Parameters given to the executor
      -   * 
-   *
-   * bytes serialized_executor_parameters = 5;
-   */
-  public com.google.protobuf.ByteString getSerializedExecutorParameters() {
-    return serializedExecutorParameters_;
-  }
-
-  public static final int DEFAULT_GRAPH_INPUT_TENSOR_SHAPE_FIELD_NUMBER = 6;
-  private java.util.List<org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto> defaultGraphInputTensorShape_;
-  /**
-   *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * 
-   *
-   * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6;
-   */
-  public java.util.List<org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto> getDefaultGraphInputTensorShapeList() {
-    return defaultGraphInputTensorShape_;
-  }
-  /**
-   *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * 
-   *
-   * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6;
-   */
-  public java.util.List<? extends org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder>
-      getDefaultGraphInputTensorShapeOrBuilderList() {
-    return defaultGraphInputTensorShape_;
-  }
-  /**
-   *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * 
-   *
-   * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6;
-   */
-  public int getDefaultGraphInputTensorShapeCount() {
-    return defaultGraphInputTensorShape_.size();
-  }
-  /**
-   *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * 
-   *
-   * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6;
-   */
-  public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto getDefaultGraphInputTensorShape(int index) {
-    return defaultGraphInputTensorShape_.get(index);
-  }
-  /**
-   *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * 
-   *
-   * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6;
-   */
-  public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder getDefaultGraphInputTensorShapeOrBuilder(
-      int index) {
-    return defaultGraphInputTensorShape_.get(index);
-  }
-
-  public static final int DEFAULT_GRAPH_OUTPUT_TENSOR_SHAPE_FIELD_NUMBER = 7;
-  private java.util.List<org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto> defaultGraphOutputTensorShape_;
-  /**
-   *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * TODO(satok): Remote output tensor shape once shape information is stored
      -   * in NodeDef
      -   * 
-   *
-   * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7;
-   */
-  public java.util.List<org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto> getDefaultGraphOutputTensorShapeList() {
-    return defaultGraphOutputTensorShape_;
-  }
-  /**
-   *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * TODO(satok): Remote output tensor shape once shape information is stored
      -   * in NodeDef
      -   * 
-   *
-   * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7;
-   */
-  public java.util.List<? extends org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder>
-      getDefaultGraphOutputTensorShapeOrBuilderList() {
-    return defaultGraphOutputTensorShape_;
-  }
-  /**
-   *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * TODO(satok): Remote output tensor shape once shape information is stored
      -   * in NodeDef
      -   * 
-   *
-   * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7;
-   */
-  public int getDefaultGraphOutputTensorShapeCount() {
-    return defaultGraphOutputTensorShape_.size();
-  }
-  /**
-   *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * TODO(satok): Remote output tensor shape once shape information is stored
      -   * in NodeDef
      -   * 
-   *
-   * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7;
-   */
-  public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto getDefaultGraphOutputTensorShape(int index) {
-    return defaultGraphOutputTensorShape_.get(index);
-  }
-  /**
-   *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * TODO(satok): Remote output tensor shape once shape information is stored
      -   * in NodeDef
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder getDefaultGraphOutputTensorShapeOrBuilder( - int index) { - return defaultGraphOutputTensorShape_.get(index); - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (remoteGraph_ != null) { - output.writeMessage(1, getRemoteGraph()); - } - for (int i = 0; i < graphInputNodeName_.size(); i++) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, graphInputNodeName_.getRaw(i)); - } - for (int i = 0; i < graphOutputNodeName_.size(); i++) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 3, graphOutputNodeName_.getRaw(i)); - } - if (!getExecutorNameBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 4, executorName_); - } - if (!serializedExecutorParameters_.isEmpty()) { - output.writeBytes(5, serializedExecutorParameters_); - } - for (int i = 0; i < defaultGraphInputTensorShape_.size(); i++) { - output.writeMessage(6, defaultGraphInputTensorShape_.get(i)); - } - for (int i = 0; i < defaultGraphOutputTensorShape_.size(); i++) { - output.writeMessage(7, defaultGraphOutputTensorShape_.get(i)); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (remoteGraph_ != null) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getRemoteGraph()); - } - { - int dataSize = 0; - for (int i = 0; i < graphInputNodeName_.size(); i++) { - dataSize += computeStringSizeNoTag(graphInputNodeName_.getRaw(i)); - } - size += dataSize; - size += 1 * getGraphInputNodeNameList().size(); - } - { - int dataSize = 0; - for (int i = 0; i < graphOutputNodeName_.size(); i++) { - dataSize += computeStringSizeNoTag(graphOutputNodeName_.getRaw(i)); - } - size += dataSize; - size += 1 * getGraphOutputNodeNameList().size(); - } - if (!getExecutorNameBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, executorName_); - } - if (!serializedExecutorParameters_.isEmpty()) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(5, serializedExecutorParameters_); - } - for (int i = 0; i < defaultGraphInputTensorShape_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, defaultGraphInputTensorShape_.get(i)); - } - for (int i = 0; i < defaultGraphOutputTensorShape_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, defaultGraphOutputTensorShape_.get(i)); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo)) { - return super.equals(obj); - } - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo other = (org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo) obj; - - if (hasRemoteGraph() != 
other.hasRemoteGraph()) return false; - if (hasRemoteGraph()) { - if (!getRemoteGraph() - .equals(other.getRemoteGraph())) return false; - } - if (!getGraphInputNodeNameList() - .equals(other.getGraphInputNodeNameList())) return false; - if (!getGraphOutputNodeNameList() - .equals(other.getGraphOutputNodeNameList())) return false; - if (!getExecutorName() - .equals(other.getExecutorName())) return false; - if (!getSerializedExecutorParameters() - .equals(other.getSerializedExecutorParameters())) return false; - if (!getDefaultGraphInputTensorShapeList() - .equals(other.getDefaultGraphInputTensorShapeList())) return false; - if (!getDefaultGraphOutputTensorShapeList() - .equals(other.getDefaultGraphOutputTensorShapeList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasRemoteGraph()) { - hash = (37 * hash) + REMOTE_GRAPH_FIELD_NUMBER; - hash = (53 * hash) + getRemoteGraph().hashCode(); - } - if (getGraphInputNodeNameCount() > 0) { - hash = (37 * hash) + GRAPH_INPUT_NODE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getGraphInputNodeNameList().hashCode(); - } - if (getGraphOutputNodeNameCount() > 0) { - hash = (37 * hash) + GRAPH_OUTPUT_NODE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getGraphOutputNodeNameList().hashCode(); - } - hash = (37 * hash) + EXECUTOR_NAME_FIELD_NUMBER; - hash = (53 * hash) + getExecutorName().hashCode(); - hash = (37 * hash) + SERIALIZED_EXECUTOR_PARAMETERS_FIELD_NUMBER; - hash = (53 * hash) + getSerializedExecutorParameters().hashCode(); - if (getDefaultGraphInputTensorShapeCount() > 0) { - hash = (37 * hash) + DEFAULT_GRAPH_INPUT_TENSOR_SHAPE_FIELD_NUMBER; - hash = (53 * hash) + getDefaultGraphInputTensorShapeList().hashCode(); - } - if (getDefaultGraphOutputTensorShapeCount() > 0) { - hash = (37 * hash) + DEFAULT_GRAPH_OUTPUT_TENSOR_SHAPE_FIELD_NUMBER; - hash = (53 * hash) + getDefaultGraphOutputTensorShapeList().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseFrom( - byte[] 
data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - *
      -   * Protocol buffer representing a handle to a tensorflow resource. Handles are
      -   * not valid across executions, but can be serialized back and forth from within
      -   * a single run.
      -   * 
      - * - * Protobuf type {@code tensorflow.RemoteFusedGraphExecuteInfo} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:tensorflow.RemoteFusedGraphExecuteInfo) - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoProto.internal_static_tensorflow_RemoteFusedGraphExecuteInfo_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoProto.internal_static_tensorflow_RemoteFusedGraphExecuteInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.class, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.Builder.class); - } - - // Construct using org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - getDefaultGraphInputTensorShapeFieldBuilder(); - getDefaultGraphOutputTensorShapeFieldBuilder(); - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - if (remoteGraphBuilder_ == null) { - remoteGraph_ = null; - } else { - remoteGraph_ = null; - remoteGraphBuilder_ = null; - } - graphInputNodeName_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - graphOutputNodeName_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - executorName_ = ""; - - serializedExecutorParameters_ = com.google.protobuf.ByteString.EMPTY; - - if (defaultGraphInputTensorShapeBuilder_ == null) { - defaultGraphInputTensorShape_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - } else { - defaultGraphInputTensorShapeBuilder_.clear(); - } - if (defaultGraphOutputTensorShapeBuilder_ == null) { - defaultGraphOutputTensorShape_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - } else { - defaultGraphOutputTensorShapeBuilder_.clear(); - } - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfoProto.internal_static_tensorflow_RemoteFusedGraphExecuteInfo_descriptor; - } - - @java.lang.Override - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo getDefaultInstanceForType() { - return org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.getDefaultInstance(); - } - - @java.lang.Override - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo build() { - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo buildPartial() { - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo result = new 
org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo(this); - int from_bitField0_ = bitField0_; - if (remoteGraphBuilder_ == null) { - result.remoteGraph_ = remoteGraph_; - } else { - result.remoteGraph_ = remoteGraphBuilder_.build(); - } - if (((bitField0_ & 0x00000001) != 0)) { - graphInputNodeName_ = graphInputNodeName_.getUnmodifiableView(); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.graphInputNodeName_ = graphInputNodeName_; - if (((bitField0_ & 0x00000002) != 0)) { - graphOutputNodeName_ = graphOutputNodeName_.getUnmodifiableView(); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.graphOutputNodeName_ = graphOutputNodeName_; - result.executorName_ = executorName_; - result.serializedExecutorParameters_ = serializedExecutorParameters_; - if (defaultGraphInputTensorShapeBuilder_ == null) { - if (((bitField0_ & 0x00000004) != 0)) { - defaultGraphInputTensorShape_ = java.util.Collections.unmodifiableList(defaultGraphInputTensorShape_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.defaultGraphInputTensorShape_ = defaultGraphInputTensorShape_; - } else { - result.defaultGraphInputTensorShape_ = defaultGraphInputTensorShapeBuilder_.build(); - } - if (defaultGraphOutputTensorShapeBuilder_ == null) { - if (((bitField0_ & 0x00000008) != 0)) { - defaultGraphOutputTensorShape_ = java.util.Collections.unmodifiableList(defaultGraphOutputTensorShape_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.defaultGraphOutputTensorShape_ = defaultGraphOutputTensorShape_; - } else { - result.defaultGraphOutputTensorShape_ = defaultGraphOutputTensorShapeBuilder_.build(); - } - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo) { - return mergeFrom((org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo other) { - if (other == org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.getDefaultInstance()) return this; - if (other.hasRemoteGraph()) { - mergeRemoteGraph(other.getRemoteGraph()); - } - if (!other.graphInputNodeName_.isEmpty()) { - if (graphInputNodeName_.isEmpty()) { - graphInputNodeName_ = other.graphInputNodeName_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureGraphInputNodeNameIsMutable(); - graphInputNodeName_.addAll(other.graphInputNodeName_); - } - onChanged(); - } - if 
(!other.graphOutputNodeName_.isEmpty()) { - if (graphOutputNodeName_.isEmpty()) { - graphOutputNodeName_ = other.graphOutputNodeName_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureGraphOutputNodeNameIsMutable(); - graphOutputNodeName_.addAll(other.graphOutputNodeName_); - } - onChanged(); - } - if (!other.getExecutorName().isEmpty()) { - executorName_ = other.executorName_; - onChanged(); - } - if (other.getSerializedExecutorParameters() != com.google.protobuf.ByteString.EMPTY) { - setSerializedExecutorParameters(other.getSerializedExecutorParameters()); - } - if (defaultGraphInputTensorShapeBuilder_ == null) { - if (!other.defaultGraphInputTensorShape_.isEmpty()) { - if (defaultGraphInputTensorShape_.isEmpty()) { - defaultGraphInputTensorShape_ = other.defaultGraphInputTensorShape_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureDefaultGraphInputTensorShapeIsMutable(); - defaultGraphInputTensorShape_.addAll(other.defaultGraphInputTensorShape_); - } - onChanged(); - } - } else { - if (!other.defaultGraphInputTensorShape_.isEmpty()) { - if (defaultGraphInputTensorShapeBuilder_.isEmpty()) { - defaultGraphInputTensorShapeBuilder_.dispose(); - defaultGraphInputTensorShapeBuilder_ = null; - defaultGraphInputTensorShape_ = other.defaultGraphInputTensorShape_; - bitField0_ = (bitField0_ & ~0x00000004); - defaultGraphInputTensorShapeBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? - getDefaultGraphInputTensorShapeFieldBuilder() : null; - } else { - defaultGraphInputTensorShapeBuilder_.addAllMessages(other.defaultGraphInputTensorShape_); - } - } - } - if (defaultGraphOutputTensorShapeBuilder_ == null) { - if (!other.defaultGraphOutputTensorShape_.isEmpty()) { - if (defaultGraphOutputTensorShape_.isEmpty()) { - defaultGraphOutputTensorShape_ = other.defaultGraphOutputTensorShape_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureDefaultGraphOutputTensorShapeIsMutable(); - defaultGraphOutputTensorShape_.addAll(other.defaultGraphOutputTensorShape_); - } - onChanged(); - } - } else { - if (!other.defaultGraphOutputTensorShape_.isEmpty()) { - if (defaultGraphOutputTensorShapeBuilder_.isEmpty()) { - defaultGraphOutputTensorShapeBuilder_.dispose(); - defaultGraphOutputTensorShapeBuilder_ = null; - defaultGraphOutputTensorShape_ = other.defaultGraphOutputTensorShape_; - bitField0_ = (bitField0_ & ~0x00000008); - defaultGraphOutputTensorShapeBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
- getDefaultGraphOutputTensorShapeFieldBuilder() : null; - } else { - defaultGraphOutputTensorShapeBuilder_.addAllMessages(other.defaultGraphOutputTensorShape_); - } - } - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private org.tensorflow.proto.framework.GraphDef remoteGraph_; - private com.google.protobuf.SingleFieldBuilderV3< - org.tensorflow.proto.framework.GraphDef, org.tensorflow.proto.framework.GraphDef.Builder, org.tensorflow.proto.framework.GraphDefOrBuilder> remoteGraphBuilder_; - /** - *
      -     * Definition of remote graph
      -     * 
-     *
-     * .tensorflow.GraphDef remote_graph = 1;
-     */
-    public boolean hasRemoteGraph() {
-      return remoteGraphBuilder_ != null || remoteGraph_ != null;
-    }
-    /**
-     *
      -     * Definition of remote graph
      -     * 
-     *
-     * .tensorflow.GraphDef remote_graph = 1;
-     */
-    public org.tensorflow.proto.framework.GraphDef getRemoteGraph() {
-      if (remoteGraphBuilder_ == null) {
-        return remoteGraph_ == null ? org.tensorflow.proto.framework.GraphDef.getDefaultInstance() : remoteGraph_;
-      } else {
-        return remoteGraphBuilder_.getMessage();
-      }
-    }
-    /**
-     *
      -     * Definition of remote graph
      -     * 
-     *
-     * .tensorflow.GraphDef remote_graph = 1;
-     */
-    public Builder setRemoteGraph(org.tensorflow.proto.framework.GraphDef value) {
-      if (remoteGraphBuilder_ == null) {
-        if (value == null) {
-          throw new NullPointerException();
-        }
-        remoteGraph_ = value;
-        onChanged();
-      } else {
-        remoteGraphBuilder_.setMessage(value);
-      }
-
-      return this;
-    }
-    /**
-     *
      -     * Definition of remote graph
      -     * 
-     *
-     * .tensorflow.GraphDef remote_graph = 1;
-     */
-    public Builder setRemoteGraph(
-        org.tensorflow.proto.framework.GraphDef.Builder builderForValue) {
-      if (remoteGraphBuilder_ == null) {
-        remoteGraph_ = builderForValue.build();
-        onChanged();
-      } else {
-        remoteGraphBuilder_.setMessage(builderForValue.build());
-      }
-
-      return this;
-    }
-    /**
-     *
      -     * Definition of remote graph
      -     * 
-     *
-     * .tensorflow.GraphDef remote_graph = 1;
-     */
-    public Builder mergeRemoteGraph(org.tensorflow.proto.framework.GraphDef value) {
-      if (remoteGraphBuilder_ == null) {
-        if (remoteGraph_ != null) {
-          remoteGraph_ =
-            org.tensorflow.proto.framework.GraphDef.newBuilder(remoteGraph_).mergeFrom(value).buildPartial();
-        } else {
-          remoteGraph_ = value;
-        }
-        onChanged();
-      } else {
-        remoteGraphBuilder_.mergeFrom(value);
-      }
-
-      return this;
-    }
-    /**
-     *
      -     * Definition of remote graph
      -     * 
      - * - * .tensorflow.GraphDef remote_graph = 1; - */ - public Builder clearRemoteGraph() { - if (remoteGraphBuilder_ == null) { - remoteGraph_ = null; - onChanged(); - } else { - remoteGraph_ = null; - remoteGraphBuilder_ = null; - } - - return this; - } - /** - *
      -     * Definition of remote graph
      -     * 
      - * - * .tensorflow.GraphDef remote_graph = 1; - */ - public org.tensorflow.proto.framework.GraphDef.Builder getRemoteGraphBuilder() { - - onChanged(); - return getRemoteGraphFieldBuilder().getBuilder(); - } - /** - *
      -     * Definition of remote graph
      -     * 
      - * - * .tensorflow.GraphDef remote_graph = 1; - */ - public org.tensorflow.proto.framework.GraphDefOrBuilder getRemoteGraphOrBuilder() { - if (remoteGraphBuilder_ != null) { - return remoteGraphBuilder_.getMessageOrBuilder(); - } else { - return remoteGraph_ == null ? - org.tensorflow.proto.framework.GraphDef.getDefaultInstance() : remoteGraph_; - } - } - /** - *
      -     * Definition of remote graph
      -     * 
      - * - * .tensorflow.GraphDef remote_graph = 1; - */ - private com.google.protobuf.SingleFieldBuilderV3< - org.tensorflow.proto.framework.GraphDef, org.tensorflow.proto.framework.GraphDef.Builder, org.tensorflow.proto.framework.GraphDefOrBuilder> - getRemoteGraphFieldBuilder() { - if (remoteGraphBuilder_ == null) { - remoteGraphBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.tensorflow.proto.framework.GraphDef, org.tensorflow.proto.framework.GraphDef.Builder, org.tensorflow.proto.framework.GraphDefOrBuilder>( - getRemoteGraph(), - getParentForChildren(), - isClean()); - remoteGraph_ = null; - } - return remoteGraphBuilder_; - } - - private com.google.protobuf.LazyStringList graphInputNodeName_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureGraphInputNodeNameIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { - graphInputNodeName_ = new com.google.protobuf.LazyStringArrayList(graphInputNodeName_); - bitField0_ |= 0x00000001; - } - } - /** - *
      -     * Remote fused graph input node name
      -     * 
      - * - * repeated string graph_input_node_name = 2; - */ - public com.google.protobuf.ProtocolStringList - getGraphInputNodeNameList() { - return graphInputNodeName_.getUnmodifiableView(); - } - /** - *
      -     * Remote fused graph input node name
      -     * 
      - * - * repeated string graph_input_node_name = 2; - */ - public int getGraphInputNodeNameCount() { - return graphInputNodeName_.size(); - } - /** - *
      -     * Remote fused graph input node name
      -     * 
      - * - * repeated string graph_input_node_name = 2; - */ - public java.lang.String getGraphInputNodeName(int index) { - return graphInputNodeName_.get(index); - } - /** - *
      -     * Remote fused graph input node name
      -     * 
      - * - * repeated string graph_input_node_name = 2; - */ - public com.google.protobuf.ByteString - getGraphInputNodeNameBytes(int index) { - return graphInputNodeName_.getByteString(index); - } - /** - *
      -     * Remote fused graph input node name
      -     * 
      - * - * repeated string graph_input_node_name = 2; - */ - public Builder setGraphInputNodeName( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureGraphInputNodeNameIsMutable(); - graphInputNodeName_.set(index, value); - onChanged(); - return this; - } - /** - *
      -     * Remote fused graph input node name
      -     * 
      - * - * repeated string graph_input_node_name = 2; - */ - public Builder addGraphInputNodeName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureGraphInputNodeNameIsMutable(); - graphInputNodeName_.add(value); - onChanged(); - return this; - } - /** - *
      -     * Remote fused graph input node name
      -     * 
      - * - * repeated string graph_input_node_name = 2; - */ - public Builder addAllGraphInputNodeName( - java.lang.Iterable values) { - ensureGraphInputNodeNameIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, graphInputNodeName_); - onChanged(); - return this; - } - /** - *
      -     * Remote fused graph input node name
      -     * 
      - * - * repeated string graph_input_node_name = 2; - */ - public Builder clearGraphInputNodeName() { - graphInputNodeName_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - /** - *
      -     * Remote fused graph input node name
      -     * 
      - * - * repeated string graph_input_node_name = 2; - */ - public Builder addGraphInputNodeNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - ensureGraphInputNodeNameIsMutable(); - graphInputNodeName_.add(value); - onChanged(); - return this; - } - - private com.google.protobuf.LazyStringList graphOutputNodeName_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureGraphOutputNodeNameIsMutable() { - if (!((bitField0_ & 0x00000002) != 0)) { - graphOutputNodeName_ = new com.google.protobuf.LazyStringArrayList(graphOutputNodeName_); - bitField0_ |= 0x00000002; - } - } - /** - *
      -     * Remote fused graph output node name
      -     * 
      - * - * repeated string graph_output_node_name = 3; - */ - public com.google.protobuf.ProtocolStringList - getGraphOutputNodeNameList() { - return graphOutputNodeName_.getUnmodifiableView(); - } - /** - *
      -     * Remote fused graph output node name
      -     * 
      - * - * repeated string graph_output_node_name = 3; - */ - public int getGraphOutputNodeNameCount() { - return graphOutputNodeName_.size(); - } - /** - *
      -     * Remote fused graph output node name
      -     * 
      - * - * repeated string graph_output_node_name = 3; - */ - public java.lang.String getGraphOutputNodeName(int index) { - return graphOutputNodeName_.get(index); - } - /** - *
      -     * Remote fused graph output node name
      -     * 
      - * - * repeated string graph_output_node_name = 3; - */ - public com.google.protobuf.ByteString - getGraphOutputNodeNameBytes(int index) { - return graphOutputNodeName_.getByteString(index); - } - /** - *
      -     * Remote fused graph output node name
      -     * 
      - * - * repeated string graph_output_node_name = 3; - */ - public Builder setGraphOutputNodeName( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureGraphOutputNodeNameIsMutable(); - graphOutputNodeName_.set(index, value); - onChanged(); - return this; - } - /** - *
      -     * Remote fused graph output node name
      -     * 
      - * - * repeated string graph_output_node_name = 3; - */ - public Builder addGraphOutputNodeName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureGraphOutputNodeNameIsMutable(); - graphOutputNodeName_.add(value); - onChanged(); - return this; - } - /** - *
      -     * Remote fused graph output node name
      -     * 
      - * - * repeated string graph_output_node_name = 3; - */ - public Builder addAllGraphOutputNodeName( - java.lang.Iterable values) { - ensureGraphOutputNodeNameIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, graphOutputNodeName_); - onChanged(); - return this; - } - /** - *
      -     * Remote fused graph output node name
      -     * 
      - * - * repeated string graph_output_node_name = 3; - */ - public Builder clearGraphOutputNodeName() { - graphOutputNodeName_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - return this; - } - /** - *
      -     * Remote fused graph output node name
      -     * 
      - * - * repeated string graph_output_node_name = 3; - */ - public Builder addGraphOutputNodeNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - ensureGraphOutputNodeNameIsMutable(); - graphOutputNodeName_.add(value); - onChanged(); - return this; - } - - private java.lang.Object executorName_ = ""; - /** - *
      -     * Executor's name
      -     * 
      - * - * string executor_name = 4; - */ - public java.lang.String getExecutorName() { - java.lang.Object ref = executorName_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - executorName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
      -     * Executor's name
      -     * 
      - * - * string executor_name = 4; - */ - public com.google.protobuf.ByteString - getExecutorNameBytes() { - java.lang.Object ref = executorName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - executorName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - *
      -     * Executor's name
      -     * 
      - * - * string executor_name = 4; - */ - public Builder setExecutorName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - executorName_ = value; - onChanged(); - return this; - } - /** - *
      -     * Executor's name
      -     * 
      - * - * string executor_name = 4; - */ - public Builder clearExecutorName() { - - executorName_ = getDefaultInstance().getExecutorName(); - onChanged(); - return this; - } - /** - *
      -     * Executor's name
      -     * 
      - * - * string executor_name = 4; - */ - public Builder setExecutorNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - executorName_ = value; - onChanged(); - return this; - } - - private com.google.protobuf.ByteString serializedExecutorParameters_ = com.google.protobuf.ByteString.EMPTY; - /** - *
      -     * Optional: Parameters given to the executor
      -     * 
      - * - * bytes serialized_executor_parameters = 5; - */ - public com.google.protobuf.ByteString getSerializedExecutorParameters() { - return serializedExecutorParameters_; - } - /** - *
      -     * Optional: Parameters given to the executor
      -     * 
      - * - * bytes serialized_executor_parameters = 5; - */ - public Builder setSerializedExecutorParameters(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - - serializedExecutorParameters_ = value; - onChanged(); - return this; - } - /** - *
      -     * Optional: Parameters given to the executor
      -     * 
      - * - * bytes serialized_executor_parameters = 5; - */ - public Builder clearSerializedExecutorParameters() { - - serializedExecutorParameters_ = getDefaultInstance().getSerializedExecutorParameters(); - onChanged(); - return this; - } - - private java.util.List defaultGraphInputTensorShape_ = - java.util.Collections.emptyList(); - private void ensureDefaultGraphInputTensorShapeIsMutable() { - if (!((bitField0_ & 0x00000004) != 0)) { - defaultGraphInputTensorShape_ = new java.util.ArrayList(defaultGraphInputTensorShape_); - bitField0_ |= 0x00000004; - } - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder> defaultGraphInputTensorShapeBuilder_; - - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public java.util.List getDefaultGraphInputTensorShapeList() { - if (defaultGraphInputTensorShapeBuilder_ == null) { - return java.util.Collections.unmodifiableList(defaultGraphInputTensorShape_); - } else { - return defaultGraphInputTensorShapeBuilder_.getMessageList(); - } - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public int getDefaultGraphInputTensorShapeCount() { - if (defaultGraphInputTensorShapeBuilder_ == null) { - return defaultGraphInputTensorShape_.size(); - } else { - return defaultGraphInputTensorShapeBuilder_.getCount(); - } - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto getDefaultGraphInputTensorShape(int index) { - if (defaultGraphInputTensorShapeBuilder_ == null) { - return defaultGraphInputTensorShape_.get(index); - } else { - return defaultGraphInputTensorShapeBuilder_.getMessage(index); - } - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public Builder setDefaultGraphInputTensorShape( - int index, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto value) { - if (defaultGraphInputTensorShapeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDefaultGraphInputTensorShapeIsMutable(); - defaultGraphInputTensorShape_.set(index, value); - onChanged(); - } else { - defaultGraphInputTensorShapeBuilder_.setMessage(index, value); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public Builder setDefaultGraphInputTensorShape( - int index, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder builderForValue) { - if (defaultGraphInputTensorShapeBuilder_ == null) { - ensureDefaultGraphInputTensorShapeIsMutable(); - defaultGraphInputTensorShape_.set(index, builderForValue.build()); - onChanged(); - } else { - defaultGraphInputTensorShapeBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public Builder addDefaultGraphInputTensorShape(org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto value) { - if (defaultGraphInputTensorShapeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDefaultGraphInputTensorShapeIsMutable(); - defaultGraphInputTensorShape_.add(value); - onChanged(); - } else { - defaultGraphInputTensorShapeBuilder_.addMessage(value); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public Builder addDefaultGraphInputTensorShape( - int index, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto value) { - if (defaultGraphInputTensorShapeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDefaultGraphInputTensorShapeIsMutable(); - defaultGraphInputTensorShape_.add(index, value); - onChanged(); - } else { - defaultGraphInputTensorShapeBuilder_.addMessage(index, value); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public Builder addDefaultGraphInputTensorShape( - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder builderForValue) { - if (defaultGraphInputTensorShapeBuilder_ == null) { - ensureDefaultGraphInputTensorShapeIsMutable(); - defaultGraphInputTensorShape_.add(builderForValue.build()); - onChanged(); - } else { - defaultGraphInputTensorShapeBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public Builder addDefaultGraphInputTensorShape( - int index, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder builderForValue) { - if (defaultGraphInputTensorShapeBuilder_ == null) { - ensureDefaultGraphInputTensorShapeIsMutable(); - defaultGraphInputTensorShape_.add(index, builderForValue.build()); - onChanged(); - } else { - defaultGraphInputTensorShapeBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public Builder addAllDefaultGraphInputTensorShape( - java.lang.Iterable values) { - if (defaultGraphInputTensorShapeBuilder_ == null) { - ensureDefaultGraphInputTensorShapeIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, defaultGraphInputTensorShape_); - onChanged(); - } else { - defaultGraphInputTensorShapeBuilder_.addAllMessages(values); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public Builder clearDefaultGraphInputTensorShape() { - if (defaultGraphInputTensorShapeBuilder_ == null) { - defaultGraphInputTensorShape_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - defaultGraphInputTensorShapeBuilder_.clear(); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public Builder removeDefaultGraphInputTensorShape(int index) { - if (defaultGraphInputTensorShapeBuilder_ == null) { - ensureDefaultGraphInputTensorShapeIsMutable(); - defaultGraphInputTensorShape_.remove(index); - onChanged(); - } else { - defaultGraphInputTensorShapeBuilder_.remove(index); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder getDefaultGraphInputTensorShapeBuilder( - int index) { - return getDefaultGraphInputTensorShapeFieldBuilder().getBuilder(index); - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder getDefaultGraphInputTensorShapeOrBuilder( - int index) { - if (defaultGraphInputTensorShapeBuilder_ == null) { - return defaultGraphInputTensorShape_.get(index); } else { - return defaultGraphInputTensorShapeBuilder_.getMessageOrBuilder(index); - } - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public java.util.List - getDefaultGraphInputTensorShapeOrBuilderList() { - if (defaultGraphInputTensorShapeBuilder_ != null) { - return defaultGraphInputTensorShapeBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(defaultGraphInputTensorShape_); - } - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder addDefaultGraphInputTensorShapeBuilder() { - return getDefaultGraphInputTensorShapeFieldBuilder().addBuilder( - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.getDefaultInstance()); - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder addDefaultGraphInputTensorShapeBuilder( - int index) { - return getDefaultGraphInputTensorShapeFieldBuilder().addBuilder( - index, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.getDefaultInstance()); - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - public java.util.List - getDefaultGraphInputTensorShapeBuilderList() { - return getDefaultGraphInputTensorShapeFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilderV3< - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder> - getDefaultGraphInputTensorShapeFieldBuilder() { - if (defaultGraphInputTensorShapeBuilder_ == null) { - defaultGraphInputTensorShapeBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder>( - defaultGraphInputTensorShape_, - ((bitField0_ & 0x00000004) != 0), - getParentForChildren(), - isClean()); - defaultGraphInputTensorShape_ = null; - } - return defaultGraphInputTensorShapeBuilder_; - } - - private java.util.List defaultGraphOutputTensorShape_ = - java.util.Collections.emptyList(); - private void ensureDefaultGraphOutputTensorShapeIsMutable() { - if (!((bitField0_ & 0x00000008) != 0)) { - defaultGraphOutputTensorShape_ = new java.util.ArrayList(defaultGraphOutputTensorShape_); - bitField0_ |= 0x00000008; - } - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder> defaultGraphOutputTensorShapeBuilder_; - - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public java.util.List getDefaultGraphOutputTensorShapeList() { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - return java.util.Collections.unmodifiableList(defaultGraphOutputTensorShape_); - } else { - return defaultGraphOutputTensorShapeBuilder_.getMessageList(); - } - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public int getDefaultGraphOutputTensorShapeCount() { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - return defaultGraphOutputTensorShape_.size(); - } else { - return defaultGraphOutputTensorShapeBuilder_.getCount(); - } - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto getDefaultGraphOutputTensorShape(int index) { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - return defaultGraphOutputTensorShape_.get(index); - } else { - return defaultGraphOutputTensorShapeBuilder_.getMessage(index); - } - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public Builder setDefaultGraphOutputTensorShape( - int index, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto value) { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDefaultGraphOutputTensorShapeIsMutable(); - defaultGraphOutputTensorShape_.set(index, value); - onChanged(); - } else { - defaultGraphOutputTensorShapeBuilder_.setMessage(index, value); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public Builder setDefaultGraphOutputTensorShape( - int index, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder builderForValue) { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - ensureDefaultGraphOutputTensorShapeIsMutable(); - defaultGraphOutputTensorShape_.set(index, builderForValue.build()); - onChanged(); - } else { - defaultGraphOutputTensorShapeBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public Builder addDefaultGraphOutputTensorShape(org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto value) { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDefaultGraphOutputTensorShapeIsMutable(); - defaultGraphOutputTensorShape_.add(value); - onChanged(); - } else { - defaultGraphOutputTensorShapeBuilder_.addMessage(value); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public Builder addDefaultGraphOutputTensorShape( - int index, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto value) { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDefaultGraphOutputTensorShapeIsMutable(); - defaultGraphOutputTensorShape_.add(index, value); - onChanged(); - } else { - defaultGraphOutputTensorShapeBuilder_.addMessage(index, value); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public Builder addDefaultGraphOutputTensorShape( - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder builderForValue) { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - ensureDefaultGraphOutputTensorShapeIsMutable(); - defaultGraphOutputTensorShape_.add(builderForValue.build()); - onChanged(); - } else { - defaultGraphOutputTensorShapeBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public Builder addDefaultGraphOutputTensorShape( - int index, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder builderForValue) { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - ensureDefaultGraphOutputTensorShapeIsMutable(); - defaultGraphOutputTensorShape_.add(index, builderForValue.build()); - onChanged(); - } else { - defaultGraphOutputTensorShapeBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public Builder addAllDefaultGraphOutputTensorShape( - java.lang.Iterable values) { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - ensureDefaultGraphOutputTensorShapeIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, defaultGraphOutputTensorShape_); - onChanged(); - } else { - defaultGraphOutputTensorShapeBuilder_.addAllMessages(values); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public Builder clearDefaultGraphOutputTensorShape() { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - defaultGraphOutputTensorShape_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - onChanged(); - } else { - defaultGraphOutputTensorShapeBuilder_.clear(); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public Builder removeDefaultGraphOutputTensorShape(int index) { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - ensureDefaultGraphOutputTensorShapeIsMutable(); - defaultGraphOutputTensorShape_.remove(index); - onChanged(); - } else { - defaultGraphOutputTensorShapeBuilder_.remove(index); - } - return this; - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder getDefaultGraphOutputTensorShapeBuilder( - int index) { - return getDefaultGraphOutputTensorShapeFieldBuilder().getBuilder(index); - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder getDefaultGraphOutputTensorShapeOrBuilder( - int index) { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - return defaultGraphOutputTensorShape_.get(index); } else { - return defaultGraphOutputTensorShapeBuilder_.getMessageOrBuilder(index); - } - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public java.util.List - getDefaultGraphOutputTensorShapeOrBuilderList() { - if (defaultGraphOutputTensorShapeBuilder_ != null) { - return defaultGraphOutputTensorShapeBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(defaultGraphOutputTensorShape_); - } - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder addDefaultGraphOutputTensorShapeBuilder() { - return getDefaultGraphOutputTensorShapeFieldBuilder().addBuilder( - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.getDefaultInstance()); - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder addDefaultGraphOutputTensorShapeBuilder( - int index) { - return getDefaultGraphOutputTensorShapeFieldBuilder().addBuilder( - index, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.getDefaultInstance()); - } - /** - *
      -     * Optional: Default graph input tensor shape used to allocate memory
      -     * before executing op
      -     * TODO(satok): Remote output tensor shape once shape information is stored
      -     * in NodeDef
      -     * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - public java.util.List - getDefaultGraphOutputTensorShapeBuilderList() { - return getDefaultGraphOutputTensorShapeFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilderV3< - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder> - getDefaultGraphOutputTensorShapeFieldBuilder() { - if (defaultGraphOutputTensorShapeBuilder_ == null) { - defaultGraphOutputTensorShapeBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto.Builder, org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder>( - defaultGraphOutputTensorShape_, - ((bitField0_ & 0x00000008) != 0), - getParentForChildren(), - isClean()); - defaultGraphOutputTensorShape_ = null; - } - return defaultGraphOutputTensorShapeBuilder_; - } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:tensorflow.RemoteFusedGraphExecuteInfo) - } - - // @@protoc_insertion_point(class_scope:tensorflow.RemoteFusedGraphExecuteInfo) - private static final org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo(); - } - - public static org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public RemoteFusedGraphExecuteInfo parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new RemoteFusedGraphExecuteInfo(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - -} - diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfoOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfoOrBuilder.java deleted file mode 100644 index 6e14ad8a2e0..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfoOrBuilder.java +++ /dev/null @@ -1,239 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: tensorflow/core/framework/remote_fused_graph_execute_info.proto - -package org.tensorflow.proto.framework; - -public interface RemoteFusedGraphExecuteInfoOrBuilder extends - // @@protoc_insertion_point(interface_extends:tensorflow.RemoteFusedGraphExecuteInfo) - com.google.protobuf.MessageOrBuilder { - - /** - *
      -   * Definition of remote graph
      -   * 
      - * - * .tensorflow.GraphDef remote_graph = 1; - */ - boolean hasRemoteGraph(); - /** - *
      -   * Definition of remote graph
      -   * 
      - * - * .tensorflow.GraphDef remote_graph = 1; - */ - org.tensorflow.proto.framework.GraphDef getRemoteGraph(); - /** - *
      -   * Definition of remote graph
      -   * 
      - * - * .tensorflow.GraphDef remote_graph = 1; - */ - org.tensorflow.proto.framework.GraphDefOrBuilder getRemoteGraphOrBuilder(); - - /** - *
      -   * Remote fused graph input node name
      -   * 
      - * - * repeated string graph_input_node_name = 2; - */ - java.util.List - getGraphInputNodeNameList(); - /** - *
      -   * Remote fused graph input node name
      -   * 
      - * - * repeated string graph_input_node_name = 2; - */ - int getGraphInputNodeNameCount(); - /** - *
      -   * Remote fused graph input node name
      -   * 
      - * - * repeated string graph_input_node_name = 2; - */ - java.lang.String getGraphInputNodeName(int index); - /** - *
      -   * Remote fused graph input node name
      -   * 
      - * - * repeated string graph_input_node_name = 2; - */ - com.google.protobuf.ByteString - getGraphInputNodeNameBytes(int index); - - /** - *
      -   * Remote fused graph output node name
      -   * 
      - * - * repeated string graph_output_node_name = 3; - */ - java.util.List - getGraphOutputNodeNameList(); - /** - *
      -   * Remote fused graph output node name
      -   * 
      - * - * repeated string graph_output_node_name = 3; - */ - int getGraphOutputNodeNameCount(); - /** - *
      -   * Remote fused graph output node name
      -   * 
      - * - * repeated string graph_output_node_name = 3; - */ - java.lang.String getGraphOutputNodeName(int index); - /** - *
      -   * Remote fused graph output node name
      -   * 
      - * - * repeated string graph_output_node_name = 3; - */ - com.google.protobuf.ByteString - getGraphOutputNodeNameBytes(int index); - - /** - *
      -   * Executor's name
      -   * 
      - * - * string executor_name = 4; - */ - java.lang.String getExecutorName(); - /** - *
      -   * Executor's name
      -   * 
      - * - * string executor_name = 4; - */ - com.google.protobuf.ByteString - getExecutorNameBytes(); - - /** - *
      -   * Optional: Parameters given to the executor
      -   * 
      - * - * bytes serialized_executor_parameters = 5; - */ - com.google.protobuf.ByteString getSerializedExecutorParameters(); - - /** - *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - java.util.List - getDefaultGraphInputTensorShapeList(); - /** - *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto getDefaultGraphInputTensorShape(int index); - /** - *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - int getDefaultGraphInputTensorShapeCount(); - /** - *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - java.util.List - getDefaultGraphInputTensorShapeOrBuilderList(); - /** - *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_input_tensor_shape = 6; - */ - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder getDefaultGraphInputTensorShapeOrBuilder( - int index); - - /** - *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * TODO(satok): Remote output tensor shape once shape information is stored
      -   * in NodeDef
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - java.util.List - getDefaultGraphOutputTensorShapeList(); - /** - *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * TODO(satok): Remote output tensor shape once shape information is stored
      -   * in NodeDef
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto getDefaultGraphOutputTensorShape(int index); - /** - *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * TODO(satok): Remote output tensor shape once shape information is stored
      -   * in NodeDef
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - int getDefaultGraphOutputTensorShapeCount(); - /** - *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * TODO(satok): Remote output tensor shape once shape information is stored
      -   * in NodeDef
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - java.util.List - getDefaultGraphOutputTensorShapeOrBuilderList(); - /** - *
      -   * Optional: Default graph input tensor shape used to allocate memory
      -   * before executing op
      -   * TODO(satok): Remote output tensor shape once shape information is stored
      -   * in NodeDef
      -   * 
      - * - * repeated .tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto default_graph_output_tensor_shape = 7; - */ - org.tensorflow.proto.framework.RemoteFusedGraphExecuteInfo.TensorShapeTypeProtoOrBuilder getDefaultGraphOutputTensorShapeOrBuilder( - int index); -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfoProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfoProto.java deleted file mode 100644 index 95aeddaf025..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RemoteFusedGraphExecuteInfoProto.java +++ /dev/null @@ -1,85 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tensorflow/core/framework/remote_fused_graph_execute_info.proto - -package org.tensorflow.proto.framework; - -public final class RemoteFusedGraphExecuteInfoProto { - private RemoteFusedGraphExecuteInfoProto() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistryLite registry) { - } - - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - registerAllExtensions( - (com.google.protobuf.ExtensionRegistryLite) registry); - } - static final com.google.protobuf.Descriptors.Descriptor - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_descriptor; - static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_fieldAccessorTable; - static final com.google.protobuf.Descriptors.Descriptor - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto_descriptor; - static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n?tensorflow/core/framework/remote_fused" + - "_graph_execute_info.proto\022\ntensorflow\032%t" + - "ensorflow/core/framework/graph.proto\032,te" + - "nsorflow/core/framework/tensor_shape.pro" + - "to\032%tensorflow/core/framework/types.prot" + - "o\"\202\004\n\033RemoteFusedGraphExecuteInfo\022*\n\014rem" + - "ote_graph\030\001 \001(\0132\024.tensorflow.GraphDef\022\035\n" + - "\025graph_input_node_name\030\002 \003(\t\022\036\n\026graph_ou" + - "tput_node_name\030\003 \003(\t\022\025\n\rexecutor_name\030\004 " + - "\001(\t\022&\n\036serialized_executor_parameters\030\005 " + - "\001(\014\022f\n default_graph_input_tensor_shape\030" + - "\006 \003(\0132<.tensorflow.RemoteFusedGraphExecu" + - "teInfo.TensorShapeTypeProto\022g\n!default_g" + - "raph_output_tensor_shape\030\007 \003(\0132<.tensorf" + - "low.RemoteFusedGraphExecuteInfo.TensorSh" + - "apeTypeProto\032h\n\024TensorShapeTypeProto\022#\n\005" + - "dtype\030\001 \001(\0162\024.tensorflow.DataType\022+\n\005sha" + - "pe\030\002 \001(\0132\034.tensorflow.TensorShapeProtoB\257" + - "\001\n\036org.tensorflow.proto.frameworkB Remot" + - "eFusedGraphExecuteInfoProtoP\001Zfgithub.co" + - "m/tensorflow/tensorflow/tensorflow/go/co" + - "re/framework/remote_fused_graph_execute_" + - "info_go_proto\370\001\001b\006proto3" - }; - descriptor = com.google.protobuf.Descriptors.FileDescriptor - 
.internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - org.tensorflow.proto.framework.GraphProtos.getDescriptor(), - org.tensorflow.proto.framework.TensorShapeProtos.getDescriptor(), - org.tensorflow.proto.framework.TypesProtos.getDescriptor(), - }); - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_descriptor, - new java.lang.String[] { "RemoteGraph", "GraphInputNodeName", "GraphOutputNodeName", "ExecutorName", "SerializedExecutorParameters", "DefaultGraphInputTensorShape", "DefaultGraphOutputTensorShape", }); - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto_descriptor = - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_descriptor.getNestedTypes().get(0); - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_tensorflow_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto_descriptor, - new java.lang.String[] { "Dtype", "Shape", }); - org.tensorflow.proto.framework.GraphProtos.getDescriptor(); - org.tensorflow.proto.framework.TensorShapeProtos.getDescriptor(); - org.tensorflow.proto.framework.TypesProtos.getDescriptor(); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfig.java index 9665f55231c..c914aee785c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfig.java @@ -37,6 +37,7 @@ private RewriterConfig() { implementationSelector_ = 0; autoMixedPrecision_ = 0; autoMixedPrecisionMkl_ = 0; + usePluginOptimizers_ = 0; metaOptimizerIterations_ = 0; memoryOptimization_ = 0; memoryOptimizerTargetNodeNameScope_ = ""; @@ -239,6 +240,17 @@ private RewriterConfig( experimentalDisableCompressedTensorOptimization_ = input.readBool(); break; } + case 216: { + + experimentalDisableFoldingQuantizationEmulation_ = input.readBool(); + break; + } + case 224: { + int rawValue = input.readEnum(); + + usePluginOptimizers_ = rawValue; + break; + } case 400: { int rawValue = input.readEnum(); @@ -2212,6 +2224,31 @@ public boolean getDisableMetaOptimizer() { return disableMetaOptimizer_; } + public static final int USE_PLUGIN_OPTIMIZERS_FIELD_NUMBER = 28; + private int usePluginOptimizers_; + /** + *
      +   * Optimizers registered by plugin (default is ON)
      +   * 
      + * + * .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28; + */ + public int getUsePluginOptimizersValue() { + return usePluginOptimizers_; + } + /** + *
      +   * Optimizers registered by plugin (default is ON)
      +   * 
      + * + * .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28; + */ + public org.tensorflow.proto.framework.RewriterConfig.Toggle getUsePluginOptimizers() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.framework.RewriterConfig.Toggle result = org.tensorflow.proto.framework.RewriterConfig.Toggle.valueOf(usePluginOptimizers_); + return result == null ? org.tensorflow.proto.framework.RewriterConfig.Toggle.UNRECOGNIZED : result; + } + public static final int META_OPTIMIZER_ITERATIONS_FIELD_NUMBER = 12; private int metaOptimizerIterations_; /** @@ -2269,6 +2306,24 @@ public boolean getExperimentalDisableCompressedTensorOptimization() { return experimentalDisableCompressedTensorOptimization_; } + public static final int EXPERIMENTAL_DISABLE_FOLDING_QUANTIZATION_EMULATION_FIELD_NUMBER = 27; + private boolean experimentalDisableFoldingQuantizationEmulation_; + /** + *
      +   * Disable folding quantization emulation ops such as FakeQuantWithMinMax* and
      +   * QuantizeAndDequantize*. Some compilers (e.g. the TF-to-tflite converter)
      +   * have to extract quantization configs (e.g. min/max range, number of bits,
      +   * and per-channel) from the quantization emulation ops. Note that this flag
      +   * is experimental and may be removed in the future. See b/174138564 for more
      +   * details.
      +   * 
      + * + * bool experimental_disable_folding_quantization_emulation = 27; + */ + public boolean getExperimentalDisableFoldingQuantizationEmulation() { + return experimentalDisableFoldingQuantizationEmulation_; + } + public static final int MEMORY_OPTIMIZATION_FIELD_NUMBER = 4; private int memoryOptimization_; /** @@ -2738,6 +2793,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (experimentalDisableCompressedTensorOptimization_ != false) { output.writeBool(26, experimentalDisableCompressedTensorOptimization_); } + if (experimentalDisableFoldingQuantizationEmulation_ != false) { + output.writeBool(27, experimentalDisableFoldingQuantizationEmulation_); + } + if (usePluginOptimizers_ != org.tensorflow.proto.framework.RewriterConfig.Toggle.DEFAULT.getNumber()) { + output.writeEnum(28, usePluginOptimizers_); + } if (cpuLayoutConversion_ != org.tensorflow.proto.framework.RewriterConfig.CpuLayout.NO_CONVERSION_ON_CPU.getNumber()) { output.writeEnum(50, cpuLayoutConversion_); } @@ -2865,6 +2926,14 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeBoolSize(26, experimentalDisableCompressedTensorOptimization_); } + if (experimentalDisableFoldingQuantizationEmulation_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(27, experimentalDisableFoldingQuantizationEmulation_); + } + if (usePluginOptimizers_ != org.tensorflow.proto.framework.RewriterConfig.Toggle.DEFAULT.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(28, usePluginOptimizers_); + } if (cpuLayoutConversion_ != org.tensorflow.proto.framework.RewriterConfig.CpuLayout.NO_CONVERSION_ON_CPU.getNumber()) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(50, cpuLayoutConversion_); @@ -2924,11 +2993,14 @@ public boolean equals(final java.lang.Object obj) { if (autoMixedPrecisionMkl_ != other.autoMixedPrecisionMkl_) return false; if (getDisableMetaOptimizer() != other.getDisableMetaOptimizer()) return false; + if (usePluginOptimizers_ != other.usePluginOptimizers_) return false; if (metaOptimizerIterations_ != other.metaOptimizerIterations_) return false; if (getMinGraphNodes() != other.getMinGraphNodes()) return false; if (getExperimentalDisableCompressedTensorOptimization() != other.getExperimentalDisableCompressedTensorOptimization()) return false; + if (getExperimentalDisableFoldingQuantizationEmulation() + != other.getExperimentalDisableFoldingQuantizationEmulation()) return false; if (memoryOptimization_ != other.memoryOptimization_) return false; if (!getMemoryOptimizerTargetNodeNameScope() .equals(other.getMemoryOptimizerTargetNodeNameScope())) return false; @@ -3009,6 +3081,8 @@ public int hashCode() { hash = (37 * hash) + DISABLE_META_OPTIMIZER_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getDisableMetaOptimizer()); + hash = (37 * hash) + USE_PLUGIN_OPTIMIZERS_FIELD_NUMBER; + hash = (53 * hash) + usePluginOptimizers_; hash = (37 * hash) + META_OPTIMIZER_ITERATIONS_FIELD_NUMBER; hash = (53 * hash) + metaOptimizerIterations_; hash = (37 * hash) + MIN_GRAPH_NODES_FIELD_NUMBER; @@ -3016,6 +3090,9 @@ public int hashCode() { hash = (37 * hash) + EXPERIMENTAL_DISABLE_COMPRESSED_TENSOR_OPTIMIZATION_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getExperimentalDisableCompressedTensorOptimization()); + hash = (37 * hash) + EXPERIMENTAL_DISABLE_FOLDING_QUANTIZATION_EMULATION_FIELD_NUMBER; + hash = (53 * hash) + 
com.google.protobuf.Internal.hashBoolean( + getExperimentalDisableFoldingQuantizationEmulation()); hash = (37 * hash) + MEMORY_OPTIMIZATION_FIELD_NUMBER; hash = (53 * hash) + memoryOptimization_; hash = (37 * hash) + MEMORY_OPTIMIZER_TARGET_NODE_NAME_SCOPE_FIELD_NUMBER; @@ -3225,12 +3302,16 @@ public Builder clear() { disableMetaOptimizer_ = false; + usePluginOptimizers_ = 0; + metaOptimizerIterations_ = 0; minGraphNodes_ = 0; experimentalDisableCompressedTensorOptimization_ = false; + experimentalDisableFoldingQuantizationEmulation_ = false; + memoryOptimization_ = 0; memoryOptimizerTargetNodeNameScope_ = ""; @@ -3316,9 +3397,11 @@ public org.tensorflow.proto.framework.RewriterConfig buildPartial() { result.autoMixedPrecision_ = autoMixedPrecision_; result.autoMixedPrecisionMkl_ = autoMixedPrecisionMkl_; result.disableMetaOptimizer_ = disableMetaOptimizer_; + result.usePluginOptimizers_ = usePluginOptimizers_; result.metaOptimizerIterations_ = metaOptimizerIterations_; result.minGraphNodes_ = minGraphNodes_; result.experimentalDisableCompressedTensorOptimization_ = experimentalDisableCompressedTensorOptimization_; + result.experimentalDisableFoldingQuantizationEmulation_ = experimentalDisableFoldingQuantizationEmulation_; result.memoryOptimization_ = memoryOptimization_; result.memoryOptimizerTargetNodeNameScope_ = memoryOptimizerTargetNodeNameScope_; result.metaOptimizerTimeoutMs_ = metaOptimizerTimeoutMs_; @@ -3459,6 +3542,9 @@ public Builder mergeFrom(org.tensorflow.proto.framework.RewriterConfig other) { if (other.getDisableMetaOptimizer() != false) { setDisableMetaOptimizer(other.getDisableMetaOptimizer()); } + if (other.usePluginOptimizers_ != 0) { + setUsePluginOptimizersValue(other.getUsePluginOptimizersValue()); + } if (other.metaOptimizerIterations_ != 0) { setMetaOptimizerIterationsValue(other.getMetaOptimizerIterationsValue()); } @@ -3468,6 +3554,9 @@ public Builder mergeFrom(org.tensorflow.proto.framework.RewriterConfig other) { if (other.getExperimentalDisableCompressedTensorOptimization() != false) { setExperimentalDisableCompressedTensorOptimization(other.getExperimentalDisableCompressedTensorOptimization()); } + if (other.getExperimentalDisableFoldingQuantizationEmulation() != false) { + setExperimentalDisableFoldingQuantizationEmulation(other.getExperimentalDisableFoldingQuantizationEmulation()); + } if (other.memoryOptimization_ != 0) { setMemoryOptimizationValue(other.getMemoryOptimizationValue()); } @@ -4750,6 +4839,71 @@ public Builder clearDisableMetaOptimizer() { return this; } + private int usePluginOptimizers_ = 0; + /** + *
      +     * Optimizers registered by plugin (default is ON)
      +     * 
      + * + * .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28; + */ + public int getUsePluginOptimizersValue() { + return usePluginOptimizers_; + } + /** + *
      +     * Optimizers registered by plugin (default is ON)
      +     * 
      + * + * .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28; + */ + public Builder setUsePluginOptimizersValue(int value) { + usePluginOptimizers_ = value; + onChanged(); + return this; + } + /** + *
      +     * Optimizers registered by plugin (default is ON)
      +     * 
      + * + * .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28; + */ + public org.tensorflow.proto.framework.RewriterConfig.Toggle getUsePluginOptimizers() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.framework.RewriterConfig.Toggle result = org.tensorflow.proto.framework.RewriterConfig.Toggle.valueOf(usePluginOptimizers_); + return result == null ? org.tensorflow.proto.framework.RewriterConfig.Toggle.UNRECOGNIZED : result; + } + /** + *
      +     * Optimizers registered by plugin (default is ON)
      +     * 
      + * + * .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28; + */ + public Builder setUsePluginOptimizers(org.tensorflow.proto.framework.RewriterConfig.Toggle value) { + if (value == null) { + throw new NullPointerException(); + } + + usePluginOptimizers_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
      +     * Optimizers registered by plugin (default is ON)
      +     * 
      + * + * .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28; + */ + public Builder clearUsePluginOptimizers() { + + usePluginOptimizers_ = 0; + onChanged(); + return this; + } + private int metaOptimizerIterations_ = 0; /** *
      @@ -4908,6 +5062,59 @@ public Builder clearExperimentalDisableCompressedTensorOptimization() {
             return this;
           }
       
      +    private boolean experimentalDisableFoldingQuantizationEmulation_ ;
      +    /**
      +     * 
      +     * Disable folding quantization emulation ops such as FakeQuantWithMinMax* and
      +     * QuantizeAndDequantize*. Some compilers (e.g. the TF-to-tflite converter)
      +     * have to extract quantization configs (e.g. min/max range, number of bits,
      +     * and per-channel) from the quantization emulation ops. Note that this flag
      +     * is experimental and may be removed in the future. See b/174138564 for more
      +     * details.
      +     * 
      + * + * bool experimental_disable_folding_quantization_emulation = 27; + */ + public boolean getExperimentalDisableFoldingQuantizationEmulation() { + return experimentalDisableFoldingQuantizationEmulation_; + } + /** + *
      +     * Disable folding quantization emulation ops such as FakeQuantWithMinMax* and
      +     * QuantizeAndDequantize*. Some compilers (e.g. the TF-to-tflite converter)
      +     * have to extract quantization configs (e.g. min/max range, number of bits,
      +     * and per-channel) from the quantization emulation ops. Note that this flag
      +     * is experimental and may be removed in the future. See b/174138564 for more
      +     * details.
      +     * 
      + * + * bool experimental_disable_folding_quantization_emulation = 27; + */ + public Builder setExperimentalDisableFoldingQuantizationEmulation(boolean value) { + + experimentalDisableFoldingQuantizationEmulation_ = value; + onChanged(); + return this; + } + /** + *
      +     * Disable folding quantization emulation ops such as FakeQuantWithMinMax* and
      +     * QuantizeAndDequantize*. Some compilers (e.g. the TF-to-tflite converter)
      +     * have to extract quantization configs (e.g. min/max range, number of bits,
      +     * and per-channel) from the quantization emulation ops. Note that this flag
      +     * is experimental and may be removed in the future. See b/174138564 for more
      +     * details.
      +     * 
      + * + * bool experimental_disable_folding_quantization_emulation = 27; + */ + public Builder clearExperimentalDisableFoldingQuantizationEmulation() { + + experimentalDisableFoldingQuantizationEmulation_ = false; + onChanged(); + return this; + } + private int memoryOptimization_ = 0; /** *
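For reference, a minimal sketch of driving the two fields added above through the generated builder API (the example class and main method are illustrative only; the setters and getters are the ones generated in this patch):

    import org.tensorflow.proto.framework.RewriterConfig;
    import org.tensorflow.proto.framework.RewriterConfig.Toggle;

    public class RewriterConfigExample {
      public static void main(String[] args) {
        // Keep plugin-registered optimizers enabled (field 28) and disable folding of
        // quantization emulation ops such as FakeQuantWithMinMax* (field 27).
        RewriterConfig config = RewriterConfig.newBuilder()
            .setUsePluginOptimizers(Toggle.ON)
            .setExperimentalDisableFoldingQuantizationEmulation(true)
            .build();
        System.out.println(config.getUsePluginOptimizers());                              // ON
        System.out.println(config.getExperimentalDisableFoldingQuantizationEmulation());  // true
      }
    }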
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigOrBuilder.java
      index baad17a9738..30d25886bd3 100644
      --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigOrBuilder.java
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigOrBuilder.java
      @@ -327,6 +327,23 @@ public interface RewriterConfigOrBuilder extends
          */
         boolean getDisableMetaOptimizer();
       
      +  /**
      +   * 
      +   * Optimizers registered by plugin (default is ON)
      +   * 
      + * + * .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28; + */ + int getUsePluginOptimizersValue(); + /** + *
      +   * Optimizers registered by plugin (default is ON)
      +   * 
      + * + * .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28; + */ + org.tensorflow.proto.framework.RewriterConfig.Toggle getUsePluginOptimizers(); + /** *
          * Controls how many times we run the optimizers in meta optimizer (default
      @@ -368,6 +385,20 @@ public interface RewriterConfigOrBuilder extends
          */
         boolean getExperimentalDisableCompressedTensorOptimization();
       
      +  /**
      +   * 
      +   * Disable folding quantization emulation ops such as FakeQuantWithMinMax* and
      +   * QuantizeAndDequantize*. Some compilers (e.g. the TF-to-tflite converter)
      +   * have to extract quantization configs (e.g. min/max range, number of bits,
      +   * and per-channel) from the quantization emulation ops. Note that this flag
      +   * is experimental and may be removed in the future. See b/174138564 for more
      +   * details.
      +   * 
      + * + * bool experimental_disable_folding_quantization_emulation = 27; + */ + boolean getExperimentalDisableFoldingQuantizationEmulation(); + /** *
          * Configures memory optimization passes through the meta-optimizer. Has no
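The interface additions above are read-only accessors, so an unset message simply reports the proto3 defaults; a short sketch under the same illustrative-naming caveat as the earlier example:

    import org.tensorflow.proto.framework.RewriterConfig;
    import org.tensorflow.proto.framework.RewriterConfigOrBuilder;

    public class RewriterConfigDefaults {
      public static void main(String[] args) {
        RewriterConfigOrBuilder unset = RewriterConfig.getDefaultInstance();
        // Proto3 defaults: enum number 0 (Toggle.DEFAULT) and false.
        System.out.println(unset.getUsePluginOptimizers());                              // DEFAULT
        System.out.println(unset.getExperimentalDisableFoldingQuantizationEmulation());  // false
      }
    }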
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigProtos.java
      index fd75fc78d94..513dd4d850d 100644
      --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigProtos.java
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigProtos.java
      @@ -54,7 +54,7 @@ public static void registerAllExtensions(
             "e/protobuf/verifier_config.proto\";\n\023Auto" +
             "ParallelOptions\022\016\n\006enable\030\001 \001(\010\022\024\n\014num_r" +
             "eplicas\030\002 \001(\005\"+\n\026ScopedAllocatorOptions\022" +
      -      "\021\n\tenable_op\030\001 \003(\t\"\342\022\n\016RewriterConfig\022C\n" +
      +      "\021\n\tenable_op\030\001 \003(\t\"\341\023\n\016RewriterConfig\022C\n" +
             "\025cpu_layout_conversion\0302 \001(\0162$.tensorflo" +
             "w.RewriterConfig.CpuLayout\022;\n\020layout_opt" +
             "imizer\030\001 \001(\0162!.tensorflow.RewriterConfig" +
      @@ -82,43 +82,46 @@ public static void registerAllExtensions(
             "cision\030\027 \001(\0162!.tensorflow.RewriterConfig" +
             ".Toggle\022C\n\030auto_mixed_precision_mkl\030\031 \001(" +
             "\0162!.tensorflow.RewriterConfig.Toggle\022\036\n\026" +
      -      "disable_meta_optimizer\030\023 \001(\010\022O\n\031meta_opt" +
      -      "imizer_iterations\030\014 \001(\0162,.tensorflow.Rew" +
      -      "riterConfig.NumIterationsType\022\027\n\017min_gra" +
      -      "ph_nodes\030\021 \001(\005\022;\n3experimental_disable_c" +
      -      "ompressed_tensor_optimization\030\032 \001(\010\022B\n\023m" +
      -      "emory_optimization\030\004 \001(\0162%.tensorflow.Re" +
      -      "writerConfig.MemOptType\022/\n\'memory_optimi" +
      -      "zer_target_node_name_scope\030\006 \001(\t\022!\n\031meta" +
      -      "_optimizer_timeout_ms\030\024 \001(\003\0226\n\rauto_para" +
      -      "llel\030\005 \001(\0132\037.tensorflow.AutoParallelOpti" +
      -      "ons\022 \n\030fail_on_optimizer_errors\030\025 \001(\010\022A\n" +
      -      "\025scoped_allocator_opts\030\020 \001(\0132\".tensorflo" +
      -      "w.ScopedAllocatorOptions\022\022\n\noptimizers\030d" +
      -      " \003(\t\022K\n\021custom_optimizers\030\310\001 \003(\0132/.tenso" +
      -      "rflow.RewriterConfig.CustomGraphOptimize" +
      -      "r\022D\n\037inter_optimizer_verifier_config\030\254\002 " +
      -      "\001(\0132\032.tensorflow.VerifierConfig\022F\n!post_" +
      -      "optimization_verifier_config\030\255\002 \001(\0132\032.te" +
      -      "nsorflow.VerifierConfig\032\312\001\n\024CustomGraphO" +
      -      "ptimizer\022\014\n\004name\030\001 \001(\t\022X\n\rparameter_map\030" +
      -      "\002 \003(\0132A.tensorflow.RewriterConfig.Custom" +
      -      "GraphOptimizer.ParameterMapEntry\032J\n\021Para" +
      -      "meterMapEntry\022\013\n\003key\030\001 \001(\t\022$\n\005value\030\002 \001(" +
      -      "\0132\025.tensorflow.AttrValue:\0028\001\"6\n\006Toggle\022\013" +
      -      "\n\007DEFAULT\020\000\022\006\n\002ON\020\001\022\007\n\003OFF\020\002\022\016\n\nAGGRESSI" +
      -      "VE\020\003\"I\n\tCpuLayout\022\030\n\024NO_CONVERSION_ON_CP" +
      -      "U\020\000\022\020\n\014NCHW_TO_NHWC\020\001\022\020\n\014NHWC_TO_NCHW\020\002\"" +
      -      "<\n\021NumIterationsType\022\025\n\021DEFAULT_NUM_ITER" +
      -      "S\020\000\022\007\n\003ONE\020\001\022\007\n\003TWO\020\002\"\237\001\n\nMemOptType\022\023\n\017" +
      -      "DEFAULT_MEM_OPT\020\000\022\016\n\nNO_MEM_OPT\020\001\022\n\n\006MAN" +
      -      "UAL\020\002\022\027\n\023SWAPPING_HEURISTICS\020\004\022\034\n\030RECOMP" +
      -      "UTATION_HEURISTICS\020\005\022\031\n\025SCHEDULING_HEURI" +
      -      "STICS\020\006\022\016\n\nHEURISTICS\020\003B\222\001\n\036org.tensorfl" +
      -      "ow.proto.frameworkB\024RewriterConfigProtos" +
      -      "P\001ZUgithub.com/tensorflow/tensorflow/ten" +
      -      "sorflow/go/core/protobuf/for_core_protos" +
      -      "_go_proto\370\001\001b\006proto3"
      +      "disable_meta_optimizer\030\023 \001(\010\022@\n\025use_plug" +
      +      "in_optimizers\030\034 \001(\0162!.tensorflow.Rewrite" +
      +      "rConfig.Toggle\022O\n\031meta_optimizer_iterati" +
      +      "ons\030\014 \001(\0162,.tensorflow.RewriterConfig.Nu" +
      +      "mIterationsType\022\027\n\017min_graph_nodes\030\021 \001(\005" +
      +      "\022;\n3experimental_disable_compressed_tens" +
      +      "or_optimization\030\032 \001(\010\022;\n3experimental_di" +
      +      "sable_folding_quantization_emulation\030\033 \001" +
      +      "(\010\022B\n\023memory_optimization\030\004 \001(\0162%.tensor" +
      +      "flow.RewriterConfig.MemOptType\022/\n\'memory" +
      +      "_optimizer_target_node_name_scope\030\006 \001(\t\022" +
      +      "!\n\031meta_optimizer_timeout_ms\030\024 \001(\003\0226\n\rau" +
      +      "to_parallel\030\005 \001(\0132\037.tensorflow.AutoParal" +
      +      "lelOptions\022 \n\030fail_on_optimizer_errors\030\025" +
      +      " \001(\010\022A\n\025scoped_allocator_opts\030\020 \001(\0132\".te" +
      +      "nsorflow.ScopedAllocatorOptions\022\022\n\noptim" +
      +      "izers\030d \003(\t\022K\n\021custom_optimizers\030\310\001 \003(\0132" +
      +      "/.tensorflow.RewriterConfig.CustomGraphO" +
      +      "ptimizer\022D\n\037inter_optimizer_verifier_con" +
      +      "fig\030\254\002 \001(\0132\032.tensorflow.VerifierConfig\022F" +
      +      "\n!post_optimization_verifier_config\030\255\002 \001" +
      +      "(\0132\032.tensorflow.VerifierConfig\032\312\001\n\024Custo" +
      +      "mGraphOptimizer\022\014\n\004name\030\001 \001(\t\022X\n\rparamet" +
      +      "er_map\030\002 \003(\0132A.tensorflow.RewriterConfig" +
      +      ".CustomGraphOptimizer.ParameterMapEntry\032" +
      +      "J\n\021ParameterMapEntry\022\013\n\003key\030\001 \001(\t\022$\n\005val" +
      +      "ue\030\002 \001(\0132\025.tensorflow.AttrValue:\0028\001\"6\n\006T" +
      +      "oggle\022\013\n\007DEFAULT\020\000\022\006\n\002ON\020\001\022\007\n\003OFF\020\002\022\016\n\nA" +
      +      "GGRESSIVE\020\003\"I\n\tCpuLayout\022\030\n\024NO_CONVERSIO" +
      +      "N_ON_CPU\020\000\022\020\n\014NCHW_TO_NHWC\020\001\022\020\n\014NHWC_TO_" +
      +      "NCHW\020\002\"<\n\021NumIterationsType\022\025\n\021DEFAULT_N" +
      +      "UM_ITERS\020\000\022\007\n\003ONE\020\001\022\007\n\003TWO\020\002\"\237\001\n\nMemOptT" +
      +      "ype\022\023\n\017DEFAULT_MEM_OPT\020\000\022\016\n\nNO_MEM_OPT\020\001" +
      +      "\022\n\n\006MANUAL\020\002\022\027\n\023SWAPPING_HEURISTICS\020\004\022\034\n" +
      +      "\030RECOMPUTATION_HEURISTICS\020\005\022\031\n\025SCHEDULIN" +
      +      "G_HEURISTICS\020\006\022\016\n\nHEURISTICS\020\003B\222\001\n\036org.t" +
      +      "ensorflow.proto.frameworkB\024RewriterConfi" +
      +      "gProtosP\001ZUgithub.com/tensorflow/tensorf" +
      +      "low/tensorflow/go/core/protobuf/for_core" +
      +      "_protos_go_proto\370\001\001b\006proto3"
           };
           descriptor = com.google.protobuf.Descriptors.FileDescriptor
             .internalBuildGeneratedFileFrom(descriptorData,
      @@ -143,7 +146,7 @@ public static void registerAllExtensions(
           internal_static_tensorflow_RewriterConfig_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
               internal_static_tensorflow_RewriterConfig_descriptor,
      -        new java.lang.String[] { "CpuLayoutConversion", "LayoutOptimizer", "ConstantFolding", "ShapeOptimization", "Remapping", "CommonSubgraphElimination", "ArithmeticOptimization", "DependencyOptimization", "LoopOptimization", "FunctionOptimization", "DebugStripper", "DisableModelPruning", "ScopedAllocatorOptimization", "PinToHostOptimization", "ImplementationSelector", "AutoMixedPrecision", "AutoMixedPrecisionMkl", "DisableMetaOptimizer", "MetaOptimizerIterations", "MinGraphNodes", "ExperimentalDisableCompressedTensorOptimization", "MemoryOptimization", "MemoryOptimizerTargetNodeNameScope", "MetaOptimizerTimeoutMs", "AutoParallel", "FailOnOptimizerErrors", "ScopedAllocatorOpts", "Optimizers", "CustomOptimizers", "InterOptimizerVerifierConfig", "PostOptimizationVerifierConfig", });
      +        new java.lang.String[] { "CpuLayoutConversion", "LayoutOptimizer", "ConstantFolding", "ShapeOptimization", "Remapping", "CommonSubgraphElimination", "ArithmeticOptimization", "DependencyOptimization", "LoopOptimization", "FunctionOptimization", "DebugStripper", "DisableModelPruning", "ScopedAllocatorOptimization", "PinToHostOptimization", "ImplementationSelector", "AutoMixedPrecision", "AutoMixedPrecisionMkl", "DisableMetaOptimizer", "UsePluginOptimizers", "MetaOptimizerIterations", "MinGraphNodes", "ExperimentalDisableCompressedTensorOptimization", "ExperimentalDisableFoldingQuantizationEmulation", "MemoryOptimization", "MemoryOptimizerTargetNodeNameScope", "MetaOptimizerTimeoutMs", "AutoParallel", "FailOnOptimizerErrors", "ScopedAllocatorOpts", "Optimizers", "CustomOptimizers", "InterOptimizerVerifierConfig", "PostOptimizationVerifierConfig", });
           internal_static_tensorflow_RewriterConfig_CustomGraphOptimizer_descriptor =
             internal_static_tensorflow_RewriterConfig_descriptor.getNestedTypes().get(0);
           internal_static_tensorflow_RewriterConfig_CustomGraphOptimizer_fieldAccessorTable = new
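In practice the RewriterConfig described by the rewritten descriptor above is consumed through a session ConfigProto; a hedged sketch, assuming the ConfigProto and GraphOptions messages generated in the same org.tensorflow.proto.framework package (they are not part of this patch):

    import org.tensorflow.proto.framework.ConfigProto;
    import org.tensorflow.proto.framework.GraphOptions;
    import org.tensorflow.proto.framework.RewriterConfig;
    import org.tensorflow.proto.framework.RewriterConfig.Toggle;

    public class GrapplerOptionsExample {
      public static void main(String[] args) {
        // RewriterConfig normally travels as ConfigProto.graph_options.rewrite_options.
        ConfigProto config = ConfigProto.newBuilder()
            .setGraphOptions(GraphOptions.newBuilder()
                .setRewriteOptions(RewriterConfig.newBuilder()
                    .setUsePluginOptimizers(Toggle.OFF)
                    .setExperimentalDisableFoldingQuantizationEmulation(true)))
            .build();
        System.out.println(
            config.getGraphOptions().getRewriteOptions().getUsePluginOptimizers());  // OFF
      }
    }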
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java
      index d38d76b85a1..fc2595a31ce 100644
      --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java
      @@ -284,7 +284,7 @@ public int getNumber() {
          * 
          * Objects which this object depends on: named edges in the dependency
          * graph.
      -   * Note: currently only valid if kind == "user_object".
      +   * Note: currently only valid if kind == "user_object" or "resource".
          * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -296,7 +296,7 @@ public java.util.List * Objects which this object depends on: named edges in the dependency * graph. - * Note: currently only valid if kind == "user_object". + * Note: currently only valid if kind == "user_object" or "resource". *
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -309,7 +309,7 @@ public java.util.List * Objects which this object depends on: named edges in the dependency * graph. - * Note: currently only valid if kind == "user_object". + * Note: currently only valid if kind == "user_object" or "resource". *
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -321,7 +321,7 @@ public int getChildrenCount() { *
          * Objects which this object depends on: named edges in the dependency
          * graph.
      -   * Note: currently only valid if kind == "user_object".
      +   * Note: currently only valid if kind == "user_object" or "resource".
          * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -333,7 +333,7 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.Objec *
          * Objects which this object depends on: named edges in the dependency
          * graph.
      -   * Note: currently only valid if kind == "user_object".
      +   * Note: currently only valid if kind == "user_object" or "resource".
          * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1347,7 +1347,7 @@ private void ensureChildrenIsMutable() { *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1363,7 +1363,7 @@ public java.util.List * Objects which this object depends on: named edges in the dependency * graph. - * Note: currently only valid if kind == "user_object". + * Note: currently only valid if kind == "user_object" or "resource". *
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1379,7 +1379,7 @@ public int getChildrenCount() { *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1395,7 +1395,7 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.Objec *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1418,7 +1418,7 @@ public Builder setChildren( *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1438,7 +1438,7 @@ public Builder setChildren( *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1460,7 +1460,7 @@ public Builder addChildren(org.tensorflow.proto.framework.TrackableObjectGraph.T *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1483,7 +1483,7 @@ public Builder addChildren( *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1503,7 +1503,7 @@ public Builder addChildren( *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1523,7 +1523,7 @@ public Builder addChildren( *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1544,7 +1544,7 @@ public Builder addAllChildren( *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1563,7 +1563,7 @@ public Builder clearChildren() { *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1582,7 +1582,7 @@ public Builder removeChildren(int index) { *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1595,7 +1595,7 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.Objec *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1611,7 +1611,7 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.Objec *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1628,7 +1628,7 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.Objec *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1641,7 +1641,7 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.Objec *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -1655,7 +1655,7 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.Objec *
            * Objects which this object depends on: named edges in the dependency
            * graph.
      -     * Note: currently only valid if kind == "user_object".
      +     * Note: currently only valid if kind == "user_object" or "resource".
            * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java index 9405d43b87d..42f43aee942 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java @@ -149,20 +149,20 @@ public static void registerAllExtensions( "sorflow.VariableAggregation\022\014\n\004name\030\006 \001(" + "\t\022\016\n\006device\030\007 \001(\t\022O\n,experimental_distri" + "buted_variable_components\030\010 \003(\0132\031.tensor" + - "flow.SavedVariable\"\226\002\n\014FunctionSpec\0220\n\013f" + + "flow.SavedVariable\"\373\001\n\014FunctionSpec\0220\n\013f" + "ullargspec\030\001 \001(\0132\033.tensorflow.Structured" + "Value\022\021\n\tis_method\030\002 \001(\010\0224\n\017input_signat" + - "ure\030\005 \001(\0132\033.tensorflow.StructuredValue\022J" + - "\n\024experimental_compile\030\006 \001(\0162,.tensorflo" + - "w.FunctionSpec.ExperimentalCompile\"3\n\023Ex" + - "perimentalCompile\022\013\n\007DEFAULT\020\000\022\006\n\002ON\020\001\022\007" + - "\n\003OFF\020\002J\004\010\003\020\004J\004\010\004\020\005\"\037\n\rSavedResource\022\016\n\006" + - "device\030\001 \001(\t\"A\n\016SaveableObject\022\025\n\rsave_f" + - "unction\030\002 \001(\005\022\030\n\020restore_function\030\003 \001(\005B" + - "\224\001\n\036org.tensorflow.proto.frameworkB\026Save" + - "dObjectGraphProtosP\001ZUgithub.com/tensorf" + - "low/tensorflow/tensorflow/go/core/protob" + - "uf/for_core_protos_go_proto\370\001\001b\006proto3" + "ure\030\005 \001(\0132\033.tensorflow.StructuredValue\0228" + + "\n\013jit_compile\030\006 \001(\0162#.tensorflow.Functio" + + "nSpec.JitCompile\"*\n\nJitCompile\022\013\n\007DEFAUL" + + "T\020\000\022\006\n\002ON\020\001\022\007\n\003OFF\020\002J\004\010\003\020\004J\004\010\004\020\005\"\037\n\rSave" + + "dResource\022\016\n\006device\030\001 \001(\t\"A\n\016SaveableObj" + + "ect\022\025\n\rsave_function\030\002 \001(\005\022\030\n\020restore_fu" + + "nction\030\003 \001(\005B\224\001\n\036org.tensorflow.proto.fr" + + "ameworkB\026SavedObjectGraphProtosP\001ZUgithu" + + "b.com/tensorflow/tensorflow/tensorflow/g" + + "o/core/protobuf/for_core_protos_go_proto" + + "\370\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -245,7 +245,7 @@ public static void registerAllExtensions( internal_static_tensorflow_FunctionSpec_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_FunctionSpec_descriptor, - new java.lang.String[] { "Fullargspec", "IsMethod", "InputSignature", "ExperimentalCompile", }); + new java.lang.String[] { "Fullargspec", "IsMethod", "InputSignature", "JitCompile", }); internal_static_tensorflow_SavedResource_descriptor = getDescriptor().getMessageTypes().get(10); internal_static_tensorflow_SavedResource_fieldAccessorTable = new diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java index 269ee74c9f2..79283c86bce 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java @@ -11,7 +11,7 @@ public interface SavedObjectOrBuilder extends *
          * Objects which this object depends on: named edges in the dependency
          * graph.
      -   * Note: currently only valid if kind == "user_object".
      +   * Note: currently only valid if kind == "user_object" or "resource".
          * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -22,7 +22,7 @@ public interface SavedObjectOrBuilder extends *
          * Objects which this object depends on: named edges in the dependency
          * graph.
      -   * Note: currently only valid if kind == "user_object".
      +   * Note: currently only valid if kind == "user_object" or "resource".
          * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -32,7 +32,7 @@ public interface SavedObjectOrBuilder extends *
          * Objects which this object depends on: named edges in the dependency
          * graph.
      -   * Note: currently only valid if kind == "user_object".
      +   * Note: currently only valid if kind == "user_object" or "resource".
          * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -42,7 +42,7 @@ public interface SavedObjectOrBuilder extends *
          * Objects which this object depends on: named edges in the dependency
          * graph.
      -   * Note: currently only valid if kind == "user_object".
      +   * Note: currently only valid if kind == "user_object" or "resource".
          * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; @@ -53,7 +53,7 @@ public interface SavedObjectOrBuilder extends *
          * Objects which this object depends on: named edges in the dependency
          * graph.
      -   * Note: currently only valid if kind == "user_object".
      +   * Note: currently only valid if kind == "user_object" or "resource".
          * 
      * * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference children = 1; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedUserObject.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedUserObject.java index f66874096f5..888f24bb867 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedUserObject.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedUserObject.java @@ -194,6 +194,9 @@ public org.tensorflow.proto.framework.VersionDefOrBuilder getVersionOrBuilder() private volatile java.lang.Object metadata_; /** *
      +   * Deprecated! At the time of deprecation, Keras was the only user of this
      +   * field, and its saving and loading code will be updated shortly.
+   * Please save your application-specific metadata to a separate file
          * Initialization-related metadata.
          * 
      * @@ -213,6 +216,9 @@ public java.lang.String getMetadata() { } /** *
      +   * Deprecated! At the time of deprecation, Keras was the only user of this
      +   * field, and its saving and loading code will be updated shortly.
+   * Please save your application-specific metadata to a separate file
          * Initialization-related metadata.
          * 
      * @@ -834,6 +840,9 @@ public org.tensorflow.proto.framework.VersionDefOrBuilder getVersionOrBuilder() private java.lang.Object metadata_ = ""; /** *
      +     * Deprecated! At the time of deprecation, Keras was the only user of this
      +     * field, and its saving and loading code will be updated shortly.
+     * Please save your application-specific metadata to a separate file
            * Initialization-related metadata.
            * 
      * @@ -853,6 +862,9 @@ public java.lang.String getMetadata() { } /** *
      +     * Deprecated! At the time of deprecation, Keras was the only user of this
      +     * field, and its saving and loading code will be updated shortly.
+     * Please save your application-specific metadata to a separate file
            * Initialization-related metadata.
            * 
      * @@ -873,6 +885,9 @@ public java.lang.String getMetadata() { } /** *
      +     * Deprecated! At the time of deprecation, Keras was the only user of this
      +     * field, and its saving and loading code will be updated shortly.
+     * Please save your application-specific metadata to a separate file
            * Initialization-related metadata.
            * 
      * @@ -890,6 +905,9 @@ public Builder setMetadata( } /** *
      +     * Deprecated! At the time of deprecation, Keras was the only user of this
      +     * field, and its saving and loading code will be updated shortly.
+     * Please save your application-specific metadata to a separate file
            * Initialization-related metadata.
            * 
      * @@ -903,6 +921,9 @@ public Builder clearMetadata() { } /** *
      +     * Deprecated! At the time of deprecation, Keras was the only user of this
      +     * field, and its saving and loading code will be updated shortly.
+     * Please save your application-specific metadata to a separate file
            * Initialization-related metadata.
            * 
      * diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedUserObjectOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedUserObjectOrBuilder.java index bd7cbadcbd3..91d00aa1861 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedUserObjectOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedUserObjectOrBuilder.java @@ -52,6 +52,9 @@ public interface SavedUserObjectOrBuilder extends /** *
      +   * Deprecated! At the time of deprecation, Keras was the only user of this
      +   * field, and its saving and loading code will be updated shortly.
+   * Please save your application-specific metadata to a separate file
          * Initialization-related metadata.
          * 
      * @@ -60,6 +63,9 @@ public interface SavedUserObjectOrBuilder extends java.lang.String getMetadata(); /** *
      +   * Deprecated! At the time of deprecation, Keras was the only user of this
      +   * field, and its saving and loading code will be updated shortly.
+   * Please save your application-specific metadata to a separate file
          * Initialization-related metadata.
          * 
      * diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SessionInfo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SessionInfo.java new file mode 100644 index 00000000000..f44e0f0e52d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SessionInfo.java @@ -0,0 +1,485 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/grappler/costs/op_performance_data.proto + +package org.tensorflow.proto.framework; + +/** + *
      + * Description of the session when an op is run.
      + * 
      + * + * Protobuf type {@code tensorflow.SessionInfo} + */ +public final class SessionInfo extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.SessionInfo) + SessionInfoOrBuilder { +private static final long serialVersionUID = 0L; + // Use SessionInfo.newBuilder() to construct. + private SessionInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SessionInfo() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new SessionInfo(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SessionInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + + intraOpParallelism_ = input.readInt64(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_SessionInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_SessionInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.SessionInfo.class, org.tensorflow.proto.framework.SessionInfo.Builder.class); + } + + public static final int INTRA_OP_PARALLELISM_FIELD_NUMBER = 1; + private long intraOpParallelism_; + /** + * int64 intra_op_parallelism = 1; + */ + public long getIntraOpParallelism() { + return intraOpParallelism_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (intraOpParallelism_ != 0L) { + output.writeInt64(1, intraOpParallelism_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (intraOpParallelism_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, intraOpParallelism_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + 
} + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.framework.SessionInfo)) { + return super.equals(obj); + } + org.tensorflow.proto.framework.SessionInfo other = (org.tensorflow.proto.framework.SessionInfo) obj; + + if (getIntraOpParallelism() + != other.getIntraOpParallelism()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INTRA_OP_PARALLELISM_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getIntraOpParallelism()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.framework.SessionInfo parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.SessionInfo parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.SessionInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.SessionInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.SessionInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.SessionInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.SessionInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.SessionInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.SessionInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.SessionInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.SessionInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.SessionInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.framework.SessionInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
      +   * Description of the session when an op is run.
      +   * 
      + * + * Protobuf type {@code tensorflow.SessionInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.SessionInfo) + org.tensorflow.proto.framework.SessionInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_SessionInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_SessionInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.SessionInfo.class, org.tensorflow.proto.framework.SessionInfo.Builder.class); + } + + // Construct using org.tensorflow.proto.framework.SessionInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + intraOpParallelism_ = 0L; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.framework.OpPerformanceDataProtos.internal_static_tensorflow_SessionInfo_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.framework.SessionInfo getDefaultInstanceForType() { + return org.tensorflow.proto.framework.SessionInfo.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.framework.SessionInfo build() { + org.tensorflow.proto.framework.SessionInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.framework.SessionInfo buildPartial() { + org.tensorflow.proto.framework.SessionInfo result = new org.tensorflow.proto.framework.SessionInfo(this); + result.intraOpParallelism_ = intraOpParallelism_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.framework.SessionInfo) { + return 
mergeFrom((org.tensorflow.proto.framework.SessionInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.framework.SessionInfo other) { + if (other == org.tensorflow.proto.framework.SessionInfo.getDefaultInstance()) return this; + if (other.getIntraOpParallelism() != 0L) { + setIntraOpParallelism(other.getIntraOpParallelism()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.framework.SessionInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.framework.SessionInfo) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private long intraOpParallelism_ ; + /** + * int64 intra_op_parallelism = 1; + */ + public long getIntraOpParallelism() { + return intraOpParallelism_; + } + /** + * int64 intra_op_parallelism = 1; + */ + public Builder setIntraOpParallelism(long value) { + + intraOpParallelism_ = value; + onChanged(); + return this; + } + /** + * int64 intra_op_parallelism = 1; + */ + public Builder clearIntraOpParallelism() { + + intraOpParallelism_ = 0L; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.SessionInfo) + } + + // @@protoc_insertion_point(class_scope:tensorflow.SessionInfo) + private static final org.tensorflow.proto.framework.SessionInfo DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.framework.SessionInfo(); + } + + public static org.tensorflow.proto.framework.SessionInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SessionInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SessionInfo(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.framework.SessionInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SessionInfoOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SessionInfoOrBuilder.java new file mode 100644 index 00000000000..43181b82317 --- /dev/null +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SessionInfoOrBuilder.java @@ -0,0 +1,14 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/grappler/costs/op_performance_data.proto + +package org.tensorflow.proto.framework; + +public interface SessionInfoOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.SessionInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * int64 intra_op_parallelism = 1; + */ + long getIntraOpParallelism(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SignatureDef.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SignatureDef.java index fe588917c86..73e8cf42672 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SignatureDef.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SignatureDef.java @@ -8,7 +8,7 @@ * SignatureDef defines the signature of a computation supported by a TensorFlow * graph. * For example, a model with two loss computations, sharing a single input, - * might have the following signature_def map. + * might have the following signature_def map, in a MetaGraphDef message. * Note that across the two SignatureDefs "loss_A" and "loss_B", the input key, * output key, and method_name are identical, and will be used by system(s) that * implement or rely upon this particular loss method. The output tensor names @@ -32,9 +32,9 @@ * tensor_shape: ... * } * } + * method_name: "some/package/compute_loss" * } * ... - * method_name: "some/package/compute_loss" * } * signature_def { * key: "loss_B" @@ -55,9 +55,9 @@ * tensor_shape: ... * } * } + * method_name: "some/package/compute_loss" * } * ... - * method_name: "some/package/compute_loss" * } *
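The new SessionInfo message generated above carries a single intra_op_parallelism field; a minimal round-trip sketch (the message is normally populated by the Grappler cost model, so the hand-built value here is illustration only):

    import org.tensorflow.proto.framework.SessionInfo;

    public class SessionInfoExample {
      public static void main(String[] args) throws Exception {
        SessionInfo info = SessionInfo.newBuilder()
            .setIntraOpParallelism(8)  // int64 intra_op_parallelism = 1;
            .build();
        // Serialize and parse back through the wire format.
        SessionInfo parsed = SessionInfo.parseFrom(info.toByteArray());
        System.out.println(parsed.getIntraOpParallelism());  // 8
      }
    }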
      * @@ -626,7 +626,7 @@ protected Builder newBuilderForType( * SignatureDef defines the signature of a computation supported by a TensorFlow * graph. * For example, a model with two loss computations, sharing a single input, - * might have the following signature_def map. + * might have the following signature_def map, in a MetaGraphDef message. * Note that across the two SignatureDefs "loss_A" and "loss_B", the input key, * output key, and method_name are identical, and will be used by system(s) that * implement or rely upon this particular loss method. The output tensor names @@ -650,9 +650,9 @@ protected Builder newBuilderForType( * tensor_shape: ... * } * } + * method_name: "some/package/compute_loss" * } * ... - * method_name: "some/package/compute_loss" * } * signature_def { * key: "loss_B" @@ -673,9 +673,9 @@ protected Builder newBuilderForType( * tensor_shape: ... * } * } + * method_name: "some/package/compute_loss" * } * ... - * method_name: "some/package/compute_loss" * } *
      * diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SpecializedType.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SpecializedType.java index 8af9cfa82cc..1237f97c862 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SpecializedType.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SpecializedType.java @@ -31,6 +31,14 @@ public enum SpecializedType * ST_TENSOR_LIST = 1; */ ST_TENSOR_LIST(1), + /** + *
      +   * "tensorflow::data::Optional" in the variant type registry.
      +   * 
      + * + * ST_OPTIONAL = 2; + */ + ST_OPTIONAL(2), UNRECOGNIZED(-1), ; @@ -50,6 +58,14 @@ public enum SpecializedType * ST_TENSOR_LIST = 1; */ public static final int ST_TENSOR_LIST_VALUE = 1; + /** + *
      +   * "tensorflow::data::Optional" in the variant type registry.
      +   * 
      + * + * ST_OPTIONAL = 2; + */ + public static final int ST_OPTIONAL_VALUE = 2; public final int getNumber() { @@ -72,6 +88,7 @@ public static SpecializedType forNumber(int value) { switch (value) { case 0: return ST_INVALID; case 1: return ST_TENSOR_LIST; + case 2: return ST_OPTIONAL; default: return null; } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/StructProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/StructProtos.java index f5b217be8b1..f2f04f9c9ed 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/StructProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/StructProtos.java @@ -117,21 +117,21 @@ public static void registerAllExtensions( "hapeProto\022#\n\005dtype\030\003 \001(\0162\024.tensorflow.Da" + "taType\022(\n\007minimum\030\004 \001(\0132\027.tensorflow.Ten" + "sorProto\022(\n\007maximum\030\005 \001(\0132\027.tensorflow.T" + - "ensorProto\"\264\003\n\rTypeSpecProto\022@\n\017type_spe" + + "ensorProto\"\250\003\n\rTypeSpecProto\022@\n\017type_spe" + "c_class\030\001 \001(\0162\'.tensorflow.TypeSpecProto" + ".TypeSpecClass\022/\n\ntype_state\030\002 \001(\0132\033.ten" + "sorflow.StructuredValue\022\034\n\024type_spec_cla" + - "ss_name\030\003 \001(\t\"\221\002\n\rTypeSpecClass\022\013\n\007UNKNO" + + "ss_name\030\003 \001(\t\"\205\002\n\rTypeSpecClass\022\013\n\007UNKNO" + "WN\020\000\022\026\n\022SPARSE_TENSOR_SPEC\020\001\022\027\n\023INDEXED_" + "SLICES_SPEC\020\002\022\026\n\022RAGGED_TENSOR_SPEC\020\003\022\025\n" + "\021TENSOR_ARRAY_SPEC\020\004\022\025\n\021DATA_DATASET_SPE" + "C\020\005\022\026\n\022DATA_ITERATOR_SPEC\020\006\022\021\n\rOPTIONAL_" + "SPEC\020\007\022\024\n\020PER_REPLICA_SPEC\020\010\022\021\n\rVARIABLE" + - "_SPEC\020\t\022\026\n\022ROW_PARTITION_SPEC\020\n\022\020\n\014NDARR" + - "AY_SPEC\020\013B\207\001\n\036org.tensorflow.proto.frame" + - "workB\014StructProtosP\001ZUgithub.com/tensorf" + - "low/tensorflow/tensorflow/go/core/protob" + - "uf/for_core_protos_go_protob\006proto3" + "_SPEC\020\t\022\026\n\022ROW_PARTITION_SPEC\020\n\"\004\010\013\020\013B\207\001" + + "\n\036org.tensorflow.proto.frameworkB\014Struct" + + "ProtosP\001ZUgithub.com/tensorflow/tensorfl" + + "ow/tensorflow/go/core/protobuf/for_core_" + + "protos_go_protob\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java index da8535cd530..10c64eeb41c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java @@ -200,14 +200,6 @@ public enum TypeSpecClass * ROW_PARTITION_SPEC = 10; */ ROW_PARTITION_SPEC(10), - /** - *
      -     * TF Numpy NDarray spec
      -     * 
      - * - * NDARRAY_SPEC = 11; - */ - NDARRAY_SPEC(11), UNRECOGNIZED(-1), ; @@ -295,14 +287,6 @@ public enum TypeSpecClass * ROW_PARTITION_SPEC = 10; */ public static final int ROW_PARTITION_SPEC_VALUE = 10; - /** - *
      -     * TF Numpy NDarray spec
      -     * 
      - * - * NDARRAY_SPEC = 11; - */ - public static final int NDARRAY_SPEC_VALUE = 11; public final int getNumber() { @@ -334,7 +318,6 @@ public static TypeSpecClass forNumber(int value) { case 8: return PER_REPLICA_SPEC; case 9: return VARIABLE_SPEC; case 10: return ROW_PARTITION_SPEC; - case 11: return NDARRAY_SPEC; default: return null; } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypesProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypesProtos.java index b575702424b..869330204e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypesProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypesProtos.java @@ -44,12 +44,12 @@ public static void registerAllExtensions( "_REF\020t\022\021\n\rDT_UINT16_REF\020u\022\025\n\021DT_COMPLEX1" + "28_REF\020v\022\017\n\013DT_HALF_REF\020w\022\023\n\017DT_RESOURCE" + "_REF\020x\022\022\n\016DT_VARIANT_REF\020y\022\021\n\rDT_UINT32_" + - "REF\020z\022\021\n\rDT_UINT64_REF\020{*5\n\017SpecializedT" + - "ype\022\016\n\nST_INVALID\020\000\022\022\n\016ST_TENSOR_LIST\020\001B" + - "\200\001\n\036org.tensorflow.proto.frameworkB\013Type" + - "sProtosP\001ZLgithub.com/tensorflow/tensorf" + - "low/tensorflow/go/core/framework/types_g" + - "o_proto\370\001\001b\006proto3" + "REF\020z\022\021\n\rDT_UINT64_REF\020{*F\n\017SpecializedT" + + "ype\022\016\n\nST_INVALID\020\000\022\022\n\016ST_TENSOR_LIST\020\001\022" + + "\017\n\013ST_OPTIONAL\020\002B\200\001\n\036org.tensorflow.prot" + + "o.frameworkB\013TypesProtosP\001ZLgithub.com/t" + + "ensorflow/tensorflow/tensorflow/go/core/" + + "framework/types_go_proto\370\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XEventMetadata.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XEventMetadata.java index 1085ab6ecce..f06bc4c25b7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XEventMetadata.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XEventMetadata.java @@ -7,7 +7,7 @@ *
        * Metadata for an XEvent, corresponds to an event type and is shared by
        * all XEvents with the same metadata_id.
      - * Next ID: 6
      + * Next ID: 7
        * 
      * * Protobuf type {@code tensorflow.profiler.XEventMetadata} @@ -26,6 +26,7 @@ private XEventMetadata() { displayName_ = ""; metadata_ = com.google.protobuf.ByteString.EMPTY; stats_ = java.util.Collections.emptyList(); + childId_ = emptyLongList(); } @java.lang.Override @@ -90,6 +91,27 @@ private XEventMetadata( input.readMessage(org.tensorflow.proto.profiler.XStat.parser(), extensionRegistry)); break; } + case 48: { + if (!((mutable_bitField0_ & 0x00000002) != 0)) { + childId_ = newLongList(); + mutable_bitField0_ |= 0x00000002; + } + childId_.addLong(input.readInt64()); + break; + } + case 50: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000002) != 0) && input.getBytesUntilLimit() > 0) { + childId_ = newLongList(); + mutable_bitField0_ |= 0x00000002; + } + while (input.getBytesUntilLimit() > 0) { + childId_.addLong(input.readInt64()); + } + input.popLimit(limit); + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -108,6 +130,9 @@ private XEventMetadata( if (((mutable_bitField0_ & 0x00000001) != 0)) { stats_ = java.util.Collections.unmodifiableList(stats_); } + if (((mutable_bitField0_ & 0x00000002) != 0)) { + childId_.makeImmutable(); // C + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -295,6 +320,41 @@ public org.tensorflow.proto.profiler.XStatOrBuilder getStatsOrBuilder( return stats_.get(index); } + public static final int CHILD_ID_FIELD_NUMBER = 6; + private com.google.protobuf.Internal.LongList childId_; + /** + *
      +   * XPlane.event_metadata map key for children events.
      +   * 
      + * + * repeated int64 child_id = 6; + */ + public java.util.List + getChildIdList() { + return childId_; + } + /** + *
      +   * XPlane.event_metadata map key for children events.
      +   * 
      + * + * repeated int64 child_id = 6; + */ + public int getChildIdCount() { + return childId_.size(); + } + /** + *
      +   * XPlane.event_metadata map key for children events.
      +   * 
      + * + * repeated int64 child_id = 6; + */ + public long getChildId(int index) { + return childId_.getLong(index); + } + private int childIdMemoizedSerializedSize = -1; + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -309,6 +369,7 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); if (id_ != 0L) { output.writeInt64(1, id_); } @@ -324,6 +385,13 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) for (int i = 0; i < stats_.size(); i++) { output.writeMessage(5, stats_.get(i)); } + if (getChildIdList().size() > 0) { + output.writeUInt32NoTag(50); + output.writeUInt32NoTag(childIdMemoizedSerializedSize); + } + for (int i = 0; i < childId_.size(); i++) { + output.writeInt64NoTag(childId_.getLong(i)); + } unknownFields.writeTo(output); } @@ -351,6 +419,20 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(5, stats_.get(i)); } + { + int dataSize = 0; + for (int i = 0; i < childId_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeInt64SizeNoTag(childId_.getLong(i)); + } + size += dataSize; + if (!getChildIdList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream + .computeInt32SizeNoTag(dataSize); + } + childIdMemoizedSerializedSize = dataSize; + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -376,6 +458,8 @@ public boolean equals(final java.lang.Object obj) { .equals(other.getMetadata())) return false; if (!getStatsList() .equals(other.getStatsList())) return false; + if (!getChildIdList() + .equals(other.getChildIdList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -400,6 +484,10 @@ public int hashCode() { hash = (37 * hash) + STATS_FIELD_NUMBER; hash = (53 * hash) + getStatsList().hashCode(); } + if (getChildIdCount() > 0) { + hash = (37 * hash) + CHILD_ID_FIELD_NUMBER; + hash = (53 * hash) + getChildIdList().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -499,7 +587,7 @@ protected Builder newBuilderForType( *
          * Metadata for an XEvent, corresponds to an event type and is shared by
          * all XEvents with the same metadata_id.
      -   * Next ID: 6
      +   * Next ID: 7
          * 
      * * Protobuf type {@code tensorflow.profiler.XEventMetadata} @@ -554,6 +642,8 @@ public Builder clear() { } else { statsBuilder_.clear(); } + childId_ = emptyLongList(); + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -594,6 +684,11 @@ public org.tensorflow.proto.profiler.XEventMetadata buildPartial() { } else { result.stats_ = statsBuilder_.build(); } + if (((bitField0_ & 0x00000002) != 0)) { + childId_.makeImmutable(); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.childId_ = childId_; onBuilt(); return result; } @@ -682,6 +777,16 @@ public Builder mergeFrom(org.tensorflow.proto.profiler.XEventMetadata other) { } } } + if (!other.childId_.isEmpty()) { + if (childId_.isEmpty()) { + childId_ = other.childId_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureChildIdIsMutable(); + childId_.addAll(other.childId_); + } + onChanged(); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1298,6 +1403,101 @@ public org.tensorflow.proto.profiler.XStat.Builder addStatsBuilder( } return statsBuilder_; } + + private com.google.protobuf.Internal.LongList childId_ = emptyLongList(); + private void ensureChildIdIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + childId_ = mutableCopy(childId_); + bitField0_ |= 0x00000002; + } + } + /** + *
      +     * XPlane.event_metadata map key for children events.
      +     * 
      + * + * repeated int64 child_id = 6; + */ + public java.util.List + getChildIdList() { + return ((bitField0_ & 0x00000002) != 0) ? + java.util.Collections.unmodifiableList(childId_) : childId_; + } + /** + *
      +     * XPlane.event_metadata map key for children events.
      +     * 
      + * + * repeated int64 child_id = 6; + */ + public int getChildIdCount() { + return childId_.size(); + } + /** + *
      +     * XPlane.event_metadata map key for children events.
      +     * 
      + * + * repeated int64 child_id = 6; + */ + public long getChildId(int index) { + return childId_.getLong(index); + } + /** + *
      +     * XPlane.event_metadata map key for children events.
      +     * 
      + * + * repeated int64 child_id = 6; + */ + public Builder setChildId( + int index, long value) { + ensureChildIdIsMutable(); + childId_.setLong(index, value); + onChanged(); + return this; + } + /** + *
      +     * XPlane.event_metadata map key for children events.
      +     * 
      + * + * repeated int64 child_id = 6; + */ + public Builder addChildId(long value) { + ensureChildIdIsMutable(); + childId_.addLong(value); + onChanged(); + return this; + } + /** + *
      +     * XPlane.event_metadata map key for children events.
      +     * 
      + * + * repeated int64 child_id = 6; + */ + public Builder addAllChildId( + java.lang.Iterable values) { + ensureChildIdIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, childId_); + onChanged(); + return this; + } + /** + *
      +     * XPlane.event_metadata map key for children events.
      +     * 
      + * + * repeated int64 child_id = 6; + */ + public Builder clearChildId() { + childId_ = emptyLongList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XEventMetadataOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XEventMetadataOrBuilder.java index 4f865d87b0e..3fbae52ad47 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XEventMetadataOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XEventMetadataOrBuilder.java @@ -109,4 +109,29 @@ public interface XEventMetadataOrBuilder extends */ org.tensorflow.proto.profiler.XStatOrBuilder getStatsOrBuilder( int index); + + /** + *
      +   * XPlane.event_metadata map key for children events.
      +   * 
+   *
+   * repeated int64 child_id = 6;
+   */
+  java.util.List<java.lang.Long> getChildIdList();
+  /**
+   * 
      +   * XPlane.event_metadata map key for children events.
      +   * 
+   *
+   * repeated int64 child_id = 6;
+   */
+  int getChildIdCount();
+  /**
+   * 
      +   * XPlane.event_metadata map key for children events.
      +   * 
      + * + * repeated int64 child_id = 6; + */ + long getChildId(int index); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XPlaneProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XPlaneProtos.java index 4da0ce0a22e..041babd2d79 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XPlaneProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/XPlaneProtos.java @@ -96,13 +96,14 @@ public static void registerAllExtensions( "ouble_value\030\002 \001(\001H\000\022\026\n\014uint64_value\030\003 \001(" + "\004H\000\022\025\n\013int64_value\030\004 \001(\003H\000\022\023\n\tstr_value\030" + "\005 \001(\tH\000\022\025\n\013bytes_value\030\006 \001(\014H\000\022\023\n\tref_va" + - "lue\030\007 \001(\004H\000B\007\n\005value\"}\n\016XEventMetadata\022\n" + - "\n\002id\030\001 \001(\003\022\014\n\004name\030\002 \001(\t\022\024\n\014display_name" + - "\030\004 \001(\t\022\020\n\010metadata\030\003 \001(\014\022)\n\005stats\030\005 \003(\0132" + - "\032.tensorflow.profiler.XStat\">\n\rXStatMeta" + - "data\022\n\n\002id\030\001 \001(\003\022\014\n\004name\030\002 \001(\t\022\023\n\013descri" + - "ption\030\003 \001(\tB2\n\035org.tensorflow.proto.prof" + - "ilerB\014XPlaneProtosP\001\370\001\001b\006proto3" + "lue\030\007 \001(\004H\000B\007\n\005value\"\217\001\n\016XEventMetadata\022" + + "\n\n\002id\030\001 \001(\003\022\014\n\004name\030\002 \001(\t\022\024\n\014display_nam" + + "e\030\004 \001(\t\022\020\n\010metadata\030\003 \001(\014\022)\n\005stats\030\005 \003(\013" + + "2\032.tensorflow.profiler.XStat\022\020\n\010child_id" + + "\030\006 \003(\003\">\n\rXStatMetadata\022\n\n\002id\030\001 \001(\003\022\014\n\004n" + + "ame\030\002 \001(\t\022\023\n\013description\030\003 \001(\tB2\n\035org.te" + + "nsorflow.proto.profilerB\014XPlaneProtosP\001\370" + + "\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -155,7 +156,7 @@ public static void registerAllExtensions( internal_static_tensorflow_profiler_XEventMetadata_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_profiler_XEventMetadata_descriptor, - new java.lang.String[] { "Id", "Name", "DisplayName", "Metadata", "Stats", }); + new java.lang.String[] { "Id", "Name", "DisplayName", "Metadata", "Stats", "ChildId", }); internal_static_tensorflow_profiler_XStatMetadata_descriptor = getDescriptor().getMessageTypes().get(6); internal_static_tensorflow_profiler_XStatMetadata_fieldAccessorTable = new diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/Event.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/Event.java index 9b30419c091..b43e146b202 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/Event.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/Event.java @@ -174,7 +174,7 @@ public enum WhatCase FILE_VERSION(3), GRAPH_DEF(4), SUMMARY(5), - LOG_MESSAGE(6), + @java.lang.Deprecated LOG_MESSAGE(6), SESSION_LOG(7), TAGGED_RUN_METADATA(8), META_GRAPH_DEF(9), @@ -354,24 +354,26 @@ public org.tensorflow.proto.framework.SummaryOrBuilder getSummaryOrBuilder() { public static final int LOG_MESSAGE_FIELD_NUMBER = 6; /** *
      -   * The user output a log message. Not all messages are logged, only ones
      -   * generated via the Python tensorboard_logging module.
      +   * The user output a log message. This was theoretically used by the defunct
      +   * tensorboard_logging module, which has since been removed; this field is
      +   * now deprecated and should not be used.
          * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public boolean hasLogMessage() { + @java.lang.Deprecated public boolean hasLogMessage() { return whatCase_ == 6; } /** *
      -   * The user output a log message. Not all messages are logged, only ones
      -   * generated via the Python tensorboard_logging module.
      +   * The user output a log message. This was theoretically used by the defunct
      +   * tensorboard_logging module, which has since been removed; this field is
      +   * now deprecated and should not be used.
          * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public org.tensorflow.proto.util.LogMessage getLogMessage() { + @java.lang.Deprecated public org.tensorflow.proto.util.LogMessage getLogMessage() { if (whatCase_ == 6) { return (org.tensorflow.proto.util.LogMessage) what_; } @@ -379,13 +381,14 @@ public org.tensorflow.proto.util.LogMessage getLogMessage() { } /** *
      -   * The user output a log message. Not all messages are logged, only ones
      -   * generated via the Python tensorboard_logging module.
      +   * The user output a log message. This was theoretically used by the defunct
      +   * tensorboard_logging module, which has since been removed; this field is
      +   * now deprecated and should not be used.
          * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public org.tensorflow.proto.util.LogMessageOrBuilder getLogMessageOrBuilder() { + @java.lang.Deprecated public org.tensorflow.proto.util.LogMessageOrBuilder getLogMessageOrBuilder() { if (whatCase_ == 6) { return (org.tensorflow.proto.util.LogMessage) what_; } @@ -1429,24 +1432,26 @@ public org.tensorflow.proto.framework.SummaryOrBuilder getSummaryOrBuilder() { org.tensorflow.proto.util.LogMessage, org.tensorflow.proto.util.LogMessage.Builder, org.tensorflow.proto.util.LogMessageOrBuilder> logMessageBuilder_; /** *
      -     * The user output a log message. Not all messages are logged, only ones
      -     * generated via the Python tensorboard_logging module.
      +     * The user output a log message. This was theoretically used by the defunct
      +     * tensorboard_logging module, which has since been removed; this field is
      +     * now deprecated and should not be used.
            * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public boolean hasLogMessage() { + @java.lang.Deprecated public boolean hasLogMessage() { return whatCase_ == 6; } /** *
      -     * The user output a log message. Not all messages are logged, only ones
      -     * generated via the Python tensorboard_logging module.
      +     * The user output a log message. This was theoretically used by the defunct
      +     * tensorboard_logging module, which has since been removed; this field is
      +     * now deprecated and should not be used.
            * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public org.tensorflow.proto.util.LogMessage getLogMessage() { + @java.lang.Deprecated public org.tensorflow.proto.util.LogMessage getLogMessage() { if (logMessageBuilder_ == null) { if (whatCase_ == 6) { return (org.tensorflow.proto.util.LogMessage) what_; @@ -1461,13 +1466,14 @@ public org.tensorflow.proto.util.LogMessage getLogMessage() { } /** *
      -     * The user output a log message. Not all messages are logged, only ones
      -     * generated via the Python tensorboard_logging module.
      +     * The user output a log message. This was theoretically used by the defunct
      +     * tensorboard_logging module, which has since been removed; this field is
      +     * now deprecated and should not be used.
            * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public Builder setLogMessage(org.tensorflow.proto.util.LogMessage value) { + @java.lang.Deprecated public Builder setLogMessage(org.tensorflow.proto.util.LogMessage value) { if (logMessageBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -1482,13 +1488,14 @@ public Builder setLogMessage(org.tensorflow.proto.util.LogMessage value) { } /** *
      -     * The user output a log message. Not all messages are logged, only ones
      -     * generated via the Python tensorboard_logging module.
      +     * The user output a log message. This was theoretically used by the defunct
      +     * tensorboard_logging module, which has since been removed; this field is
      +     * now deprecated and should not be used.
            * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public Builder setLogMessage( + @java.lang.Deprecated public Builder setLogMessage( org.tensorflow.proto.util.LogMessage.Builder builderForValue) { if (logMessageBuilder_ == null) { what_ = builderForValue.build(); @@ -1501,13 +1508,14 @@ public Builder setLogMessage( } /** *
      -     * The user output a log message. Not all messages are logged, only ones
      -     * generated via the Python tensorboard_logging module.
      +     * The user output a log message. This was theoretically used by the defunct
      +     * tensorboard_logging module, which has since been removed; this field is
      +     * now deprecated and should not be used.
            * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public Builder mergeLogMessage(org.tensorflow.proto.util.LogMessage value) { + @java.lang.Deprecated public Builder mergeLogMessage(org.tensorflow.proto.util.LogMessage value) { if (logMessageBuilder_ == null) { if (whatCase_ == 6 && what_ != org.tensorflow.proto.util.LogMessage.getDefaultInstance()) { @@ -1528,13 +1536,14 @@ public Builder mergeLogMessage(org.tensorflow.proto.util.LogMessage value) { } /** *
      -     * The user output a log message. Not all messages are logged, only ones
      -     * generated via the Python tensorboard_logging module.
      +     * The user output a log message. This was theoretically used by the defunct
      +     * tensorboard_logging module, which has since been removed; this field is
      +     * now deprecated and should not be used.
            * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public Builder clearLogMessage() { + @java.lang.Deprecated public Builder clearLogMessage() { if (logMessageBuilder_ == null) { if (whatCase_ == 6) { whatCase_ = 0; @@ -1552,24 +1561,26 @@ public Builder clearLogMessage() { } /** *
      -     * The user output a log message. Not all messages are logged, only ones
      -     * generated via the Python tensorboard_logging module.
      +     * The user output a log message. This was theoretically used by the defunct
      +     * tensorboard_logging module, which has since been removed; this field is
      +     * now deprecated and should not be used.
            * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public org.tensorflow.proto.util.LogMessage.Builder getLogMessageBuilder() { + @java.lang.Deprecated public org.tensorflow.proto.util.LogMessage.Builder getLogMessageBuilder() { return getLogMessageFieldBuilder().getBuilder(); } /** *
      -     * The user output a log message. Not all messages are logged, only ones
      -     * generated via the Python tensorboard_logging module.
      +     * The user output a log message. This was theoretically used by the defunct
      +     * tensorboard_logging module, which has since been removed; this field is
      +     * now deprecated and should not be used.
            * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ - public org.tensorflow.proto.util.LogMessageOrBuilder getLogMessageOrBuilder() { + @java.lang.Deprecated public org.tensorflow.proto.util.LogMessageOrBuilder getLogMessageOrBuilder() { if ((whatCase_ == 6) && (logMessageBuilder_ != null)) { return logMessageBuilder_.getMessageOrBuilder(); } else { @@ -1581,11 +1592,12 @@ public org.tensorflow.proto.util.LogMessageOrBuilder getLogMessageOrBuilder() { } /** *
      -     * The user output a log message. Not all messages are logged, only ones
      -     * generated via the Python tensorboard_logging module.
      +     * The user output a log message. This was theoretically used by the defunct
      +     * tensorboard_logging module, which has since been removed; this field is
      +     * now deprecated and should not be used.
            * 
      * - * .tensorflow.LogMessage log_message = 6; + * .tensorflow.LogMessage log_message = 6 [deprecated = true]; */ private com.google.protobuf.SingleFieldBuilderV3< org.tensorflow.proto.util.LogMessage, org.tensorflow.proto.util.LogMessage.Builder, org.tensorflow.proto.util.LogMessageOrBuilder> diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/EventOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/EventOrBuilder.java index dbe86dad114..c318fe32cb6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/EventOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/EventOrBuilder.java @@ -85,31 +85,34 @@ public interface EventOrBuilder extends /** *
      -   * The user output a log message. Not all messages are logged, only ones
      -   * generated via the Python tensorboard_logging module.
      +   * The user output a log message. This was theoretically used by the defunct
      +   * tensorboard_logging module, which has since been removed; this field is
      +   * now deprecated and should not be used.
          * 
    *
-   * .tensorflow.LogMessage log_message = 6;
+   * .tensorflow.LogMessage log_message = 6 [deprecated = true];
    */
-  boolean hasLogMessage();
+  @java.lang.Deprecated boolean hasLogMessage();
   /**
    * 
      -   * The user output a log message. Not all messages are logged, only ones
      -   * generated via the Python tensorboard_logging module.
      +   * The user output a log message. This was theoretically used by the defunct
      +   * tensorboard_logging module, which has since been removed; this field is
      +   * now deprecated and should not be used.
          * 
    *
-   * .tensorflow.LogMessage log_message = 6;
+   * .tensorflow.LogMessage log_message = 6 [deprecated = true];
    */
-  org.tensorflow.proto.util.LogMessage getLogMessage();
+  @java.lang.Deprecated org.tensorflow.proto.util.LogMessage getLogMessage();
   /**
    * 
      -   * The user output a log message. Not all messages are logged, only ones
      -   * generated via the Python tensorboard_logging module.
      +   * The user output a log message. This was theoretically used by the defunct
      +   * tensorboard_logging module, which has since been removed; this field is
      +   * now deprecated and should not be used.
          * 
    *
-   * .tensorflow.LogMessage log_message = 6;
+   * .tensorflow.LogMessage log_message = 6 [deprecated = true];
    */
-  org.tensorflow.proto.util.LogMessageOrBuilder getLogMessageOrBuilder();
+  @java.lang.Deprecated org.tensorflow.proto.util.LogMessageOrBuilder getLogMessageOrBuilder();
   /**
    * 
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/EventProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/EventProtos.java
      index 7259855e6f4..74fc5501c1f 100644
      --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/EventProtos.java
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/EventProtos.java
      @@ -65,43 +65,43 @@ public static void registerAllExtensions(
           java.lang.String[] descriptorData = {
             "\n tensorflow/core/util/event.proto\022\ntens" +
             "orflow\032\'tensorflow/core/framework/summar" +
      -      "y.proto\"\273\002\n\005Event\022\021\n\twall_time\030\001 \001(\001\022\014\n\004" +
      +      "y.proto\"\277\002\n\005Event\022\021\n\twall_time\030\001 \001(\001\022\014\n\004" +
             "step\030\002 \001(\003\022\026\n\014file_version\030\003 \001(\tH\000\022\023\n\tgr" +
             "aph_def\030\004 \001(\014H\000\022&\n\007summary\030\005 \001(\0132\023.tenso" +
      -      "rflow.SummaryH\000\022-\n\013log_message\030\006 \001(\0132\026.t" +
      -      "ensorflow.LogMessageH\000\022-\n\013session_log\030\007 " +
      -      "\001(\0132\026.tensorflow.SessionLogH\000\022<\n\023tagged_" +
      -      "run_metadata\030\010 \001(\0132\035.tensorflow.TaggedRu" +
      -      "nMetadataH\000\022\030\n\016meta_graph_def\030\t \001(\014H\000B\006\n" +
      -      "\004what\"\231\001\n\nLogMessage\022+\n\005level\030\001 \001(\0162\034.te" +
      -      "nsorflow.LogMessage.Level\022\017\n\007message\030\002 \001" +
      -      "(\t\"M\n\005Level\022\013\n\007UNKNOWN\020\000\022\r\n\tDEBUGGING\020\n\022" +
      -      "\010\n\004INFO\020\024\022\010\n\004WARN\020\036\022\t\n\005ERROR\020(\022\t\n\005FATAL\020" +
      -      "2\"\266\001\n\nSessionLog\0224\n\006status\030\001 \001(\0162$.tenso" +
      -      "rflow.SessionLog.SessionStatus\022\027\n\017checkp" +
      -      "oint_path\030\002 \001(\t\022\013\n\003msg\030\003 \001(\t\"L\n\rSessionS" +
      -      "tatus\022\026\n\022STATUS_UNSPECIFIED\020\000\022\t\n\005START\020\001" +
      -      "\022\010\n\004STOP\020\002\022\016\n\nCHECKPOINT\020\003\"6\n\021TaggedRunM" +
      -      "etadata\022\013\n\003tag\030\001 \001(\t\022\024\n\014run_metadata\030\002 \001" +
      -      "(\014\"$\n\016WatchdogConfig\022\022\n\ntimeout_ms\030\001 \001(\003" +
      -      "\"&\n\021RequestedExitCode\022\021\n\texit_code\030\001 \001(\005" +
      -      "\"\266\001\n\026WorkerHeartbeatRequest\0225\n\rshutdown_" +
      -      "mode\030\001 \001(\0162\036.tensorflow.WorkerShutdownMo" +
      -      "de\0223\n\017watchdog_config\030\002 \001(\0132\032.tensorflow" +
      -      ".WatchdogConfig\0220\n\texit_code\030\003 \001(\0132\035.ten" +
      -      "sorflow.RequestedExitCode\"\203\001\n\027WorkerHear" +
      -      "tbeatResponse\022/\n\rhealth_status\030\001 \001(\0162\030.t" +
      -      "ensorflow.WorkerHealth\022%\n\nworker_log\030\002 \003" +
      -      "(\0132\021.tensorflow.Event\022\020\n\010hostname\030\003 \001(\t*" +
      -      "[\n\014WorkerHealth\022\006\n\002OK\020\000\022\034\n\030RECEIVED_SHUT" +
      -      "DOWN_SIGNAL\020\001\022\022\n\016INTERNAL_ERROR\020\002\022\021\n\rSHU" +
      -      "TTING_DOWN\020\003*k\n\022WorkerShutdownMode\022\013\n\007DE" +
      -      "FAULT\020\000\022\022\n\016NOT_CONFIGURED\020\001\022\030\n\024WAIT_FOR_" +
      -      "COORDINATOR\020\002\022\032\n\026SHUTDOWN_AFTER_TIMEOUT\020" +
      -      "\003Bv\n\031org.tensorflow.proto.utilB\013EventPro" +
      -      "tosP\001ZGgithub.com/tensorflow/tensorflow/" +
      -      "tensorflow/go/core/util/event_go_proto\370\001" +
      -      "\001b\006proto3"
      +      "rflow.SummaryH\000\0221\n\013log_message\030\006 \001(\0132\026.t" +
      +      "ensorflow.LogMessageB\002\030\001H\000\022-\n\013session_lo" +
      +      "g\030\007 \001(\0132\026.tensorflow.SessionLogH\000\022<\n\023tag" +
      +      "ged_run_metadata\030\010 \001(\0132\035.tensorflow.Tagg" +
      +      "edRunMetadataH\000\022\030\n\016meta_graph_def\030\t \001(\014H" +
      +      "\000B\006\n\004what\"\241\001\n\nLogMessage\022+\n\005level\030\001 \001(\0162" +
      +      "\034.tensorflow.LogMessage.Level\022\017\n\007message" +
      +      "\030\002 \001(\t\"Q\n\005Level\022\013\n\007UNKNOWN\020\000\022\r\n\tDEBUGGIN" +
      +      "G\020\n\022\010\n\004INFO\020\024\022\010\n\004WARN\020\036\022\t\n\005ERROR\020(\022\t\n\005FA" +
      +      "TAL\0202\032\002\030\001:\002\030\001\"\266\001\n\nSessionLog\0224\n\006status\030\001" +
      +      " \001(\0162$.tensorflow.SessionLog.SessionStat" +
      +      "us\022\027\n\017checkpoint_path\030\002 \001(\t\022\013\n\003msg\030\003 \001(\t" +
      +      "\"L\n\rSessionStatus\022\026\n\022STATUS_UNSPECIFIED\020" +
      +      "\000\022\t\n\005START\020\001\022\010\n\004STOP\020\002\022\016\n\nCHECKPOINT\020\003\"6" +
      +      "\n\021TaggedRunMetadata\022\013\n\003tag\030\001 \001(\t\022\024\n\014run_" +
      +      "metadata\030\002 \001(\014\"$\n\016WatchdogConfig\022\022\n\ntime" +
      +      "out_ms\030\001 \001(\003\"&\n\021RequestedExitCode\022\021\n\texi" +
      +      "t_code\030\001 \001(\005\"\266\001\n\026WorkerHeartbeatRequest\022" +
      +      "5\n\rshutdown_mode\030\001 \001(\0162\036.tensorflow.Work" +
      +      "erShutdownMode\0223\n\017watchdog_config\030\002 \001(\0132" +
      +      "\032.tensorflow.WatchdogConfig\0220\n\texit_code" +
      +      "\030\003 \001(\0132\035.tensorflow.RequestedExitCode\"\203\001" +
      +      "\n\027WorkerHeartbeatResponse\022/\n\rhealth_stat" +
      +      "us\030\001 \001(\0162\030.tensorflow.WorkerHealth\022%\n\nwo" +
      +      "rker_log\030\002 \003(\0132\021.tensorflow.Event\022\020\n\010hos" +
      +      "tname\030\003 \001(\t*[\n\014WorkerHealth\022\006\n\002OK\020\000\022\034\n\030R" +
      +      "ECEIVED_SHUTDOWN_SIGNAL\020\001\022\022\n\016INTERNAL_ER" +
      +      "ROR\020\002\022\021\n\rSHUTTING_DOWN\020\003*k\n\022WorkerShutdo" +
      +      "wnMode\022\013\n\007DEFAULT\020\000\022\022\n\016NOT_CONFIGURED\020\001\022" +
      +      "\030\n\024WAIT_FOR_COORDINATOR\020\002\022\032\n\026SHUTDOWN_AF" +
      +      "TER_TIMEOUT\020\003Bv\n\031org.tensorflow.proto.ut" +
      +      "ilB\013EventProtosP\001ZGgithub.com/tensorflow" +
      +      "/tensorflow/tensorflow/go/core/util/even" +
      +      "t_go_proto\370\001\001b\006proto3"
           };
           descriptor = com.google.protobuf.Descriptors.FileDescriptor
             .internalBuildGeneratedFileFrom(descriptorData,
      diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/LogMessage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/LogMessage.java
      index 3a7dd55b4c8..4104327961a 100644
      --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/LogMessage.java
      +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/LogMessage.java
      @@ -6,11 +6,13 @@
       /**
        * 
        * Protocol buffer used for logging messages to the events file.
      + * This was theoretically used by the defunct tensorboard_logging module, which
      + * has been removed; this message is now deprecated and should not be used.
        * 
      * * Protobuf type {@code tensorflow.LogMessage} */ -public final class LogMessage extends +@java.lang.Deprecated public final class LogMessage extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:tensorflow.LogMessage) LogMessageOrBuilder { @@ -464,6 +466,8 @@ protected Builder newBuilderForType( /** *
          * Protocol buffer used for logging messages to the events file.
      +   * This was theoretically used by the defunct tensorboard_logging module, which
      +   * has been removed; this message is now deprecated and should not be used.
          * 
      * * Protobuf type {@code tensorflow.LogMessage} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/LogMessageOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/LogMessageOrBuilder.java index a19c21eebdf..bb57ab21916 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/LogMessageOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/util/LogMessageOrBuilder.java @@ -3,7 +3,7 @@ package org.tensorflow.proto.util; -public interface LogMessageOrBuilder extends +@java.lang.Deprecated public interface LogMessageOrBuilder extends // @@protoc_insertion_point(interface_extends:tensorflow.LogMessage) com.google.protobuf.MessageOrBuilder { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb index 5472f5f883985bf6f4f266c9cc29668e59833462..4c3e6bef038286ce7eb88eeb01798070a47682f6 100644 GIT binary patch delta 17266 zcmc(G349ynm3U`#`tu=cbXbo@GbrU{tJM>~Cox*bo*6$A*U^v0;B`L=0{1 zkByCriAYGU7n=A{QKo>FFI?C_$e<2jDg_}H_s4`70!d7^5}pD-F08>USZx#y;^PMq zsyGo6<4L|<{fbC2UXXbtoX2ljOeA?(Gct@aKMjmV%Jy+u9R-O;`$o9)NXv+NL&!SlXb8teQ!U=F1B+v zVv-mcUkb|Li96Z##;Xka6x-%NvB6tO$`iu46qo%f9K+^-51#lg zTMCOFX3r@073t784JZV^&6dE>(jpgB-OXMCuDjSX;rs(^ouyA3psc@(J)Jckpmc4# zi*>+j53=XO7w^JZ2k&IhHy%L&S3BllS4SWC9{Vx`_p#@|OZTt~lwJGSwJiIb37)x| z-B`!fkRmA&M&XPN_a~*TV$9=gv~|pGg1!4#A1wbKyQa=pPI7xWLnM~;vYyY?Ghbnt z8S^s+xcYnSwLTx83!2M8rLEk55Br6MfdzBn<|0BIaM|7LdX+iFeL29CkM3cgH?dEe z;inJ&bqibZ5IbFa3Jd!q{OTe0x>H`)7awN7a?0!4`5=3)x-LiVy2uUXihO`AV%S|? z<%;jKVV3ljiPk;eLyBMEVL|0Ar@|A>KYWnZ=^q4x|PCg`gr z?y!&)`jZJECJ%{;{&+%4N+BuAndKyvKQGIDPL-e=OoUsFLwm%;_DD#iKaqH--e)RT z9{iJGH?uVG`QrjR4iXBy^(i9Z>pf_U0eL}tdPh zCWE^~aUC<>cb4f*BQdnYe!sOA-dSdHvv(Q6*lJq;`Gz)W6dfi}bDqSBxD-S3kz76+ ziHSmj9~z5=sCoCV`j(*=bw~U9P0(_K#oNs{^0mCkU&2rD&HQ5g9T~!p3I1%pg|8)Q z??k3`#d5Vd<&Vkeimh0&g1Shtm|u*JQSS?0Y;d?hk{o&PVS~fw#-A+1qN8784*i%bUkp#$pjF z=RSa{V)Lbn&ui{8GS|UdPV;w}|4?ppnN7x1ocGju`G*a}wg!bB&lxbkv6MmQoutrF zPKrkaxj!VtDR;vC32_Jxa!g14b8LEeO68;K^ANYL`gBm@^r4q8f)+1v)YZ=g#mxmS z{i?VPez?!+fYyVg0IHVcyTSYzvB5bUV^iJ?o3CkP4#2iO=GqgUf&2EDzqr5X+bkh# z#bl@IuFAmtl69_d6E34q`X#N)W0^7k7n4007vmsyeKA=urrs>3Xr9w<;Eef^JbjGv{U2K#tUKUL57GxycTrjKyd}x9|CkTwJZt%pcOFQp zGw{!Ow7*I@?YR?!sxq?24AB1z%Tnek12t%<0nU*ndlSY;6GSPne$=)HY@KKN>Ry+X)-rnVJ*ENmO~~t zn`&>zW*W=!(hxd%S*yWF7LA)8^~?wS@GaJFmij5h=pgAF%HUS2AGV&yoIV>D`7n>P zo_vi^eU-IgE_-)AV7+O|Cpt8zu2o7U@o&RVm8C# z%dDqW&%;}us*lmg4uRTB0^2UL-dGwu-bTRTowi2hl6$O6n4;Oc0o7wxkMftRt(RX5 zj=e5BoL!n%to`crrhfg@ljpXei58;6et651_v`e-k4y93OFta+=AD^-_%!U^J!V%#-Qq+X_3hQ zFNXw-lhh_PUy>N0rz)fN)t+ocJbmOj2R!+qX$6$MWO9@YPU?zT^%T=O8Xny% zNNzi);O9o-q$(g96{E0mkFf>*{IqE)ynCN32pX0w|Z2WvL%Uie17vha<9?Khz>aQk?nmtlY6f;+|w-SEOhp|5ulZAJ*F%QEFj~1?}s3gYTNO(fa?RnW2Lu&yc z0V~b#nN7;##|nSvV?KqlLxnXbG|m?sD*Wmh%%DKll5C#{7pRmDXpHO#TMn~HAUV4^A-5-O+_nV)pdm&V}Qz@qTueG zn~Rj4n{6X*<69Qe0vpe_Ik`0{eVDNrE5Bqrj>Ry3!>rG%pF1z3?ono!kux=!g$MT7 zT$Lvp-5Ak~LnmyE*=pxC4li!El|~XLF%I2n0&=ZG90eH-1j;Op^iNGoRRqv2Nkqi$ z^G;CgAzM9Td@hfyI4&E}GI(I+)i(QF^ANw$c9ZdC%Lyh2AGB7Qvp6{4K5dKGN|{q* z-qimZ^HBLp2@Eqw{f$>M#5J4{!VxhBAkO&qzO`Y+^1wft~);h=W5+IeCxO zhlJsQ>&;&H@*v}^+sQE3(-a_P?=^VgaPItY7`CmpR72M>WBzpG0rw z@qT()kjgBz){z>{5L~^QGxTg)#ThniTF)6)uiMNSy4yG4{|NQ%dzY6dp%T(jAu5v8F=%Oo9U*TQ0hN9xi>K+U*mK#bo2G> z=`}Ov?o10hm52v#WU|GXilEE(0Vr zoTkdv%NS1knw>g_jRnREAa5G%C4E}Io-ilZFvLpn!l!%KGs$S%=jb0lmOrwIq z>)93?rS(#Mj2MZkXP|OvrUB{8p43Li&{JUj8bbM5Po%;_aj7ZP>4coQq3caaKiiHHqT060&XK9VTVq(^A 
zXgK}bS3r!yL`3H3C+vNOD=~IpJPpj72Jcj#KB!L}LcpMH2*L_SsEf&2i7}30xQB2Y z!HW>g<6wezO!OIh5}{tsLGrb>At5U3ji?XI(pVBl`i!gPWG@D8c{IbL&f|>qrw?JN zO^xb2z0plHpxQm2m6!uYYUh2}j&Q&}#5kfDGOH4kCNM`d((QEBlyFY{^m($0=du^# zowWomYd!G5-A?C}JDd87eDL~r zjb(8CH3d$T!Qdl#J~+Isz~@b!3G+}SiqU9F$G1tdbv!;J;A|aFM^Ndu?9jA09Qwpq zP8GbnG^n+#j!Ok0L@j7+3kx`OIPv@Gbgh>H3ij5WNN1R+tiqZ#D8=~riuNw3-eOrOW4{NF{m)E?k zpXs#%I#UUpnti6VMyMV_85kt_C3%^HEPTHx08dm~mO*TR#Z&OOE(q&Jh2iP_!A@^j zlO+0#K(o~o*uk>emRiS{uA%n)-r}!YsXeTdF_{)gia0S_FX_$j#1K=FV)uG!cow_y z8E0j0Jsm+8ZsP@8~vW- zn8`+>&gAIyl#kF_mCEg5WBo)EKiR;GQE?R0>CNMqsl^2PJl+xlSG^_EYtTU(vK!)x-LU?81pibM^_OO)zt}v&M!F%vKrinK*< za{Sc%XhjVu#Uy3YJ&eyrjIxM^HFd2I8#@XeD`(7!d4M2RnxIG6YIHCQ!CYs}1-IW? z$d%y%P#t0oJxOV{Zc&I*%~q*vHu{{ySK0)OrJx2{UN5W|O{utjn3Px7Cx*vDB9~9} z0uXvm3pMA+XIe)uY`xg0`z}l`-p%Ht2Z2*&Z+aog=ZqWLgBTR_Y}()}CF)#w@!Jju zymYzIuDW_xu7*@-SmHs0)q0HiPisB9NsZ&eppIIEULbI55WOI4}{C*_gMmOwynMOtFV|kKQPYj- z1MHoHP@6M9H^C=xTwO}0X0zx!tOmCSy=ioT;7CL9>a4}?y|K8w?oORDeM}DS)J%co zyQ(o<8dGy4GTxMFKjsQ~1>EOwyB73WutETf7v#Rw&=f0v%4nWN4^|YxyMHY9!4E?g zRi_wPUpXAS3+s|vYR&m&muALMmSnpeuycvK1t#AsE_I|1fgZo~M22It;uqS0v$NHc zI!4v0$8=g?#V5rpvN+Qpu$5HKm@$r0ouHm5t|XU)`#vdlm*f5qidg8zXnlVAP(+Xx3)t~^IX`WiMEOmNyxL2^2fKXuXW3M90V+Hm`)?YqrY8Ds@AW*CWP{DXvkMd@b{rNoRPB`GPw=n{I z zigMl&jS$awEHbp7)di1#z%4WQSGB=`54crkKlxV+dZRmzVj))jQLYYxZ*gULD!SLr zdPgC`$DE*kO{9%wnjLk@e;wso7_h(3x!8^#*z*DBvozs#DLRHAe!$h_`&Yfh>Lo?+ z@dsQx3=l_|6YK2gO@|!9n_n#R8KxJk5dMIxg)e-_J;q*Vg^8nFHKk^ql03?N@qFv^ zc2dxjlrT_zC%?%!W4^@>^{X5s?7m{<##N53?9>{(dDjbdnzVvDX()-_d%Bp0(3&5C z-;Rn9e@yTpA;u4i{5U2%>#0U@(cJ!V=b%jSFgVS$Yb|oBTQdN_SosQ76dQ}+aO z^E?&|p?RXn)Ur^b-(MuL8f`3!)dO@-X*C+|#CSr&lcDOb29(xl2rYtE^c@TH$pltQ z;V}HbVs(K`p)$mcjV0BSg*8IB)9MQ35fv~MbBY#s9R2y@NV_T+C)z<7PhfwF zkJCp84yTczUflh-abEDKPSFFyobZ%?9^}+3q{un-lh%dL#?IwDV_-Ii7H&=pHNG^_ z-FkK>m3ovMEVi2(!QxAliCA-3q_x95l7lj!@mWGT69;J5_4@{JvEi6}^(hmM8*0b_@ub-dIZ6fO&&?r!^=TqEh z;xR=)t4U|cfvQ)LgwR+bK?_s01*KJ}&Rl;VZf{dU?#tQDzBxNb_8LikudtO?1(qu# zZ#q8iVeSEOne#C7jIy@X88EOPv+(gUXBphQ+*!#SgagZ+TNKv{XPH%btJC@KyP31$ zr@wNZZCG`s8JZ6}*C{=}cK*)7oDRzmIZrQL^@L`lk>vtqE3Ld$H4R*Y-3)<)hnx2JnNZMW{kUn;S|-CwZtj>|Ev~xXANWp3p5&br%ou!_KoAqTGAfc^+YY z0w28XTzJCbgr)B|J1xo{)^*!v<_T!N$n}rRoyt!xa=BUdN}?Rz;@ZF#PiR&lrShtfzzgFx+9Gcb&@>^<8e`{{mW$!}3tbmk2w6j& z19+eplkdkaapgI3;cwZp7!HN35Y; zWc7b1Jc}=eHJe;P<*5OeG5PriY$Wf(s6ekoHV3#0Vu+5&TybWDw9;pvas7zg%T*FX z0=r?`i!O)J*gYaE9Y1r$n0h{)2y)}0#Mcqf==JNlRy7^ut05G^G&5FZ#-BJ7>|F6O zFumaVdbzKI9A_G51c8>i+-`AOrgvRj0=K>3y1x7)OBTt+45pznD^S+F==vq&MIEUU ztbWn}05RgN62-gAVDrGzU%2k|2i6@I?>zKmC}CRR_7_~sl&W94PA8{bXC+o-AYQg0 z|H?G}6;b}JKetCO9M`#51_Q6S+zUn%^;`m(a4fMjbM_dbG{cEnvKQrT$pS-5^D#H>_*5ou8IvwvCGt<+*XpiFjv+It0 z_Mt*Jz`8Ht_F`HgE~u5O{peY7yBXN%aQmR$;P%w``3+-ol3)y=Dm}C%Vl&nfqT1G7 zDSq+v4Tl`3V?8I9=A%=F;r&)>d{njO_gQ=Oj1_kW-fE}Nj6{Y{&!e;OMl=*$%V&Ha@yMp};JUuSVrx`eEa@Vo{ zd(2bLH@hEQ*ajxzMNQHpCRb0!gwaT7H40pezJt<3FFn*7?sTdT({`i!ds7BlcR&+` z9s}(38$45u%QpyHspL}MA`;?ps0Ih66Gc_vC!+WU4Vt_#x+-A}S7wku@KuJ!v-DqY zj-xUQ7)6P0QZ$O8GCBro;6lNkj$RM-e5ewXu_qQ*qhJb8!Wz>aFq<^+R=laaO09!)5q1C!Jb0)%yR(s8#=Wj4Jb|a-^9B_NhWZ4uaM1 z4wO=Xb9*%$V zf!!a7_K2exOyV2ZYUB#{G`Kt9g`k^b-d5fSy4x~CR~>I?G2GYYc1}HBKx`=Sn`uaa z`F%B`-9^l*4wbpKAinb?C#hB)$f^^LVEB*7QSBnxaSYbc*~G-EjOmX_l;R!<0IrRSwEhU|LIvCkd@mgXW2)|H%I7%h-tRGZ&&8ajTB;*+t!dJx z-#77gWP~Qm11jkQe)#l5n`g@R8eVJBNCXR!G4-QY?mW!|sUNL3rc+SVhzRjGt^t1p z8awnVX#|buMav3N1uPJnh7^SuzKxm+B`~O_ubU#D=vp)rgjuwybwm85G?q?L=$%Mw zNPGR1sBb3IbE>_DnnjhK8NlO8;}_%#O+7YaBo)(Xr)742dVLU)X^e}hCoMthE4yGs zM2HDK$w_J$r}e6&`ENevYG%L3K=m(OD?vKuDuO5L3LR6JjG!+E%46|3T>zrQ@Cfq> zqgjk8(0*!YRxwxrk~AJ&d`XQjovGpm_d 
z+rlvu;)lS%1!G8?XLxflf!J^`2b{rBR}d%?9!bCn5X=!msN7v80TLi*a)0;CtX8sY zY>M+$T|pJr&ZE2kNB`gd|6kAb4|VRj{i1z$Px9|a*mHg&vATu!jmI_gb3d+fyS<*! z!-psRdV}!H?k8@USW}IKj(}V5&q}SOxVXjd^|<^+0RiEypWb4AdW*2(BXqZm_U%He z(Tj*azYFax-XpB(m1LskJB9fAc-Ya4Ar{702E6{#7U3otWnmhNRvB-S; zL9`H|eQsL6AFZNCeuPTsn~$OpJ-8n&pikY4Jm`;zp8gQJhI$`FI(=gwT1*cSAkiw~eO0bNyN z);@_6NVtSveh}3O7f|5=Pl&!c83pL;2hr#v5>Ipc40Jt>%&wddi zr4c1dbXAnKw5UtFyHKcQ{w6V#O-n?T;)gA`$<++bS18&ad$46lvFKBI$%;& z(sF4XmLQhLNnGR(lS#KklCZw65K-R_p zd6dcMrP1b=tS2tezxR1O=)EcmSErdxswYrn#xC_Vl(^|=JdqyW;;Es{S9pre{X0D0 zwBC0AsLFm~!jl($=D2G;r|%T5po_!a`-H>hpTpiGF2SUi*L!P)o9V6f-b+k%ymy^j zxQ@Ou!TUqu_vYq_UXOdES4?q=_mTzXk8|Fgjl#pU^>*(>9_hBV`fvAsd!ISB&DXY0 zc!6g3`F?>u_S3$7zALD z9UVQ*AM#~HT}^H3qSL1N59Kc^r~5z5UzE=9pOwE@QgC5jIm5r|=faEh4L_bh-@Z&J zr#Fqqqs%&tH#Z4)&|7EV@rCAleFk2&*IfH9zGarVbW-u|LM)r z1+`kPK~9qr3so`OJAacDr;D0HUB)!rs7We>QR-4D(WUbDx{~b0t{(Pgmjtgu z$aSUUq!RFI;2yc>PVb|>ktc;W%Y|^wF&{=2VsA=@7}f&(@7cN*o_@aCodTo-`~ii! zC6xmS&ovs^n9_TRn13N5>LOt&O`|J6!=tC=ADqm{iBoXnx|V+tPD=HYFjHh*J6)x^ zYYYsCIFjkXN{hgpWMgkC-b><&tcxjGBi!Ib%S!Q7G8Gr~lp<@<)<)(m7(qm$<+|;c z6d5ipZ!yr0OB!V6=$w~b46mJeRu)cFujODZfO_j`O4p=xvI$ZzIK#$u4NyEml$_3P z$&e{DwzVXp>52*{FKTp|>KFG4fExc;5(hGq+$7~`c z4A8qLrRfsLgl!Y>4Qp#=ZCJ1d;mOOnv?kRg(2yl5$3}%F!Yq#+WmRqzI|^W^7}Kr6 zL*@rp*=bV4M@tVL)^Zux*fOMRurlO!Ze;k&QYPej_5dm>?hSM1tgKIkhmKj<6P8!}6FjoQ>8d`!1LKI)9oV+QNK z3OuT%l;>7IHeY;v)Fb0ew@^O2)cC9hyV|nxKr!}-i3H?Jt=bpB*qf2`UL^s);ejgk zI4T6!YJ^KND%vzVk+I06g55I?pBm^7}X1rYFw(P zG7@ME7}Ax|0XEjUbJ>R6AgNKj$g;QLR5HEbiwx{;=2)3bB~n_- zKv~hm_J~X}tn6VG7ZfpKIxVG%nvH{>xm`3XHsEb83^-9#d%(-GPP$b^OvJ$qHh#q{ zvqI$zyDtXg4QAL_H1fLOm^@&cKKySZYii9v9ED-Kv0XfZYA2+b@ztqjOEjpPt=jLv zjkcu%&X4dbwsJ;+AKUZk$0|PoZdi`dTDgV=2G83!@5U3-btF}dFXzUgy4p^SHm)5E zk3c?9!t>i%t<9?h7V*sOmT5@4WH=f3*eYA$rbS!yf^f?R9uK%q=6)v>SrHCt8}{P- zOjhr=tw=HwbP((Tw`M|0im<1N;f#vn63#;LfH#Q-=rx$|5S&a|d&X$XooG0!3qI>#O6Zz`1)1|t(R08 z0-J+2^u114L1y9ZvQnZ95-|{CxXpSBC-xGOm|*RP(;~yRwiMGemJ!$xj`f2(c7vGC zNyGrO=ux3nft{Cn1yVN*HEiv)=$wBqb(&34>zCubsv;{n4fM@gL!sA#5)4*= z@&m#Wz>rN~k8fgtELyX(VkbW>DSV3|qXY>&36@HbZZM_Mt4b2dK-I&c6(R|mO(>5E zbZxPGl+`m^Fi=08;W=As&hqv_2(;W_FWF#?b%2%+WrEtc$+7|gOK|t7Qi%K39%Ih{ z`-A>zcX5?rhYr_m1MFEC1*+Mo)CB9me>`swF-0Do8lo64fH9KQtwV{GB>hT`^okqu z`Oap%KCl(^vPuSQkc*{9_YPKb1h*YTBcs0ZKFR$CJgveaWL~yd#c_hL# z9aJ=eZ9l=2DR}|Ua0H83t8{>#vAnk|E`q+YK3GK$Jmx7~1q#}I42f|~7~FDkL3t~u zoWsMJ*~_kSt@}5CbZxh-Z*O*el8q z@CgZe73h0eLV>w~9t_$Dk|zNlKpTUX zz~Cw?A+uRoR)t;UfFq*{_y|zJ0RWbRR8>(SkbqwU>XFA#7M`mtM?s2b8*wmA@$gRf z!+Pxi^xShDYJXcc9*eW>YC~dHm4IbrmygQ*}BS@f{ob#*#DL8o0Y zcRJN2eq6wwHmy(9cONcq7}!u&Zf+>6Xg>#iSVDI#si-u6v#6p*pnubff^_(Na&_zxuUS?U0(5!U-&tFY)!>n^gV$dSyQoQY->}Vbkvljo=xYVGL5B`o_g>E z{j{@U%s|(L6=v6kfz5BacNGE0boq?RG4z*bS1zS1W>k9239XfvEztE#214OER2 zo~0iTRIN3aTv0`ejE@i=*TJb4NLc`vb9wM~Kz3d&0-AQGrNAg$tiyGe<)Z4U{0%D& zRr`1wcmEeoRPG9s;fqCC2~@aRWWkWt0)x6isH9E*DO4NF?e{Dp?DwpTa0?_j zClgqKX2ZOlwV`%}_)K&$wX`T(KH7Swr8$%;K)S=01!E{V6~d2Y;CvJ~`&2g+NXyaA zLh$YcU?FC_k}M-&KSX-wBnP}%3=m`O!#Qy9Qw}ReRUHE zoTb4{3s$*W0FkLd5u>fA)8@~8aF+CJFmz#|oA5R_9z9&Wm4NlBxPJ7`3E;8>gs{}* zoN-hOVW4sVqM&lDrga#NUwV`4m=l8Dz}FsQwaCn)^`)I32xE(#T@#JWqN!)f>x{8& zqP(88K)wQ5;8@5yhp|hmXya-}qJS+~cwPrs%#so|2UiZV5o9e^3pm!~IZP6Py0j80 ztAMR0jVJMF%K}`^=b(@LRTzgzJAJ+wjavt_=?aU-WE0toIQ4%|Y6Fz37W2sn<)Qm* zm@)Aq2$Nyfp|5->Tv+*naP))O9|={Jk)!TQ^iVB|<7gga=+uj}lDyKC(wTGT-S@@qsE9qn-k{KHd5M2tMDC8CWN90xO+ zj*l5UMDp9Jby8j=R$W-JW#O*@7mTUpz%#^ocn88&N0|?fHjcJjDfY7kyEJ%O?czD? 
zd12(iV6sr*lX05PeAW&lj~O4(;995}ZJbiDQebe=aFFePw!G6#GI^q{bqggRdF9<(}B)@4G42yg|5F&d^9)&FzQpnhW@DN?r|A{R2a6Yd5eyl(h3U623smWdM`lP%PTG zWq6bahDCjNZGMRnRsc#@ai+ClLIV<)@mElz=4%SZ2{OLpD<{?gbr`eGa}s<(bIOTz z!(!HX=759@bjYbC)(xh3zN{DsC zV%ARyu|5*1UTtgsHL3nJsm{rEr>==XCCon^_?lF=j}*@%Uz6(p&q(!e$?4R3i|B?{ z#rgs_#^`Q-L}8y8LzB+Vz>iP4+cgVR@HXWRXGIR>4#^GD2|q7yY=jeYIFqml_=VQe zdHx+La5#2=!)c4CkJ;@LtF-Lm*uG;|N-?O!bol;`9z5(08BilxXF{-^2F+;xkdYq& z*lg%XMV0+kPC>+5C7<<46sXU>b0d(bQ;bu!P=Q@S3u2#DC_o(<86e3xyTheQ!NZMk zB+5@)HIhgrF#A}5HF@XoY6oV5!(r<*n5n@BurJ8?iK}(kWuN~=k8^tPUC2Y|eYA9Ea1T9g2eiPs?ZG&W z?hKBj?`{vyri*q3Yw467!L8Q4Cw2rEnai&Wx^>fA7P{JR-c=i-Rp>tWJi9scGCg-{ zXr^$hnVT9K?=pWfEp%Hk+U=*kvqS63fq~6Jr4J`ZT~IX#TIoC9P=tOqJ5+tcx=?XN z8!mAk_ojM!12ft4G4wy?g~pIbQAbJ!!cfuz%W=8$8b3!)DQcjZy&9x5=Y(qMcP4~7 zX!YEXhCX-GYvzVF3#%!f7pfHs) Date: Fri, 11 Jun 2021 12:10:45 -0400 Subject: [PATCH 36/60] Skip implementation-less TF_InitKernel --- .../java/org/tensorflow/internal/c_api/presets/tensorflow.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java index b6924ff1dc4..6cb3be62eb7 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java @@ -383,7 +383,8 @@ public void map(InfoMap infoMap) { .put( new Info( "TF_ShapeInferenceContextDimValueKnown", - "TFE_NewTensorHandle(const tensorflow::Tensor&, TF_Status*)") + "TFE_NewTensorHandle(const tensorflow::Tensor&, TF_Status*)", + "TF_InitKernel") .skip()); } } From b997f12e0b54ff6175bd35af43061841c706685f Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Fri, 11 Jun 2021 15:03:57 -0400 Subject: [PATCH 37/60] Upgrade TF version in current snapshots --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 39ae3bbd5f5..2e0d071d7b6 100644 --- a/README.md +++ b/README.md @@ -146,7 +146,7 @@ This table shows the mapping between different version of TensorFlow for Java an | 0.2.0 | 2.3.1 | | 0.3.0 | 2.4.1 | | 0.3.1 | 2.4.1 | -| 0.4.0-SNAPSHOT | 2.4.1 +| 0.4.0-SNAPSHOT | 2.5.0 ## How to Contribute? From 031a0c16a2bffde7cdf8ed4e377dbf325b1de16c Mon Sep 17 00:00:00 2001 From: Adam Pocock Date: Fri, 11 Jun 2021 15:07:40 -0400 Subject: [PATCH 38/60] SavedModelBundle leak fix (#335) --- .../java/org/tensorflow/SavedModelBundle.java | 43 +++++------ .../internal/c_api/AbstractTF_Graph.java | 75 +++++++++++-------- 2 files changed, 62 insertions(+), 56 deletions(-) diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java index 3a6433701e6..a60a0dd9b22 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java @@ -1,21 +1,20 @@ /* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= - */ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= +*/ package org.tensorflow; -import static org.tensorflow.internal.c_api.global.tensorflow.TF_LoadSessionFromSavedModel; import static org.tensorflow.internal.c_api.global.tensorflow.TF_NewGraph; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetConfig; @@ -34,7 +33,6 @@ import java.util.Map.Entry; import java.util.stream.Collectors; import org.bytedeco.javacpp.BytePointer; -import org.bytedeco.javacpp.PointerPointer; import org.bytedeco.javacpp.PointerScope; import org.tensorflow.exceptions.TensorFlowException; import org.tensorflow.internal.c_api.TF_Buffer; @@ -510,21 +508,18 @@ private static SavedModelBundle load( TF_Graph graph = TF_NewGraph(); TF_Buffer metagraphDef = TF_Buffer.newBuffer(); TF_Session session = - TF_LoadSessionFromSavedModel( - opts, - runOpts, - new BytePointer(exportDir), - new PointerPointer(tags), - tags.length, - graph, - metagraphDef, - status); + TF_Session.loadSessionFromSavedModel( + opts, runOpts, exportDir, tags, graph, metagraphDef, status); status.throwExceptionIfNotOK(); // handle the result try { bundle = fromHandle(graph, session, MetaGraphDef.parseFrom(metagraphDef.dataAsByteBuffer())); + // Only retain the references if the metagraphdef parses correctly, + // otherwise allow the pointer scope to clean them up + graph.retainReference(); + session.retainReference(); } catch (InvalidProtocolBufferException e) { throw new TensorFlowException("Cannot parse MetaGraphDef protocol buffer", e); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/AbstractTF_Graph.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/AbstractTF_Graph.java index ffc371e95e7..3cc7624ab71 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/AbstractTF_Graph.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/AbstractTF_Graph.java @@ -1,19 +1,19 @@ /* - Copyright 2019 The TensorFlow Authors. All Rights Reserved. +Copyright 2019 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= - */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= +*/ package org.tensorflow.internal.c_api; @@ -25,29 +25,40 @@ @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) public abstract class AbstractTF_Graph extends Pointer { - protected static class DeleteDeallocator extends TF_Graph implements Pointer.Deallocator { - DeleteDeallocator(TF_Graph s) { super(s); } - @Override public void deallocate() { if (!isNull()) TF_DeleteGraph(this); setNull(); } + protected static class DeleteDeallocator extends TF_Graph implements Pointer.Deallocator { + DeleteDeallocator(TF_Graph s) { + super(s); } - public AbstractTF_Graph(Pointer p) { super(p); } - - /** - * Calls TF_NewGraph(), and registers a deallocator. - * @return TF_Graph created. Do not call TF_DeleteGraph() on it. - */ - public static TF_Graph newGraph() { - TF_Graph g = TF_NewGraph(); - if (g != null) { - g.deallocator(new DeleteDeallocator(g)); - } - return g; + @Override + public void deallocate() { + if (!isNull()) TF_DeleteGraph(this); + setNull(); } + } - /** - * Calls the deallocator, if registered, otherwise has no effect. - */ - public void delete() { - deallocate(); + public AbstractTF_Graph(Pointer p) { + super(p); + } + + /** + * Calls TF_NewGraph(), and registers a deallocator. + * + *
<p>
      Note {@link org.tensorflow.Graph} will call TF_DeleteGraph on close, so do not use this + * method when constructing a reference for use inside a {@code Graph} object. + * + * @return TF_Graph created. Do not call TF_DeleteGraph() on it. + */ + public static TF_Graph newGraph() { + TF_Graph g = TF_NewGraph(); + if (g != null) { + g.deallocator(new DeleteDeallocator(g)); } + return g; + } + + /** Calls the deallocator, if registered, otherwise has no effect. */ + public void delete() { + deallocate(); + } } From b38cc0466708ef6b4078fc3cfcd638c98e6f0c0d Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Tue, 15 Jun 2021 19:18:44 -0700 Subject: [PATCH 39/60] Use OP_NAME constant instead of hard coding (#328) --- .../tensorflow/op/audio/AudioSpectrogram.java | 2 +- .../org/tensorflow/op/audio/DecodeWav.java | 2 +- .../org/tensorflow/op/audio/EncodeWav.java | 2 +- .../java/org/tensorflow/op/audio/Mfcc.java | 2 +- .../org/tensorflow/op/bitwise/BitwiseAnd.java | 2 +- .../org/tensorflow/op/bitwise/BitwiseOr.java | 2 +- .../org/tensorflow/op/bitwise/BitwiseXor.java | 2 +- .../org/tensorflow/op/bitwise/Invert.java | 2 +- .../org/tensorflow/op/bitwise/LeftShift.java | 2 +- .../org/tensorflow/op/bitwise/RightShift.java | 2 +- .../op/cluster/KMC2ChainInitialization.java | 2 +- .../cluster/KmeansPlusPlusInitialization.java | 2 +- .../tensorflow/op/collective/AllReduce.java | 2 +- .../op/collective/BroadcastRecv.java | 2 +- .../op/collective/BroadcastSend.java | 2 +- .../org/tensorflow/op/collective/Gather.java | 2 +- .../tensorflow/op/collective/GatherV2.java | 2 +- .../org/tensorflow/op/collective/Reduce.java | 2 +- .../tensorflow/op/collective/ReduceV2.java | 2 +- .../java/org/tensorflow/op/core/Abort.java | 2 +- .../gen/java/org/tensorflow/op/core/All.java | 2 +- .../gen/java/org/tensorflow/op/core/Any.java | 2 +- .../org/tensorflow/op/core/AssertThat.java | 2 +- .../java/org/tensorflow/op/core/Assign.java | 2 +- .../org/tensorflow/op/core/AssignAdd.java | 2 +- .../op/core/AssignAddVariableOp.java | 2 +- .../org/tensorflow/op/core/AssignSub.java | 2 +- .../op/core/AssignSubVariableOp.java | 2 +- .../tensorflow/op/core/AssignVariableOp.java | 2 +- .../java/org/tensorflow/op/core/Barrier.java | 2 +- .../org/tensorflow/op/core/BarrierClose.java | 2 +- .../op/core/BarrierIncompleteSize.java | 2 +- .../tensorflow/op/core/BarrierInsertMany.java | 2 +- .../tensorflow/op/core/BarrierReadySize.java | 2 +- .../tensorflow/op/core/BarrierTakeMany.java | 2 +- .../java/org/tensorflow/op/core/Batch.java | 2 +- .../org/tensorflow/op/core/BatchToSpace.java | 2 +- .../tensorflow/op/core/BatchToSpaceNd.java | 2 +- .../java/org/tensorflow/op/core/Bitcast.java | 2 +- .../op/core/BroadcastDynamicShape.java | 2 +- .../op/core/BroadcastGradientArgs.java | 2 +- .../org/tensorflow/op/core/BroadcastTo.java | 2 +- .../org/tensorflow/op/core/Bucketize.java | 2 +- .../org/tensorflow/op/core/ClipByValue.java | 2 +- .../tensorflow/op/core/CollectiveGather.java | 2 +- .../java/org/tensorflow/op/core/Concat.java | 2 +- .../tensorflow/op/core/ConsumeMutexLock.java | 2 +- .../tensorflow/op/core/ControlTrigger.java | 2 +- .../gen/java/org/tensorflow/op/core/Copy.java | 2 +- .../java/org/tensorflow/op/core/CopyHost.java | 2 +- .../org/tensorflow/op/core/CountUpTo.java | 2 +- .../org/tensorflow/op/core/DecodeProto.java | 2 +- .../java/org/tensorflow/op/core/DeepCopy.java | 2 +- .../op/core/DeleteSessionTensor.java | 2 +- .../tensorflow/op/core/DestroyResourceOp.java | 2 +- .../op/core/DestroyTemporaryVariable.java | 2 +- 
.../org/tensorflow/op/core/DeviceIndex.java | 2 +- .../tensorflow/op/core/DummyMemoryCache.java | 2 +- .../tensorflow/op/core/DynamicPartition.java | 2 +- .../org/tensorflow/op/core/DynamicStitch.java | 2 +- .../org/tensorflow/op/core/EditDistance.java | 2 +- .../java/org/tensorflow/op/core/Empty.java | 2 +- .../tensorflow/op/core/EmptyTensorList.java | 2 +- .../tensorflow/op/core/EmptyTensorMap.java | 2 +- .../org/tensorflow/op/core/EncodeProto.java | 2 +- .../org/tensorflow/op/core/EnsureShape.java | 2 +- .../java/org/tensorflow/op/core/Enter.java | 2 +- .../gen/java/org/tensorflow/op/core/Exit.java | 2 +- .../org/tensorflow/op/core/ExpandDims.java | 2 +- .../op/core/ExtractVolumePatches.java | 2 +- .../gen/java/org/tensorflow/op/core/Fill.java | 2 +- .../org/tensorflow/op/core/Fingerprint.java | 2 +- .../java/org/tensorflow/op/core/Gather.java | 2 +- .../java/org/tensorflow/op/core/GatherNd.java | 2 +- .../tensorflow/op/core/GetSessionHandle.java | 2 +- .../tensorflow/op/core/GetSessionTensor.java | 2 +- .../tensorflow/op/core/GuaranteeConst.java | 2 +- .../org/tensorflow/op/core/HashTable.java | 2 +- .../op/core/HistogramFixedWidth.java | 2 +- .../java/org/tensorflow/op/core/Identity.java | 2 +- .../org/tensorflow/op/core/IdentityN.java | 2 +- .../tensorflow/op/core/ImmutableConst.java | 2 +- .../tensorflow/op/core/InitializeTable.java | 2 +- .../op/core/InitializeTableFromTextFile.java | 2 +- .../org/tensorflow/op/core/InplaceAdd.java | 2 +- .../org/tensorflow/op/core/InplaceSub.java | 2 +- .../org/tensorflow/op/core/InplaceUpdate.java | 2 +- .../op/core/IsVariableInitialized.java | 2 +- .../tensorflow/op/core/KthOrderStatistic.java | 2 +- .../java/org/tensorflow/op/core/LinSpace.java | 2 +- .../tensorflow/op/core/LookupTableExport.java | 2 +- .../tensorflow/op/core/LookupTableFind.java | 2 +- .../tensorflow/op/core/LookupTableImport.java | 2 +- .../tensorflow/op/core/LookupTableInsert.java | 2 +- .../tensorflow/op/core/LookupTableRemove.java | 2 +- .../tensorflow/op/core/LookupTableSize.java | 2 +- .../java/org/tensorflow/op/core/LoopCond.java | 2 +- .../org/tensorflow/op/core/LowerBound.java | 2 +- .../org/tensorflow/op/core/MakeUnique.java | 2 +- .../java/org/tensorflow/op/core/MapClear.java | 2 +- .../tensorflow/op/core/MapIncompleteSize.java | 2 +- .../java/org/tensorflow/op/core/MapPeek.java | 2 +- .../java/org/tensorflow/op/core/MapSize.java | 2 +- .../java/org/tensorflow/op/core/MapStage.java | 2 +- .../org/tensorflow/op/core/MapUnstage.java | 2 +- .../tensorflow/op/core/MapUnstageNoKey.java | 2 +- .../gen/java/org/tensorflow/op/core/Max.java | 2 +- .../java/org/tensorflow/op/core/Merge.java | 2 +- .../gen/java/org/tensorflow/op/core/Min.java | 2 +- .../org/tensorflow/op/core/MirrorPad.java | 2 +- .../org/tensorflow/op/core/MirrorPadGrad.java | 2 +- .../tensorflow/op/core/MlirPassthroughOp.java | 2 +- .../op/core/MutableDenseHashTable.java | 2 +- .../tensorflow/op/core/MutableHashTable.java | 2 +- .../op/core/MutableHashTableOfTensors.java | 2 +- .../java/org/tensorflow/op/core/Mutex.java | 2 +- .../org/tensorflow/op/core/MutexLock.java | 2 +- .../org/tensorflow/op/core/NcclAllReduce.java | 2 +- .../org/tensorflow/op/core/NcclBroadcast.java | 2 +- .../org/tensorflow/op/core/NcclReduce.java | 2 +- .../org/tensorflow/op/core/NextIteration.java | 2 +- .../gen/java/org/tensorflow/op/core/NoOp.java | 2 +- .../java/org/tensorflow/op/core/OneHot.java | 2 +- .../java/org/tensorflow/op/core/OnesLike.java | 2 +- .../tensorflow/op/core/OrderedMapClear.java | 2 +- 
.../op/core/OrderedMapIncompleteSize.java | 2 +- .../tensorflow/op/core/OrderedMapPeek.java | 2 +- .../tensorflow/op/core/OrderedMapSize.java | 2 +- .../tensorflow/op/core/OrderedMapStage.java | 2 +- .../tensorflow/op/core/OrderedMapUnstage.java | 2 +- .../op/core/OrderedMapUnstageNoKey.java | 2 +- .../gen/java/org/tensorflow/op/core/Pad.java | 2 +- .../tensorflow/op/core/ParallelConcat.java | 2 +- .../op/core/ParallelDynamicStitch.java | 2 +- .../org/tensorflow/op/core/Placeholder.java | 2 +- .../op/core/PlaceholderWithDefault.java | 2 +- .../java/org/tensorflow/op/core/Print.java | 2 +- .../gen/java/org/tensorflow/op/core/Prod.java | 2 +- .../tensorflow/op/core/QuantizedReshape.java | 2 +- .../java/org/tensorflow/op/core/Range.java | 2 +- .../gen/java/org/tensorflow/op/core/Rank.java | 2 +- .../tensorflow/op/core/ReadVariableOp.java | 2 +- .../gen/java/org/tensorflow/op/core/Recv.java | 2 +- .../org/tensorflow/op/core/ReduceAll.java | 2 +- .../org/tensorflow/op/core/ReduceAny.java | 2 +- .../org/tensorflow/op/core/ReduceMax.java | 2 +- .../org/tensorflow/op/core/ReduceMin.java | 2 +- .../org/tensorflow/op/core/ReduceProd.java | 2 +- .../org/tensorflow/op/core/ReduceSum.java | 2 +- .../java/org/tensorflow/op/core/RefEnter.java | 2 +- .../java/org/tensorflow/op/core/RefExit.java | 2 +- .../org/tensorflow/op/core/RefIdentity.java | 2 +- .../java/org/tensorflow/op/core/RefMerge.java | 2 +- .../tensorflow/op/core/RefNextIteration.java | 2 +- .../org/tensorflow/op/core/RefSelect.java | 2 +- .../org/tensorflow/op/core/RefSwitch.java | 2 +- .../java/org/tensorflow/op/core/Reshape.java | 2 +- .../tensorflow/op/core/ResourceCountUpTo.java | 2 +- .../tensorflow/op/core/ResourceGather.java | 2 +- .../tensorflow/op/core/ResourceGatherNd.java | 2 +- .../op/core/ResourceScatterAdd.java | 2 +- .../op/core/ResourceScatterDiv.java | 2 +- .../op/core/ResourceScatterMax.java | 2 +- .../op/core/ResourceScatterMin.java | 2 +- .../op/core/ResourceScatterMul.java | 2 +- .../op/core/ResourceScatterNdAdd.java | 2 +- .../op/core/ResourceScatterNdMax.java | 2 +- .../op/core/ResourceScatterNdMin.java | 2 +- .../op/core/ResourceScatterNdSub.java | 2 +- .../op/core/ResourceScatterNdUpdate.java | 2 +- .../op/core/ResourceScatterSub.java | 2 +- .../op/core/ResourceScatterUpdate.java | 2 +- .../op/core/ResourceStridedSliceAssign.java | 2 +- .../java/org/tensorflow/op/core/Reverse.java | 2 +- .../tensorflow/op/core/ReverseSequence.java | 2 +- .../gen/java/org/tensorflow/op/core/Roll.java | 2 +- .../org/tensorflow/op/core/ScatterAdd.java | 2 +- .../org/tensorflow/op/core/ScatterDiv.java | 2 +- .../org/tensorflow/op/core/ScatterMax.java | 2 +- .../org/tensorflow/op/core/ScatterMin.java | 2 +- .../org/tensorflow/op/core/ScatterMul.java | 2 +- .../org/tensorflow/op/core/ScatterNd.java | 2 +- .../org/tensorflow/op/core/ScatterNdAdd.java | 2 +- .../org/tensorflow/op/core/ScatterNdMax.java | 2 +- .../org/tensorflow/op/core/ScatterNdMin.java | 2 +- .../op/core/ScatterNdNonAliasingAdd.java | 2 +- .../org/tensorflow/op/core/ScatterNdSub.java | 2 +- .../tensorflow/op/core/ScatterNdUpdate.java | 2 +- .../org/tensorflow/op/core/ScatterSub.java | 2 +- .../org/tensorflow/op/core/ScatterUpdate.java | 2 +- .../java/org/tensorflow/op/core/Select.java | 2 +- .../gen/java/org/tensorflow/op/core/Send.java | 2 +- .../org/tensorflow/op/core/SetDiff1d.java | 2 +- .../java/org/tensorflow/op/core/SetSize.java | 2 +- .../java/org/tensorflow/op/core/Shape.java | 2 +- .../java/org/tensorflow/op/core/ShapeN.java | 2 +- 
.../gen/java/org/tensorflow/op/core/Size.java | 2 +- .../java/org/tensorflow/op/core/Skipgram.java | 2 +- .../java/org/tensorflow/op/core/Slice.java | 2 +- .../java/org/tensorflow/op/core/Snapshot.java | 2 +- .../tensorflow/op/core/SpaceToBatchNd.java | 2 +- .../java/org/tensorflow/op/core/Split.java | 2 +- .../java/org/tensorflow/op/core/SplitV.java | 2 +- .../java/org/tensorflow/op/core/Squeeze.java | 2 +- .../java/org/tensorflow/op/core/Stack.java | 2 +- .../java/org/tensorflow/op/core/Stage.java | 2 +- .../org/tensorflow/op/core/StageClear.java | 2 +- .../org/tensorflow/op/core/StagePeek.java | 2 +- .../org/tensorflow/op/core/StageSize.java | 2 +- .../org/tensorflow/op/core/StopGradient.java | 2 +- .../org/tensorflow/op/core/StridedSlice.java | 2 +- .../op/core/StridedSliceAssign.java | 2 +- .../tensorflow/op/core/StridedSliceGrad.java | 2 +- .../gen/java/org/tensorflow/op/core/Sum.java | 2 +- .../org/tensorflow/op/core/SwitchCond.java | 2 +- .../tensorflow/op/core/TemporaryVariable.java | 2 +- .../org/tensorflow/op/core/TensorArray.java | 2 +- .../tensorflow/op/core/TensorArrayClose.java | 2 +- .../tensorflow/op/core/TensorArrayConcat.java | 2 +- .../tensorflow/op/core/TensorArrayGather.java | 2 +- .../tensorflow/op/core/TensorArrayGrad.java | 2 +- .../op/core/TensorArrayGradWithShape.java | 2 +- .../tensorflow/op/core/TensorArrayPack.java | 2 +- .../tensorflow/op/core/TensorArrayRead.java | 2 +- .../op/core/TensorArrayScatter.java | 2 +- .../tensorflow/op/core/TensorArraySize.java | 2 +- .../tensorflow/op/core/TensorArraySplit.java | 2 +- .../tensorflow/op/core/TensorArrayUnpack.java | 2 +- .../tensorflow/op/core/TensorArrayWrite.java | 2 +- .../tensorflow/op/core/TensorListConcat.java | 2 +- .../op/core/TensorListConcatLists.java | 2 +- .../op/core/TensorListElementShape.java | 2 +- .../op/core/TensorListFromTensor.java | 2 +- .../tensorflow/op/core/TensorListGather.java | 2 +- .../tensorflow/op/core/TensorListGetItem.java | 2 +- .../tensorflow/op/core/TensorListLength.java | 2 +- .../tensorflow/op/core/TensorListPopBack.java | 2 +- .../op/core/TensorListPushBack.java | 2 +- .../op/core/TensorListPushBackBatch.java | 2 +- .../tensorflow/op/core/TensorListReserve.java | 2 +- .../tensorflow/op/core/TensorListResize.java | 2 +- .../tensorflow/op/core/TensorListScatter.java | 2 +- .../TensorListScatterIntoExistingList.java | 2 +- .../tensorflow/op/core/TensorListSetItem.java | 2 +- .../tensorflow/op/core/TensorListSplit.java | 2 +- .../tensorflow/op/core/TensorListStack.java | 2 +- .../tensorflow/op/core/TensorMapErase.java | 2 +- .../tensorflow/op/core/TensorMapHasKey.java | 2 +- .../tensorflow/op/core/TensorMapInsert.java | 2 +- .../tensorflow/op/core/TensorMapLookup.java | 2 +- .../org/tensorflow/op/core/TensorMapSize.java | 2 +- .../op/core/TensorMapStackKeys.java | 2 +- .../op/core/TensorScatterNdAdd.java | 2 +- .../op/core/TensorScatterNdMax.java | 2 +- .../op/core/TensorScatterNdMin.java | 2 +- .../op/core/TensorScatterNdSub.java | 2 +- .../op/core/TensorScatterNdUpdate.java | 2 +- .../op/core/TensorStridedSliceUpdate.java | 2 +- .../gen/java/org/tensorflow/op/core/Tile.java | 2 +- .../org/tensorflow/op/core/Timestamp.java | 2 +- .../org/tensorflow/op/core/TopKUnique.java | 2 +- .../tensorflow/op/core/TopKWithUnique.java | 2 +- .../java/org/tensorflow/op/core/Unbatch.java | 2 +- .../org/tensorflow/op/core/UnbatchGrad.java | 2 +- .../java/org/tensorflow/op/core/Unique.java | 2 +- .../tensorflow/op/core/UniqueWithCounts.java | 2 +- .../org/tensorflow/op/core/UnravelIndex.java | 2 
+- .../java/org/tensorflow/op/core/Unstack.java | 2 +- .../java/org/tensorflow/op/core/Unstage.java | 2 +- .../org/tensorflow/op/core/UpperBound.java | 2 +- .../org/tensorflow/op/core/VarHandleOp.java | 2 +- .../op/core/VarIsInitializedOp.java | 2 +- .../java/org/tensorflow/op/core/Variable.java | 2 +- .../org/tensorflow/op/core/VariableShape.java | 2 +- .../java/org/tensorflow/op/core/Where.java | 2 +- .../org/tensorflow/op/core/XlaConvV2.java | 2 +- .../java/org/tensorflow/op/core/XlaDotV2.java | 2 +- .../op/core/XlaSetDynamicDimensionSize.java | 2 +- .../op/core/XlaSpmdFullToShardShape.java | 2 +- .../op/core/XlaSpmdShardToFullShape.java | 2 +- .../org/tensorflow/op/core/ZerosLike.java | 2 +- .../tensorflow/op/data/AnonymousIterator.java | 2 +- .../op/data/AnonymousMemoryCache.java | 2 +- .../op/data/AnonymousMultiDeviceIterator.java | 2 +- .../tensorflow/op/data/AssertNextDataset.java | 2 +- .../tensorflow/op/data/AutoShardDataset.java | 2 +- .../org/tensorflow/op/data/BatchDataset.java | 2 +- .../op/data/BytesProducedStatsDataset.java | 2 +- .../org/tensorflow/op/data/CSVDataset.java | 2 +- .../org/tensorflow/op/data/CSVDatasetV2.java | 2 +- .../org/tensorflow/op/data/CacheDataset.java | 2 +- .../tensorflow/op/data/CacheDatasetV2.java | 2 +- .../op/data/ChooseFastestDataset.java | 2 +- .../op/data/ConcatenateDataset.java | 2 +- .../op/data/DatasetCardinality.java | 2 +- .../tensorflow/op/data/DatasetFromGraph.java | 2 +- .../tensorflow/op/data/DatasetToGraph.java | 2 +- .../op/data/DatasetToSingleElement.java | 2 +- .../tensorflow/op/data/DatasetToTfRecord.java | 2 +- .../tensorflow/op/data/DeleteIterator.java | 2 +- .../tensorflow/op/data/DeleteMemoryCache.java | 2 +- .../op/data/DeleteMultiDeviceIterator.java | 2 +- .../op/data/DenseToSparseBatchDataset.java | 2 +- .../op/data/DeserializeIterator.java | 2 +- .../op/data/DirectedInterleaveDataset.java | 2 +- .../op/data/FilterByLastComponentDataset.java | 2 +- .../op/data/FixedLengthRecordDataset.java | 2 +- .../op/data/IgnoreErrorsDataset.java | 2 +- .../op/data/InitializeTableFromDataset.java | 2 +- .../java/org/tensorflow/op/data/Iterator.java | 2 +- .../op/data/IteratorFromStringHandle.java | 2 +- .../tensorflow/op/data/IteratorGetDevice.java | 2 +- .../tensorflow/op/data/IteratorGetNext.java | 2 +- .../op/data/IteratorGetNextAsOptional.java | 2 +- .../op/data/IteratorGetNextSync.java | 2 +- .../op/data/IteratorToStringHandle.java | 2 +- .../org/tensorflow/op/data/LMDBDataset.java | 2 +- .../op/data/LatencyStatsDataset.java | 2 +- .../org/tensorflow/op/data/LeakyReluGrad.java | 2 +- .../org/tensorflow/op/data/MakeIterator.java | 2 +- .../op/data/MatchingFilesDataset.java | 2 +- .../op/data/MaxIntraOpParallelismDataset.java | 2 +- .../org/tensorflow/op/data/ModelDataset.java | 2 +- .../op/data/MultiDeviceIterator.java | 2 +- .../MultiDeviceIteratorFromStringHandle.java | 2 +- .../MultiDeviceIteratorGetNextFromShard.java | 2 +- .../op/data/MultiDeviceIteratorInit.java | 2 +- .../MultiDeviceIteratorToStringHandle.java | 2 +- .../op/data/NonSerializableDataset.java | 2 +- .../tensorflow/op/data/OptimizeDataset.java | 2 +- .../tensorflow/op/data/OptimizeDatasetV2.java | 2 +- .../tensorflow/op/data/OptionalFromValue.java | 2 +- .../tensorflow/op/data/OptionalGetValue.java | 2 +- .../tensorflow/op/data/OptionalHasValue.java | 2 +- .../org/tensorflow/op/data/OptionalNone.java | 2 +- .../op/data/PaddedBatchDataset.java | 2 +- .../tensorflow/op/data/PrefetchDataset.java | 2 +- .../op/data/PrivateThreadPoolDataset.java | 2 +- 
.../org/tensorflow/op/data/RandomDataset.java | 2 +- .../org/tensorflow/op/data/RangeDataset.java | 2 +- .../tensorflow/op/data/RebatchDataset.java | 2 +- .../tensorflow/op/data/RebatchDatasetV2.java | 2 +- .../tensorflow/op/data/RegisterDataset.java | 2 +- .../org/tensorflow/op/data/RepeatDataset.java | 2 +- .../tensorflow/op/data/SamplingDataset.java | 2 +- .../tensorflow/op/data/SerializeIterator.java | 2 +- .../op/data/SetStatsAggregatorDataset.java | 2 +- .../org/tensorflow/op/data/ShardDataset.java | 2 +- .../op/data/ShuffleAndRepeatDataset.java | 2 +- .../tensorflow/op/data/ShuffleDataset.java | 2 +- .../org/tensorflow/op/data/SkipDataset.java | 2 +- .../org/tensorflow/op/data/SleepDataset.java | 2 +- .../op/data/SlidingWindowDataset.java | 2 +- .../op/data/SparseTensorSliceDataset.java | 2 +- .../org/tensorflow/op/data/SqlDataset.java | 2 +- .../op/data/StatsAggregatorHandle.java | 2 +- .../org/tensorflow/op/data/TakeDataset.java | 2 +- .../org/tensorflow/op/data/TensorDataset.java | 2 +- .../op/data/TensorSliceDataset.java | 2 +- .../tensorflow/op/data/TextLineDataset.java | 2 +- .../tensorflow/op/data/TfRecordDataset.java | 2 +- .../tensorflow/op/data/ThreadPoolDataset.java | 2 +- .../tensorflow/op/data/ThreadPoolHandle.java | 2 +- .../tensorflow/op/data/UnbatchDataset.java | 2 +- .../org/tensorflow/op/data/UniqueDataset.java | 2 +- .../op/data/UnwrapDatasetVariant.java | 2 +- .../org/tensorflow/op/data/WindowDataset.java | 2 +- .../op/data/WrapDatasetVariant.java | 2 +- .../org/tensorflow/op/data/ZipDataset.java | 2 +- .../AssertCardinalityDataset.java | 2 +- .../data/experimental/AssertNextDataset.java | 2 +- .../data/experimental/AutoShardDataset.java | 2 +- .../BytesProducedStatsDataset.java | 2 +- .../op/data/experimental/CSVDataset.java | 2 +- .../experimental/ChooseFastestDataset.java | 2 +- .../op/data/experimental/CompressElement.java | 2 +- .../data/experimental/DataServiceDataset.java | 2 +- .../data/experimental/DatasetCardinality.java | 2 +- .../data/experimental/DatasetToTFRecord.java | 2 +- .../DenseToSparseBatchDataset.java | 2 +- .../DirectedInterleaveDataset.java | 2 +- .../experimental/DummyIterationCounter.java | 2 +- .../experimental/IgnoreErrorsDataset.java | 2 +- .../data/experimental/IteratorGetDevice.java | 2 +- .../experimental/LatencyStatsDataset.java | 2 +- .../op/data/experimental/LmdbDataset.java | 2 +- .../experimental/MatchingFilesDataset.java | 2 +- .../MaxIntraOpParallelismDataset.java | 2 +- .../experimental/NonSerializableDataset.java | 2 +- .../experimental/ParseExampleDataset.java | 2 +- .../PrivateThreadPoolDataset.java | 2 +- .../op/data/experimental/RandomDataset.java | 2 +- .../op/data/experimental/RebatchDataset.java | 2 +- .../SetStatsAggregatorDataset.java | 2 +- .../op/data/experimental/SleepDataset.java | 2 +- .../experimental/SlidingWindowDataset.java | 2 +- .../op/data/experimental/SqlDataset.java | 2 +- .../experimental/StatsAggregatorHandle.java | 2 +- .../StatsAggregatorSetSummaryWriter.java | 2 +- .../experimental/StatsAggregatorSummary.java | 2 +- .../data/experimental/ThreadPoolDataset.java | 2 +- .../data/experimental/ThreadPoolHandle.java | 2 +- .../op/data/experimental/UnbatchDataset.java | 2 +- .../data/experimental/UncompressElement.java | 2 +- .../op/data/experimental/UniqueDataset.java | 2 +- .../op/debugging/CheckNumerics.java | 2 +- .../op/debugging/DebugGradientIdentity.java | 2 +- .../debugging/DebugGradientRefIdentity.java | 2 +- .../op/debugging/DebugIdentity.java | 2 +- .../op/debugging/DebugNanCount.java | 2 +- 
.../op/debugging/DebugNumericsSummary.java | 2 +- .../op/distribute/NcclAllReduce.java | 2 +- .../op/distribute/NcclBroadcast.java | 2 +- .../tensorflow/op/distribute/NcclReduce.java | 2 +- .../org/tensorflow/op/dtypes/AsString.java | 2 +- .../java/org/tensorflow/op/dtypes/Cast.java | 2 +- .../org/tensorflow/op/dtypes/Complex.java | 2 +- .../java/org/tensorflow/op/dtypes/ToBool.java | 2 +- .../estimator/BoostedTreesAggregateStats.java | 2 +- .../op/estimator/BoostedTreesBucketize.java | 2 +- ...BoostedTreesCalculateBestFeatureSplit.java | 2 +- ...ostedTreesCalculateBestFeatureSplitV2.java | 2 +- ...stedTreesCalculateBestGainsPerFeature.java | 2 +- .../op/estimator/BoostedTreesCenterBias.java | 2 +- .../estimator/BoostedTreesCreateEnsemble.java | 2 +- ...stedTreesCreateQuantileStreamResource.java | 2 +- .../BoostedTreesDeserializeEnsemble.java | 2 +- .../BoostedTreesEnsembleResourceHandleOp.java | 2 +- .../BoostedTreesExampleDebugOutputs.java | 2 +- .../BoostedTreesFlushQuantileSummaries.java | 2 +- .../BoostedTreesGetEnsembleStates.java | 2 +- .../BoostedTreesMakeQuantileSummaries.java | 2 +- .../BoostedTreesMakeStatsSummary.java | 2 +- .../op/estimator/BoostedTreesPredict.java | 2 +- ...eesQuantileStreamResourceAddSummaries.java | 2 +- ...reesQuantileStreamResourceDeserialize.java | 2 +- ...ostedTreesQuantileStreamResourceFlush.java | 2 +- ...tileStreamResourceGetBucketBoundaries.java | 2 +- ...edTreesQuantileStreamResourceHandleOp.java | 2 +- .../BoostedTreesSerializeEnsemble.java | 2 +- .../BoostedTreesSparseAggregateStats.java | 2 +- ...dTreesSparseCalculateBestFeatureSplit.java | 2 +- .../BoostedTreesTrainingPredict.java | 2 +- .../estimator/BoostedTreesUpdateEnsemble.java | 2 +- .../BoostedTreesUpdateEnsembleV2.java | 2 +- .../IsBoostedTreesEnsembleInitialized.java | 2 +- ...reesQuantileStreamResourceInitialized.java | 2 +- .../tensorflow/op/image/AdjustContrast.java | 2 +- .../org/tensorflow/op/image/AdjustHue.java | 2 +- .../tensorflow/op/image/AdjustSaturation.java | 2 +- .../op/image/CombinedNonMaxSuppression.java | 2 +- .../tensorflow/op/image/CropAndResize.java | 2 +- .../op/image/CropAndResizeGradBoxes.java | 2 +- .../op/image/CropAndResizeGradImage.java | 2 +- .../op/image/DecodeAndCropJpeg.java | 2 +- .../org/tensorflow/op/image/DecodeBmp.java | 2 +- .../org/tensorflow/op/image/DecodeGif.java | 2 +- .../org/tensorflow/op/image/DecodeImage.java | 2 +- .../org/tensorflow/op/image/DecodeJpeg.java | 2 +- .../org/tensorflow/op/image/DecodePng.java | 2 +- .../op/image/DrawBoundingBoxes.java | 2 +- .../org/tensorflow/op/image/EncodeJpeg.java | 2 +- .../op/image/EncodeJpegVariableQuality.java | 2 +- .../org/tensorflow/op/image/EncodePng.java | 2 +- .../tensorflow/op/image/ExtractGlimpse.java | 2 +- .../op/image/ExtractImagePatches.java | 2 +- .../tensorflow/op/image/ExtractJpegShape.java | 2 +- .../image/GenerateBoundingBoxProposals.java | 2 +- .../org/tensorflow/op/image/HsvToRgb.java | 2 +- .../op/image/ImageProjectiveTransformV2.java | 2 +- .../op/image/ImageProjectiveTransformV3.java | 2 +- .../tensorflow/op/image/NearestNeighbors.java | 2 +- .../op/image/NonMaxSuppression.java | 2 +- .../image/NonMaxSuppressionWithOverlaps.java | 2 +- .../op/image/QuantizedResizeBilinear.java | 2 +- .../org/tensorflow/op/image/RandomCrop.java | 2 +- .../org/tensorflow/op/image/ResizeArea.java | 2 +- .../tensorflow/op/image/ResizeBicubic.java | 2 +- .../op/image/ResizeBicubicGrad.java | 2 +- .../tensorflow/op/image/ResizeBilinear.java | 2 +- .../op/image/ResizeBilinearGrad.java | 2 +- 
.../op/image/ResizeNearestNeighbor.java | 2 +- .../op/image/ResizeNearestNeighborGrad.java | 2 +- .../org/tensorflow/op/image/RgbToHsv.java | 2 +- .../op/image/SampleDistortedBoundingBox.java | 2 +- .../op/image/ScaleAndTranslate.java | 2 +- .../op/image/ScaleAndTranslateGrad.java | 2 +- .../StatelessSampleDistortedBoundingBox.java | 2 +- .../org/tensorflow/op/io/DecodeBase64.java | 2 +- .../tensorflow/op/io/DecodeCompressed.java | 2 +- .../java/org/tensorflow/op/io/DecodeCsv.java | 2 +- .../tensorflow/op/io/DecodeJsonExample.java | 2 +- .../org/tensorflow/op/io/DecodePaddedRaw.java | 2 +- .../java/org/tensorflow/op/io/DecodeRaw.java | 2 +- .../op/io/DeserializeManySparse.java | 2 +- .../org/tensorflow/op/io/EncodeBase64.java | 2 +- .../java/org/tensorflow/op/io/FifoQueue.java | 2 +- .../op/io/FixedLengthRecordReader.java | 2 +- .../org/tensorflow/op/io/IdentityReader.java | 2 +- .../java/org/tensorflow/op/io/LmdbReader.java | 2 +- .../org/tensorflow/op/io/MatchingFiles.java | 2 +- .../tensorflow/op/io/PaddingFifoQueue.java | 2 +- .../org/tensorflow/op/io/ParseExample.java | 2 +- .../op/io/ParseSequenceExample.java | 2 +- .../tensorflow/op/io/ParseSingleExample.java | 2 +- .../op/io/ParseSingleSequenceExample.java | 2 +- .../org/tensorflow/op/io/ParseTensor.java | 2 +- .../org/tensorflow/op/io/PriorityQueue.java | 2 +- .../java/org/tensorflow/op/io/QueueClose.java | 2 +- .../org/tensorflow/op/io/QueueDequeue.java | 2 +- .../tensorflow/op/io/QueueDequeueMany.java | 2 +- .../tensorflow/op/io/QueueDequeueUpTo.java | 2 +- .../org/tensorflow/op/io/QueueEnqueue.java | 2 +- .../tensorflow/op/io/QueueEnqueueMany.java | 2 +- .../org/tensorflow/op/io/QueueIsClosed.java | 2 +- .../java/org/tensorflow/op/io/QueueSize.java | 2 +- .../tensorflow/op/io/RandomShuffleQueue.java | 2 +- .../java/org/tensorflow/op/io/ReadFile.java | 2 +- .../op/io/ReaderNumRecordsProduced.java | 2 +- .../op/io/ReaderNumWorkUnitsCompleted.java | 2 +- .../java/org/tensorflow/op/io/ReaderRead.java | 2 +- .../org/tensorflow/op/io/ReaderReadUpTo.java | 2 +- .../org/tensorflow/op/io/ReaderReset.java | 2 +- .../tensorflow/op/io/ReaderRestoreState.java | 2 +- .../op/io/ReaderSerializeState.java | 2 +- .../tensorflow/op/io/SerializeManySparse.java | 2 +- .../org/tensorflow/op/io/SerializeSparse.java | 2 +- .../org/tensorflow/op/io/SerializeTensor.java | 2 +- .../org/tensorflow/op/io/ShardedFilename.java | 2 +- .../org/tensorflow/op/io/ShardedFilespec.java | 2 +- .../org/tensorflow/op/io/TextLineReader.java | 2 +- .../org/tensorflow/op/io/TfRecordReader.java | 2 +- .../org/tensorflow/op/io/WholeFileReader.java | 2 +- .../java/org/tensorflow/op/io/WriteFile.java | 2 +- .../org/tensorflow/op/linalg/BandPart.java | 2 +- .../op/linalg/BandedTriangularSolve.java | 2 +- .../tensorflow/op/linalg/BatchCholesky.java | 2 +- .../op/linalg/BatchCholeskyGrad.java | 2 +- .../op/linalg/BatchMatrixBandPart.java | 2 +- .../op/linalg/BatchMatrixDeterminant.java | 2 +- .../tensorflow/op/linalg/BatchMatrixDiag.java | 2 +- .../op/linalg/BatchMatrixDiagPart.java | 2 +- .../op/linalg/BatchMatrixInverse.java | 2 +- .../op/linalg/BatchMatrixSetDiag.java | 2 +- .../op/linalg/BatchMatrixSolve.java | 2 +- .../op/linalg/BatchMatrixSolveLs.java | 2 +- .../op/linalg/BatchMatrixTriangularSolve.java | 2 +- .../op/linalg/BatchSelfAdjointEig.java | 2 +- .../org/tensorflow/op/linalg/BatchSvd.java | 2 +- .../org/tensorflow/op/linalg/Cholesky.java | 2 +- .../tensorflow/op/linalg/CholeskyGrad.java | 2 +- .../op/linalg/ConjugateTranspose.java | 2 +- 
.../java/org/tensorflow/op/linalg/Cross.java | 2 +- .../java/org/tensorflow/op/linalg/Det.java | 2 +- .../java/org/tensorflow/op/linalg/Eig.java | 2 +- .../java/org/tensorflow/op/linalg/Einsum.java | 2 +- .../tensorflow/op/linalg/EuclideanNorm.java | 2 +- .../java/org/tensorflow/op/linalg/Inv.java | 2 +- .../op/linalg/LoadAndRemapMatrix.java | 2 +- .../op/linalg/LogMatrixDeterminant.java | 2 +- .../gen/java/org/tensorflow/op/linalg/Lu.java | 2 +- .../java/org/tensorflow/op/linalg/MatMul.java | 2 +- .../org/tensorflow/op/linalg/MatrixDiag.java | 2 +- .../tensorflow/op/linalg/MatrixDiagPart.java | 2 +- .../op/linalg/MatrixDiagPartV3.java | 2 +- .../tensorflow/op/linalg/MatrixDiagV3.java | 2 +- .../tensorflow/op/linalg/MatrixLogarithm.java | 2 +- .../tensorflow/op/linalg/MatrixSetDiag.java | 2 +- .../tensorflow/op/linalg/MatrixSolveLs.java | 2 +- .../gen/java/org/tensorflow/op/linalg/Qr.java | 2 +- .../tensorflow/op/linalg/QuantizedMatMul.java | 2 +- .../op/linalg/QuantizedMatMulWithBias.java | 2 +- .../QuantizedMatMulWithBiasAndRelu.java | 2 +- ...zedMatMulWithBiasAndReluAndRequantize.java | 2 +- .../tensorflow/op/linalg/SelfAdjointEig.java | 2 +- .../java/org/tensorflow/op/linalg/Solve.java | 2 +- .../java/org/tensorflow/op/linalg/Sqrtm.java | 2 +- .../java/org/tensorflow/op/linalg/Svd.java | 2 +- .../org/tensorflow/op/linalg/TensorDiag.java | 2 +- .../tensorflow/op/linalg/TensorDiagPart.java | 2 +- .../org/tensorflow/op/linalg/Transpose.java | 2 +- .../tensorflow/op/linalg/TriangularSolve.java | 2 +- .../op/linalg/TridiagonalMatMul.java | 2 +- .../op/linalg/TridiagonalSolve.java | 2 +- .../sparse/CSRSparseMatrixComponents.java | 2 +- .../linalg/sparse/CSRSparseMatrixToDense.java | 2 +- .../sparse/CSRSparseMatrixToSparseTensor.java | 2 +- .../linalg/sparse/DenseToCSRSparseMatrix.java | 2 +- .../op/linalg/sparse/SparseMatrixAdd.java | 2 +- .../op/linalg/sparse/SparseMatrixMatMul.java | 2 +- .../op/linalg/sparse/SparseMatrixMul.java | 2 +- .../op/linalg/sparse/SparseMatrixNNZ.java | 2 +- .../sparse/SparseMatrixOrderingAMD.java | 2 +- .../op/linalg/sparse/SparseMatrixSoftmax.java | 2 +- .../sparse/SparseMatrixSoftmaxGrad.java | 2 +- .../sparse/SparseMatrixSparseCholesky.java | 2 +- .../sparse/SparseMatrixSparseMatMul.java | 2 +- .../linalg/sparse/SparseMatrixTranspose.java | 2 +- .../op/linalg/sparse/SparseMatrixZeros.java | 2 +- .../sparse/SparseTensorToCSRSparseMatrix.java | 2 +- .../gen/java/org/tensorflow/op/math/Abs.java | 2 +- .../org/tensorflow/op/math/AccumulateN.java | 2 +- .../gen/java/org/tensorflow/op/math/Acos.java | 2 +- .../java/org/tensorflow/op/math/Acosh.java | 2 +- .../gen/java/org/tensorflow/op/math/Add.java | 2 +- .../gen/java/org/tensorflow/op/math/AddN.java | 2 +- .../java/org/tensorflow/op/math/Angle.java | 2 +- .../tensorflow/op/math/ApproximateEqual.java | 2 +- .../java/org/tensorflow/op/math/ArgMax.java | 2 +- .../java/org/tensorflow/op/math/ArgMin.java | 2 +- .../gen/java/org/tensorflow/op/math/Asin.java | 2 +- .../java/org/tensorflow/op/math/Asinh.java | 2 +- .../gen/java/org/tensorflow/op/math/Atan.java | 2 +- .../java/org/tensorflow/op/math/Atan2.java | 2 +- .../java/org/tensorflow/op/math/Atanh.java | 2 +- .../java/org/tensorflow/op/math/BesselI0.java | 2 +- .../org/tensorflow/op/math/BesselI0e.java | 2 +- .../java/org/tensorflow/op/math/BesselI1.java | 2 +- .../org/tensorflow/op/math/BesselI1e.java | 2 +- .../java/org/tensorflow/op/math/Betainc.java | 2 +- .../java/org/tensorflow/op/math/Bincount.java | 2 +- .../gen/java/org/tensorflow/op/math/Ceil.java | 2 +- 
.../tensorflow/op/math/CompareAndBitpack.java | 2 +- .../org/tensorflow/op/math/ComplexAbs.java | 2 +- .../gen/java/org/tensorflow/op/math/Conj.java | 2 +- .../gen/java/org/tensorflow/op/math/Cos.java | 2 +- .../gen/java/org/tensorflow/op/math/Cosh.java | 2 +- .../java/org/tensorflow/op/math/Cumprod.java | 2 +- .../java/org/tensorflow/op/math/Cumsum.java | 2 +- .../op/math/CumulativeLogsumexp.java | 2 +- .../org/tensorflow/op/math/DenseBincount.java | 2 +- .../java/org/tensorflow/op/math/Digamma.java | 2 +- .../gen/java/org/tensorflow/op/math/Div.java | 2 +- .../java/org/tensorflow/op/math/DivNoNan.java | 2 +- .../java/org/tensorflow/op/math/Equal.java | 2 +- .../gen/java/org/tensorflow/op/math/Erf.java | 2 +- .../gen/java/org/tensorflow/op/math/Erfc.java | 2 +- .../gen/java/org/tensorflow/op/math/Exp.java | 2 +- .../java/org/tensorflow/op/math/Expm1.java | 2 +- .../gen/java/org/tensorflow/op/math/Fact.java | 2 +- .../java/org/tensorflow/op/math/Floor.java | 2 +- .../java/org/tensorflow/op/math/FloorDiv.java | 2 +- .../java/org/tensorflow/op/math/FloorMod.java | 2 +- .../java/org/tensorflow/op/math/Greater.java | 2 +- .../org/tensorflow/op/math/GreaterEqual.java | 2 +- .../java/org/tensorflow/op/math/Igamma.java | 2 +- .../org/tensorflow/op/math/IgammaGradA.java | 2 +- .../java/org/tensorflow/op/math/Igammac.java | 2 +- .../gen/java/org/tensorflow/op/math/Imag.java | 2 +- .../tensorflow/op/math/InvertPermutation.java | 2 +- .../java/org/tensorflow/op/math/IsFinite.java | 2 +- .../java/org/tensorflow/op/math/IsInf.java | 2 +- .../java/org/tensorflow/op/math/IsNan.java | 2 +- .../gen/java/org/tensorflow/op/math/Less.java | 2 +- .../org/tensorflow/op/math/LessEqual.java | 2 +- .../java/org/tensorflow/op/math/Lgamma.java | 2 +- .../gen/java/org/tensorflow/op/math/Log.java | 2 +- .../java/org/tensorflow/op/math/Log1p.java | 2 +- .../org/tensorflow/op/math/LogicalAnd.java | 2 +- .../org/tensorflow/op/math/LogicalNot.java | 2 +- .../org/tensorflow/op/math/LogicalOr.java | 2 +- .../java/org/tensorflow/op/math/Maximum.java | 2 +- .../gen/java/org/tensorflow/op/math/Mean.java | 2 +- .../java/org/tensorflow/op/math/Minimum.java | 2 +- .../gen/java/org/tensorflow/op/math/Mod.java | 2 +- .../gen/java/org/tensorflow/op/math/Mul.java | 2 +- .../java/org/tensorflow/op/math/MulNoNan.java | 2 +- .../java/org/tensorflow/op/math/Ndtri.java | 2 +- .../gen/java/org/tensorflow/op/math/Neg.java | 2 +- .../org/tensorflow/op/math/NextAfter.java | 2 +- .../java/org/tensorflow/op/math/NotEqual.java | 2 +- .../org/tensorflow/op/math/Polygamma.java | 2 +- .../tensorflow/op/math/PopulationCount.java | 2 +- .../gen/java/org/tensorflow/op/math/Pow.java | 2 +- .../org/tensorflow/op/math/QuantizedAdd.java | 2 +- .../org/tensorflow/op/math/QuantizedMul.java | 2 +- .../gen/java/org/tensorflow/op/math/Real.java | 2 +- .../java/org/tensorflow/op/math/RealDiv.java | 2 +- .../org/tensorflow/op/math/Reciprocal.java | 2 +- .../tensorflow/op/math/ReciprocalGrad.java | 2 +- .../math/RequantizationRangePerChannel.java | 2 +- .../op/math/RequantizePerChannel.java | 2 +- .../gen/java/org/tensorflow/op/math/Rint.java | 2 +- .../java/org/tensorflow/op/math/Round.java | 2 +- .../java/org/tensorflow/op/math/Rsqrt.java | 2 +- .../org/tensorflow/op/math/RsqrtGrad.java | 2 +- .../org/tensorflow/op/math/SegmentMax.java | 2 +- .../org/tensorflow/op/math/SegmentMean.java | 2 +- .../org/tensorflow/op/math/SegmentMin.java | 2 +- .../org/tensorflow/op/math/SegmentProd.java | 2 +- .../org/tensorflow/op/math/SegmentSum.java | 2 +- 
.../java/org/tensorflow/op/math/Sigmoid.java | 2 +- .../org/tensorflow/op/math/SigmoidGrad.java | 2 +- .../gen/java/org/tensorflow/op/math/Sign.java | 2 +- .../gen/java/org/tensorflow/op/math/Sin.java | 2 +- .../gen/java/org/tensorflow/op/math/Sinh.java | 2 +- .../org/tensorflow/op/math/SobolSample.java | 2 +- .../java/org/tensorflow/op/math/Softplus.java | 2 +- .../org/tensorflow/op/math/SoftplusGrad.java | 2 +- .../gen/java/org/tensorflow/op/math/Sqrt.java | 2 +- .../java/org/tensorflow/op/math/SqrtGrad.java | 2 +- .../java/org/tensorflow/op/math/Square.java | 2 +- .../tensorflow/op/math/SquaredDifference.java | 2 +- .../gen/java/org/tensorflow/op/math/Sub.java | 2 +- .../gen/java/org/tensorflow/op/math/Tan.java | 2 +- .../gen/java/org/tensorflow/op/math/Tanh.java | 2 +- .../java/org/tensorflow/op/math/TanhGrad.java | 2 +- .../org/tensorflow/op/math/TruncateDiv.java | 2 +- .../org/tensorflow/op/math/TruncateMod.java | 2 +- .../op/math/UnsortedSegmentMax.java | 2 +- .../op/math/UnsortedSegmentMin.java | 2 +- .../op/math/UnsortedSegmentProd.java | 2 +- .../op/math/UnsortedSegmentSum.java | 2 +- .../java/org/tensorflow/op/math/Xdivy.java | 2 +- .../java/org/tensorflow/op/math/Xlog1py.java | 2 +- .../java/org/tensorflow/op/math/Xlogy.java | 2 +- .../gen/java/org/tensorflow/op/math/Zeta.java | 2 +- .../java/org/tensorflow/op/math/erfinv.java | 2 +- .../tensorflow/op/math/special/BesselJ0.java | 2 +- .../tensorflow/op/math/special/BesselJ1.java | 2 +- .../tensorflow/op/math/special/BesselK0.java | 2 +- .../tensorflow/op/math/special/BesselK0e.java | 2 +- .../tensorflow/op/math/special/BesselK1.java | 2 +- .../tensorflow/op/math/special/BesselK1e.java | 2 +- .../tensorflow/op/math/special/BesselY0.java | 2 +- .../tensorflow/op/math/special/BesselY1.java | 2 +- .../org/tensorflow/op/math/special/Dawsn.java | 2 +- .../tensorflow/op/math/special/Expint.java | 2 +- .../op/math/special/FresnelCos.java | 2 +- .../op/math/special/FresnelSin.java | 2 +- .../tensorflow/op/math/special/Spence.java | 2 +- .../java/org/tensorflow/op/nn/AvgPool.java | 2 +- .../java/org/tensorflow/op/nn/AvgPool3d.java | 2 +- .../org/tensorflow/op/nn/AvgPool3dGrad.java | 2 +- .../org/tensorflow/op/nn/AvgPoolGrad.java | 2 +- .../nn/BatchNormWithGlobalNormalization.java | 2 +- .../BatchNormWithGlobalNormalizationGrad.java | 2 +- .../java/org/tensorflow/op/nn/BiasAdd.java | 2 +- .../org/tensorflow/op/nn/BiasAddGrad.java | 2 +- .../java/org/tensorflow/op/nn/BlockLSTM.java | 2 +- .../org/tensorflow/op/nn/BlockLSTMGrad.java | 2 +- .../java/org/tensorflow/op/nn/CTCLossV2.java | 2 +- .../op/nn/ComputeAccidentalHits.java | 2 +- .../gen/java/org/tensorflow/op/nn/Conv2d.java | 2 +- .../op/nn/Conv2dBackpropFilter.java | 2 +- .../tensorflow/op/nn/Conv2dBackpropInput.java | 2 +- .../gen/java/org/tensorflow/op/nn/Conv3d.java | 2 +- .../op/nn/Conv3dBackpropFilter.java | 2 +- .../tensorflow/op/nn/Conv3dBackpropInput.java | 2 +- .../op/nn/CtcBeamSearchDecoder.java | 2 +- .../tensorflow/op/nn/CtcGreedyDecoder.java | 2 +- .../java/org/tensorflow/op/nn/CtcLoss.java | 2 +- .../java/org/tensorflow/op/nn/CudnnRNN.java | 2 +- .../tensorflow/op/nn/CudnnRNNBackprop.java | 2 +- .../op/nn/CudnnRNNCanonicalToParams.java | 2 +- .../op/nn/CudnnRNNParamsToCanonical.java | 2 +- .../tensorflow/op/nn/CudnnRnnParamsSize.java | 2 +- .../tensorflow/op/nn/DataFormatDimMap.java | 2 +- .../op/nn/DataFormatVecPermute.java | 2 +- .../org/tensorflow/op/nn/DepthToSpace.java | 2 +- .../op/nn/DepthwiseConv2dNative.java | 2 +- .../DepthwiseConv2dNativeBackpropFilter.java | 
2 +- .../DepthwiseConv2dNativeBackpropInput.java | 2 +- .../java/org/tensorflow/op/nn/Dilation2d.java | 2 +- .../op/nn/Dilation2dBackpropFilter.java | 2 +- .../op/nn/Dilation2dBackpropInput.java | 2 +- .../gen/java/org/tensorflow/op/nn/Elu.java | 2 +- .../java/org/tensorflow/op/nn/EluGrad.java | 2 +- .../op/nn/FixedUnigramCandidateSampler.java | 2 +- .../tensorflow/op/nn/FractionalAvgPool.java | 2 +- .../op/nn/FractionalAvgPoolGrad.java | 2 +- .../tensorflow/op/nn/FractionalMaxPool.java | 2 +- .../op/nn/FractionalMaxPoolGrad.java | 2 +- .../org/tensorflow/op/nn/FusedBatchNorm.java | 2 +- .../tensorflow/op/nn/FusedBatchNormGrad.java | 2 +- .../org/tensorflow/op/nn/FusedPadConv2d.java | 2 +- .../op/nn/FusedResizeAndPadConv2d.java | 2 +- .../org/tensorflow/op/nn/GRUBlockCell.java | 2 +- .../tensorflow/op/nn/GRUBlockCellGrad.java | 2 +- .../gen/java/org/tensorflow/op/nn/InTopK.java | 2 +- .../java/org/tensorflow/op/nn/InvGrad.java | 2 +- .../tensorflow/op/nn/IsotonicRegression.java | 2 +- .../gen/java/org/tensorflow/op/nn/L2Loss.java | 2 +- .../org/tensorflow/op/nn/LSTMBlockCell.java | 2 +- .../tensorflow/op/nn/LSTMBlockCellGrad.java | 2 +- .../java/org/tensorflow/op/nn/LeakyRelu.java | 2 +- .../op/nn/LearnedUnigramCandidateSampler.java | 2 +- .../op/nn/LocalResponseNormalization.java | 2 +- .../op/nn/LocalResponseNormalizationGrad.java | 2 +- .../java/org/tensorflow/op/nn/LogSoftmax.java | 2 +- .../java/org/tensorflow/op/nn/MaxPool.java | 2 +- .../java/org/tensorflow/op/nn/MaxPool3d.java | 2 +- .../org/tensorflow/op/nn/MaxPool3dGrad.java | 2 +- .../tensorflow/op/nn/MaxPool3dGradGrad.java | 2 +- .../org/tensorflow/op/nn/MaxPoolGrad.java | 2 +- .../org/tensorflow/op/nn/MaxPoolGradGrad.java | 2 +- .../op/nn/MaxPoolGradGradWithArgmax.java | 2 +- .../op/nn/MaxPoolGradWithArgmax.java | 2 +- .../tensorflow/op/nn/MaxPoolWithArgmax.java | 2 +- .../java/org/tensorflow/op/nn/NthElement.java | 2 +- .../tensorflow/op/nn/QuantizedAvgPool.java | 2 +- ...tizedBatchNormWithGlobalNormalization.java | 2 +- .../tensorflow/op/nn/QuantizedBiasAdd.java | 2 +- .../op/nn/QuantizedConv2DAndRelu.java | 2 +- .../QuantizedConv2DAndReluAndRequantize.java | 2 +- .../op/nn/QuantizedConv2DAndRequantize.java | 2 +- .../op/nn/QuantizedConv2DPerChannel.java | 2 +- .../op/nn/QuantizedConv2DWithBias.java | 2 +- .../op/nn/QuantizedConv2DWithBiasAndRelu.java | 2 +- ...zedConv2DWithBiasAndReluAndRequantize.java | 2 +- .../QuantizedConv2DWithBiasAndRequantize.java | 2 +- ...WithBiasSignedSumAndReluAndRequantize.java | 2 +- .../nn/QuantizedConv2DWithBiasSumAndRelu.java | 2 +- ...Conv2DWithBiasSumAndReluAndRequantize.java | 2 +- .../org/tensorflow/op/nn/QuantizedConv2d.java | 2 +- .../op/nn/QuantizedDepthwiseConv2D.java | 2 +- .../nn/QuantizedDepthwiseConv2DWithBias.java | 2 +- ...antizedDepthwiseConv2DWithBiasAndRelu.java | 2 +- ...iseConv2DWithBiasAndReluAndRequantize.java | 2 +- .../op/nn/QuantizedInstanceNorm.java | 2 +- .../tensorflow/op/nn/QuantizedMaxPool.java | 2 +- .../org/tensorflow/op/nn/QuantizedRelu.java | 2 +- .../org/tensorflow/op/nn/QuantizedRelu6.java | 2 +- .../org/tensorflow/op/nn/QuantizedReluX.java | 2 +- .../gen/java/org/tensorflow/op/nn/Relu.java | 2 +- .../gen/java/org/tensorflow/op/nn/Relu6.java | 2 +- .../java/org/tensorflow/op/nn/Relu6Grad.java | 2 +- .../java/org/tensorflow/op/nn/ReluGrad.java | 2 +- .../gen/java/org/tensorflow/op/nn/Selu.java | 2 +- .../java/org/tensorflow/op/nn/SeluGrad.java | 2 +- .../java/org/tensorflow/op/nn/Softmax.java | 2 +- .../java/org/tensorflow/op/nn/Softsign.java | 2 +- 
.../org/tensorflow/op/nn/SoftsignGrad.java | 2 +- .../org/tensorflow/op/nn/SpaceToBatch.java | 2 +- .../org/tensorflow/op/nn/SpaceToDepth.java | 2 +- .../gen/java/org/tensorflow/op/nn/TopK.java | 2 +- .../nn/raw/SoftmaxCrossEntropyWithLogits.java | 2 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 2 +- .../op/quantization/Dequantize.java | 2 +- .../quantization/FakeQuantWithMinMaxArgs.java | 2 +- .../FakeQuantWithMinMaxArgsGradient.java | 2 +- .../quantization/FakeQuantWithMinMaxVars.java | 2 +- .../FakeQuantWithMinMaxVarsGradient.java | 2 +- .../FakeQuantWithMinMaxVarsPerChannel.java | 2 +- ...QuantWithMinMaxVarsPerChannelGradient.java | 2 +- .../tensorflow/op/quantization/Quantize.java | 2 +- .../quantization/QuantizeAndDequantize.java | 2 +- .../quantization/QuantizeAndDequantizeV3.java | 2 +- .../quantization/QuantizeAndDequantizeV4.java | 2 +- .../QuantizeAndDequantizeV4Grad.java | 2 +- .../QuantizeDownAndShrinkRange.java | 2 +- .../op/quantization/QuantizedConcat.java | 2 +- .../QuantizedMatMulWithBiasAndDequantize.java | 2 +- .../QuantizedMatMulWithBiasAndRequantize.java | 2 +- .../op/quantization/RequantizationRange.java | 2 +- .../op/quantization/Requantize.java | 2 +- .../tensorflow/op/ragged/RaggedBincount.java | 2 +- .../op/ragged/RaggedCountSparseOutput.java | 2 +- .../org/tensorflow/op/ragged/RaggedCross.java | 2 +- .../tensorflow/op/ragged/RaggedGather.java | 2 +- .../org/tensorflow/op/ragged/RaggedRange.java | 2 +- .../op/ragged/RaggedTensorFromVariant.java | 2 +- .../op/ragged/RaggedTensorToSparse.java | 2 +- .../op/ragged/RaggedTensorToTensor.java | 2 +- .../op/ragged/RaggedTensorToVariant.java | 2 +- .../ragged/RaggedTensorToVariantGradient.java | 2 +- .../op/random/AllCandidateSampler.java | 2 +- .../random/AnonymousRandomSeedGenerator.java | 2 +- .../op/random/AnonymousSeedGenerator.java | 2 +- .../op/random/DeleteRandomSeedGenerator.java | 2 +- .../op/random/DeleteSeedGenerator.java | 2 +- .../op/random/LogUniformCandidateSampler.java | 2 +- .../org/tensorflow/op/random/Multinomial.java | 2 +- .../op/random/NonDeterministicInts.java | 2 +- .../random/ParameterizedTruncatedNormal.java | 2 +- .../org/tensorflow/op/random/RandomGamma.java | 2 +- .../tensorflow/op/random/RandomGammaGrad.java | 2 +- .../tensorflow/op/random/RandomPoisson.java | 2 +- .../tensorflow/op/random/RandomShuffle.java | 2 +- .../op/random/RandomStandardNormal.java | 2 +- .../tensorflow/op/random/RandomUniform.java | 2 +- .../op/random/RandomUniformInt.java | 2 +- .../org/tensorflow/op/random/RecordInput.java | 2 +- .../tensorflow/op/random/RngReadAndSkip.java | 2 +- .../org/tensorflow/op/random/RngSkip.java | 2 +- .../op/random/StatefulRandomBinomial.java | 2 +- .../op/random/StatefulStandardNormal.java | 2 +- .../op/random/StatefulTruncatedNormal.java | 2 +- .../tensorflow/op/random/StatefulUniform.java | 2 +- .../op/random/StatefulUniformFullInt.java | 2 +- .../op/random/StatefulUniformInt.java | 2 +- .../op/random/StatelessMultinomial.java | 2 +- ...StatelessParameterizedTruncatedNormal.java | 2 +- .../op/random/StatelessRandomBinomial.java | 2 +- .../op/random/StatelessRandomGamma.java | 2 +- .../StatelessRandomGetKeyCounterAlg.java | 2 +- .../op/random/StatelessRandomNormal.java | 2 +- .../op/random/StatelessRandomNormalV2.java | 2 +- .../op/random/StatelessRandomPoisson.java | 2 +- .../op/random/StatelessRandomUniform.java | 2 +- .../random/StatelessRandomUniformFullInt.java | 2 +- .../StatelessRandomUniformFullIntV2.java | 2 +- .../op/random/StatelessRandomUniformInt.java | 2 +- 
.../random/StatelessRandomUniformIntV2.java | 2 +- .../op/random/StatelessRandomUniformV2.java | 2 +- .../op/random/StatelessTruncatedNormal.java | 2 +- .../op/random/StatelessTruncatedNormalV2.java | 2 +- .../tensorflow/op/random/TruncatedNormal.java | 2 +- .../op/random/UniformCandidateSampler.java | 2 +- .../experimental/DummySeedGenerator.java | 2 +- .../op/rawops/CollectiveBcastRecvV2.java | 2 +- .../op/rawops/CollectiveBcastSendV2.java | 2 +- .../op/rawops/DataServiceDatasetV2.java | 2 +- .../tensorflow/op/rawops/FinalizeDataset.java | 2 +- .../org/tensorflow/op/rawops/GetOptions.java | 2 +- ...EmbeddingFrequencyEstimatorParameters.java | 2 +- ...encyEstimatorParametersGradAccumDebug.java | 2 +- .../tensorflow/op/rawops/OptionsDataset.java | 2 +- .../op/rawops/ParallelBatchDataset.java | 2 +- ...EmbeddingFrequencyEstimatorParameters.java | 2 +- ...encyEstimatorParametersGradAccumDebug.java | 2 +- .../op/rawops/StatelessRandomGetAlg.java | 2 +- .../rawops/StatelessRandomGetKeyCounter.java | 2 +- .../java/org/tensorflow/op/risc/RiscAbs.java | 2 +- .../java/org/tensorflow/op/risc/RiscAdd.java | 2 +- .../op/risc/RiscBinaryArithmetic.java | 2 +- .../op/risc/RiscBinaryComparison.java | 2 +- .../org/tensorflow/op/risc/RiscBitcast.java | 2 +- .../org/tensorflow/op/risc/RiscBroadcast.java | 2 +- .../java/org/tensorflow/op/risc/RiscCast.java | 2 +- .../java/org/tensorflow/op/risc/RiscCeil.java | 2 +- .../org/tensorflow/op/risc/RiscCholesky.java | 2 +- .../org/tensorflow/op/risc/RiscConcat.java | 2 +- .../java/org/tensorflow/op/risc/RiscConv.java | 2 +- .../java/org/tensorflow/op/risc/RiscCos.java | 2 +- .../java/org/tensorflow/op/risc/RiscDiv.java | 2 +- .../java/org/tensorflow/op/risc/RiscDot.java | 2 +- .../java/org/tensorflow/op/risc/RiscExp.java | 2 +- .../java/org/tensorflow/op/risc/RiscFft.java | 2 +- .../org/tensorflow/op/risc/RiscFloor.java | 2 +- .../org/tensorflow/op/risc/RiscGather.java | 2 +- .../java/org/tensorflow/op/risc/RiscImag.java | 2 +- .../org/tensorflow/op/risc/RiscIsFinite.java | 2 +- .../java/org/tensorflow/op/risc/RiscLog.java | 2 +- .../tensorflow/op/risc/RiscLogicalAnd.java | 2 +- .../tensorflow/op/risc/RiscLogicalNot.java | 2 +- .../org/tensorflow/op/risc/RiscLogicalOr.java | 2 +- .../java/org/tensorflow/op/risc/RiscMax.java | 2 +- .../java/org/tensorflow/op/risc/RiscMin.java | 2 +- .../java/org/tensorflow/op/risc/RiscMul.java | 2 +- .../java/org/tensorflow/op/risc/RiscNeg.java | 2 +- .../java/org/tensorflow/op/risc/RiscPad.java | 2 +- .../java/org/tensorflow/op/risc/RiscPool.java | 2 +- .../java/org/tensorflow/op/risc/RiscPow.java | 2 +- .../tensorflow/op/risc/RiscRandomUniform.java | 2 +- .../java/org/tensorflow/op/risc/RiscReal.java | 2 +- .../org/tensorflow/op/risc/RiscReduce.java | 2 +- .../java/org/tensorflow/op/risc/RiscRem.java | 2 +- .../org/tensorflow/op/risc/RiscReshape.java | 2 +- .../org/tensorflow/op/risc/RiscReverse.java | 2 +- .../org/tensorflow/op/risc/RiscScatter.java | 2 +- .../org/tensorflow/op/risc/RiscShape.java | 2 +- .../java/org/tensorflow/op/risc/RiscSign.java | 2 +- .../org/tensorflow/op/risc/RiscSlice.java | 2 +- .../java/org/tensorflow/op/risc/RiscSort.java | 2 +- .../org/tensorflow/op/risc/RiscSqueeze.java | 2 +- .../java/org/tensorflow/op/risc/RiscSub.java | 2 +- .../org/tensorflow/op/risc/RiscTranspose.java | 2 +- .../op/risc/RiscTriangularSolve.java | 2 +- .../org/tensorflow/op/risc/RiscUnary.java | 2 +- .../org/tensorflow/op/signal/BatchFft.java | 2 +- .../org/tensorflow/op/signal/BatchFft2d.java | 2 +- 
.../org/tensorflow/op/signal/BatchFft3d.java | 2 +- .../org/tensorflow/op/signal/BatchIfft.java | 2 +- .../org/tensorflow/op/signal/BatchIfft2d.java | 2 +- .../org/tensorflow/op/signal/BatchIfft3d.java | 2 +- .../java/org/tensorflow/op/signal/Fft.java | 2 +- .../java/org/tensorflow/op/signal/Fft2d.java | 2 +- .../java/org/tensorflow/op/signal/Fft3d.java | 2 +- .../java/org/tensorflow/op/signal/Ifft.java | 2 +- .../java/org/tensorflow/op/signal/Ifft2d.java | 2 +- .../java/org/tensorflow/op/signal/Ifft3d.java | 2 +- .../java/org/tensorflow/op/signal/Irfft.java | 2 +- .../org/tensorflow/op/signal/Irfft2d.java | 2 +- .../org/tensorflow/op/signal/Irfft3d.java | 2 +- .../java/org/tensorflow/op/signal/Rfft.java | 2 +- .../java/org/tensorflow/op/signal/Rfft2d.java | 2 +- .../java/org/tensorflow/op/signal/Rfft3d.java | 2 +- .../op/sparse/AddManySparseToTensorsMap.java | 2 +- .../op/sparse/AddSparseToTensorsMap.java | 2 +- .../op/sparse/DenseCountSparseOutput.java | 2 +- .../op/sparse/DenseToDenseSetOperation.java | 2 +- .../op/sparse/DenseToSparseSetOperation.java | 2 +- .../op/sparse/DeserializeSparse.java | 2 +- .../SparseAccumulatorApplyGradient.java | 2 +- .../sparse/SparseAccumulatorTakeGradient.java | 2 +- .../org/tensorflow/op/sparse/SparseAdd.java | 2 +- .../tensorflow/op/sparse/SparseAddGrad.java | 2 +- .../tensorflow/op/sparse/SparseBincount.java | 2 +- .../tensorflow/op/sparse/SparseConcat.java | 2 +- .../sparse/SparseConditionalAccumulator.java | 2 +- .../op/sparse/SparseCountSparseOutput.java | 2 +- .../org/tensorflow/op/sparse/SparseCross.java | 2 +- .../op/sparse/SparseCrossHashed.java | 2 +- .../op/sparse/SparseDenseCwiseAdd.java | 2 +- .../op/sparse/SparseDenseCwiseDiv.java | 2 +- .../op/sparse/SparseDenseCwiseMul.java | 2 +- .../op/sparse/SparseFillEmptyRows.java | 2 +- .../op/sparse/SparseFillEmptyRowsGrad.java | 2 +- .../tensorflow/op/sparse/SparseMatMul.java | 2 +- .../tensorflow/op/sparse/SparseReduceMax.java | 2 +- .../op/sparse/SparseReduceMaxSparse.java | 2 +- .../tensorflow/op/sparse/SparseReduceSum.java | 2 +- .../op/sparse/SparseReduceSumSparse.java | 2 +- .../tensorflow/op/sparse/SparseReorder.java | 2 +- .../tensorflow/op/sparse/SparseReshape.java | 2 +- .../op/sparse/SparseSegmentMean.java | 2 +- .../op/sparse/SparseSegmentMeanGrad.java | 2 +- .../SparseSegmentMeanWithNumSegments.java | 2 +- .../op/sparse/SparseSegmentSqrtN.java | 2 +- .../op/sparse/SparseSegmentSqrtNGrad.java | 2 +- .../SparseSegmentSqrtNWithNumSegments.java | 2 +- .../op/sparse/SparseSegmentSum.java | 2 +- .../SparseSegmentSumWithNumSegments.java | 2 +- .../org/tensorflow/op/sparse/SparseSlice.java | 2 +- .../tensorflow/op/sparse/SparseSliceGrad.java | 2 +- .../tensorflow/op/sparse/SparseSoftmax.java | 2 +- .../op/sparse/SparseSparseMaximum.java | 2 +- .../op/sparse/SparseSparseMinimum.java | 2 +- .../org/tensorflow/op/sparse/SparseSplit.java | 2 +- .../op/sparse/SparseTensorDenseAdd.java | 2 +- .../op/sparse/SparseTensorDenseMatMul.java | 2 +- .../tensorflow/op/sparse/SparseToDense.java | 2 +- .../op/sparse/SparseToSparseSetOperation.java | 2 +- .../sparse/TakeManySparseFromTensorsMap.java | 2 +- .../java/org/tensorflow/op/strings/Join.java | 2 +- .../java/org/tensorflow/op/strings/Lower.java | 2 +- .../org/tensorflow/op/strings/ReduceJoin.java | 2 +- .../tensorflow/op/strings/RegexFullMatch.java | 2 +- .../tensorflow/op/strings/RegexReplace.java | 2 +- .../op/strings/StaticRegexFullMatch.java | 2 +- .../op/strings/StaticRegexReplace.java | 2 +- .../tensorflow/op/strings/StringFormat.java | 2 +- 
.../tensorflow/op/strings/StringLength.java | 2 +- .../tensorflow/op/strings/StringNGrams.java | 2 +- .../tensorflow/op/strings/StringSplit.java | 2 +- .../java/org/tensorflow/op/strings/Strip.java | 2 +- .../org/tensorflow/op/strings/Substr.java | 2 +- .../tensorflow/op/strings/ToHashBucket.java | 2 +- .../op/strings/ToHashBucketFast.java | 2 +- .../op/strings/ToHashBucketStrong.java | 2 +- .../org/tensorflow/op/strings/ToNumber.java | 2 +- .../tensorflow/op/strings/UnicodeDecode.java | 2 +- .../op/strings/UnicodeDecodeWithOffsets.java | 2 +- .../tensorflow/op/strings/UnicodeEncode.java | 2 +- .../tensorflow/op/strings/UnicodeScript.java | 2 +- .../op/strings/UnicodeTranscode.java | 2 +- .../op/strings/UnsortedSegmentJoin.java | 2 +- .../java/org/tensorflow/op/strings/Upper.java | 2 +- .../tensorflow/op/summary/AudioSummary.java | 2 +- .../op/summary/CloseSummaryWriter.java | 2 +- .../op/summary/CreateSummaryDbWriter.java | 2 +- .../op/summary/CreateSummaryFileWriter.java | 2 +- .../op/summary/FlushSummaryWriter.java | 2 +- .../op/summary/HistogramSummary.java | 2 +- .../tensorflow/op/summary/ImageSummary.java | 2 +- .../tensorflow/op/summary/ImportEvent.java | 2 +- .../tensorflow/op/summary/MergeSummary.java | 2 +- .../tensorflow/op/summary/ScalarSummary.java | 2 +- .../op/summary/StatsAggregatorSummary.java | 2 +- .../tensorflow/op/summary/SummaryWriter.java | 2 +- .../tensorflow/op/summary/TensorSummary.java | 2 +- .../op/summary/WriteAudioSummary.java | 2 +- .../op/summary/WriteGraphSummary.java | 2 +- .../op/summary/WriteHistogramSummary.java | 2 +- .../op/summary/WriteImageSummary.java | 2 +- .../op/summary/WriteRawProtoSummary.java | 2 +- .../op/summary/WriteScalarSummary.java | 2 +- .../tensorflow/op/summary/WriteSummary.java | 2 +- .../java/org/tensorflow/op/tpu/AllToAll.java | 2 +- .../tensorflow/op/tpu/CollectivePermute.java | 2 +- .../tensorflow/op/tpu/CompilationResult.java | 2 +- .../op/tpu/CompileSucceededAssert.java | 2 +- .../op/tpu/ConfigureDistributedTPU.java | 2 +- .../op/tpu/ConfigureTPUEmbedding.java | 2 +- .../tensorflow/op/tpu/CrossReplicaSum.java | 2 +- .../op/tpu/EmbeddingActivations.java | 2 +- .../tpu/EnqueueTPUEmbeddingIntegerBatch.java | 2 +- .../EnqueueTPUEmbeddingRaggedTensorBatch.java | 2 +- .../tpu/EnqueueTPUEmbeddingSparseBatch.java | 2 +- .../EnqueueTPUEmbeddingSparseTensorBatch.java | 2 +- .../java/org/tensorflow/op/tpu/Execute.java | 2 +- .../op/tpu/ExecuteAndUpdateVariables.java | 2 +- .../org/tensorflow/op/tpu/InfeedDequeue.java | 2 +- .../tensorflow/op/tpu/InfeedDequeueTuple.java | 2 +- .../org/tensorflow/op/tpu/InfeedEnqueue.java | 2 +- .../tpu/InfeedEnqueuePrelinearizedBuffer.java | 2 +- .../tensorflow/op/tpu/InfeedEnqueueTuple.java | 2 +- .../tpu/LoadTPUEmbeddingADAMParameters.java | 2 +- ...EmbeddingADAMParametersGradAccumDebug.java | 2 +- .../LoadTPUEmbeddingAdadeltaParameters.java | 2 +- ...ddingAdadeltaParametersGradAccumDebug.java | 2 +- .../LoadTPUEmbeddingAdagradParameters.java | 2 +- ...eddingAdagradParametersGradAccumDebug.java | 2 +- ...TPUEmbeddingCenteredRMSPropParameters.java | 2 +- .../tpu/LoadTPUEmbeddingFTRLParameters.java | 2 +- ...EmbeddingFTRLParametersGradAccumDebug.java | 2 +- ...TPUEmbeddingMDLAdagradLightParameters.java | 2 +- .../LoadTPUEmbeddingMomentumParameters.java | 2 +- ...ddingMomentumParametersGradAccumDebug.java | 2 +- ...TPUEmbeddingProximalAdagradParameters.java | 2 +- ...oximalAdagradParametersGradAccumDebug.java | 2 +- ...oadTPUEmbeddingProximalYogiParameters.java | 2 +- 
...gProximalYogiParametersGradAccumDebug.java | 2 +- .../LoadTPUEmbeddingRMSPropParameters.java | 2 +- ...eddingRMSPropParametersGradAccumDebug.java | 2 +- ...ngStochasticGradientDescentParameters.java | 2 +- ...adientDescentParametersGradAccumDebug.java | 2 +- .../tensorflow/op/tpu/OrdinalSelector.java | 2 +- .../org/tensorflow/op/tpu/OutfeedDequeue.java | 2 +- .../op/tpu/OutfeedDequeueTuple.java | 2 +- .../op/tpu/OutfeedDequeueTupleV2.java | 2 +- .../tensorflow/op/tpu/OutfeedDequeueV2.java | 2 +- .../org/tensorflow/op/tpu/OutfeedEnqueue.java | 2 +- .../op/tpu/OutfeedEnqueueTuple.java | 2 +- .../tensorflow/op/tpu/PartitionedInput.java | 2 +- .../tensorflow/op/tpu/PartitionedOutput.java | 2 +- .../org/tensorflow/op/tpu/Prelinearize.java | 2 +- .../tensorflow/op/tpu/PrelinearizeTuple.java | 2 +- .../op/tpu/RecvTPUEmbeddingActivations.java | 2 +- .../tensorflow/op/tpu/ReplicateMetadata.java | 2 +- .../tensorflow/op/tpu/ReplicatedInput.java | 2 +- .../tensorflow/op/tpu/ReplicatedOutput.java | 2 +- .../RetrieveTPUEmbeddingADAMParameters.java | 2 +- ...EmbeddingADAMParametersGradAccumDebug.java | 2 +- ...etrieveTPUEmbeddingAdadeltaParameters.java | 2 +- ...ddingAdadeltaParametersGradAccumDebug.java | 2 +- ...RetrieveTPUEmbeddingAdagradParameters.java | 2 +- ...eddingAdagradParametersGradAccumDebug.java | 2 +- ...TPUEmbeddingCenteredRMSPropParameters.java | 2 +- .../RetrieveTPUEmbeddingFTRLParameters.java | 2 +- ...EmbeddingFTRLParametersGradAccumDebug.java | 2 +- ...TPUEmbeddingMDLAdagradLightParameters.java | 2 +- ...etrieveTPUEmbeddingMomentumParameters.java | 2 +- ...ddingMomentumParametersGradAccumDebug.java | 2 +- ...TPUEmbeddingProximalAdagradParameters.java | 2 +- ...oximalAdagradParametersGradAccumDebug.java | 2 +- ...eveTPUEmbeddingProximalYogiParameters.java | 2 +- ...gProximalYogiParametersGradAccumDebug.java | 2 +- ...RetrieveTPUEmbeddingRMSPropParameters.java | 2 +- ...eddingRMSPropParametersGradAccumDebug.java | 2 +- ...ngStochasticGradientDescentParameters.java | 2 +- ...adientDescentParametersGradAccumDebug.java | 2 +- .../op/tpu/SendTPUEmbeddingGradients.java | 2 +- .../op/tpu/ShutdownDistributedTPU.java | 2 +- .../op/tpu/TPUCompilationResult.java | 2 +- .../op/tpu/TPUEmbeddingActivations.java | 2 +- .../op/tpu/TPUReplicateMetadata.java | 2 +- .../tensorflow/op/tpu/TPUReplicatedInput.java | 2 +- .../op/tpu/TPUReplicatedOutput.java | 2 +- .../op/tpu/TPUReshardVariables.java | 2 +- .../tensorflow/op/tpu/WorkerHeartbeat.java | 2 +- .../op/train/AccumulatorApplyGradient.java | 2 +- .../op/train/AccumulatorNumAccumulated.java | 2 +- .../op/train/AccumulatorSetGlobalStep.java | 2 +- .../op/train/AccumulatorTakeGradient.java | 2 +- .../org/tensorflow/op/train/ApplyAdaMax.java | 2 +- .../tensorflow/op/train/ApplyAdadelta.java | 2 +- .../org/tensorflow/op/train/ApplyAdagrad.java | 2 +- .../tensorflow/op/train/ApplyAdagradDa.java | 2 +- .../tensorflow/op/train/ApplyAdagradV2.java | 2 +- .../org/tensorflow/op/train/ApplyAdam.java | 2 +- .../org/tensorflow/op/train/ApplyAddSign.java | 2 +- .../op/train/ApplyCenteredRmsProp.java | 2 +- .../org/tensorflow/op/train/ApplyFtrl.java | 2 +- .../op/train/ApplyGradientDescent.java | 2 +- .../tensorflow/op/train/ApplyMomentum.java | 2 +- .../tensorflow/op/train/ApplyPowerSign.java | 2 +- .../op/train/ApplyProximalAdagrad.java | 2 +- .../train/ApplyProximalGradientDescent.java | 2 +- .../org/tensorflow/op/train/ApplyRmsProp.java | 2 +- .../org/tensorflow/op/train/BatchMatMul.java | 2 +- .../tensorflow/op/train/ComputeBatchSize.java | 2 +- 
.../op/train/ConditionalAccumulator.java | 2 +- .../op/train/GenerateVocabRemapping.java | 2 +- .../op/train/MergeV2Checkpoints.java | 2 +- .../org/tensorflow/op/train/NegTrain.java | 2 +- .../tensorflow/op/train/PreventGradient.java | 2 +- .../ResourceAccumulatorApplyGradient.java | 2 +- .../ResourceAccumulatorNumAccumulated.java | 2 +- .../ResourceAccumulatorSetGlobalStep.java | 2 +- .../ResourceAccumulatorTakeGradient.java | 2 +- .../op/train/ResourceApplyAdaMax.java | 2 +- .../op/train/ResourceApplyAdadelta.java | 2 +- .../op/train/ResourceApplyAdagrad.java | 2 +- .../op/train/ResourceApplyAdagradDa.java | 2 +- .../op/train/ResourceApplyAdam.java | 2 +- .../train/ResourceApplyAdamWithAmsgrad.java | 2 +- .../op/train/ResourceApplyAddSign.java | 2 +- .../train/ResourceApplyCenteredRmsProp.java | 2 +- .../op/train/ResourceApplyFtrl.java | 2 +- .../train/ResourceApplyGradientDescent.java | 2 +- .../op/train/ResourceApplyKerasMomentum.java | 2 +- .../op/train/ResourceApplyMomentum.java | 2 +- .../op/train/ResourceApplyPowerSign.java | 2 +- .../train/ResourceApplyProximalAdagrad.java | 2 +- .../ResourceApplyProximalGradientDescent.java | 2 +- .../op/train/ResourceApplyRmsProp.java | 2 +- .../train/ResourceConditionalAccumulator.java | 2 +- .../op/train/ResourceSparseApplyAdadelta.java | 2 +- .../op/train/ResourceSparseApplyAdagrad.java | 2 +- .../train/ResourceSparseApplyAdagradDa.java | 2 +- .../train/ResourceSparseApplyAdagradV2.java | 2 +- .../ResourceSparseApplyCenteredRmsProp.java | 2 +- .../op/train/ResourceSparseApplyFtrl.java | 2 +- .../ResourceSparseApplyKerasMomentum.java | 2 +- .../op/train/ResourceSparseApplyMomentum.java | 2 +- .../ResourceSparseApplyProximalAdagrad.java | 2 +- ...rceSparseApplyProximalGradientDescent.java | 2 +- .../op/train/ResourceSparseApplyRmsProp.java | 2 +- .../java/org/tensorflow/op/train/Restore.java | 2 +- .../org/tensorflow/op/train/RestoreSlice.java | 2 +- .../java/org/tensorflow/op/train/Save.java | 2 +- .../org/tensorflow/op/train/SaveSlices.java | 2 +- .../org/tensorflow/op/train/SdcaFprint.java | 2 +- .../tensorflow/op/train/SdcaOptimizer.java | 2 +- .../org/tensorflow/op/train/SdcaShrinkL1.java | 2 +- .../op/train/SparseApplyAdadelta.java | 2 +- .../op/train/SparseApplyAdagrad.java | 2 +- .../op/train/SparseApplyAdagradDa.java | 2 +- .../op/train/SparseApplyCenteredRmsProp.java | 2 +- .../tensorflow/op/train/SparseApplyFtrl.java | 2 +- .../op/train/SparseApplyMomentum.java | 2 +- .../op/train/SparseApplyProximalAdagrad.java | 2 +- .../SparseApplyProximalGradientDescent.java | 2 +- .../op/train/SparseApplyRmsProp.java | 2 +- .../org/tensorflow/op/train/TileGrad.java | 2 +- .../tensorflow/op/xla/BroadcastHelper.java | 2 +- .../org/tensorflow/op/xla/ClusterOutput.java | 2 +- .../gen/java/org/tensorflow/op/xla/Conv.java | 2 +- .../org/tensorflow/op/xla/Dequantize.java | 2 +- .../gen/java/org/tensorflow/op/xla/Dot.java | 2 +- .../org/tensorflow/op/xla/DynamicSlice.java | 2 +- .../tensorflow/op/xla/DynamicUpdateSlice.java | 2 +- .../java/org/tensorflow/op/xla/Einsum.java | 2 +- .../java/org/tensorflow/op/xla/Gather.java | 2 +- .../org/tensorflow/op/xla/KeyValueSort.java | 2 +- .../gen/java/org/tensorflow/op/xla/Pad.java | 2 +- .../gen/java/org/tensorflow/op/xla/Recv.java | 2 +- .../java/org/tensorflow/op/xla/ReplicaId.java | 2 +- .../org/tensorflow/op/xla/SelfAdjointEig.java | 2 +- .../gen/java/org/tensorflow/op/xla/Send.java | 2 +- .../java/org/tensorflow/op/xla/Sharding.java | 2 +- .../gen/java/org/tensorflow/op/xla/Sort.java | 2 +- 
.../gen/java/org/tensorflow/op/xla/Svd.java | 2 +- .../tensorflow/op/xla/XlaRecvFromHost.java | 2 +- .../org/tensorflow/op/xla/XlaSendToHost.java | 2 +- .../org/tensorflow/op/xla/XlaSetBound.java | 2 +- .../op/generator/ClassGenerator.java | 364 +++++++++--------- 1259 files changed, 1447 insertions(+), 1433 deletions(-) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/AudioSpectrogram.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/AudioSpectrogram.java index def97e77e47..953453ab63b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/AudioSpectrogram.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/AudioSpectrogram.java @@ -84,7 +84,7 @@ private AudioSpectrogram(Operation operation) { ) public static AudioSpectrogram create(Scope scope, Operand input, Long windowSize, Long stride, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("AudioSpectrogram", scope.makeOpName("AudioSpectrogram")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AudioSpectrogram")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("window_size", windowSize); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/DecodeWav.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/DecodeWav.java index 2d69fa63df2..7c52e765255 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/DecodeWav.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/DecodeWav.java @@ -75,7 +75,7 @@ private DecodeWav(Operation operation) { describeByClass = true ) public static DecodeWav create(Scope scope, Operand contents, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeWav", scope.makeOpName("DecodeWav")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeWav")); opBuilder.addInput(contents.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/EncodeWav.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/EncodeWav.java index 4eca7ec84fd..33007a9dd56 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/EncodeWav.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/EncodeWav.java @@ -67,7 +67,7 @@ private EncodeWav(Operation operation) { describeByClass = true ) public static EncodeWav create(Scope scope, Operand audio, Operand sampleRate) { - OperationBuilder opBuilder = scope.env().opBuilder("EncodeWav", scope.makeOpName("EncodeWav")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EncodeWav")); opBuilder.addInput(audio.asOutput()); opBuilder.addInput(sampleRate.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/Mfcc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/Mfcc.java index 22ae99ad3c8..7ed896394aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/Mfcc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/Mfcc.java @@ -69,7 +69,7 @@ private Mfcc(Operation operation) { ) public static Mfcc create(Scope scope, Operand spectrogram, Operand sampleRate, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Mfcc", scope.makeOpName("Mfcc")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Mfcc")); opBuilder.addInput(spectrogram.asOutput()); opBuilder.addInput(sampleRate.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java index 4cfde6b7c7a..130d6f5179b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java @@ -79,7 +79,7 @@ private BitwiseAnd(Operation operation) { describeByClass = true ) public static BitwiseAnd create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("BitwiseAnd", scope.makeOpName("BitwiseAnd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BitwiseAnd")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java index d8d0eb67edf..ce511138fd1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java @@ -79,7 +79,7 @@ private BitwiseOr(Operation operation) { describeByClass = true ) public static BitwiseOr create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = 
scope.env().opBuilder("BitwiseOr", scope.makeOpName("BitwiseOr")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BitwiseOr")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java index 6749b9427dd..cd843a187b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java @@ -79,7 +79,7 @@ private BitwiseXor(Operation operation) { describeByClass = true ) public static BitwiseXor create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("BitwiseXor", scope.makeOpName("BitwiseXor")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BitwiseXor")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java index f3e64ead598..5b6c115c237 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java @@ -99,7 +99,7 @@ private Invert(Operation operation) { describeByClass = true ) public static Invert create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Invert", scope.makeOpName("Invert")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Invert")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Invert<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java index ad6bfc18f5c..6bc1d9e6db5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java @@ -90,7 +90,7 @@ private LeftShift(Operation operation) { describeByClass = true ) public static LeftShift create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("LeftShift", scope.makeOpName("LeftShift")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LeftShift")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java index 5660c65ea14..54e2d7ad3c6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java @@ -92,7 +92,7 @@ private RightShift(Operation operation) { describeByClass = true ) public static RightShift create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RightShift", scope.makeOpName("RightShift")); + 
OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RightShift")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java index b8cf65899ca..420db07958b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java @@ -62,7 +62,7 @@ private KMC2ChainInitialization(Operation operation) { ) public static KMC2ChainInitialization create(Scope scope, Operand distances, Operand seed) { - OperationBuilder opBuilder = scope.env().opBuilder("KMC2ChainInitialization", scope.makeOpName("KMC2ChainInitialization")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("KMC2ChainInitialization")); opBuilder.addInput(distances.asOutput()); opBuilder.addInput(seed.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java index 79ebec34735..4bb144c3006 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java @@ -66,7 +66,7 @@ private KmeansPlusPlusInitialization(Operation operation) { ) public static KmeansPlusPlusInitialization create(Scope scope, Operand points, Operand numToSample, Operand seed, Operand numRetriesPerSample) { - OperationBuilder opBuilder = scope.env().opBuilder("KmeansPlusPlusInitialization", scope.makeOpName("KmeansPlusPlusInitialization")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("KmeansPlusPlusInitialization")); opBuilder.addInput(points.asOutput()); opBuilder.addInput(numToSample.asOutput()); opBuilder.addInput(seed.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java index f6eef64971b..e3c2b791e16 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java @@ -71,7 +71,7 @@ private AllReduce(Operation operation) { public static AllReduce create(Scope scope, Operand input, Long groupSize, Long groupKey, Long instanceKey, String mergeOp, String finalOp, List subdivOffsets, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectiveReduce", scope.makeOpName("AllReduce")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AllReduce")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("group_size", groupSize); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java index 94999f5884f..922b66c56d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java @@ -65,7 +65,7 @@ private BroadcastRecv(Operation operation) { ) public static BroadcastRecv create(Scope scope, Class T, Long groupSize, Long groupKey, Long instanceKey, Shape shape, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectiveBcastRecv", scope.makeOpName("BroadcastRecv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BroadcastRecv")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("T", Operands.toDataType(T)); opBuilder.setAttr("group_size", groupSize); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java index f2290763758..784246929ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java @@ -64,7 +64,7 @@ private BroadcastSend(Operation operation) { ) public static BroadcastSend create(Scope scope, Operand input, Long groupSize, Long groupKey, Long instanceKey, Shape shape, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectiveBcastSend", scope.makeOpName("BroadcastSend")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BroadcastSend")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("group_size", groupSize); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java index 8d8f7cc3cbb..3acd91096b2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java @@ -64,7 +64,7 @@ private Gather(Operation operation) { ) public static Gather create(Scope scope, Operand input, Long groupSize, Long groupKey, Long instanceKey, Shape shape, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectiveGather", scope.makeOpName("Gather")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Gather")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("group_size", groupSize); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java index 346216e935b..abc7d8728d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java @@ -67,7 +67,7 @@ private GatherV2(Operation operation) { public static GatherV2 create(Scope scope, Operand input, Operand groupSize, Operand groupKey, Operand instanceKey, Iterable> orderingToken, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectiveGatherV2", scope.makeOpName("GatherV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GatherV2")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(groupSize.asOutput()); opBuilder.addInput(groupKey.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java index ea031c4e91d..62d913030c1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java @@ -68,7 +68,7 @@ private Reduce(Operation operation) { public static Reduce create(Scope scope, Operand input, Long groupSize, Long groupKey, Long instanceKey, String mergeOp, String finalOp, List subdivOffsets, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectiveReduce", scope.makeOpName("Reduce")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Reduce")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("group_size", groupSize); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java index f6b7321ac66..42912daf5f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java @@ -70,7 +70,7 @@ public static ReduceV2 create(Scope scope, Operand inp Operand groupSize, Operand groupKey, Operand instanceKey, Iterable> orderingToken, String mergeOp, String finalOp, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectiveReduceV2", scope.makeOpName("ReduceV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReduceV2")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(groupSize.asOutput()); opBuilder.addInput(groupKey.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Abort.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Abort.java index acc328c114b..4fc67af11fc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Abort.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Abort.java @@ -52,7 +52,7 @@ private Abort(Operation operation) { describeByClass = true ) public static Abort create(Scope scope, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Abort", scope.makeOpName("Abort")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Abort")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/All.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/All.java index 73f3fa1af9f..b0f51547809 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/All.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/All.java @@ -65,7 +65,7 @@ private All(Operation operation) { ) public static All create(Scope scope, Operand input, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("All", scope.makeOpName("All")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("All")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Any.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Any.java index 63f4c45dcd9..5f8eb0cfdbd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Any.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Any.java @@ -65,7 +65,7 @@ private Any(Operation operation) { ) public static Any create(Scope scope, Operand input, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Any", scope.makeOpName("Any")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Any")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssertThat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssertThat.java index f0aa019e759..e3cb2b4dcfe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssertThat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssertThat.java @@ -57,7 +57,7 @@ private AssertThat(Operation operation) { ) public static AssertThat create(Scope scope, Operand condition, Iterable> data, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Assert", scope.makeOpName("AssertThat")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AssertThat")); opBuilder.addInput(condition.asOutput()); opBuilder.addInputList(Operands.asOutputs(data)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java index c20b3d4f0c9..aed03cfb7a1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java @@ -64,7 +64,7 @@ private Assign(Operation operation) { ) public static Assign create(Scope scope, Operand ref, Operand value, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Assign", scope.makeOpName("Assign")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Assign")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java index 1b5e0e89d40..427cea1db9a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java @@ -64,7 +64,7 @@ private AssignAdd(Operation operation) { ) public static AssignAdd create(Scope scope, Operand ref, Operand value, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("AssignAdd", scope.makeOpName("AssignAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AssignAdd")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAddVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAddVariableOp.java index eabf77fddae..aabcd9e756b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAddVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAddVariableOp.java @@ -55,7 +55,7 @@ private AssignAddVariableOp(Operation operation) { ) public static AssignAddVariableOp create(Scope scope, Operand resource, Operand value) { - OperationBuilder opBuilder = scope.env().opBuilder("AssignAddVariableOp", scope.makeOpName("AssignAddVariableOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AssignAddVariableOp")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java index 97483964362..68569e36fc5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java @@ -64,7 +64,7 @@ private AssignSub(Operation operation) { ) public static AssignSub create(Scope scope, Operand ref, Operand value, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("AssignSub", scope.makeOpName("AssignSub")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AssignSub")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSubVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSubVariableOp.java index 37fb16f97ce..6501d4c070f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSubVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSubVariableOp.java @@ -55,7 +55,7 @@ private AssignSubVariableOp(Operation operation) { ) public static AssignSubVariableOp create(Scope scope, Operand resource, Operand value) { - OperationBuilder opBuilder = scope.env().opBuilder("AssignSubVariableOp", scope.makeOpName("AssignSubVariableOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AssignSubVariableOp")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java index 47423220ecb..f186649e329 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java @@ -55,7 +55,7 @@ private AssignVariableOp(Operation operation) { ) public static AssignVariableOp create(Scope scope, Operand resource, Operand value) { - OperationBuilder opBuilder = scope.env().opBuilder("AssignVariableOp", scope.makeOpName("AssignVariableOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AssignVariableOp")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Barrier.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Barrier.java index b18d8ad1b1c..4cbd38c0b15 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Barrier.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Barrier.java @@ -70,7 +70,7 @@ private Barrier(Operation operation) { ) public static Barrier create(Scope scope, List> componentTypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Barrier", scope.makeOpName("Barrier")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Barrier")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("component_types", Operands.toDataTypes(componentTypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierClose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierClose.java index 09ad79d51bd..ec1e6abb01d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierClose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierClose.java @@ -58,7 +58,7 @@ private BarrierClose(Operation operation) { describeByClass = true ) public static BarrierClose create(Scope scope, Operand handle, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("BarrierClose", scope.makeOpName("BarrierClose")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BarrierClose")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierIncompleteSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierIncompleteSize.java index 7c30abc15f1..e08c9d8479d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierIncompleteSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierIncompleteSize.java @@ -57,7 +57,7 @@ private BarrierIncompleteSize(Operation operation) { describeByClass = true ) public static BarrierIncompleteSize create(Scope scope, Operand handle) { - OperationBuilder opBuilder = scope.env().opBuilder("BarrierIncompleteSize", scope.makeOpName("BarrierIncompleteSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BarrierIncompleteSize")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); return new BarrierIncompleteSize(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierInsertMany.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierInsertMany.java index f321bf67018..5569ea03f02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierInsertMany.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierInsertMany.java @@ -61,7 +61,7 @@ private BarrierInsertMany(Operation operation) { ) public static BarrierInsertMany create(Scope scope, Operand handle, Operand keys, Operand values, Long componentIndex) { - OperationBuilder opBuilder = scope.env().opBuilder("BarrierInsertMany", scope.makeOpName("BarrierInsertMany")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BarrierInsertMany")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(keys.asOutput()); opBuilder.addInput(values.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierReadySize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierReadySize.java index 6f296a79f28..8be08271082 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierReadySize.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierReadySize.java @@ -57,7 +57,7 @@ private BarrierReadySize(Operation operation) { describeByClass = true ) public static BarrierReadySize create(Scope scope, Operand handle) { - OperationBuilder opBuilder = scope.env().opBuilder("BarrierReadySize", scope.makeOpName("BarrierReadySize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BarrierReadySize")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); return new BarrierReadySize(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierTakeMany.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierTakeMany.java index 69499753169..2b480131f06 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierTakeMany.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierTakeMany.java @@ -83,7 +83,7 @@ private BarrierTakeMany(Operation operation) { public static BarrierTakeMany create(Scope scope, Operand handle, Operand numElements, List> componentTypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("BarrierTakeMany", scope.makeOpName("BarrierTakeMany")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BarrierTakeMany")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(numElements.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Batch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Batch.java index 066c44a18b4..7b155fcbcb2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Batch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Batch.java @@ -105,7 +105,7 @@ private Batch(Operation operation) { ) public static Batch create(Scope scope, Iterable> inTensors, Long numBatchThreads, Long maxBatchSize, Long batchTimeoutMicros, Long gradTimeoutMicros, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Batch", scope.makeOpName("Batch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Batch")); opBuilder.addInputList(Operands.asOutputs(inTensors)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_batch_threads", numBatchThreads); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java index 1e706ba8f9e..e908a1bb72a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java @@ -76,7 +76,7 @@ private BatchToSpace(Operation operation) { ) public static BatchToSpace create(Scope scope, Operand input, Operand crops, Long blockSize) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchToSpace", scope.makeOpName("BatchToSpace")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchToSpace")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(crops.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java index f8ffc060c71..5f5067ebebe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java @@ -160,7 +160,7 @@ private BatchToSpaceNd(Operation operation) { ) public static BatchToSpaceNd create(Scope scope, Operand input, Operand blockShape, Operand crops) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchToSpaceND", scope.makeOpName("BatchToSpaceNd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchToSpaceNd")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(blockShape.asOutput()); opBuilder.addInput(crops.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java index 88536734ce8..cb2cffd1dd5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java @@ -120,7 +120,7 @@ private Bitcast(Operation operation) { ) public static Bitcast create(Scope scope, Operand input, Class type) { - OperationBuilder opBuilder = scope.env().opBuilder("Bitcast", scope.makeOpName("Bitcast")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Bitcast")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("type", Operands.toDataType(type)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java index 8aa3a3a7151..d081f39c50f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java @@ -63,7 +63,7 @@ private BroadcastDynamicShape(Operation operation) { ) 
public static BroadcastDynamicShape create(Scope scope, Operand s0, Operand s1) { - OperationBuilder opBuilder = scope.env().opBuilder("BroadcastArgs", scope.makeOpName("BroadcastDynamicShape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BroadcastDynamicShape")); opBuilder.addInput(s0.asOutput()); opBuilder.addInput(s1.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java index 0da00a87d58..6264a26ccbf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java @@ -63,7 +63,7 @@ private BroadcastGradientArgs(Operation operation) { ) public static BroadcastGradientArgs create(Scope scope, Operand s0, Operand s1) { - OperationBuilder opBuilder = scope.env().opBuilder("BroadcastGradientArgs", scope.makeOpName("BroadcastGradientArgs")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BroadcastGradientArgs")); opBuilder.addInput(s0.asOutput()); opBuilder.addInput(s1.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java index 670ac6cc074..2e8dd8c5668 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java @@ -90,7 +90,7 @@ private BroadcastTo(Operation operation) { ) public static BroadcastTo create(Scope scope, Operand input, Operand shape) { - OperationBuilder opBuilder = scope.env().opBuilder("BroadcastTo", scope.makeOpName("BroadcastTo")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BroadcastTo")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(shape.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bucketize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bucketize.java index 603a8624fd0..cfe1db8d428 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bucketize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bucketize.java @@ -69,7 +69,7 @@ private Bucketize(Operation operation) { ) public static Bucketize create(Scope scope, Operand input, List boundaries) { - OperationBuilder opBuilder = scope.env().opBuilder("Bucketize", scope.makeOpName("Bucketize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Bucketize")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); float[] boundariesArray = new float[boundaries.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java index 5d5a0309f73..8e26025c2d7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java @@ -68,7 +68,7 @@ private 
ClipByValue(Operation operation) { ) public static ClipByValue create(Scope scope, Operand t, Operand clipValueMin, Operand clipValueMax) { - OperationBuilder opBuilder = scope.env().opBuilder("ClipByValue", scope.makeOpName("ClipByValue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ClipByValue")); opBuilder.addInput(t.asOutput()); opBuilder.addInput(clipValueMin.asOutput()); opBuilder.addInput(clipValueMax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java index f6351158b4c..84df176b3aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java @@ -67,7 +67,7 @@ private CollectiveGather(Operation operation) { ) public static CollectiveGather create(Scope scope, Operand input, Long groupSize, Long groupKey, Long instanceKey, Shape shape, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectiveGather", scope.makeOpName("CollectiveGather")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CollectiveGather")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("group_size", groupSize); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java index 5e7a33542b4..3244d20c649 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java @@ -65,7 +65,7 @@ private Concat(Operation operation) { ) public static Concat create(Scope scope, Iterable> values, Operand axis) { - OperationBuilder opBuilder = scope.env().opBuilder("ConcatV2", scope.makeOpName("Concat")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Concat")); opBuilder.addInputList(Operands.asOutputs(values)); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConsumeMutexLock.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConsumeMutexLock.java index f32dd58b6bc..a48e91b6ebd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConsumeMutexLock.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConsumeMutexLock.java @@ -57,7 +57,7 @@ private ConsumeMutexLock(Operation operation) { describeByClass = true ) public static ConsumeMutexLock create(Scope scope, Operand mutexLock) { - OperationBuilder opBuilder = scope.env().opBuilder("ConsumeMutexLock", scope.makeOpName("ConsumeMutexLock")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ConsumeMutexLock")); opBuilder.addInput(mutexLock.asOutput()); opBuilder = scope.apply(opBuilder); return new ConsumeMutexLock(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ControlTrigger.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ControlTrigger.java index a5c5037acf0..a83912d608d 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ControlTrigger.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ControlTrigger.java @@ -49,7 +49,7 @@ private ControlTrigger(Operation operation) { describeByClass = true ) public static ControlTrigger create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("ControlTrigger", scope.makeOpName("ControlTrigger")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ControlTrigger")); opBuilder = scope.apply(opBuilder); return new ControlTrigger(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java index d038d538036..f31cc35dc83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java @@ -68,7 +68,7 @@ private Copy(Operation operation) { ) public static Copy create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Copy", scope.makeOpName("Copy")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Copy")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java index 03bb0ca7ee2..6e7495f17f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java @@ -66,7 +66,7 @@ private CopyHost(Operation operation) { ) public static CopyHost create(Scope scope, Operand input, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("CopyHost", scope.makeOpName("CopyHost")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CopyHost")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java index ffa579e9b04..17db07035b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java @@ -61,7 +61,7 @@ private CountUpTo(Operation operation) { describeByClass = true ) public static CountUpTo create(Scope scope, Operand ref, Long limit) { - OperationBuilder opBuilder = scope.env().opBuilder("CountUpTo", scope.makeOpName("CountUpTo")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CountUpTo")); opBuilder.addInput(ref.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("limit", limit); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java index dbf9a464425..d0c32ff2601 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java @@ -124,7 +124,7 @@ private DecodeProto(Operation operation) { ) public static DecodeProto create(Scope scope, Operand bytes, String messageType, List fieldNames, List> outputTypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeProtoV2", scope.makeOpName("DecodeProto")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeProto")); opBuilder.addInput(bytes.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("message_type", messageType); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java index a5da8c99bbd..b5662c7ae3d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java @@ -59,7 +59,7 @@ private DeepCopy(Operation operation) { describeByClass = true ) public static DeepCopy create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("DeepCopy", scope.makeOpName("DeepCopy")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeepCopy")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new DeepCopy<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeleteSessionTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeleteSessionTensor.java index 6526e917d48..bd971e462e2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeleteSessionTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeleteSessionTensor.java @@ -51,7 +51,7 @@ private DeleteSessionTensor(Operation operation) { describeByClass = true ) public static DeleteSessionTensor create(Scope 
scope, Operand handle) { - OperationBuilder opBuilder = scope.env().opBuilder("DeleteSessionTensor", scope.makeOpName("DeleteSessionTensor")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeleteSessionTensor")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); return new DeleteSessionTensor(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyResourceOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyResourceOp.java index 68c9ec3f0fb..d421e61e929 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyResourceOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyResourceOp.java @@ -55,7 +55,7 @@ private DestroyResourceOp(Operation operation) { ) public static DestroyResourceOp create(Scope scope, Operand resource, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DestroyResourceOp", scope.makeOpName("DestroyResourceOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DestroyResourceOp")); opBuilder.addInput(resource.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java index 556966198fb..cd6a18100fc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java @@ -68,7 +68,7 @@ private DestroyTemporaryVariable(Operation operation) { ) public static DestroyTemporaryVariable create(Scope scope, Operand ref, String varName) { - OperationBuilder opBuilder = scope.env().opBuilder("DestroyTemporaryVariable", scope.makeOpName("DestroyTemporaryVariable")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DestroyTemporaryVariable")); opBuilder.addInput(ref.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("var_name", varName); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java index f3eb69df194..4e947cb254a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java @@ -59,7 +59,7 @@ private DeviceIndex(Operation operation) { describeByClass = true ) public static DeviceIndex create(Scope scope, List deviceNames) { - OperationBuilder opBuilder = scope.env().opBuilder("DeviceIndex", scope.makeOpName("DeviceIndex")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeviceIndex")); opBuilder = scope.apply(opBuilder); String[] deviceNamesArray = new String[deviceNames.size()]; for (int i = 0 ; i < deviceNamesArray.length ; i++) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java index 23b589d33da..32e5713b0b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java @@ -54,7 +54,7 @@ private DummyMemoryCache(Operation operation) { describeByClass = true ) public static DummyMemoryCache create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("DummyMemoryCache", scope.makeOpName("DummyMemoryCache")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DummyMemoryCache")); opBuilder = scope.apply(opBuilder); return new DummyMemoryCache(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java index c3048d289b8..277f44372db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java @@ -100,7 +100,7 @@ private DynamicPartition(Operation operation) { ) public static DynamicPartition create(Scope scope, Operand data, Operand partitions, Long numPartitions) { - OperationBuilder opBuilder = scope.env().opBuilder("DynamicPartition", scope.makeOpName("DynamicPartition")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DynamicPartition")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(partitions.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java index 61263d0e65c..c314c0b2440 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java @@ -116,7 +116,7 @@ private DynamicStitch(Operation operation) { ) public static DynamicStitch create(Scope scope, Iterable> indices, Iterable> data) { - OperationBuilder opBuilder = scope.env().opBuilder("DynamicStitch", scope.makeOpName("DynamicStitch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DynamicStitch")); opBuilder.addInputList(Operands.asOutputs(indices)); opBuilder.addInputList(Operands.asOutputs(data)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java index 7d233a705ea..a302596aea3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java @@ -78,7 +78,7 @@ public static EditDistance create(Scope scope, Operand hypothesisIndices, Operand hypothesisValues, Operand hypothesisShape, Operand truthIndices, Operand truthValues, Operand truthShape, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("EditDistance", scope.makeOpName("EditDistance")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EditDistance")); opBuilder.addInput(hypothesisIndices.asOutput()); opBuilder.addInput(hypothesisValues.asOutput()); opBuilder.addInput(hypothesisShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java index e49c9330262..c59b5088a57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java @@ -65,7 +65,7 @@ private Empty(Operation operation) { ) public static Empty create(Scope scope, Operand shape, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Empty", scope.makeOpName("Empty")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Empty")); opBuilder.addInput(shape.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorList.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorList.java index 974c8c4a4fe..f7ca73ecba5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorList.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorList.java @@ -70,7 +70,7 @@ private EmptyTensorList(Operation operation) { public static EmptyTensorList create(Scope scope, Operand elementShape, Operand maxNumElements, Class elementDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("EmptyTensorList", scope.makeOpName("EmptyTensorList")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EmptyTensorList")); opBuilder.addInput(elementShape.asOutput()); opBuilder.addInput(maxNumElements.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorMap.java index 3602dd5b457..336ec9ae30c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorMap.java @@ -57,7 +57,7 @@ private EmptyTensorMap(Operation operation) { describeByClass = true ) public static EmptyTensorMap create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("EmptyTensorMap", scope.makeOpName("EmptyTensorMap")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EmptyTensorMap")); opBuilder = scope.apply(opBuilder); return new EmptyTensorMap(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EncodeProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EncodeProto.java index d8d4cd12ef8..b640e15d503 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EncodeProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EncodeProto.java @@ -109,7 +109,7 @@ private EncodeProto(Operation operation) { ) public static EncodeProto 
create(Scope scope, Operand sizes, Iterable> values, List fieldNames, String messageType, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("EncodeProto", scope.makeOpName("EncodeProto")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EncodeProto")); opBuilder.addInput(sizes.asOutput()); opBuilder.addInputList(Operands.asOutputs(values)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java index 113f88c1238..dbe6942d163 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java @@ -64,7 +64,7 @@ private EnsureShape(Operation operation) { ) public static EnsureShape create(Scope scope, Operand input, Shape shape) { - OperationBuilder opBuilder = scope.env().opBuilder("EnsureShape", scope.makeOpName("EnsureShape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EnsureShape")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java index f65f5cff7e2..a7612acd1c5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java @@ -65,7 +65,7 @@ private Enter(Operation operation) { ) public static Enter create(Scope scope, Operand data, String frameName, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Enter", scope.makeOpName("Enter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Enter")); opBuilder.addInput(data.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("frame_name", frameName); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java index 2d2c1499c29..26ecefa0494 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java @@ -58,7 +58,7 @@ private Exit(Operation operation) { describeByClass = true ) public static Exit create(Scope scope, Operand data) { - OperationBuilder opBuilder = scope.env().opBuilder("Exit", scope.makeOpName("Exit")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Exit")); opBuilder.addInput(data.asOutput()); opBuilder = scope.apply(opBuilder); return new Exit<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java index f36d595669c..36442b72024 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java @@ -87,7 +87,7 @@ private ExpandDims(Operation operation) { ) public static ExpandDims create(Scope scope, Operand input, Operand axis) { - OperationBuilder opBuilder = scope.env().opBuilder("ExpandDims", scope.makeOpName("ExpandDims")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ExpandDims")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java index eb6f8b4bc34..ea431071365 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java @@ -70,7 +70,7 @@ private ExtractVolumePatches(Operation operation) { ) public static ExtractVolumePatches create(Scope scope, Operand input, List ksizes, List strides, String padding) { - OperationBuilder opBuilder = scope.env().opBuilder("ExtractVolumePatches", scope.makeOpName("ExtractVolumePatches")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ExtractVolumePatches")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); long[] ksizesArray = new long[ksizes.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java index 1aacfb1bd75..1405a16d0be 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java @@ -82,7 +82,7 @@ private Fill(Operation operation) { ) public static Fill create(Scope scope, Operand dims, Operand value) { - OperationBuilder opBuilder = scope.env().opBuilder("Fill", 
scope.makeOpName("Fill")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Fill")); opBuilder.addInput(dims.asOutput()); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fingerprint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fingerprint.java index 0e4dfcb233d..337dc5b85c6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fingerprint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fingerprint.java @@ -84,7 +84,7 @@ private Fingerprint(Operation operation) { ) public static Fingerprint create(Scope scope, Operand data, Operand method) { - OperationBuilder opBuilder = scope.env().opBuilder("Fingerprint", scope.makeOpName("Fingerprint")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Fingerprint")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(method.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java index cd090bfee84..d2b330568f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java @@ -88,7 +88,7 @@ private Gather(Operation operation) { ) public static Gather create(Scope scope, Operand params, Operand indices, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("GatherV2", scope.makeOpName("Gather")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Gather")); opBuilder.addInput(params.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(axis.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java index c61a86dcd5f..f57c56e41cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java @@ -151,7 +151,7 @@ private GatherNd(Operation operation) { ) public static GatherNd create(Scope scope, Operand params, Operand indices) { - OperationBuilder opBuilder = scope.env().opBuilder("GatherNd", scope.makeOpName("GatherNd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GatherNd")); opBuilder.addInput(params.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionHandle.java index be00df796d7..876072b7222 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionHandle.java @@ -57,7 +57,7 @@ private GetSessionHandle(Operation operation) { describeByClass = true ) public static GetSessionHandle create(Scope scope, Operand value) { - OperationBuilder opBuilder = scope.env().opBuilder("GetSessionHandleV2", scope.makeOpName("GetSessionHandle")); + OperationBuilder 
opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GetSessionHandle")); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); return new GetSessionHandle(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java index c4b4d9603d1..319ac2b0ac9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java @@ -63,7 +63,7 @@ private GetSessionTensor(Operation operation) { ) public static GetSessionTensor create(Scope scope, Operand handle, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("GetSessionTensor", scope.makeOpName("GetSessionTensor")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GetSessionTensor")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java index e03e5079309..5b600d66af7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java @@ -63,7 +63,7 @@ private GuaranteeConst(Operation operation) { describeByClass = true ) public static GuaranteeConst create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("GuaranteeConst", scope.makeOpName("GuaranteeConst")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GuaranteeConst")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new GuaranteeConst<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HashTable.java index bd0b930f591..5063ab9d291 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HashTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HashTable.java @@ -66,7 +66,7 @@ private HashTable(Operation operation) { ) public static HashTable create(Scope scope, Class keyDtype, Class valueDtype, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("HashTableV2", scope.makeOpName("HashTable")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("HashTable")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("key_dtype", Operands.toDataType(keyDtype)); opBuilder.setAttr("value_dtype", Operands.toDataType(valueDtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java index b6ac22168d1..c7417683e00 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java @@ -82,7 +82,7 @@ private HistogramFixedWidth(Operation operation) { ) public static HistogramFixedWidth create(Scope scope, Operand values, Operand valueRange, Operand nbins, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("HistogramFixedWidth", scope.makeOpName("HistogramFixedWidth")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("HistogramFixedWidth")); opBuilder.addInput(values.asOutput()); opBuilder.addInput(valueRange.asOutput()); opBuilder.addInput(nbins.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java index aaed04c8743..9e0f7be5a22 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java @@ -59,7 +59,7 @@ private Identity(Operation operation) { describeByClass = true ) public static Identity create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("Identity", scope.makeOpName("Identity")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Identity")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Identity<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IdentityN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IdentityN.java index dc658b90eb8..d70d42698a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IdentityN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IdentityN.java @@ -76,7 +76,7 @@ private IdentityN(Operation operation) { describeByClass = true ) public static IdentityN create(Scope scope, Iterable> input) { - OperationBuilder opBuilder = scope.env().opBuilder("IdentityN", scope.makeOpName("IdentityN")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IdentityN")); opBuilder.addInputList(Operands.asOutputs(input)); opBuilder = scope.apply(opBuilder); return new IdentityN(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java index 7dd2c608b9f..8c49af16350 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java @@ -66,7 +66,7 @@ private 
ImmutableConst(Operation operation) { ) public static ImmutableConst create(Scope scope, Class dtype, Shape shape, String memoryRegionName) { - OperationBuilder opBuilder = scope.env().opBuilder("ImmutableConst", scope.makeOpName("ImmutableConst")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ImmutableConst")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTable.java index 20ef82fc0b6..23c86e955c5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTable.java @@ -54,7 +54,7 @@ private InitializeTable(Operation operation) { ) public static InitializeTable create(Scope scope, Operand tableHandle, Operand keys, Operand values) { - OperationBuilder opBuilder = scope.env().opBuilder("InitializeTableV2", scope.makeOpName("InitializeTable")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InitializeTable")); opBuilder.addInput(tableHandle.asOutput()); opBuilder.addInput(keys.asOutput()); opBuilder.addInput(values.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java index eb3cf4e2c03..ef81b6bb3d2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java @@ -70,7 +70,7 @@ private InitializeTableFromTextFile(Operation operation) { public static InitializeTableFromTextFile create(Scope scope, Operand tableHandle, Operand filename, Long keyIndex, Long valueIndex, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("InitializeTableFromTextFileV2", scope.makeOpName("InitializeTableFromTextFile")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InitializeTableFromTextFile")); opBuilder.addInput(tableHandle.asOutput()); opBuilder.addInput(filename.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java index c7cc00f87cf..fe81153c9db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java @@ -66,7 +66,7 @@ private InplaceAdd(Operation operation) { ) public static InplaceAdd create(Scope scope, Operand x, Operand i, Operand v) { - OperationBuilder opBuilder = scope.env().opBuilder("InplaceAdd", scope.makeOpName("InplaceAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InplaceAdd")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(i.asOutput()); opBuilder.addInput(v.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java index f84e7ece5e7..9ed3b0011af 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java @@ -67,7 +67,7 @@ private InplaceSub(Operation operation) { ) public static InplaceSub create(Scope scope, Operand x, Operand i, Operand v) { - OperationBuilder opBuilder = scope.env().opBuilder("InplaceSub", scope.makeOpName("InplaceSub")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InplaceSub")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(i.asOutput()); opBuilder.addInput(v.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java index c7543a8b780..69fca893ec2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java @@ -66,7 +66,7 @@ private InplaceUpdate(Operation operation) { ) public static InplaceUpdate create(Scope scope, Operand x, Operand i, Operand v) { - OperationBuilder opBuilder = scope.env().opBuilder("InplaceUpdate", scope.makeOpName("InplaceUpdate")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InplaceUpdate")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(i.asOutput()); opBuilder.addInput(v.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IsVariableInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IsVariableInitialized.java index e18d8d61178..9b2e1217dd9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IsVariableInitialized.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IsVariableInitialized.java @@ -58,7 +58,7 @@ private IsVariableInitialized(Operation operation) { describeByClass = true ) public static 
IsVariableInitialized create(Scope scope, Operand ref) { - OperationBuilder opBuilder = scope.env().opBuilder("IsVariableInitialized", scope.makeOpName("IsVariableInitialized")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IsVariableInitialized")); opBuilder.addInput(ref.asOutput()); opBuilder = scope.apply(opBuilder); return new IsVariableInitialized(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/KthOrderStatistic.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/KthOrderStatistic.java index b67996f7a77..a576cc8541e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/KthOrderStatistic.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/KthOrderStatistic.java @@ -71,7 +71,7 @@ private KthOrderStatistic(Operation operation) { describeByClass = true ) public static KthOrderStatistic create(Scope scope, Operand input, Long k) { - OperationBuilder opBuilder = scope.env().opBuilder("KthOrderStatistic", scope.makeOpName("KthOrderStatistic")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("KthOrderStatistic")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("k", k); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java index 14764b62003..8b852ac10e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java @@ -67,7 +67,7 @@ private LinSpace(Operation operation) { ) public static LinSpace create(Scope scope, Operand start, Operand stop, Operand num) { - OperationBuilder opBuilder = scope.env().opBuilder("LinSpace", scope.makeOpName("LinSpace")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LinSpace")); opBuilder.addInput(start.asOutput()); opBuilder.addInput(stop.asOutput()); opBuilder.addInput(num.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java index 524064d6b52..7d662618b0f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java @@ -69,7 +69,7 @@ private LookupTableExport(Operation operation) { ) public static LookupTableExport create(Scope scope, Operand tableHandle, Class Tkeys, Class Tvalues) { - OperationBuilder opBuilder = scope.env().opBuilder("LookupTableExportV2", scope.makeOpName("LookupTableExport")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LookupTableExport")); opBuilder.addInput(tableHandle.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("Tkeys", Operands.toDataType(Tkeys)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java index 6e4f6f597ba..16c773922c2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java @@ -67,7 +67,7 @@ private LookupTableFind(Operation operation) { public static LookupTableFind create(Scope scope, Operand tableHandle, Operand keys, Operand defaultValue) { - OperationBuilder opBuilder = scope.env().opBuilder("LookupTableFindV2", scope.makeOpName("LookupTableFind")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LookupTableFind")); opBuilder.addInput(tableHandle.asOutput()); opBuilder.addInput(keys.asOutput()); opBuilder.addInput(defaultValue.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableImport.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableImport.java index 3a670d9de68..1c76d9056dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableImport.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableImport.java @@ -56,7 +56,7 @@ private LookupTableImport(Operation operation) { ) public static LookupTableImport create(Scope scope, Operand tableHandle, Operand keys, Operand values) { - OperationBuilder opBuilder = scope.env().opBuilder("LookupTableImportV2", scope.makeOpName("LookupTableImport")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LookupTableImport")); opBuilder.addInput(tableHandle.asOutput()); opBuilder.addInput(keys.asOutput()); opBuilder.addInput(values.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableInsert.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableInsert.java index c235064e1bf..e7b94e49d9d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableInsert.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableInsert.java @@ -56,7 +56,7 @@ private LookupTableInsert(Operation operation) { ) public static LookupTableInsert create(Scope scope, Operand tableHandle, Operand keys, Operand values) { - OperationBuilder opBuilder = scope.env().opBuilder("LookupTableInsertV2", scope.makeOpName("LookupTableInsert")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LookupTableInsert")); opBuilder.addInput(tableHandle.asOutput()); opBuilder.addInput(keys.asOutput()); opBuilder.addInput(values.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java index 5d3ca1e8507..8c9c0201259 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java @@ -53,7 +53,7 @@ private LookupTableRemove(Operation operation) { ) public static LookupTableRemove create(Scope scope, Operand tableHandle, Operand keys) { - OperationBuilder opBuilder = scope.env().opBuilder("LookupTableRemoveV2", scope.makeOpName("LookupTableRemove")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LookupTableRemove")); opBuilder.addInput(tableHandle.asOutput()); opBuilder.addInput(keys.asOutput()); opBuilder = scope.apply(opBuilder); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableSize.java
index eb41130753f..e0cd2a31ed3 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableSize.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableSize.java
@@ -57,7 +57,7 @@ private LookupTableSize(Operation operation) {
       describeByClass = true
   )
   public static LookupTableSize create(Scope scope, Operand tableHandle) {
-    OperationBuilder opBuilder = scope.env().opBuilder("LookupTableSizeV2", scope.makeOpName("LookupTableSize"));
+    OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LookupTableSize"));
     opBuilder.addInput(tableHandle.asOutput());
     opBuilder = scope.apply(opBuilder);
     return new LookupTableSize(opBuilder.build());
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LoopCond.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LoopCond.java
index 0c6321bafcc..f5cbb248d29 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LoopCond.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LoopCond.java
@@ -58,7 +58,7 @@ private LoopCond(Operation operation) {
       describeByClass = true
   )
   public static LoopCond create(Scope scope, Operand input) {
-    OperationBuilder opBuilder = scope.env().opBuilder("LoopCond", scope.makeOpName("LoopCond"));
+    OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoopCond"));
     opBuilder.addInput(input.asOutput());
     opBuilder = scope.apply(opBuilder);
     return new LoopCond(opBuilder.build());
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java
index 9f16a1bcc93..14f14ecb6a5 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java
@@ -78,7 +78,7 @@ private LowerBound(Operation operation) {
   )
   public static LowerBound create(Scope scope, Operand sortedInputs,
       Operand values, Class outType) {
-    OperationBuilder opBuilder = scope.env().opBuilder("LowerBound", scope.makeOpName("LowerBound"));
+    OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LowerBound"));
     opBuilder.addInput(sortedInputs.asOutput());
     opBuilder.addInput(values.asOutput());
     opBuilder = scope.apply(opBuilder);
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MakeUnique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MakeUnique.java
index 3d0e3be8a16..ad2274a2f8d 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MakeUnique.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MakeUnique.java
@@ -60,7 +60,7 @@ private MakeUnique(Operation operation) {
       describeByClass = true
   )
   public static MakeUnique create(Scope scope, Operand input) {
-    OperationBuilder opBuilder = scope.env().opBuilder("MakeUnique", scope.makeOpName("MakeUnique"));
+    OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MakeUnique"));
     opBuilder.addInput(input.asOutput());
     opBuilder = scope.apply(opBuilder);
     return new MakeUnique(opBuilder.build());
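
Every hunk in this run of generated op classes makes the same substitution: the op-name string literal previously passed to opBuilder(...) is replaced by the class's OP_NAME field, so the registered op type is taken from a single declared constant. The sketch below is a simplified, hypothetical stand-in for one of these generated wrappers (it is not one of the files in this patch), assuming OP_NAME is the public String constant each generated class exposes for its native op name; it uses only the builder calls visible in the hunks above.

import org.tensorflow.Operand;
import org.tensorflow.Operation;
import org.tensorflow.OperationBuilder;
import org.tensorflow.op.Scope;
import org.tensorflow.types.TFloat32;

/** Simplified illustration of the generated-op pattern, modeled on MakeUnique. */
public final class MakeUniqueSketch {
  /** Registered name of the underlying native op, referenced instead of a repeated literal. */
  public static final String OP_NAME = "MakeUnique";

  public static Operation create(Scope scope, Operand<TFloat32> input) {
    // Same builder sequence as the generated classes: OP_NAME supplies the op type,
    // makeOpName supplies the default node name, and scope.apply attaches control deps.
    OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MakeUnique"));
    opBuilder.addInput(input.asOutput());
    opBuilder = scope.apply(opBuilder);
    return opBuilder.build();
  }

  private MakeUniqueSketch() {
  }
}

Declaring the name once per class presumably also lets callers and tooling refer to the op type through the constant rather than duplicating the string wherever the builder is invoked.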
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapClear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapClear.java index ea67839c337..0708a1f9515 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapClear.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapClear.java @@ -54,7 +54,7 @@ private MapClear(Operation operation) { ) public static MapClear create(Scope scope, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MapClear", scope.makeOpName("MapClear")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MapClear")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapIncompleteSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapIncompleteSize.java index 0a533d39d69..c6e0947f969 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapIncompleteSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapIncompleteSize.java @@ -61,7 +61,7 @@ private MapIncompleteSize(Operation operation) { ) public static MapIncompleteSize create(Scope scope, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MapIncompleteSize", scope.makeOpName("MapIncompleteSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MapIncompleteSize")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapPeek.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapPeek.java index b6a9896d220..06f322f2b8c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapPeek.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapPeek.java @@ -71,7 +71,7 @@ private MapPeek(Operation operation) { ) public static MapPeek create(Scope scope, Operand key, Operand indices, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MapPeek", scope.makeOpName("MapPeek")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MapPeek")); opBuilder.addInput(key.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapSize.java index 874a1e86618..d1f01833a66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapSize.java @@ -61,7 +61,7 @@ private MapSize(Operation operation) { ) public static MapSize create(Scope scope, List> dtypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MapSize", scope.makeOpName("MapSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MapSize")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java index 1b6835d287a..3da1ee55f5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java @@ -61,7 +61,7 @@ private MapStage(Operation operation) { ) public static MapStage create(Scope scope, Operand key, Operand indices, Iterable> values, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MapStage", scope.makeOpName("MapStage")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MapStage")); opBuilder.addInput(key.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInputList(Operands.asOutputs(values)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstage.java index c3c9bc34cdb..9e5290c695d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstage.java @@ -71,7 +71,7 @@ private MapUnstage(Operation operation) { ) public static MapUnstage create(Scope scope, Operand key, Operand indices, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MapUnstage", scope.makeOpName("MapUnstage")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MapUnstage")); opBuilder.addInput(key.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstageNoKey.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstageNoKey.java index aab2555eb9b..9d5dafbb305 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstageNoKey.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstageNoKey.java @@ -72,7 +72,7 @@ private MapUnstageNoKey(Operation operation) { ) public static MapUnstageNoKey create(Scope scope, Operand indices, List> dtypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MapUnstageNoKey", scope.makeOpName("MapUnstageNoKey")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MapUnstageNoKey")); opBuilder.addInput(indices.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java index 4109afa098f..8d56d0ff5fc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java @@ -67,7 +67,7 @@ private Max(Operation operation) { ) public static Max create(Scope scope, Operand input, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Max", scope.makeOpName("Max")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Max")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java index e8ce85de4cc..a6bc464d73c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java @@ -68,7 +68,7 @@ private Merge(Operation operation) { describeByClass = true ) public static Merge create(Scope scope, Iterable> inputs) { - OperationBuilder opBuilder = scope.env().opBuilder("Merge", scope.makeOpName("Merge")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Merge")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); return new Merge<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java index b438709b808..660cfc923e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java @@ -67,7 +67,7 @@ private Min(Operation operation) { ) public static Min create(Scope scope, Operand input, Operand axis, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Min", scope.makeOpName("Min")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Min")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java index 793b4a133fc..a8a74e51839 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java @@ -89,7 +89,7 @@ private MirrorPad(Operation operation) { ) public static MirrorPad create(Scope scope, Operand input, Operand paddings, String mode) { - OperationBuilder opBuilder = scope.env().opBuilder("MirrorPad", scope.makeOpName("MirrorPad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MirrorPad")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(paddings.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java index 6e87eaa45ac..cb8ffb0e203 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java @@ -76,7 +76,7 @@ private MirrorPadGrad(Operation operation) { ) public static MirrorPadGrad create(Scope scope, Operand input, Operand paddings, String mode) { - OperationBuilder opBuilder = scope.env().opBuilder("MirrorPadGrad", scope.makeOpName("MirrorPadGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MirrorPadGrad")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(paddings.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MlirPassthroughOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MlirPassthroughOp.java index 648bd01bf02..fef5ae4f0e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MlirPassthroughOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MlirPassthroughOp.java @@ -94,7 +94,7 @@ private MlirPassthroughOp(Operation operation) { ) public static MlirPassthroughOp create(Scope scope, Iterable> inputs, String mlirModule, List> Toutputs) { - OperationBuilder opBuilder = scope.env().opBuilder("MlirPassthroughOp", scope.makeOpName("MlirPassthroughOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MlirPassthroughOp")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("mlir_module", mlirModule); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableDenseHashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableDenseHashTable.java index b94faeb89c2..b1fae2e37ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableDenseHashTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableDenseHashTable.java @@ -71,7 +71,7 @@ private 
MutableDenseHashTable(Operation operation) { ) public static MutableDenseHashTable create(Scope scope, Operand emptyKey, Operand deletedKey, Class valueDtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MutableDenseHashTableV2", scope.makeOpName("MutableDenseHashTable")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MutableDenseHashTable")); opBuilder.addInput(emptyKey.asOutput()); opBuilder.addInput(deletedKey.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTable.java index 3d37d0a4746..432aa2b40fc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTable.java @@ -66,7 +66,7 @@ private MutableHashTable(Operation operation) { ) public static MutableHashTable create(Scope scope, Class keyDtype, Class valueDtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MutableHashTableV2", scope.makeOpName("MutableHashTable")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MutableHashTable")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("key_dtype", Operands.toDataType(keyDtype)); opBuilder.setAttr("value_dtype", Operands.toDataType(valueDtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTableOfTensors.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTableOfTensors.java index 60afac5f7a2..ba82e946164 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTableOfTensors.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTableOfTensors.java @@ -67,7 +67,7 @@ private MutableHashTableOfTensors(Operation operation) { ) public static MutableHashTableOfTensors create(Scope scope, Class keyDtype, Class valueDtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MutableHashTableOfTensorsV2", scope.makeOpName("MutableHashTableOfTensors")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MutableHashTableOfTensors")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("key_dtype", Operands.toDataType(keyDtype)); opBuilder.setAttr("value_dtype", Operands.toDataType(valueDtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Mutex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Mutex.java index 36d7656478c..1082c127774 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Mutex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Mutex.java @@ -57,7 +57,7 @@ private Mutex(Operation operation) { describeByClass = true ) public static Mutex create(Scope scope, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MutexV2", scope.makeOpName("Mutex")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Mutex")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutexLock.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutexLock.java index d3e8ae93897..dc24a517b7a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutexLock.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutexLock.java @@ -90,7 +90,7 @@ private MutexLock(Operation operation) { describeByClass = true ) public static MutexLock create(Scope scope, Operand mutex) { - OperationBuilder opBuilder = scope.env().opBuilder("MutexLock", scope.makeOpName("MutexLock")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MutexLock")); opBuilder.addInput(mutex.asOutput()); opBuilder = scope.apply(opBuilder); return new MutexLock(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java index 20cdf1e184b..ac392938fdf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java @@ -74,7 +74,7 @@ private NcclAllReduce(Operation operation) { ) public static NcclAllReduce create(Scope scope, Operand input, String reduction, Long numDevices, String sharedName) { - OperationBuilder opBuilder = scope.env().opBuilder("NcclAllReduce", scope.makeOpName("NcclAllReduce")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NcclAllReduce")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("reduction", reduction); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java index 7b3240b9119..e7c25c28823 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java @@ -69,7 +69,7 @@ private NcclBroadcast(Operation operation) { ) public static NcclBroadcast create(Scope scope, Operand input, Shape shape) { - OperationBuilder opBuilder = scope.env().opBuilder("NcclBroadcast", scope.makeOpName("NcclBroadcast")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NcclBroadcast")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java index cda500edda0..a17b786d775 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java @@ -69,7 +69,7 @@ private NcclReduce(Operation operation) { ) public static NcclReduce create(Scope scope, Iterable> input, String reduction) { - OperationBuilder 
opBuilder = scope.env().opBuilder("NcclReduce", scope.makeOpName("NcclReduce")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NcclReduce")); opBuilder.addInputList(Operands.asOutputs(input)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("reduction", reduction); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java index 1685840610b..04da6e134c5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java @@ -59,7 +59,7 @@ private NextIteration(Operation operation) { describeByClass = true ) public static NextIteration create(Scope scope, Operand data) { - OperationBuilder opBuilder = scope.env().opBuilder("NextIteration", scope.makeOpName("NextIteration")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NextIteration")); opBuilder.addInput(data.asOutput()); opBuilder = scope.apply(opBuilder); return new NextIteration<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NoOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NoOp.java index 741e9397e69..b5934d56a0d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NoOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NoOp.java @@ -48,7 +48,7 @@ private NoOp(Operation operation) { describeByClass = true ) public static NoOp create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("NoOp", scope.makeOpName("NoOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NoOp")); opBuilder = scope.apply(opBuilder); return new NoOp(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java index 0cb7522274b..5792ff7118d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java @@ -140,7 +140,7 @@ private OneHot(Operation operation) { ) public static OneHot create(Scope scope, Operand indices, Operand depth, Operand onValue, Operand offValue, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("OneHot", scope.makeOpName("OneHot")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OneHot")); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(depth.asOutput()); opBuilder.addInput(onValue.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java index bc94f4bdd88..45edcd7a34f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java @@ -59,7 +59,7 @@ private OnesLike(Operation operation) { describeByClass = true ) public static OnesLike create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("OnesLike", scope.makeOpName("OnesLike")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OnesLike")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new OnesLike<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapClear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapClear.java index 1d487caaa1f..eba41545c22 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapClear.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapClear.java @@ -54,7 +54,7 @@ private OrderedMapClear(Operation operation) { ) public static OrderedMapClear create(Scope scope, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("OrderedMapClear", scope.makeOpName("OrderedMapClear")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OrderedMapClear")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapIncompleteSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapIncompleteSize.java index 81b4b4d8e00..c5d30ab0923 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapIncompleteSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapIncompleteSize.java @@ -61,7 +61,7 @@ private OrderedMapIncompleteSize(Operation operation) { ) public static OrderedMapIncompleteSize create(Scope scope, List> dtypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("OrderedMapIncompleteSize", scope.makeOpName("OrderedMapIncompleteSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OrderedMapIncompleteSize")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapPeek.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapPeek.java index 3073a97acbf..a6f8795b3cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapPeek.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapPeek.java @@ -72,7 +72,7 @@ private OrderedMapPeek(Operation operation) { ) public static OrderedMapPeek create(Scope scope, Operand key, Operand indices, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("OrderedMapPeek", scope.makeOpName("OrderedMapPeek")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OrderedMapPeek")); opBuilder.addInput(key.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapSize.java index 97cb7da14ca..41a0bad3554 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapSize.java @@ -61,7 +61,7 @@ private OrderedMapSize(Operation operation) { ) public static OrderedMapSize create(Scope scope, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("OrderedMapSize", scope.makeOpName("OrderedMapSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OrderedMapSize")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java index b4df966b71f..a5b84ec8e64 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java @@ -62,7 +62,7 @@ private OrderedMapStage(Operation operation) { ) public static OrderedMapStage create(Scope scope, Operand key, Operand indices, Iterable> values, List> dtypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("OrderedMapStage", scope.makeOpName("OrderedMapStage")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OrderedMapStage")); opBuilder.addInput(key.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInputList(Operands.asOutputs(values)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstage.java index 761f9e3c705..fcf11ce38bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstage.java @@ -71,7 +71,7 @@ private OrderedMapUnstage(Operation operation) { ) public static OrderedMapUnstage create(Scope scope, Operand key, Operand indices, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("OrderedMapUnstage", scope.makeOpName("OrderedMapUnstage")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OrderedMapUnstage")); opBuilder.addInput(key.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstageNoKey.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstageNoKey.java index 952704844f8..9656eaaa302 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstageNoKey.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstageNoKey.java @@ -72,7 +72,7 @@ private OrderedMapUnstageNoKey(Operation operation) { ) public static OrderedMapUnstageNoKey create(Scope scope, Operand indices, List> dtypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("OrderedMapUnstageNoKey", scope.makeOpName("OrderedMapUnstageNoKey")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OrderedMapUnstageNoKey")); opBuilder.addInput(indices.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java index 7dde13cf5ee..5590bf808e8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java @@ -83,7 +83,7 @@ private Pad(Operation operation) { ) public static Pad create(Scope scope, Operand input, Operand paddings, Operand constantValues) { - OperationBuilder opBuilder = scope.env().opBuilder("PadV2", scope.makeOpName("Pad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Pad")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(paddings.asOutput()); opBuilder.addInput(constantValues.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java index c82ed8c6d3d..5ecabefbf81 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java @@ -78,7 +78,7 @@ private ParallelConcat(Operation operation) { ) public static ParallelConcat create(Scope scope, Iterable> values, Shape shape) { - OperationBuilder opBuilder = scope.env().opBuilder("ParallelConcat", scope.makeOpName("ParallelConcat")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ParallelConcat")); opBuilder.addInputList(Operands.asOutputs(values)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java index 7cdaf7186dc..8ca88c679ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java @@ -115,7 +115,7 @@ private ParallelDynamicStitch(Operation operation) { ) public static ParallelDynamicStitch create(Scope scope, Iterable> indices, Iterable> data) { - OperationBuilder opBuilder = scope.env().opBuilder("ParallelDynamicStitch", scope.makeOpName("ParallelDynamicStitch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ParallelDynamicStitch")); opBuilder.addInputList(Operands.asOutputs(indices)); opBuilder.addInputList(Operands.asOutputs(data)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java index ce982c9fec3..084c7e60ca5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java @@ 
-66,7 +66,7 @@ private Placeholder(Operation operation) { ) public static Placeholder create(Scope scope, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Placeholder", scope.makeOpName("Placeholder")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Placeholder")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java index aaf92d4b7d1..6c4f5ea8cf8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java @@ -62,7 +62,7 @@ private PlaceholderWithDefault(Operation operation) { ) public static PlaceholderWithDefault create(Scope scope, Operand input, Shape shape) { - OperationBuilder opBuilder = scope.env().opBuilder("PlaceholderWithDefault", scope.makeOpName("PlaceholderWithDefault")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PlaceholderWithDefault")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Print.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Print.java index e453803464a..7984706a37f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Print.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Print.java @@ -53,7 +53,7 @@ private Print(Operation operation) { describeByClass = true ) public static Print create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("PrintV2", scope.makeOpName("Print")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Print")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java index 4644a025a92..3f9aadce98b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java @@ -68,7 +68,7 @@ private Prod(Operation operation) { ) public static Prod create(Scope scope, Operand input, Operand axis, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Prod", scope.makeOpName("Prod")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Prod")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java index 2b449817340..962d31d0c66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java @@ -71,7 +71,7 @@ private QuantizedReshape(Operation operation) { ) public static QuantizedReshape create(Scope scope, Operand tensor, Operand shape, Operand inputMin, Operand inputMax) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedReshape", scope.makeOpName("QuantizedReshape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedReshape")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(inputMin.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java index ea9bafe214d..9e957e384db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java @@ -71,7 +71,7 @@ private Range(Operation operation) { ) public static Range create(Scope scope, Operand start, Operand limit, Operand delta) { - OperationBuilder opBuilder = scope.env().opBuilder("Range", scope.makeOpName("Range")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Range")); opBuilder.addInput(start.asOutput()); opBuilder.addInput(limit.asOutput()); opBuilder.addInput(delta.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rank.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rank.java index c32961a10b4..1a1643d3a66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rank.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rank.java @@ -67,7 +67,7 @@ private Rank(Operation operation) { describeByClass = true ) public static Rank create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("Rank", scope.makeOpName("Rank")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Rank")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Rank(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java index d7d329f37f3..0839d68b8dc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java @@ -67,7 +67,7 @@ private ReadVariableOp(Operation operation) { ) public static ReadVariableOp create(Scope scope, Operand resource, Class dtype) { - OperationBuilder opBuilder = 
scope.env().opBuilder("ReadVariableOp", scope.makeOpName("ReadVariableOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReadVariableOp")); opBuilder.addInput(resource.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java index dfc29a0cdc7..5c4792ea1f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java @@ -65,7 +65,7 @@ private Recv(Operation operation) { public static Recv create(Scope scope, Class tensorType, String tensorName, String sendDevice, Long sendDeviceIncarnation, String recvDevice, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Recv", scope.makeOpName("Recv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Recv")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("tensor_type", Operands.toDataType(tensorType)); opBuilder.setAttr("tensor_name", tensorName); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAll.java index b684ba9d6be..93750a4f95a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAll.java @@ -65,7 +65,7 @@ private ReduceAll(Operation operation) { ) public static ReduceAll create(Scope scope, Operand input, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("All", scope.makeOpName("ReduceAll")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReduceAll")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAny.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAny.java index aa446a167f3..cf53d3a2071 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAny.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAny.java @@ -65,7 +65,7 @@ private ReduceAny(Operation operation) { ) public static ReduceAny create(Scope scope, Operand input, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Any", scope.makeOpName("ReduceAny")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReduceAny")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java index 5f561d2e5dd..66090f91d32 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java @@ -67,7 +67,7 @@ private ReduceMax(Operation operation) { ) public static ReduceMax create(Scope scope, Operand input, Operand axis, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Max", scope.makeOpName("ReduceMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReduceMax")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java index 92ec0761a02..91d1d46b9b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java @@ -67,7 +67,7 @@ private ReduceMin(Operation operation) { ) public static ReduceMin create(Scope scope, Operand input, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Min", scope.makeOpName("ReduceMin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReduceMin")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java index 3d7b22863b7..cde85542fe4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java @@ -68,7 +68,7 @@ private ReduceProd(Operation operation) { ) public static ReduceProd create(Scope scope, Operand input, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Prod", scope.makeOpName("ReduceProd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReduceProd")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java index 4928188d4cd..d39d5549be3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java @@ -68,7 +68,7 @@ private ReduceSum(Operation operation) { ) public static ReduceSum create(Scope scope, Operand input, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Sum", scope.makeOpName("ReduceSum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReduceSum")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java index 7f399c80d94..fcd9dbce973 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java @@ -64,7 +64,7 @@ private RefEnter(Operation operation) { ) public static RefEnter create(Scope scope, Operand data, String frameName, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RefEnter", scope.makeOpName("RefEnter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RefEnter")); opBuilder.addInput(data.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("frame_name", frameName); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java index 82e201ae0d9..10f17674b87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java @@ -58,7 +58,7 @@ private RefExit(Operation operation) { describeByClass = true ) public static RefExit create(Scope scope, Operand data) { - OperationBuilder opBuilder = scope.env().opBuilder("RefExit", scope.makeOpName("RefExit")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RefExit")); opBuilder.addInput(data.asOutput()); opBuilder = scope.apply(opBuilder); return new RefExit<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java index 35360fe8d89..b5c66e12811 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java @@ -57,7 +57,7 @@ private RefIdentity(Operation operation) { describeByClass = true ) public static RefIdentity create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("RefIdentity", scope.makeOpName("RefIdentity")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RefIdentity")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new RefIdentity<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java index 5e671132995..6fb364c6d6f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java @@ -66,7 +66,7 @@ private RefMerge(Operation operation) { describeByClass = true ) public static RefMerge create(Scope scope, Iterable> inputs) { - OperationBuilder opBuilder = scope.env().opBuilder("RefMerge", scope.makeOpName("RefMerge")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RefMerge")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); return new RefMerge<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java index 76bb5185e08..3276aa0be6b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java @@ -59,7 +59,7 @@ private RefNextIteration(Operation operation) { describeByClass = true ) public static RefNextIteration create(Scope scope, Operand data) { - OperationBuilder opBuilder = 
scope.env().opBuilder("RefNextIteration", scope.makeOpName("RefNextIteration")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RefNextIteration")); opBuilder.addInput(data.asOutput()); opBuilder = scope.apply(opBuilder); return new RefNextIteration<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java index a030084acaa..67bdc1e2644 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java @@ -63,7 +63,7 @@ private RefSelect(Operation operation) { ) public static RefSelect create(Scope scope, Operand index, Iterable> inputs) { - OperationBuilder opBuilder = scope.env().opBuilder("RefSelect", scope.makeOpName("RefSelect")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RefSelect")); opBuilder.addInput(index.asOutput()); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java index a94d172fdd6..1ed139d0ae6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java @@ -68,7 +68,7 @@ private RefSwitch(Operation operation) { ) public static RefSwitch create(Scope scope, Operand data, Operand pred) { - OperationBuilder opBuilder = scope.env().opBuilder("RefSwitch", scope.makeOpName("RefSwitch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RefSwitch")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(pred.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java index f59c0efb442..9d2348c8426 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java @@ -116,7 +116,7 @@ private Reshape(Operation operation) { ) public static Reshape create(Scope scope, Operand tensor, Operand shape) { - OperationBuilder opBuilder = scope.env().opBuilder("Reshape", scope.makeOpName("Reshape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Reshape")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(shape.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java index 4da07c2c8f5..7453e07c77e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java @@ -65,7 +65,7 @@ private ResourceCountUpTo(Operation operation) { ) public static ResourceCountUpTo create(Scope scope, Operand resource, Long limit, Class T) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceCountUpTo", 
scope.makeOpName("ResourceCountUpTo")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceCountUpTo")); opBuilder.addInput(resource.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("limit", limit); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java index 274b8c0216b..683b025e2ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java @@ -78,7 +78,7 @@ private ResourceGather(Operation operation) { public static ResourceGather create(Scope scope, Operand resource, Operand indices, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceGather", scope.makeOpName("ResourceGather")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceGather")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java index 1669af384fa..679b7d18472 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java @@ -64,7 +64,7 @@ private ResourceGatherNd(Operation operation) { ) public static ResourceGatherNd create(Scope scope, Operand resource, Operand indices, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceGatherNd", scope.makeOpName("ResourceGatherNd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceGatherNd")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterAdd.java index 5b2c431bc55..255192d11c7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterAdd.java @@ -72,7 +72,7 @@ private ResourceScatterAdd(Operation operation) { ) public static ResourceScatterAdd create(Scope scope, Operand resource, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterAdd", scope.makeOpName("ResourceScatterAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterAdd")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterDiv.java index 9d6115a34d0..b804e2dea4a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterDiv.java @@ 
-72,7 +72,7 @@ private ResourceScatterDiv(Operation operation) { ) public static ResourceScatterDiv create(Scope scope, Operand resource, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterDiv", scope.makeOpName("ResourceScatterDiv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterDiv")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMax.java index 8b1912ee994..fe4b43b8d21 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMax.java @@ -72,7 +72,7 @@ private ResourceScatterMax(Operation operation) { ) public static ResourceScatterMax create(Scope scope, Operand resource, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterMax", scope.makeOpName("ResourceScatterMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterMax")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMin.java index 6e51c6fa2a5..57c888412a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMin.java @@ -72,7 +72,7 @@ private ResourceScatterMin(Operation operation) { ) public static ResourceScatterMin create(Scope scope, Operand resource, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterMin", scope.makeOpName("ResourceScatterMin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterMin")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMul.java index 8891aaf555a..8bb5007d450 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMul.java @@ -72,7 +72,7 @@ private ResourceScatterMul(Operation operation) { ) public static ResourceScatterMul create(Scope scope, Operand resource, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterMul", scope.makeOpName("ResourceScatterMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterMul")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java index f846f0105df..b53c3fc4adb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java @@ -84,7 +84,7 @@ private ResourceScatterNdAdd(Operation operation) { ) public static ResourceScatterNdAdd create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterNdAdd", scope.makeOpName("ResourceScatterNdAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterNdAdd")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java index 60604e40901..d15a9286cba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java @@ -58,7 +58,7 @@ private ResourceScatterNdMax(Operation operation) { ) public static ResourceScatterNdMax create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterNdMax", scope.makeOpName("ResourceScatterNdMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterNdMax")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java index 3f227154962..1c5f45b9a18 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java @@ -58,7 +58,7 @@ private ResourceScatterNdMin(Operation operation) { ) public static ResourceScatterNdMin create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterNdMin", scope.makeOpName("ResourceScatterNdMin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterNdMin")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java index 0b8b305105f..c7ee744bc80 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java @@ -84,7 +84,7 @@ private ResourceScatterNdSub(Operation operation) { ) public static ResourceScatterNdSub create(Scope scope, Operand ref, Operand indices, Operand updates, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterNdSub", scope.makeOpName("ResourceScatterNdSub")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterNdSub")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java index 73bad12a3d4..036b760d68f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java @@ -85,7 +85,7 @@ private ResourceScatterNdUpdate(Operation operation) { ) public static ResourceScatterNdUpdate create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterNdUpdate", scope.makeOpName("ResourceScatterNdUpdate")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterNdUpdate")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterSub.java index 58bffab6d01..f747d5d01a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterSub.java @@ -72,7 +72,7 @@ private ResourceScatterSub(Operation operation) { ) public static ResourceScatterSub create(Scope scope, Operand resource, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterSub", scope.makeOpName("ResourceScatterSub")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterSub")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterUpdate.java index fe524c62ed3..46c6911cb82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterUpdate.java @@ -66,7 +66,7 @@ private ResourceScatterUpdate(Operation operation) { ) public static ResourceScatterUpdate create(Scope scope, Operand resource, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceScatterUpdate", scope.makeOpName("ResourceScatterUpdate")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceScatterUpdate")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceStridedSliceAssign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceStridedSliceAssign.java 
index 3751916d6e3..8b7842cd447 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceStridedSliceAssign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceStridedSliceAssign.java @@ -65,7 +65,7 @@ private ResourceStridedSliceAssign(Operation operation) { public static ResourceStridedSliceAssign create(Scope scope, Operand ref, Operand begin, Operand end, Operand strides, Operand value, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceStridedSliceAssign", scope.makeOpName("ResourceStridedSliceAssign")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceStridedSliceAssign")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(begin.asOutput()); opBuilder.addInput(end.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java index c5227f60573..5b40a326bf0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java @@ -105,7 +105,7 @@ private Reverse(Operation operation) { ) public static Reverse create(Scope scope, Operand tensor, Operand axis) { - OperationBuilder opBuilder = scope.env().opBuilder("ReverseV2", scope.makeOpName("Reverse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Reverse")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java index f90bce00b11..e28d178d8f3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java @@ -113,7 +113,7 @@ private ReverseSequence(Operation operation) { ) public static ReverseSequence create(Scope scope, Operand input, Operand seqLengths, Long seqDim, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ReverseSequence", scope.makeOpName("ReverseSequence")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReverseSequence")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(seqLengths.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java index 46c1105525b..f278c3544e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java @@ -87,7 +87,7 @@ private Roll(Operation operation) { ) public static Roll create(Scope scope, Operand input, Operand shift, Operand axis) { - OperationBuilder opBuilder = scope.env().opBuilder("Roll", scope.makeOpName("Roll")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Roll")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(shift.asOutput()); opBuilder.addInput(axis.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java index fb99ba72e15..0f5b6446f10 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java @@ -83,7 +83,7 @@ private ScatterAdd(Operation operation) { ) public static ScatterAdd create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterAdd", scope.makeOpName("ScatterAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterAdd")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java index e4a2aee18e3..a4ce6cb7081 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java @@ -80,7 +80,7 @@ private ScatterDiv(Operation operation) { ) public static ScatterDiv create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterDiv", scope.makeOpName("ScatterDiv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterDiv")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java index 0151d58d5e4..d2c7d88e595 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java @@ -82,7 +82,7 @@ private ScatterMax(Operation operation) { ) public static ScatterMax create(Scope scope, Operand ref, Operand indices, Operand updates, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterMax", scope.makeOpName("ScatterMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterMax")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java index f8de1ab981f..573c22dd90b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java @@ -82,7 +82,7 @@ private ScatterMin(Operation operation) { ) public static ScatterMin create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterMin", scope.makeOpName("ScatterMin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterMin")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java index e3c60debba5..db7eb4911aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java @@ -80,7 +80,7 @@ private ScatterMul(Operation operation) { ) public static ScatterMul create(Scope scope, Operand ref, Operand indices, Operand updates, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterMul", scope.makeOpName("ScatterMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterMul")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java index 0590363853f..f56f759952b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java @@ -132,7 +132,7 @@ private ScatterNd(Operation operation) { ) public static ScatterNd create(Scope scope, Operand indices, Operand updates, Operand shape) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterNd", scope.makeOpName("ScatterNd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterNd")); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); opBuilder.addInput(shape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java index c84b4db7ed3..dffda533284 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java @@ -92,7 +92,7 @@ private ScatterNdAdd(Operation operation) { ) public static ScatterNdAdd create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterNdAdd", scope.makeOpName("ScatterNdAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterNdAdd")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java index bfe7e8470db..09ee60144cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java @@ -64,7 +64,7 @@ private ScatterNdMax(Operation operation) { ) public static ScatterNdMax create(Scope scope, Operand ref, Operand indices, Operand updates, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterNdMax", scope.makeOpName("ScatterNdMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterNdMax")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java index 98663edd311..149b18ce7d8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java @@ -64,7 +64,7 @@ private ScatterNdMin(Operation operation) { ) public static ScatterNdMin create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterNdMin", scope.makeOpName("ScatterNdMin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterNdMin")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java index e9d2f0586e0..5e1c718bd6a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java @@ -92,7 +92,7 @@ private ScatterNdNonAliasingAdd(Operation operation) { ) public static ScatterNdNonAliasingAdd create(Scope scope, Operand input, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterNdNonAliasingAdd", scope.makeOpName("ScatterNdNonAliasingAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterNdNonAliasingAdd")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java index 23e1d53e85d..cd5edc87ba1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java @@ -93,7 +93,7 @@ private ScatterNdSub(Operation operation) { ) public static ScatterNdSub create(Scope scope, Operand ref, Operand indices, Operand updates, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterNdSub", scope.makeOpName("ScatterNdSub")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterNdSub")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java index 1d93219a76d..b40a4240311 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java @@ -92,7 +92,7 @@ private ScatterNdUpdate(Operation operation) { ) public static ScatterNdUpdate create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterNdUpdate", scope.makeOpName("ScatterNdUpdate")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterNdUpdate")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java index 8dcec648560..af59a6d916c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java @@ -82,7 +82,7 @@ private ScatterSub(Operation operation) { ) public static ScatterSub create(Scope scope, Operand ref, Operand indices, Operand updates, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterSub", scope.makeOpName("ScatterSub")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterSub")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java index 9e6e6fa106b..0ab648a0619 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java @@ -85,7 +85,7 @@ private ScatterUpdate(Operation operation) { ) public static ScatterUpdate create(Scope scope, Operand ref, Operand indices, Operand updates, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScatterUpdate", scope.makeOpName("ScatterUpdate")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScatterUpdate")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java index 20e13a1131b..51697076712 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java @@ -63,7 +63,7 @@ private Select(Operation operation) { ) public static Select create(Scope scope, Operand condition, Operand t, Operand e) { - OperationBuilder opBuilder = scope.env().opBuilder("SelectV2", scope.makeOpName("Select")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Select")); opBuilder.addInput(condition.asOutput()); opBuilder.addInput(t.asOutput()); opBuilder.addInput(e.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java index 2a666ce3f1c..6898ac5d212 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java @@ -55,7 +55,7 @@ private Send(Operation operation) { ) public static Send create(Scope scope, Operand tensor, String tensorName, String sendDevice, Long sendDeviceIncarnation, String recvDevice, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Send", scope.makeOpName("Send")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Send")); opBuilder.addInput(tensor.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("tensor_name", tensorName); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java index cfd64e9f8f5..e5279c5d733 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java @@ -87,7 +87,7 @@ private SetDiff1d(Operation operation) { ) public static SetDiff1d create(Scope scope, Operand x, Operand y, Class outIdx) { - OperationBuilder opBuilder = scope.env().opBuilder("ListDiff", scope.makeOpName("SetDiff1d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SetDiff1d")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetSize.java index 26a705b8d1f..28bc5ffaf95 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetSize.java @@ -67,7 +67,7 @@ private SetSize(Operation operation) { ) public static SetSize create(Scope scope, Operand setIndices, Operand setValues, Operand setShape, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SetSize", scope.makeOpName("SetSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SetSize")); opBuilder.addInput(setIndices.asOutput()); opBuilder.addInput(setValues.asOutput()); opBuilder.addInput(setShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java index 1b111b8643e..90b5b640701 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java @@ -70,7 +70,7 @@ private Shape(Operation operation) { ) public static Shape create(Scope scope, Operand input, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("Shape", scope.makeOpName("Shape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Shape")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("out_type", Operands.toDataType(outType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java index ac06776f9fa..ec88a0524eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java @@ -71,7 +71,7 @@ private ShapeN(Operation operation) { ) public static ShapeN create(Scope scope, Iterable> input, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("ShapeN", scope.makeOpName("ShapeN")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ShapeN")); opBuilder.addInputList(Operands.asOutputs(input)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("out_type", Operands.toDataType(outType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java index d8c5d3fcc88..843ef45b612 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java @@ -71,7 +71,7 @@ private Size(Operation operation) { ) public static Size create(Scope scope, Operand input, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("Size", scope.makeOpName("Size")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Size")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("out_type", Operands.toDataType(outType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Skipgram.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Skipgram.java index 51127563a5b..9798383eac7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Skipgram.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Skipgram.java @@ -77,7 +77,7 @@ private Skipgram(Operation operation) { describeByClass = true ) public static Skipgram create(Scope scope, String filename, Long batchSize, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Skipgram", scope.makeOpName("Skipgram")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Skipgram")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("filename", filename); opBuilder.setAttr("batch_size", batchSize); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java index ec2adda3e4b..7e3167430b7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java @@ -73,7 +73,7 @@ private Slice(Operation operation) { ) public static Slice create(Scope scope, Operand input, Operand begin, Operand sizeOutput) { - OperationBuilder opBuilder = scope.env().opBuilder("Slice", scope.makeOpName("Slice")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Slice")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(begin.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java index 25c8106b0ac..bf7c137713d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java @@ -59,7 +59,7 @@ private Snapshot(Operation operation) { describeByClass = true ) public static Snapshot create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("Snapshot", scope.makeOpName("Snapshot")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Snapshot")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Snapshot<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java index cc66b8943ee..3e2058a0f29 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java @@ -163,7 +163,7 @@ private SpaceToBatchNd(Operation operation) { ) public static SpaceToBatchNd create(Scope scope, Operand input, Operand blockShape, Operand paddings) { - OperationBuilder opBuilder = scope.env().opBuilder("SpaceToBatchND", scope.makeOpName("SpaceToBatchNd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SpaceToBatchNd")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(blockShape.asOutput()); opBuilder.addInput(paddings.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java index 98476c4a960..4162d54ea94 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java @@ -71,7 +71,7 @@ private Split(Operation operation) { ) public static Split create(Scope scope, Operand axis, Operand value, Long numSplit) { - OperationBuilder opBuilder = 
scope.env().opBuilder("Split", scope.makeOpName("Split")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Split")); opBuilder.addInput(axis.asOutput()); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java index b62cb587fa7..78a4d9eeab4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java @@ -74,7 +74,7 @@ private SplitV(Operation operation) { ) public static SplitV create(Scope scope, Operand value, Operand sizeSplits, Operand axis, Long numSplit) { - OperationBuilder opBuilder = scope.env().opBuilder("SplitV", scope.makeOpName("SplitV")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SplitV")); opBuilder.addInput(value.asOutput()); opBuilder.addInput(sizeSplits.asOutput()); opBuilder.addInput(axis.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java index 49fe614adec..21b26dccd20 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java @@ -77,7 +77,7 @@ private Squeeze(Operation operation) { ) public static Squeeze create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Squeeze", scope.makeOpName("Squeeze")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Squeeze")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java index 31ff857d014..6e5b987d0bd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java @@ -77,7 +77,7 @@ private Stack(Operation operation) { ) public static Stack create(Scope scope, Iterable> values, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Pack", scope.makeOpName("Stack")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Stack")); opBuilder.addInputList(Operands.asOutputs(values)); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java index 527fd5f3c53..ce4d35c9563 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java @@ -55,7 +55,7 @@ private Stage(Operation operation) { describeByClass = true ) public static Stage create(Scope scope, Iterable> values, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Stage", scope.makeOpName("Stage")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Stage")); opBuilder.addInputList(Operands.asOutputs(values)); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageClear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageClear.java index ba08b758785..76a4c785f31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageClear.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageClear.java @@ -54,7 +54,7 @@ private StageClear(Operation operation) { ) public static StageClear create(Scope scope, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("StageClear", scope.makeOpName("StageClear")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StageClear")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StagePeek.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StagePeek.java index b431723eb47..dce04d53f5b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StagePeek.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StagePeek.java @@ -70,7 +70,7 @@ private StagePeek(Operation operation) { ) public static StagePeek create(Scope scope, Operand index, List> dtypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("StagePeek", scope.makeOpName("StagePeek")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StagePeek")); opBuilder.addInput(index.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageSize.java index 15c77f0f49b..107d54d890e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageSize.java @@ -61,7 +61,7 @@ private StageSize(Operation operation) { ) public static StageSize create(Scope scope, List> dtypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("StageSize", scope.makeOpName("StageSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StageSize")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java index 2ae5a7b5431..ca4fc6a1bf0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java @@ -109,7 +109,7 @@ private StopGradient(Operation operation) { describeByClass = true ) public static StopGradient create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("StopGradient", scope.makeOpName("StopGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StopGradient")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new StopGradient<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java index ea85e807597..7edb1753b86 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java @@ -172,7 +172,7 @@ private StridedSlice(Operation operation) { ) public static StridedSlice create(Scope scope, Operand input, Operand begin, Operand end, Operand strides, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("StridedSlice", scope.makeOpName("StridedSlice")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StridedSlice")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(begin.asOutput()); opBuilder.addInput(end.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java index b53f429f456..c866bc5bd86 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java @@ -73,7 +73,7 @@ private StridedSliceAssign(Operation operation) { public static StridedSliceAssign create(Scope scope, Operand ref, Operand begin, Operand end, Operand strides, Operand value, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("StridedSliceAssign", scope.makeOpName("StridedSliceAssign")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StridedSliceAssign")); opBuilder.addInput(ref.asOutput()); opBuilder.addInput(begin.asOutput()); opBuilder.addInput(end.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java index 125b88fb75e..1ad183e57b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java @@ -75,7 +75,7 @@ private StridedSliceGrad(Operation operation) { public static StridedSliceGrad create(Scope scope, Operand shape, Operand begin, Operand end, Operand strides, Operand dy, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("StridedSliceGrad", scope.makeOpName("StridedSliceGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StridedSliceGrad")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(begin.asOutput()); opBuilder.addInput(end.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java index a0b4d10e4c3..99da35103ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java @@ -68,7 +68,7 @@ private Sum(Operation operation) { ) public static Sum create(Scope scope, Operand input, Operand axis, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Sum", scope.makeOpName("Sum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Sum")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java index 38a7ed866d0..10c505712cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java @@ -68,7 +68,7 @@ private SwitchCond(Operation operation) { ) public static SwitchCond create(Scope scope, Operand data, Operand pred) { - OperationBuilder opBuilder = scope.env().opBuilder("Switch", scope.makeOpName("SwitchCond")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SwitchCond")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(pred.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java index 3b87b135172..831d802c12b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java @@ -75,7 +75,7 @@ private TemporaryVariable(Operation operation) { ) public static TemporaryVariable create(Scope scope, Shape shape, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TemporaryVariable", scope.makeOpName("TemporaryVariable")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TemporaryVariable")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("shape", shape); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java index 28fe76db26b..b8b5569c61f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java @@ -69,7 +69,7 @@ private TensorArray(Operation operation) { ) public static TensorArray create(Scope scope, Operand sizeOutput, Class dtype, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayV3", scope.makeOpName("TensorArray")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArray")); opBuilder.addInput(sizeOutput.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayClose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayClose.java index 73d7e827b1a..fee13859bc3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayClose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayClose.java @@ -53,7 +53,7 @@ private TensorArrayClose(Operation operation) { describeByClass = true ) public static TensorArrayClose create(Scope scope, Operand handle) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayCloseV3", scope.makeOpName("TensorArrayClose")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArrayClose")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); return new TensorArrayClose(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java index e280753d74a..cff4355cd2b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java @@ -78,7 +78,7 @@ private TensorArrayConcat(Operation operation) { public static TensorArrayConcat create(Scope scope, Operand handle, Operand flowIn, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayConcatV3", scope.makeOpName("TensorArrayConcat")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArrayConcat")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(flowIn.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java index 31a93247809..a4e2e66c65d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java @@ -70,7 +70,7 @@ private TensorArrayGather(Operation operation) { public static TensorArrayGather create(Scope scope, Operand handle, Operand indices, Operand flowIn, Class dtype, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayGatherV3", scope.makeOpName("TensorArrayGather")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArrayGather")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(flowIn.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGrad.java index 5b6984b2ebe..a3c1b3922bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGrad.java @@ -93,7 +93,7 @@ private TensorArrayGrad(Operation operation) { ) public static TensorArrayGrad create(Scope scope, Operand handle, Operand flowIn, String source) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayGradV3", scope.makeOpName("TensorArrayGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArrayGrad")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(flowIn.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGradWithShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGradWithShape.java index 4c685c48f45..284787874da 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGradWithShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGradWithShape.java @@ -73,7 +73,7 @@ private TensorArrayGradWithShape(Operation operation) { ) public static TensorArrayGradWithShape create(Scope scope, Operand handle, Operand flowIn, Operand shapeToPrepend, String source) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayGradWithShape", scope.makeOpName("TensorArrayGradWithShape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArrayGradWithShape")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(flowIn.asOutput()); opBuilder.addInput(shapeToPrepend.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java index ee1f21e1d26..fd714faff19 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java @@ -67,7 +67,7 @@ private TensorArrayPack(Operation operation) { ) public static TensorArrayPack create(Scope scope, Operand handle, Operand flowIn, Class dtype, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayPack", scope.makeOpName("TensorArrayPack")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArrayPack")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(flowIn.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java index 60b34119d6b..bc532f9dfba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java @@ -67,7 +67,7 @@ private TensorArrayRead(Operation operation) { public static TensorArrayRead create(Scope scope, Operand handle, Operand index, Operand flowIn, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayReadV3", scope.makeOpName("TensorArrayRead")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArrayRead")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(index.asOutput()); opBuilder.addInput(flowIn.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayScatter.java index be7ed94d09c..2f0fb588481 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayScatter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayScatter.java @@ -63,7 +63,7 @@ private TensorArrayScatter(Operation operation) { ) public static TensorArrayScatter create(Scope scope, Operand handle, Operand indices, Operand value, Operand flowIn) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayScatterV3", scope.makeOpName("TensorArrayScatter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArrayScatter")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(value.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySize.java index 45a377703a5..45eabcee0d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySize.java @@ -60,7 +60,7 @@ private TensorArraySize(Operation operation) { ) public static TensorArraySize create(Scope scope, Operand handle, Operand flowIn) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArraySizeV3", scope.makeOpName("TensorArraySize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArraySize")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(flowIn.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySplit.java index c23f1a1ee13..f361bc23add 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySplit.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySplit.java @@ -72,7 +72,7 @@ private TensorArraySplit(Operation operation) { ) public static TensorArraySplit create(Scope scope, Operand handle, Operand value, Operand lengths, Operand flowIn) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArraySplitV3", scope.makeOpName("TensorArraySplit")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArraySplit")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(value.asOutput()); opBuilder.addInput(lengths.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayUnpack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayUnpack.java index 8dc2dc2e0f9..d7136201b13 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayUnpack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayUnpack.java @@ -61,7 +61,7 @@ private TensorArrayUnpack(Operation operation) { ) public static TensorArrayUnpack create(Scope scope, Operand handle, Operand value, Operand flowIn) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayUnpack", scope.makeOpName("TensorArrayUnpack")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArrayUnpack")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(value.asOutput()); opBuilder.addInput(flowIn.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayWrite.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayWrite.java index df16154064f..9e3ffca2eb1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayWrite.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayWrite.java @@ -62,7 +62,7 @@ private TensorArrayWrite(Operation operation) { ) public static TensorArrayWrite create(Scope scope, Operand handle, Operand index, Operand value, Operand flowIn) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorArrayWriteV3", scope.makeOpName("TensorArrayWrite")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorArrayWrite")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(index.asOutput()); opBuilder.addInput(value.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java index 146c951c0df..e1626dedff5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java @@ -80,7 +80,7 @@ private TensorListConcat(Operation operation) { public static TensorListConcat create(Scope scope, Operand inputHandle, Operand elementShape, Operand leadingDims, Class elementDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListConcatV2", scope.makeOpName("TensorListConcat")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListConcat")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(elementShape.asOutput()); opBuilder.addInput(leadingDims.asOutput()); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcatLists.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcatLists.java index e574d05f613..65a4178818f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcatLists.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcatLists.java @@ -62,7 +62,7 @@ private TensorListConcatLists(Operation operation) { ) public static TensorListConcatLists create(Scope scope, Operand inputA, Operand inputB, Class elementDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListConcatLists", scope.makeOpName("TensorListConcatLists")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListConcatLists")); opBuilder.addInput(inputA.asOutput()); opBuilder.addInput(inputB.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java index dd5910d8d2e..f11f7ba92b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java @@ -65,7 +65,7 @@ private TensorListElementShape(Operation operation) { ) public static TensorListElementShape create(Scope scope, Operand inputHandle, Class shapeType) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListElementShape", scope.makeOpName("TensorListElementShape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListElementShape")); opBuilder.addInput(inputHandle.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("shape_type", Operands.toDataType(shapeType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListFromTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListFromTensor.java index 12ae7a9ed70..e648b9e7925 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListFromTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListFromTensor.java @@ -63,7 +63,7 @@ private TensorListFromTensor(Operation operation) { ) public static TensorListFromTensor create(Scope scope, Operand tensor, Operand elementShape) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListFromTensor", scope.makeOpName("TensorListFromTensor")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListFromTensor")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(elementShape.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java index bae8f1b33da..b621bc1b849 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java @@ -71,7 +71,7 @@ private TensorListGather(Operation operation) { public static TensorListGather create(Scope scope, Operand inputHandle, Operand indices, 
Operand elementShape, Class elementDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListGather", scope.makeOpName("TensorListGather")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListGather")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(elementShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java index 4d7eba7bbfa..8e60428450d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java @@ -66,7 +66,7 @@ private TensorListGetItem(Operation operation) { public static TensorListGetItem create(Scope scope, Operand inputHandle, Operand index, Operand elementShape, Class elementDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListGetItem", scope.makeOpName("TensorListGetItem")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListGetItem")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(index.asOutput()); opBuilder.addInput(elementShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListLength.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListLength.java index 5614497942b..b1ffcaf318b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListLength.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListLength.java @@ -59,7 +59,7 @@ private TensorListLength(Operation operation) { describeByClass = true ) public static TensorListLength create(Scope scope, Operand inputHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListLength", scope.makeOpName("TensorListLength")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListLength")); opBuilder.addInput(inputHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new TensorListLength(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java index 6926cbf92da..26803f6c969 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java @@ -73,7 +73,7 @@ private TensorListPopBack(Operation operation) { ) public static TensorListPopBack create(Scope scope, Operand inputHandle, Operand elementShape, Class elementDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListPopBack", scope.makeOpName("TensorListPopBack")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListPopBack")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(elementShape.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBack.java index 6df4b4e1ff4..f8430d78de1 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBack.java @@ -64,7 +64,7 @@ private TensorListPushBack(Operation operation) { ) public static TensorListPushBack create(Scope scope, Operand inputHandle, Operand tensor) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListPushBack", scope.makeOpName("TensorListPushBack")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListPushBack")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(tensor.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBackBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBackBatch.java index 4191773ce8f..a2422a7eb8a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBackBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBackBatch.java @@ -59,7 +59,7 @@ private TensorListPushBackBatch(Operation operation) { ) public static TensorListPushBackBatch create(Scope scope, Operand inputHandles, Operand tensor) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListPushBackBatch", scope.makeOpName("TensorListPushBackBatch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListPushBackBatch")); opBuilder.addInput(inputHandles.asOutput()); opBuilder.addInput(tensor.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListReserve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListReserve.java index 9ab261d5b39..003cf6a7398 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListReserve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListReserve.java @@ -68,7 +68,7 @@ private TensorListReserve(Operation operation) { ) public static TensorListReserve create(Scope scope, Operand elementShape, Operand numElements, Class elementDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListReserve", scope.makeOpName("TensorListReserve")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListReserve")); opBuilder.addInput(elementShape.asOutput()); opBuilder.addInput(numElements.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListResize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListResize.java index ba395731c15..f22b6c53912 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListResize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListResize.java @@ -62,7 +62,7 @@ private TensorListResize(Operation operation) { ) public static TensorListResize create(Scope scope, Operand inputHandle, Operand sizeOutput) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListResize", scope.makeOpName("TensorListResize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListResize")); opBuilder.addInput(inputHandle.asOutput()); 
opBuilder.addInput(sizeOutput.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatter.java index 8bd16951d66..84f160b19a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatter.java @@ -74,7 +74,7 @@ private TensorListScatter(Operation operation) { public static TensorListScatter create(Scope scope, Operand tensor, Operand indices, Operand elementShape, Operand numElements) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListScatterV2", scope.makeOpName("TensorListScatter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListScatter")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(elementShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatterIntoExistingList.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatterIntoExistingList.java index 68dba6e4b53..507d14a13b5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatterIntoExistingList.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatterIntoExistingList.java @@ -68,7 +68,7 @@ private TensorListScatterIntoExistingList(Operation operation) { public static TensorListScatterIntoExistingList create(Scope scope, Operand inputHandle, Operand tensor, Operand indices) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListScatterIntoExistingList", scope.makeOpName("TensorListScatterIntoExistingList")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListScatterIntoExistingList")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSetItem.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSetItem.java index ba7ef3ae22a..02a21f13825 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSetItem.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSetItem.java @@ -61,7 +61,7 @@ private TensorListSetItem(Operation operation) { ) public static TensorListSetItem create(Scope scope, Operand inputHandle, Operand index, Operand item) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListSetItem", scope.makeOpName("TensorListSetItem")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListSetItem")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(index.asOutput()); opBuilder.addInput(item.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSplit.java index 2ca49751954..d25f9951ad7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSplit.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSplit.java @@ -68,7 +68,7 @@ private TensorListSplit(Operation operation) { ) public static TensorListSplit create(Scope scope, Operand tensor, Operand elementShape, Operand lengths) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListSplit", scope.makeOpName("TensorListSplit")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListSplit")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(elementShape.asOutput()); opBuilder.addInput(lengths.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java index 9d6b475d625..7c52d4a45b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java @@ -70,7 +70,7 @@ private TensorListStack(Operation operation) { public static TensorListStack create(Scope scope, Operand inputHandle, Operand elementShape, Class elementDtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorListStack", scope.makeOpName("TensorListStack")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorListStack")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(elementShape.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapErase.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapErase.java index 6a5eac721ef..83fa8e612f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapErase.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapErase.java @@ -65,7 +65,7 @@ private TensorMapErase(Operation operation) { ) public static TensorMapErase create(Scope scope, Operand inputHandle, Operand key, Class valueDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorMapErase", scope.makeOpName("TensorMapErase")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorMapErase")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(key.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapHasKey.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapHasKey.java index c02b3f84543..ce03b661d5f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapHasKey.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapHasKey.java @@ -62,7 +62,7 @@ private TensorMapHasKey(Operation operation) { ) public static TensorMapHasKey create(Scope scope, Operand inputHandle, Operand key) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorMapHasKey", scope.makeOpName("TensorMapHasKey")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorMapHasKey")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(key.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapInsert.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapInsert.java index 5b08049cf5f..903d39736c7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapInsert.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapInsert.java @@ -64,7 +64,7 @@ private TensorMapInsert(Operation operation) { ) public static TensorMapInsert create(Scope scope, Operand inputHandle, Operand key, Operand value) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorMapInsert", scope.makeOpName("TensorMapInsert")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorMapInsert")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(key.asOutput()); opBuilder.addInput(value.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java index 69ebb676047..be0682edec5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java @@ -66,7 +66,7 @@ private TensorMapLookup(Operation operation) { ) public static TensorMapLookup create(Scope scope, Operand inputHandle, Operand key, Class valueDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorMapLookup", scope.makeOpName("TensorMapLookup")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorMapLookup")); opBuilder.addInput(inputHandle.asOutput()); opBuilder.addInput(key.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapSize.java index 6aa230ce27c..31d7d712ac6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapSize.java @@ -59,7 +59,7 @@ private TensorMapSize(Operation operation) { describeByClass = true ) public static TensorMapSize create(Scope scope, Operand inputHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorMapSize", scope.makeOpName("TensorMapSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorMapSize")); opBuilder.addInput(inputHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new TensorMapSize(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java index 5d7ca8458f1..eb57059c093 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java @@ -64,7 +64,7 @@ private TensorMapStackKeys(Operation operation) { ) public static TensorMapStackKeys create(Scope scope, Operand inputHandle, Class keyDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorMapStackKeys", scope.makeOpName("TensorMapStackKeys")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorMapStackKeys")); opBuilder.addInput(inputHandle.asOutput()); 
opBuilder = scope.apply(opBuilder); opBuilder.setAttr("key_dtype", Operands.toDataType(keyDtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java index 7059f28795a..8e105625cb7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java @@ -119,7 +119,7 @@ private TensorScatterNdAdd(Operation operation) { ) public static TensorScatterNdAdd create(Scope scope, Operand tensor, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorScatterAdd", scope.makeOpName("TensorScatterNdAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorScatterNdAdd")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java index 5c0b720df48..3bf063821f3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java @@ -63,7 +63,7 @@ private TensorScatterNdMax(Operation operation) { ) public static TensorScatterNdMax create(Scope scope, Operand tensor, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorScatterMax", scope.makeOpName("TensorScatterNdMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorScatterNdMax")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java index 68ca6569d4a..4f81fbdb4c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java @@ -63,7 +63,7 @@ private TensorScatterNdMin(Operation operation) { ) public static TensorScatterNdMin create(Scope scope, Operand tensor, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorScatterMin", scope.makeOpName("TensorScatterNdMin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorScatterNdMin")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java index b5748556821..8a0c995c50e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java @@ -118,7 +118,7 @@ private TensorScatterNdSub(Operation operation) { ) public static TensorScatterNdSub create(Scope scope, 
Operand tensor, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorScatterSub", scope.makeOpName("TensorScatterNdSub")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorScatterNdSub")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java index dd6a9c5b775..d62c4ccf4f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java @@ -92,7 +92,7 @@ private TensorScatterNdUpdate(Operation operation) { ) public static TensorScatterNdUpdate create(Scope scope, Operand tensor, Operand indices, Operand updates) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorScatterUpdate", scope.makeOpName("TensorScatterNdUpdate")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorScatterNdUpdate")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java index 00fcc69e613..5fab7af8134 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java @@ -73,7 +73,7 @@ private TensorStridedSliceUpdate(Operation operation) { public static TensorStridedSliceUpdate create(Scope scope, Operand input, Operand begin, Operand end, Operand strides, Operand value, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorStridedSliceUpdate", scope.makeOpName("TensorStridedSliceUpdate")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorStridedSliceUpdate")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(begin.asOutput()); opBuilder.addInput(end.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java index c2c290787a4..b675f1e3375 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java @@ -93,7 +93,7 @@ private Tile(Operation operation) { ) public static Tile create(Scope scope, Operand input, Operand multiples) { - OperationBuilder opBuilder = scope.env().opBuilder("Tile", scope.makeOpName("Tile")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Tile")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(multiples.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Timestamp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Timestamp.java index 200a3627235..cae24f7dee1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Timestamp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Timestamp.java @@ -58,7 +58,7 @@ private Timestamp(Operation operation) { describeByClass = true ) public static Timestamp create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("Timestamp", scope.makeOpName("Timestamp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Timestamp")); opBuilder = scope.apply(opBuilder); return new Timestamp(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java index 0e74b1a7745..ed6c6344bff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java @@ -73,7 +73,7 @@ private TopKUnique(Operation operation) { describeByClass = true ) public static TopKUnique create(Scope scope, Operand input, Long k) { - OperationBuilder opBuilder = scope.env().opBuilder("TopKUnique", scope.makeOpName("TopKUnique")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TopKUnique")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("k", k); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java index 89b0c4ea5dc..571faba5d2e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java @@ -66,7 +66,7 @@ private TopKWithUnique(Operation operation) { describeByClass = true ) public static TopKWithUnique create(Scope scope, Operand input, Long k) { - OperationBuilder opBuilder = scope.env().opBuilder("TopKWithUnique", 
scope.makeOpName("TopKWithUnique")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TopKWithUnique")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("k", k); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java index cc4c9cbcfba..ec0ea5bacb7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java @@ -82,7 +82,7 @@ private Unbatch(Operation operation) { ) public static Unbatch create(Scope scope, Operand batchedTensor, Operand batchIndex, Operand id, Long timeoutMicros, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Unbatch", scope.makeOpName("Unbatch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Unbatch")); opBuilder.addInput(batchedTensor.asOutput()); opBuilder.addInput(batchIndex.asOutput()); opBuilder.addInput(id.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java index 910bc87c3fc..7f91db16730 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java @@ -78,7 +78,7 @@ private UnbatchGrad(Operation operation) { ) public static UnbatchGrad create(Scope scope, Operand originalInput, Operand batchIndex, Operand grad, Operand id, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("UnbatchGrad", scope.makeOpName("UnbatchGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnbatchGrad")); opBuilder.addInput(originalInput.asOutput()); opBuilder.addInput(batchIndex.asOutput()); opBuilder.addInput(grad.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java index 59bce512c2a..c7defd3c68e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java @@ -108,7 +108,7 @@ private Unique(Operation operation) { ) public static Unique create(Scope scope, Operand x, Operand axis, Class outIdx) { - OperationBuilder opBuilder = scope.env().opBuilder("UniqueV2", scope.makeOpName("Unique")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Unique")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java index 54a7e8b4e54..d13f73f2434 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java @@ -115,7 +115,7 @@ private UniqueWithCounts(Operation operation) { ) public static UniqueWithCounts create(Scope scope, Operand x, Operand axis, Class outIdx) { - OperationBuilder 
opBuilder = scope.env().opBuilder("UniqueWithCountsV2", scope.makeOpName("UniqueWithCounts")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UniqueWithCounts")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java index 23a2f0b3f5f..e2529c9468f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java @@ -80,7 +80,7 @@ private UnravelIndex(Operation operation) { ) public static UnravelIndex create(Scope scope, Operand indices, Operand dims) { - OperationBuilder opBuilder = scope.env().opBuilder("UnravelIndex", scope.makeOpName("UnravelIndex")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnravelIndex")); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(dims.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java index 64940b36cee..7d2ac838859 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java @@ -77,7 +77,7 @@ private Unstack(Operation operation) { ) public static Unstack create(Scope scope, Operand value, Long num, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Unpack", scope.makeOpName("Unstack")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Unstack")); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num", num); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstage.java index 6daf9ee8b21..36a6a6eedc3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstage.java @@ -67,7 +67,7 @@ private Unstage(Operation operation) { ) public static Unstage create(Scope scope, List> dtypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Unstage", scope.makeOpName("Unstage")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Unstage")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java index 7764cc2220b..8e03195ee42 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java @@ -78,7 +78,7 @@ private UpperBound(Operation operation) { ) public static UpperBound create(Scope scope, Operand sortedInputs, Operand values, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("UpperBound", scope.makeOpName("UpperBound")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UpperBound")); opBuilder.addInput(sortedInputs.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarHandleOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarHandleOp.java index a5bef8a017d..8aaf4953410 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarHandleOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarHandleOp.java @@ -66,7 +66,7 @@ private VarHandleOp(Operation operation) { ) public static VarHandleOp create(Scope scope, Class dtype, Shape shape, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("VarHandleOp", scope.makeOpName("VarHandleOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("VarHandleOp")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarIsInitializedOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarIsInitializedOp.java index 708a550be11..2dacf759b88 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarIsInitializedOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarIsInitializedOp.java @@ -57,7 +57,7 @@ private VarIsInitializedOp(Operation operation) { describeByClass = true ) public static VarIsInitializedOp create(Scope scope, Operand resource) { - OperationBuilder opBuilder = scope.env().opBuilder("VarIsInitializedOp", scope.makeOpName("VarIsInitializedOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("VarIsInitializedOp")); opBuilder.addInput(resource.asOutput()); opBuilder = scope.apply(opBuilder); return new VarIsInitializedOp(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java index 8fd5c10363e..ac2d7bb369a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java @@ -67,7 +67,7 @@ private Variable(Operation operation) { ) public 
static Variable create(Scope scope, Shape shape, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("VariableV2", scope.makeOpName("Variable")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Variable")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("shape", shape); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java index 57d31b78299..b2c21f4238a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java @@ -70,7 +70,7 @@ private VariableShape(Operation operation) { ) public static VariableShape create(Scope scope, Operand input, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("VariableShape", scope.makeOpName("VariableShape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("VariableShape")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("out_type", Operands.toDataType(outType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Where.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Where.java index 0a7dd998332..a3323a8d5d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Where.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Where.java @@ -114,7 +114,7 @@ private Where(Operation operation) { describeByClass = true ) public static Where create(Scope scope, Operand condition) { - OperationBuilder opBuilder = scope.env().opBuilder("Where", scope.makeOpName("Where")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Where")); opBuilder.addInput(condition.asOutput()); opBuilder = scope.apply(opBuilder); return new Where(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaConvV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaConvV2.java index 529a0d092c6..fe95ace9832 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaConvV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaConvV2.java @@ -77,7 +77,7 @@ public static XlaConvV2 create(Scope sco Operand padding, Operand lhsDilation, Operand rhsDilation, Operand featureGroupCount, String dimensionNumbers, String precisionConfig, Class preferredElementType) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaConvV2", scope.makeOpName("XlaConvV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("XlaConvV2")); opBuilder.addInput(lhs.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder.addInput(windowStrides.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaDotV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaDotV2.java index de90ac10d8e..6d94739df68 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaDotV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaDotV2.java @@ -68,7 +68,7 @@ private XlaDotV2(Operation 
operation) { public static XlaDotV2 create(Scope scope, Operand lhs, Operand rhs, String dimensionNumbers, String precisionConfig, Class preferredElementType) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaDotV2", scope.makeOpName("XlaDotV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("XlaDotV2")); opBuilder.addInput(lhs.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSetDynamicDimensionSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSetDynamicDimensionSize.java index 0fcb7229afa..2ab7f6605ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSetDynamicDimensionSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSetDynamicDimensionSize.java @@ -67,7 +67,7 @@ private XlaSetDynamicDimensionSize(Operation operation) { ) public static XlaSetDynamicDimensionSize create(Scope scope, Operand input, Operand dimIndex, Operand sizeOutput) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaSetDynamicDimensionSize", scope.makeOpName("XlaSetDynamicDimensionSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("XlaSetDynamicDimensionSize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(dimIndex.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSpmdFullToShardShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSpmdFullToShardShape.java index cbb7a734ee0..cedee394b63 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSpmdFullToShardShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSpmdFullToShardShape.java @@ -65,7 +65,7 @@ private XlaSpmdFullToShardShape(Operation operation) { ) public static XlaSpmdFullToShardShape create(Scope scope, Operand input, String manualSharding) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaSpmdFullToShardShape", scope.makeOpName("XlaSpmdFullToShardShape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("XlaSpmdFullToShardShape")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("manual_sharding", manualSharding); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSpmdShardToFullShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSpmdShardToFullShape.java index d2c177cb22e..dc5e3de5834 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSpmdShardToFullShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/XlaSpmdShardToFullShape.java @@ -66,7 +66,7 @@ private XlaSpmdShardToFullShape(Operation operation) { ) public static XlaSpmdShardToFullShape create(Scope scope, Operand input, String manualSharding, Shape fullShape) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaSpmdShardToFullShape", scope.makeOpName("XlaSpmdShardToFullShape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("XlaSpmdShardToFullShape")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("manual_sharding", manualSharding); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java index 2b95580be17..3a0da9958cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java @@ -59,7 +59,7 @@ private ZerosLike(Operation operation) { describeByClass = true ) public static ZerosLike create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("ZerosLike", scope.makeOpName("ZerosLike")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ZerosLike")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new ZerosLike<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java index d9ea90b5f0a..fc728c1439c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java @@ -66,7 +66,7 @@ private AnonymousIterator(Operation operation) { ) public static AnonymousIterator create(Scope scope, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("AnonymousIteratorV2", scope.makeOpName("AnonymousIterator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AnonymousIterator")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); Shape[] outputShapesArray = new Shape[outputShapes.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java index 6f1bf3d4e27..438e8eb4374 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java @@ -56,7 +56,7 @@ private AnonymousMemoryCache(Operation operation) { describeByClass = true ) public static AnonymousMemoryCache create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("AnonymousMemoryCache", scope.makeOpName("AnonymousMemoryCache")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AnonymousMemoryCache")); opBuilder = scope.apply(opBuilder); return new AnonymousMemoryCache(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMultiDeviceIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMultiDeviceIterator.java index b398bba3165..ccccad32cf8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMultiDeviceIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMultiDeviceIterator.java @@ -63,7 +63,7 @@ private AnonymousMultiDeviceIterator(Operation operation) { ) public static AnonymousMultiDeviceIterator create(Scope scope, List devices, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("AnonymousMultiDeviceIterator", 
scope.makeOpName("AnonymousMultiDeviceIterator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AnonymousMultiDeviceIterator")); opBuilder = scope.apply(opBuilder); String[] devicesArray = new String[devices.size()]; for (int i = 0 ; i < devicesArray.length ; i++) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertNextDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertNextDataset.java index 683782d9c29..55d04a508aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertNextDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertNextDataset.java @@ -73,7 +73,7 @@ private AssertNextDataset(Operation operation) { public static AssertNextDataset create(Scope scope, Operand inputDataset, Operand transformations, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("AssertNextDataset", scope.makeOpName("AssertNextDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AssertNextDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(transformations.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AutoShardDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AutoShardDataset.java index a635d219ea0..901aa906739 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AutoShardDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AutoShardDataset.java @@ -72,7 +72,7 @@ private AutoShardDataset(Operation operation) { public static AutoShardDataset create(Scope scope, Operand inputDataset, Operand numWorkers, Operand index, List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("AutoShardDataset", scope.makeOpName("AutoShardDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AutoShardDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(numWorkers.asOutput()); opBuilder.addInput(index.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java index 5e04312b18c..b58045697f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java @@ -72,7 +72,7 @@ private BatchDataset(Operation operation) { public static BatchDataset create(Scope scope, Operand inputDataset, Operand batchSize, Operand dropRemainder, List> outputTypes, List outputShapes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchDatasetV2", scope.makeOpName("BatchDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(batchSize.asOutput()); opBuilder.addInput(dropRemainder.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BytesProducedStatsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BytesProducedStatsDataset.java index 8bf3d9de786..d2965b028d7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BytesProducedStatsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BytesProducedStatsDataset.java @@ -63,7 +63,7 @@ private BytesProducedStatsDataset(Operation operation) { ) public static BytesProducedStatsDataset create(Scope scope, Operand inputDataset, Operand tag, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("BytesProducedStatsDataset", scope.makeOpName("BytesProducedStatsDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BytesProducedStatsDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(tag.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDataset.java index 2a04eeb6acd..6eaaf8f621c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDataset.java @@ -73,7 +73,7 @@ public static CSVDataset create(Scope scope, Operand filenames, Operand compressionType, Operand bufferSize, Operand header, Operand fieldDelim, Operand useQuoteDelim, Operand naValue, Operand selectCols, Iterable> recordDefaults, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("CSVDataset", scope.makeOpName("CSVDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CSVDataset")); opBuilder.addInput(filenames.asOutput()); opBuilder.addInput(compressionType.asOutput()); opBuilder.addInput(bufferSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDatasetV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDatasetV2.java index 871994f4416..47db26f6dd6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDatasetV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDatasetV2.java @@ -75,7 +75,7 @@ public static CSVDatasetV2 create(Scope scope, Operand filenames, Operand fieldDelim, Operand useQuoteDelim, Operand naValue, Operand selectCols, Iterable> recordDefaults, Operand excludeCols, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("CSVDatasetV2", scope.makeOpName("CSVDatasetV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CSVDatasetV2")); opBuilder.addInput(filenames.asOutput()); opBuilder.addInput(compressionType.asOutput()); opBuilder.addInput(bufferSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java index 3552ddc829f..b610b7af19a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java @@ -69,7 +69,7 @@ private CacheDataset(Operation operation) { public static CacheDataset create(Scope scope, Operand inputDataset, Operand filename, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("CacheDataset", scope.makeOpName("CacheDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CacheDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(filename.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDatasetV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDatasetV2.java index 0d9a3da0312..60b371e0f4c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDatasetV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDatasetV2.java @@ -65,7 +65,7 @@ private CacheDatasetV2(Operation operation) { public static CacheDatasetV2 create(Scope scope, Operand inputDataset, Operand filename, Operand cache, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("CacheDatasetV2", scope.makeOpName("CacheDatasetV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CacheDatasetV2")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(filename.asOutput()); opBuilder.addInput(cache.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestDataset.java index b6ed6fa0598..d91ed316fe5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestDataset.java @@ -63,7 +63,7 @@ private ChooseFastestDataset(Operation operation) { public static ChooseFastestDataset create(Scope scope, Iterable> inputDatasets, Long numExperiments, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ChooseFastestDataset", scope.makeOpName("ChooseFastestDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ChooseFastestDataset")); opBuilder.addInputList(Operands.asOutputs(inputDatasets)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_experiments", numExperiments); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java index c5193836610..5e692b0fbc8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java @@ -67,7 +67,7 @@ private ConcatenateDataset(Operation operation) { public static ConcatenateDataset create(Scope scope, Operand inputDataset, Operand anotherDataset, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = 
scope.env().opBuilder("ConcatenateDataset", scope.makeOpName("ConcatenateDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ConcatenateDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(anotherDataset.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetCardinality.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetCardinality.java index 69fa37fb80e..2d1b8a24ba5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetCardinality.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetCardinality.java @@ -56,7 +56,7 @@ private DatasetCardinality(Operation operation) { describeByClass = true ) public static DatasetCardinality create(Scope scope, Operand inputDataset) { - OperationBuilder opBuilder = scope.env().opBuilder("DatasetCardinality", scope.makeOpName("DatasetCardinality")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DatasetCardinality")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); return new DatasetCardinality(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFromGraph.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFromGraph.java index 3961ea15eab..0064b4a1efa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFromGraph.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFromGraph.java @@ -57,7 +57,7 @@ private DatasetFromGraph(Operation operation) { describeByClass = true ) public static DatasetFromGraph create(Scope scope, Operand graphDef) { - OperationBuilder opBuilder = scope.env().opBuilder("DatasetFromGraph", scope.makeOpName("DatasetFromGraph")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DatasetFromGraph")); opBuilder.addInput(graphDef.asOutput()); opBuilder = scope.apply(opBuilder); return new DatasetFromGraph(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToGraph.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToGraph.java index 1d0681e31e4..1f550abaf7d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToGraph.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToGraph.java @@ -58,7 +58,7 @@ private DatasetToGraph(Operation operation) { ) public static DatasetToGraph create(Scope scope, Operand inputDataset, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DatasetToGraphV2", scope.makeOpName("DatasetToGraph")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DatasetToGraph")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java index c3ebfccb906..dffd7bebf02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java @@ -65,7 +65,7 @@ private DatasetToSingleElement(Operation operation) { ) public static DatasetToSingleElement create(Scope scope, Operand dataset, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("DatasetToSingleElement", scope.makeOpName("DatasetToSingleElement")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DatasetToSingleElement")); opBuilder.addInput(dataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToTfRecord.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToTfRecord.java index d3cd2a4d8a0..4ccdc21bafa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToTfRecord.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToTfRecord.java @@ -54,7 +54,7 @@ private DatasetToTfRecord(Operation operation) { ) public static DatasetToTfRecord create(Scope scope, Operand inputDataset, Operand filename, Operand compressionType) { - OperationBuilder opBuilder = scope.env().opBuilder("DatasetToTFRecord", scope.makeOpName("DatasetToTfRecord")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DatasetToTfRecord")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(filename.asOutput()); opBuilder.addInput(compressionType.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteIterator.java index a57a205c616..ce6cc297f2f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteIterator.java @@ -55,7 +55,7 @@ private DeleteIterator(Operation operation) { ) public static DeleteIterator create(Scope scope, Operand handle, Operand deleter) { - OperationBuilder opBuilder = scope.env().opBuilder("DeleteIterator", scope.makeOpName("DeleteIterator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeleteIterator")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(deleter.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java index 3fd27f09429..80dab58ffca 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java @@ -51,7 +51,7 @@ private DeleteMemoryCache(Operation operation) { ) public static DeleteMemoryCache create(Scope scope, Operand handle, Operand deleter) { - OperationBuilder opBuilder = scope.env().opBuilder("DeleteMemoryCache", scope.makeOpName("DeleteMemoryCache")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeleteMemoryCache")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(deleter.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java index a8dcedb5ecb..9dc5f5aef82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java @@ -54,7 +54,7 @@ private DeleteMultiDeviceIterator(Operation operation) { public static DeleteMultiDeviceIterator create(Scope scope, Operand multiDeviceIterator, Iterable> iterators, Operand deleter) { - OperationBuilder opBuilder = scope.env().opBuilder("DeleteMultiDeviceIterator", scope.makeOpName("DeleteMultiDeviceIterator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeleteMultiDeviceIterator")); opBuilder.addInput(multiDeviceIterator.asOutput()); opBuilder.addInputList(Operands.asOutputs(iterators)); opBuilder.addInput(deleter.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DenseToSparseBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DenseToSparseBatchDataset.java index 6389138e683..07d490b1008 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DenseToSparseBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DenseToSparseBatchDataset.java @@ -68,7 +68,7 @@ private DenseToSparseBatchDataset(Operation operation) { public static DenseToSparseBatchDataset create(Scope scope, Operand inputDataset, Operand batchSize, Operand rowShape, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("DenseToSparseBatchDataset", scope.makeOpName("DenseToSparseBatchDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DenseToSparseBatchDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(batchSize.asOutput()); opBuilder.addInput(rowShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeserializeIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeserializeIterator.java index ccd8e16d9df..0a408b40593 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeserializeIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeserializeIterator.java @@ -56,7 +56,7 @@ private DeserializeIterator(Operation operation) { ) public static DeserializeIterator create(Scope scope, Operand resourceHandle, Operand serialized) { - OperationBuilder opBuilder = scope.env().opBuilder("DeserializeIterator", 
scope.makeOpName("DeserializeIterator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeserializeIterator")); opBuilder.addInput(resourceHandle.asOutput()); opBuilder.addInput(serialized.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DirectedInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DirectedInterleaveDataset.java index cd1cef1a070..5f4f262a18c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DirectedInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DirectedInterleaveDataset.java @@ -66,7 +66,7 @@ public static DirectedInterleaveDataset create(Scope scope, Operand selectorInputDataset, Iterable> dataInputDatasets, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("DirectedInterleaveDataset", scope.makeOpName("DirectedInterleaveDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DirectedInterleaveDataset")); opBuilder.addInput(selectorInputDataset.asOutput()); opBuilder.addInputList(Operands.asOutputs(dataInputDatasets)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterByLastComponentDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterByLastComponentDataset.java index 44b4b08a329..22ef6836c25 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterByLastComponentDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterByLastComponentDataset.java @@ -62,7 +62,7 @@ private FilterByLastComponentDataset(Operation operation) { public static FilterByLastComponentDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("FilterByLastComponentDataset", scope.makeOpName("FilterByLastComponentDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FilterByLastComponentDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java index 8d1f567c391..6c5f4ec9fec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java @@ -64,7 +64,7 @@ private FixedLengthRecordDataset(Operation operation) { public static FixedLengthRecordDataset create(Scope scope, Operand filenames, Operand headerBytes, Operand recordBytes, Operand footerBytes, Operand bufferSize, Operand compressionType) { - OperationBuilder opBuilder = scope.env().opBuilder("FixedLengthRecordDatasetV2", scope.makeOpName("FixedLengthRecordDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FixedLengthRecordDataset")); opBuilder.addInput(filenames.asOutput()); opBuilder.addInput(headerBytes.asOutput()); opBuilder.addInput(recordBytes.asOutput()); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IgnoreErrorsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IgnoreErrorsDataset.java index 334d2d7ad11..152d5696a91 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IgnoreErrorsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IgnoreErrorsDataset.java @@ -62,7 +62,7 @@ private IgnoreErrorsDataset(Operation operation) { ) public static IgnoreErrorsDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("IgnoreErrorsDataset", scope.makeOpName("IgnoreErrorsDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IgnoreErrorsDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InitializeTableFromDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InitializeTableFromDataset.java index 79f20eff235..b57533279cd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InitializeTableFromDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InitializeTableFromDataset.java @@ -51,7 +51,7 @@ private InitializeTableFromDataset(Operation operation) { ) public static InitializeTableFromDataset create(Scope scope, Operand tableHandle, Operand dataset) { - OperationBuilder opBuilder = scope.env().opBuilder("InitializeTableFromDataset", scope.makeOpName("InitializeTableFromDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InitializeTableFromDataset")); opBuilder.addInput(tableHandle.asOutput()); opBuilder.addInput(dataset.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/Iterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/Iterator.java index 3b3b3a04f6b..8bd9e126fc0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/Iterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/Iterator.java @@ -66,7 +66,7 @@ private Iterator(Operation operation) { ) public static Iterator create(Scope scope, String sharedName, String container, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("IteratorV2", scope.makeOpName("Iterator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Iterator")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("shared_name", sharedName); opBuilder.setAttr("container", container); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java index c218c81909d..cc39d3cf4ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java @@ -63,7 +63,7 @@ private IteratorFromStringHandle(Operation operation) { ) public static IteratorFromStringHandle create(Scope 
scope, Operand stringHandle, List> outputTypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("IteratorFromStringHandleV2", scope.makeOpName("IteratorFromStringHandle")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IteratorFromStringHandle")); opBuilder.addInput(stringHandle.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java index 7a7c2bf2652..361ea83a46f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java @@ -55,7 +55,7 @@ private IteratorGetDevice(Operation operation) { describeByClass = true ) public static IteratorGetDevice create(Scope scope, Operand resource) { - OperationBuilder opBuilder = scope.env().opBuilder("IteratorGetDevice", scope.makeOpName("IteratorGetDevice")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IteratorGetDevice")); opBuilder.addInput(resource.asOutput()); opBuilder = scope.apply(opBuilder); return new IteratorGetDevice(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNext.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNext.java index e40b54a0426..03a19599760 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNext.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNext.java @@ -69,7 +69,7 @@ private IteratorGetNext(Operation operation) { ) public static IteratorGetNext create(Scope scope, Operand iterator, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("IteratorGetNext", scope.makeOpName("IteratorGetNext")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IteratorGetNext")); opBuilder.addInput(iterator.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextAsOptional.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextAsOptional.java index 1507334e561..250b712fec8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextAsOptional.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextAsOptional.java @@ -65,7 +65,7 @@ private IteratorGetNextAsOptional(Operation operation) { ) public static IteratorGetNextAsOptional create(Scope scope, Operand iterator, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("IteratorGetNextAsOptional", scope.makeOpName("IteratorGetNextAsOptional")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IteratorGetNextAsOptional")); opBuilder.addInput(iterator.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextSync.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextSync.java index 5fa4809cc6d..0d011980d25 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextSync.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextSync.java @@ -73,7 +73,7 @@ private IteratorGetNextSync(Operation operation) { ) public static IteratorGetNextSync create(Scope scope, Operand iterator, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("IteratorGetNextSync", scope.makeOpName("IteratorGetNextSync")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IteratorGetNextSync")); opBuilder.addInput(iterator.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorToStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorToStringHandle.java index ad1d7422cc4..418f2f385a8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorToStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorToStringHandle.java @@ -60,7 +60,7 @@ private IteratorToStringHandle(Operation operation) { ) public static IteratorToStringHandle create(Scope scope, Operand resourceHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("IteratorToStringHandle", scope.makeOpName("IteratorToStringHandle")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IteratorToStringHandle")); opBuilder.addInput(resourceHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new IteratorToStringHandle(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LMDBDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LMDBDataset.java index 0ed1d8ddf75..77d37448de1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LMDBDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LMDBDataset.java @@ -71,7 +71,7 @@ private LMDBDataset(Operation operation) { ) public static LMDBDataset create(Scope scope, Operand filenames, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("LMDBDataset", scope.makeOpName("LMDBDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LMDBDataset")); opBuilder.addInput(filenames.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LatencyStatsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LatencyStatsDataset.java index 7e890e8429b..29256f43c80 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LatencyStatsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LatencyStatsDataset.java @@ -63,7 +63,7 @@ private LatencyStatsDataset(Operation operation) { ) public static LatencyStatsDataset create(Scope scope, Operand inputDataset, Operand tag, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("LatencyStatsDataset", 
scope.makeOpName("LatencyStatsDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LatencyStatsDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(tag.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java index 032522050f3..1377b71eefd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java @@ -61,7 +61,7 @@ private LeakyReluGrad(Operation operation) { ) public static LeakyReluGrad create(Scope scope, Operand gradients, Operand features, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LeakyReluGrad", scope.makeOpName("LeakyReluGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LeakyReluGrad")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MakeIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MakeIterator.java index 446caa5aca2..755c99bc39d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MakeIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MakeIterator.java @@ -57,7 +57,7 @@ private MakeIterator(Operation operation) { ) public static MakeIterator create(Scope scope, Operand dataset, Operand iterator) { - OperationBuilder opBuilder = scope.env().opBuilder("MakeIterator", scope.makeOpName("MakeIterator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MakeIterator")); opBuilder.addInput(dataset.asOutput()); opBuilder.addInput(iterator.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MatchingFilesDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MatchingFilesDataset.java index b8926e6bbb2..a4fefc83572 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MatchingFilesDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MatchingFilesDataset.java @@ -56,7 +56,7 @@ private MatchingFilesDataset(Operation operation) { describeByClass = true ) public static MatchingFilesDataset create(Scope scope, Operand patterns) { - OperationBuilder opBuilder = scope.env().opBuilder("MatchingFilesDataset", scope.makeOpName("MatchingFilesDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatchingFilesDataset")); opBuilder.addInput(patterns.asOutput()); opBuilder = scope.apply(opBuilder); return new MatchingFilesDataset(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MaxIntraOpParallelismDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MaxIntraOpParallelismDataset.java index d99b31adb6a..b63d4d577e3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MaxIntraOpParallelismDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MaxIntraOpParallelismDataset.java @@ 
-64,7 +64,7 @@ private MaxIntraOpParallelismDataset(Operation operation) { public static MaxIntraOpParallelismDataset create(Scope scope, Operand inputDataset, Operand maxIntraOpParallelism, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("MaxIntraOpParallelismDataset", scope.makeOpName("MaxIntraOpParallelismDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxIntraOpParallelismDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(maxIntraOpParallelism.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ModelDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ModelDataset.java index 4bbafbc1e02..4133f57bddf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ModelDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ModelDataset.java @@ -63,7 +63,7 @@ private ModelDataset(Operation operation) { ) public static ModelDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ModelDataset", scope.makeOpName("ModelDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ModelDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java index 89552a58475..082aaea89c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java @@ -65,7 +65,7 @@ private MultiDeviceIterator(Operation operation) { ) public static MultiDeviceIterator create(Scope scope, List devices, String sharedName, String container, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("MultiDeviceIterator", scope.makeOpName("MultiDeviceIterator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MultiDeviceIterator")); opBuilder = scope.apply(opBuilder); String[] devicesArray = new String[devices.size()]; for (int i = 0 ; i < devicesArray.length ; i++) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java index 58cf8dd6a46..d9594a04d06 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java @@ -63,7 +63,7 @@ private MultiDeviceIteratorFromStringHandle(Operation operation) { ) public static MultiDeviceIteratorFromStringHandle create(Scope scope, Operand stringHandle, List> outputTypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MultiDeviceIteratorFromStringHandle", scope.makeOpName("MultiDeviceIteratorFromStringHandle")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MultiDeviceIteratorFromStringHandle")); opBuilder.addInput(stringHandle.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java index bd70cf3d33c..2c422c4872b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java @@ -71,7 +71,7 @@ public static MultiDeviceIteratorGetNextFromShard create(Scope scope, Operand multiDeviceIterator, Operand shardNum, Operand incarnationId, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("MultiDeviceIteratorGetNextFromShard", scope.makeOpName("MultiDeviceIteratorGetNextFromShard")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MultiDeviceIteratorGetNextFromShard")); opBuilder.addInput(multiDeviceIterator.asOutput()); opBuilder.addInput(shardNum.asOutput()); opBuilder.addInput(incarnationId.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java index e6f41f1c6e3..6b9db31cb82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java @@ -58,7 +58,7 @@ private MultiDeviceIteratorInit(Operation operation) { ) public static MultiDeviceIteratorInit create(Scope scope, Operand dataset, Operand multiDeviceIterator, Operand maxBufferSize) { - OperationBuilder opBuilder = scope.env().opBuilder("MultiDeviceIteratorInit", scope.makeOpName("MultiDeviceIteratorInit")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MultiDeviceIteratorInit")); opBuilder.addInput(dataset.asOutput()); opBuilder.addInput(multiDeviceIterator.asOutput()); opBuilder.addInput(maxBufferSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java index 623530eb4f4..bf50da290f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java @@ -56,7 +56,7 @@ private MultiDeviceIteratorToStringHandle(Operation operation) { ) public static MultiDeviceIteratorToStringHandle create(Scope scope, Operand multiDeviceIterator) { - OperationBuilder opBuilder = scope.env().opBuilder("MultiDeviceIteratorToStringHandle", scope.makeOpName("MultiDeviceIteratorToStringHandle")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, 
scope.makeOpName("MultiDeviceIteratorToStringHandle")); opBuilder.addInput(multiDeviceIterator.asOutput()); opBuilder = scope.apply(opBuilder); return new MultiDeviceIteratorToStringHandle(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/NonSerializableDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/NonSerializableDataset.java index d6b6b043cf5..8acb8a1a9c7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/NonSerializableDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/NonSerializableDataset.java @@ -61,7 +61,7 @@ private NonSerializableDataset(Operation operation) { ) public static NonSerializableDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("NonSerializableDataset", scope.makeOpName("NonSerializableDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NonSerializableDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDataset.java index 3ee2024af63..e4c68e1585b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDataset.java @@ -67,7 +67,7 @@ private OptimizeDataset(Operation operation) { public static OptimizeDataset create(Scope scope, Operand inputDataset, Operand optimizations, List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("OptimizeDataset", scope.makeOpName("OptimizeDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OptimizeDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(optimizations.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDatasetV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDatasetV2.java index b0e1d15badf..f2a7659cd76 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDatasetV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDatasetV2.java @@ -70,7 +70,7 @@ public static OptimizeDatasetV2 create(Scope scope, Operand inp Operand optimizationsEnabled, Operand optimizationsDisabled, Operand optimizationsDefault, List> outputTypes, List outputShapes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("OptimizeDatasetV2", scope.makeOpName("OptimizeDatasetV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OptimizeDatasetV2")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(optimizationsEnabled.asOutput()); opBuilder.addInput(optimizationsDisabled.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalFromValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalFromValue.java index 5182e8292cb..741e5532640 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalFromValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalFromValue.java @@ -60,7 +60,7 @@ private OptionalFromValue(Operation operation) { describeByClass = true ) public static OptionalFromValue create(Scope scope, Iterable> components) { - OperationBuilder opBuilder = scope.env().opBuilder("OptionalFromValue", scope.makeOpName("OptionalFromValue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OptionalFromValue")); opBuilder.addInputList(Operands.asOutputs(components)); opBuilder = scope.apply(opBuilder); return new OptionalFromValue(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalGetValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalGetValue.java index f97316aef5a..d255fc381fc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalGetValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalGetValue.java @@ -69,7 +69,7 @@ private OptionalGetValue(Operation operation) { ) public static OptionalGetValue create(Scope scope, Operand optional, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("OptionalGetValue", scope.makeOpName("OptionalGetValue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OptionalGetValue")); opBuilder.addInput(optional.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalHasValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalHasValue.java index 1e178947fb9..5de50ea6bd9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalHasValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalHasValue.java @@ -59,7 +59,7 @@ private OptionalHasValue(Operation operation) { describeByClass = true ) public static OptionalHasValue create(Scope scope, Operand optional) { - OperationBuilder opBuilder = scope.env().opBuilder("OptionalHasValue", scope.makeOpName("OptionalHasValue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OptionalHasValue")); opBuilder.addInput(optional.asOutput()); opBuilder = scope.apply(opBuilder); return new OptionalHasValue(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalNone.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalNone.java index 964867da2d8..062a2a9808f 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalNone.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalNone.java @@ -58,7 +58,7 @@ private OptionalNone(Operation operation) { describeByClass = true ) public static OptionalNone create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("OptionalNone", scope.makeOpName("OptionalNone")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OptionalNone")); opBuilder = scope.apply(opBuilder); return new OptionalNone(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java index 34fb0f94ae4..5e74c59c2dc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java @@ -75,7 +75,7 @@ public static PaddedBatchDataset create(Scope scope, Operand in Operand batchSize, Iterable> paddedShapes, Iterable> paddingValues, Operand dropRemainder, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("PaddedBatchDatasetV2", scope.makeOpName("PaddedBatchDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PaddedBatchDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(batchSize.asOutput()); opBuilder.addInputList(Operands.asOutputs(paddedShapes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java index 9d3c49b4111..8ef26922652 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java @@ -66,7 +66,7 @@ private PrefetchDataset(Operation operation) { public static PrefetchDataset create(Scope scope, Operand inputDataset, Operand bufferSize, List> outputTypes, List outputShapes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("PrefetchDataset", scope.makeOpName("PrefetchDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PrefetchDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(bufferSize.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrivateThreadPoolDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrivateThreadPoolDataset.java index ded8a1b216e..3290840da75 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrivateThreadPoolDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrivateThreadPoolDataset.java @@ -64,7 +64,7 @@ private PrivateThreadPoolDataset(Operation operation) { public static PrivateThreadPoolDataset create(Scope scope, Operand inputDataset, Operand numThreads, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("PrivateThreadPoolDataset", scope.makeOpName("PrivateThreadPoolDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PrivateThreadPoolDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(numThreads.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java index 71002f903e2..788ff579222 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java @@ -73,7 +73,7 @@ private RandomDataset(Operation operation) { ) public static RandomDataset create(Scope scope, Operand seed, Operand seed2, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("RandomDataset", scope.makeOpName("RandomDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomDataset")); opBuilder.addInput(seed.asOutput()); opBuilder.addInput(seed2.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java index 083200e4bfb..bca448ecda1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java @@ -68,7 +68,7 @@ private RangeDataset(Operation operation) { ) public static RangeDataset create(Scope scope, Operand start, Operand stop, Operand step, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("RangeDataset", scope.makeOpName("RangeDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RangeDataset")); opBuilder.addInput(start.asOutput()); opBuilder.addInput(stop.asOutput()); opBuilder.addInput(step.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDataset.java index 982bd16b905..874587c68e7 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDataset.java @@ -69,7 +69,7 @@ private RebatchDataset(Operation operation) { public static RebatchDataset create(Scope scope, Operand inputDataset, Operand numReplicas, List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RebatchDataset", scope.makeOpName("RebatchDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RebatchDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(numReplicas.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDatasetV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDatasetV2.java index d3a6158133c..b98013d55bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDatasetV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDatasetV2.java @@ -69,7 +69,7 @@ private RebatchDatasetV2(Operation operation) { public static RebatchDatasetV2 create(Scope scope, Operand inputDataset, Operand batchSizes, Operand dropRemainder, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("RebatchDatasetV2", scope.makeOpName("RebatchDatasetV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RebatchDatasetV2")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(batchSizes.asOutput()); opBuilder.addInput(dropRemainder.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java index 73fccce5c3e..5809a919ebd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java @@ -60,7 +60,7 @@ private RegisterDataset(Operation operation) { ) public static RegisterDataset create(Scope scope, Operand dataset, Operand address, Operand protocol, Long externalStatePolicy) { - OperationBuilder opBuilder = scope.env().opBuilder("RegisterDataset", scope.makeOpName("RegisterDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RegisterDataset")); opBuilder.addInput(dataset.asOutput()); opBuilder.addInput(address.asOutput()); opBuilder.addInput(protocol.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java index cc208dd626d..d569bdd267d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java @@ -68,7 +68,7 @@ private RepeatDataset(Operation operation) { ) public static RepeatDataset create(Scope scope, Operand inputDataset, Operand count, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("RepeatDataset", scope.makeOpName("RepeatDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RepeatDataset")); 
opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(count.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SamplingDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SamplingDataset.java index 0f602e6c528..27778a1dd49 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SamplingDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SamplingDataset.java @@ -73,7 +73,7 @@ private SamplingDataset(Operation operation) { public static SamplingDataset create(Scope scope, Operand inputDataset, Operand rate, Operand seed, Operand seed2, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("SamplingDataset", scope.makeOpName("SamplingDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SamplingDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(rate.asOutput()); opBuilder.addInput(seed.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SerializeIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SerializeIterator.java index 8f70a020ed1..4d850e18e3a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SerializeIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SerializeIterator.java @@ -61,7 +61,7 @@ private SerializeIterator(Operation operation) { ) public static SerializeIterator create(Scope scope, Operand resourceHandle, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SerializeIterator", scope.makeOpName("SerializeIterator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SerializeIterator")); opBuilder.addInput(resourceHandle.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SetStatsAggregatorDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SetStatsAggregatorDataset.java index e72e914bdbc..63bce56359c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SetStatsAggregatorDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SetStatsAggregatorDataset.java @@ -67,7 +67,7 @@ public static SetStatsAggregatorDataset create(Scope scope, Operand statsAggregator, Operand tag, Operand counterPrefix, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("SetStatsAggregatorDataset", scope.makeOpName("SetStatsAggregatorDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SetStatsAggregatorDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(statsAggregator.asOutput()); opBuilder.addInput(tag.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java index 95509b8fede..ea9b7059eea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java @@ -66,7 +66,7 @@ private ShardDataset(Operation operation) { 
public static ShardDataset create(Scope scope, Operand inputDataset, Operand numShards, Operand index, List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ShardDataset", scope.makeOpName("ShardDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ShardDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(numShards.asOutput()); opBuilder.addInput(index.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java index 634a2aec7d2..1a7d3667632 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java @@ -70,7 +70,7 @@ public static ShuffleAndRepeatDataset create(Scope scope, Operand bufferSize, Operand seed, Operand seed2, Operand count, Operand seedGenerator, List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ShuffleAndRepeatDatasetV2", scope.makeOpName("ShuffleAndRepeatDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ShuffleAndRepeatDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(bufferSize.asOutput()); opBuilder.addInput(seed.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java index ee99e969798..4ae9f5a57cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java @@ -69,7 +69,7 @@ public static ShuffleDataset create(Scope scope, Operand inputD Operand bufferSize, Operand seed, Operand seed2, Operand seedGenerator, List> outputTypes, List outputShapes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ShuffleDatasetV3", scope.makeOpName("ShuffleDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ShuffleDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(bufferSize.asOutput()); opBuilder.addInput(seed.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java index ab8e7433809..799fd924e15 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java @@ -68,7 +68,7 @@ private SkipDataset(Operation operation) { ) public static SkipDataset create(Scope scope, Operand inputDataset, Operand count, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("SkipDataset", scope.makeOpName("SkipDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SkipDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(count.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SleepDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SleepDataset.java index a34a262886a..bc3b16e01a2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SleepDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SleepDataset.java @@ -64,7 +64,7 @@ private SleepDataset(Operation operation) { public static SleepDataset create(Scope scope, Operand inputDataset, Operand sleepMicroseconds, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("SleepDataset", scope.makeOpName("SleepDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SleepDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(sleepMicroseconds.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java index ec77f306bb8..123a5780271 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java @@ -69,7 +69,7 @@ private SlidingWindowDataset(Operation operation) { public static SlidingWindowDataset create(Scope scope, Operand inputDataset, Operand windowSize, Operand windowShift, Operand windowStride, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("SlidingWindowDataset", scope.makeOpName("SlidingWindowDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SlidingWindowDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(windowSize.asOutput()); opBuilder.addInput(windowShift.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SparseTensorSliceDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SparseTensorSliceDataset.java index ab51a3fe1da..188bf6f6a8b 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SparseTensorSliceDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SparseTensorSliceDataset.java @@ -59,7 +59,7 @@ private SparseTensorSliceDataset(Operation operation) { ) public static SparseTensorSliceDataset create(Scope scope, Operand indices, Operand values, Operand denseShape) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseTensorSliceDataset", scope.makeOpName("SparseTensorSliceDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseTensorSliceDataset")); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder.addInput(denseShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SqlDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SqlDataset.java index f4f38e09b8f..7240a21d3b4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SqlDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SqlDataset.java @@ -65,7 +65,7 @@ private SqlDataset(Operation operation) { public static SqlDataset create(Scope scope, Operand driverName, Operand dataSourceName, Operand query, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("SqlDataset", scope.makeOpName("SqlDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SqlDataset")); opBuilder.addInput(driverName.asOutput()); opBuilder.addInput(dataSourceName.asOutput()); opBuilder.addInput(query.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java index 5dca7232d89..a4cd180c217 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java @@ -55,7 +55,7 @@ private StatsAggregatorHandle(Operation operation) { describeByClass = true ) public static StatsAggregatorHandle create(Scope scope, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("StatsAggregatorHandle", scope.makeOpName("StatsAggregatorHandle")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatsAggregatorHandle")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java index a266eb15319..c5fe31e28d9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java @@ -69,7 +69,7 @@ private TakeDataset(Operation operation) { ) public static TakeDataset create(Scope scope, Operand inputDataset, Operand count, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("TakeDataset", scope.makeOpName("TakeDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TakeDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(count.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java index 163886415ca..1c93d86e475 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java @@ -60,7 +60,7 @@ private TensorDataset(Operation operation) { ) public static TensorDataset create(Scope scope, Iterable> components, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorDataset", scope.makeOpName("TensorDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorDataset")); opBuilder.addInputList(Operands.asOutputs(components)); opBuilder = scope.apply(opBuilder); Shape[] outputShapesArray = new Shape[outputShapes.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java index 47a755e5fd2..ea02e5d6918 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java @@ -64,7 +64,7 @@ private TensorSliceDataset(Operation operation) { ) public static TensorSliceDataset create(Scope scope, Iterable> components, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorSliceDataset", scope.makeOpName("TensorSliceDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorSliceDataset")); opBuilder.addInputList(Operands.asOutputs(components)); opBuilder = scope.apply(opBuilder); Shape[] outputShapesArray = new Shape[outputShapes.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java index 61ece6c9b65..45892e72242 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java @@ -66,7 +66,7 @@ private TextLineDataset(Operation operation) { ) public static TextLineDataset create(Scope scope, Operand filenames, Operand compressionType, Operand bufferSize) { - OperationBuilder opBuilder = scope.env().opBuilder("TextLineDataset", scope.makeOpName("TextLineDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TextLineDataset")); opBuilder.addInput(filenames.asOutput()); opBuilder.addInput(compressionType.asOutput()); opBuilder.addInput(bufferSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java index d79e936fb32..3ee07058682 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java @@ -67,7 +67,7 @@ private TfRecordDataset(Operation operation) { ) public static TfRecordDataset create(Scope scope, Operand filenames, Operand compressionType, Operand bufferSize) { - OperationBuilder opBuilder = scope.env().opBuilder("TFRecordDataset", scope.makeOpName("TfRecordDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TfRecordDataset")); opBuilder.addInput(filenames.asOutput()); opBuilder.addInput(compressionType.asOutput()); opBuilder.addInput(bufferSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolDataset.java index ea90a856b7f..9f67fdf0a66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolDataset.java @@ -63,7 +63,7 @@ private ThreadPoolDataset(Operation operation) { public static ThreadPoolDataset create(Scope scope, Operand inputDataset, Operand threadPool, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ThreadPoolDataset", scope.makeOpName("ThreadPoolDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ThreadPoolDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(threadPool.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java index 669aa031783..5c12acd811a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java @@ -60,7 +60,7 @@ private ThreadPoolHandle(Operation operation) { ) public static ThreadPoolHandle create(Scope scope, Long numThreads, String displayName, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ThreadPoolHandle", scope.makeOpName("ThreadPoolHandle")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ThreadPoolHandle")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_threads", numThreads); opBuilder.setAttr("display_name", displayName); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java index 257705cc485..a827341e997 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java @@ -61,7 +61,7 @@ private UnbatchDataset(Operation operation) { ) public static UnbatchDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("UnbatchDataset", scope.makeOpName("UnbatchDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnbatchDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java index 790f2c1da5e..79e21583277 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java @@ -61,7 +61,7 @@ private UniqueDataset(Operation operation) { ) public static UniqueDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("UniqueDataset", scope.makeOpName("UniqueDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UniqueDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnwrapDatasetVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnwrapDatasetVariant.java index 98a0399ba8d..df30e1749d9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnwrapDatasetVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnwrapDatasetVariant.java @@ -55,7 +55,7 @@ private UnwrapDatasetVariant(Operation operation) { describeByClass = true ) public static UnwrapDatasetVariant create(Scope scope, Operand inputHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("UnwrapDatasetVariant", scope.makeOpName("UnwrapDatasetVariant")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnwrapDatasetVariant")); opBuilder.addInput(inputHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new UnwrapDatasetVariant(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java index f6ed2ea62a9..3e2069a2a93 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java @@ -110,7 +110,7 @@ public static WindowDataset create(Scope scope, Operand inputDa Operand sizeOutput, Operand shift, Operand stride, Operand dropRemainder, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("WindowDataset", scope.makeOpName("WindowDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WindowDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder.addInput(shift.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WrapDatasetVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WrapDatasetVariant.java index 06916439033..8f6de33fda7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WrapDatasetVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WrapDatasetVariant.java @@ -55,7 +55,7 @@ private WrapDatasetVariant(Operation operation) { describeByClass = true ) public static WrapDatasetVariant create(Scope scope, Operand inputHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("WrapDatasetVariant", scope.makeOpName("WrapDatasetVariant")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WrapDatasetVariant")); opBuilder.addInput(inputHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new WrapDatasetVariant(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java index 265b3f64dee..a07a8a14ede 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java @@ -69,7 +69,7 @@ private ZipDataset(Operation operation) { ) public static ZipDataset create(Scope scope, Iterable> inputDatasets, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ZipDataset", scope.makeOpName("ZipDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ZipDataset")); opBuilder.addInputList(Operands.asOutputs(inputDatasets)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertCardinalityDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertCardinalityDataset.java index d067a8da8ec..90ff976d9e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertCardinalityDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertCardinalityDataset.java @@ -64,7 +64,7 @@ private AssertCardinalityDataset(Operation operation) { public static AssertCardinalityDataset create(Scope scope, Operand inputDataset, Operand cardinality, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("AssertCardinalityDataset", scope.makeOpName("AssertCardinalityDataset")); + OperationBuilder opBuilder = 
scope.env().opBuilder(OP_NAME, scope.makeOpName("AssertCardinalityDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(cardinality.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java index 4cb95fbccd2..56513038149 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java @@ -64,7 +64,7 @@ private AssertNextDataset(Operation operation) { public static AssertNextDataset create(Scope scope, Operand inputDataset, Operand transformations, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalAssertNextDataset", scope.makeOpName("AssertNextDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AssertNextDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(transformations.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java index 931e2db4ba1..4375aeda74d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java @@ -72,7 +72,7 @@ private AutoShardDataset(Operation operation) { public static AutoShardDataset create(Scope scope, Operand inputDataset, Operand numWorkers, Operand index, List> outputTypes, List outputShapes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalAutoShardDataset", scope.makeOpName("AutoShardDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AutoShardDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(numWorkers.asOutput()); opBuilder.addInput(index.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java index 524df6ddd0f..5f6a0ace7c6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java @@ -63,7 +63,7 @@ private BytesProducedStatsDataset(Operation operation) { ) public static BytesProducedStatsDataset create(Scope scope, Operand inputDataset, Operand tag, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalBytesProducedStatsDataset", scope.makeOpName("BytesProducedStatsDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BytesProducedStatsDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(tag.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java index 0c26e1c263f..9a3ee0dfa20 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java @@ -73,7 +73,7 @@ public static CSVDataset create(Scope scope, Operand filenames, Operand compressionType, Operand bufferSize, Operand header, Operand fieldDelim, Operand useQuoteDelim, Operand naValue, Operand selectCols, Iterable> recordDefaults, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalCSVDataset", scope.makeOpName("CSVDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CSVDataset")); opBuilder.addInput(filenames.asOutput()); opBuilder.addInput(compressionType.asOutput()); opBuilder.addInput(bufferSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java index 093eccc41a3..0b2f583aab2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java @@ -63,7 +63,7 @@ private ChooseFastestDataset(Operation operation) { public static ChooseFastestDataset create(Scope scope, Iterable> inputDatasets, Long numExperiments, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalChooseFastestDataset", scope.makeOpName("ChooseFastestDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ChooseFastestDataset")); 
opBuilder.addInputList(Operands.asOutputs(inputDatasets)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_experiments", numExperiments); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CompressElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CompressElement.java index 093e7e6e5cc..30caf1434f3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CompressElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CompressElement.java @@ -56,7 +56,7 @@ private CompressElement(Operation operation) { describeByClass = true ) public static CompressElement create(Scope scope, Iterable> components) { - OperationBuilder opBuilder = scope.env().opBuilder("CompressElement", scope.makeOpName("CompressElement")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CompressElement")); opBuilder.addInputList(Operands.asOutputs(components)); opBuilder = scope.apply(opBuilder); return new CompressElement(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DataServiceDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DataServiceDataset.java index eec5392bda5..dd4a3e3f6d9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DataServiceDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DataServiceDataset.java @@ -73,7 +73,7 @@ public static DataServiceDataset create(Scope scope, Operand datasetId, Operand jobName, Operand maxOutstandingRequests, Operand iterationCounter, List> outputTypes, List outputShapes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DataServiceDataset", scope.makeOpName("DataServiceDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DataServiceDataset")); opBuilder.addInput(datasetId.asOutput()); opBuilder.addInput(processingMode.asOutput()); opBuilder.addInput(address.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java index dd3be793b80..4d179b451bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java @@ -56,7 +56,7 @@ private DatasetCardinality(Operation operation) { describeByClass = true ) public static DatasetCardinality create(Scope scope, Operand inputDataset) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalDatasetCardinality", scope.makeOpName("DatasetCardinality")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DatasetCardinality")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); return new DatasetCardinality(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java index ad01668fe45..cf25191b693 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java @@ -54,7 +54,7 @@ private DatasetToTFRecord(Operation operation) { ) public static DatasetToTFRecord create(Scope scope, Operand inputDataset, Operand filename, Operand compressionType) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalDatasetToTFRecord", scope.makeOpName("DatasetToTFRecord")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DatasetToTFRecord")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(filename.asOutput()); opBuilder.addInput(compressionType.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java index 7ed96b92c8f..dcffd82c925 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java @@ -68,7 +68,7 @@ private DenseToSparseBatchDataset(Operation operation) { public static DenseToSparseBatchDataset create(Scope scope, Operand inputDataset, Operand batchSize, Operand rowShape, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalDenseToSparseBatchDataset", scope.makeOpName("DenseToSparseBatchDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DenseToSparseBatchDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(batchSize.asOutput()); 
opBuilder.addInput(rowShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java index 7c5f70770d0..cf572d6c552 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java @@ -66,7 +66,7 @@ public static DirectedInterleaveDataset create(Scope scope, Operand selectorInputDataset, Iterable> dataInputDatasets, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalDirectedInterleaveDataset", scope.makeOpName("DirectedInterleaveDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DirectedInterleaveDataset")); opBuilder.addInput(selectorInputDataset.asOutput()); opBuilder.addInputList(Operands.asOutputs(dataInputDatasets)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DummyIterationCounter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DummyIterationCounter.java index 2da650712fb..d7e6e4ce05e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DummyIterationCounter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DummyIterationCounter.java @@ -54,7 +54,7 @@ private DummyIterationCounter(Operation operation) { describeByClass = true ) public static DummyIterationCounter create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("DummyIterationCounter", scope.makeOpName("DummyIterationCounter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DummyIterationCounter")); opBuilder = scope.apply(opBuilder); return new DummyIterationCounter(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java index 893974e4fa3..0804fea6bc7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java @@ -62,7 +62,7 @@ private IgnoreErrorsDataset(Operation operation) { ) public static IgnoreErrorsDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalIgnoreErrorsDataset", scope.makeOpName("IgnoreErrorsDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IgnoreErrorsDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java index b2e0126d321..b05b4a59b08 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java @@ -55,7 +55,7 @@ private IteratorGetDevice(Operation operation) { describeByClass = true ) public static IteratorGetDevice create(Scope scope, Operand resource) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalIteratorGetDevice", scope.makeOpName("IteratorGetDevice")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IteratorGetDevice")); opBuilder.addInput(resource.asOutput()); opBuilder = scope.apply(opBuilder); return new IteratorGetDevice(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java index 0b4756f3241..8fd36731572 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java @@ -63,7 +63,7 @@ private LatencyStatsDataset(Operation operation) { ) public static LatencyStatsDataset create(Scope scope, Operand inputDataset, Operand tag, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalLatencyStatsDataset", scope.makeOpName("LatencyStatsDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LatencyStatsDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(tag.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java index e7bab3a43b3..e53ab9a5387 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java @@ -62,7 +62,7 @@ private LmdbDataset(Operation operation) { ) public static LmdbDataset create(Scope scope, Operand filenames, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalLMDBDataset", scope.makeOpName("LmdbDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LmdbDataset")); opBuilder.addInput(filenames.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java index 615ee0bfa0b..ce807c2f225 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java @@ -56,7 +56,7 @@ private MatchingFilesDataset(Operation operation) { describeByClass = true ) public static MatchingFilesDataset create(Scope scope, Operand patterns) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalMatchingFilesDataset", scope.makeOpName("MatchingFilesDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatchingFilesDataset")); opBuilder.addInput(patterns.asOutput()); opBuilder = scope.apply(opBuilder); return new MatchingFilesDataset(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java index ceaf099584e..1cbbb19b216 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java @@ -64,7 +64,7 @@ private MaxIntraOpParallelismDataset(Operation operation) { public static MaxIntraOpParallelismDataset create(Scope scope, Operand inputDataset, Operand maxIntraOpParallelism, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalMaxIntraOpParallelismDataset", scope.makeOpName("MaxIntraOpParallelismDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxIntraOpParallelismDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(maxIntraOpParallelism.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java index f74a0207ae2..50d17ad20e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java @@ -61,7 +61,7 @@ private NonSerializableDataset(Operation operation) { ) public static NonSerializableDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalNonSerializableDataset", scope.makeOpName("NonSerializableDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NonSerializableDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java index 
38a6aeda19d..c3e179c29db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java @@ -90,7 +90,7 @@ public static ParseExampleDataset create(Scope scope, Operand i List> outputTypes, List outputShapes, List> raggedValueTypes, List> raggedSplitTypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ParseExampleDatasetV2", scope.makeOpName("ParseExampleDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ParseExampleDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(numParallelCalls.asOutput()); opBuilder.addInputList(Operands.asOutputs(denseDefaults)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java index 4a590bdd205..8ac3d4955e2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java @@ -64,7 +64,7 @@ private PrivateThreadPoolDataset(Operation operation) { public static PrivateThreadPoolDataset create(Scope scope, Operand inputDataset, Operand numThreads, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalPrivateThreadPoolDataset", scope.makeOpName("PrivateThreadPoolDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PrivateThreadPoolDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(numThreads.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java index e7b815f7374..c2c9cf73c4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java @@ -65,7 +65,7 @@ private RandomDataset(Operation operation) { ) public static RandomDataset create(Scope scope, Operand seed, Operand seed2, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalRandomDataset", scope.makeOpName("RandomDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomDataset")); opBuilder.addInput(seed.asOutput()); opBuilder.addInput(seed2.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java index ec1c8db1ffe..73856b0d475 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java @@ -69,7 +69,7 @@ private RebatchDataset(Operation operation) { public static RebatchDataset create(Scope scope, Operand inputDataset, 
Operand numReplicas, List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalRebatchDataset", scope.makeOpName("RebatchDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RebatchDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(numReplicas.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java index 2e2ad4dc8f3..ff9bad603f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java @@ -67,7 +67,7 @@ public static SetStatsAggregatorDataset create(Scope scope, Operand statsAggregator, Operand tag, Operand counterPrefix, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalSetStatsAggregatorDataset", scope.makeOpName("SetStatsAggregatorDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SetStatsAggregatorDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(statsAggregator.asOutput()); opBuilder.addInput(tag.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java index 8ba7e87d492..8e96f63923f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java @@ -64,7 +64,7 @@ private SleepDataset(Operation operation) { public static SleepDataset create(Scope scope, Operand inputDataset, Operand sleepMicroseconds, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalSleepDataset", scope.makeOpName("SleepDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SleepDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(sleepMicroseconds.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java index 9632b88e997..b81ae5016df 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java @@ -69,7 +69,7 @@ private SlidingWindowDataset(Operation operation) { public static SlidingWindowDataset create(Scope scope, Operand inputDataset, Operand windowSize, Operand windowShift, Operand windowStride, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalSlidingWindowDataset", scope.makeOpName("SlidingWindowDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SlidingWindowDataset")); 
opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(windowSize.asOutput()); opBuilder.addInput(windowShift.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java index 01b026f7321..fd566c63733 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java @@ -65,7 +65,7 @@ private SqlDataset(Operation operation) { public static SqlDataset create(Scope scope, Operand driverName, Operand dataSourceName, Operand query, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalSqlDataset", scope.makeOpName("SqlDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SqlDataset")); opBuilder.addInput(driverName.asOutput()); opBuilder.addInput(dataSourceName.asOutput()); opBuilder.addInput(query.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java index 792b01701f6..e4183305b0f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java @@ -55,7 +55,7 @@ private StatsAggregatorHandle(Operation operation) { describeByClass = true ) public static StatsAggregatorHandle create(Scope scope, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("StatsAggregatorHandleV2", scope.makeOpName("StatsAggregatorHandle")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatsAggregatorHandle")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSetSummaryWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSetSummaryWriter.java index bdbc08f505a..a0630991efe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSetSummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSetSummaryWriter.java @@ -51,7 +51,7 @@ private StatsAggregatorSetSummaryWriter(Operation operation) { ) public static StatsAggregatorSetSummaryWriter create(Scope scope, Operand statsAggregator, Operand summary) { - OperationBuilder opBuilder = scope.env().opBuilder("StatsAggregatorSetSummaryWriter", scope.makeOpName("StatsAggregatorSetSummaryWriter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatsAggregatorSetSummaryWriter")); opBuilder.addInput(statsAggregator.asOutput()); opBuilder.addInput(summary.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java index 84ab19fd114..bf7250b438b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java @@ -55,7 +55,7 @@ private StatsAggregatorSummary(Operation operation) { describeByClass = true ) public static StatsAggregatorSummary create(Scope scope, Operand iterator) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalStatsAggregatorSummary", scope.makeOpName("StatsAggregatorSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatsAggregatorSummary")); opBuilder.addInput(iterator.asOutput()); opBuilder = scope.apply(opBuilder); return new StatsAggregatorSummary(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java index 6dc3e5057ae..6fa3410940d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java @@ -63,7 +63,7 @@ private ThreadPoolDataset(Operation operation) { public static ThreadPoolDataset create(Scope scope, Operand inputDataset, Operand threadPool, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalThreadPoolDataset", scope.makeOpName("ThreadPoolDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ThreadPoolDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(threadPool.asOutput()); opBuilder = 
scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java index 67806fe8b9b..e117ad8e4f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java @@ -60,7 +60,7 @@ private ThreadPoolHandle(Operation operation) { ) public static ThreadPoolHandle create(Scope scope, Long numThreads, String displayName, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalThreadPoolHandle", scope.makeOpName("ThreadPoolHandle")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ThreadPoolHandle")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_threads", numThreads); opBuilder.setAttr("display_name", displayName); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java index 19a3eaf10cd..d557cc5184e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java @@ -61,7 +61,7 @@ private UnbatchDataset(Operation operation) { ) public static UnbatchDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalUnbatchDataset", scope.makeOpName("UnbatchDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnbatchDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UncompressElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UncompressElement.java index af2519a6111..0f498626609 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UncompressElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UncompressElement.java @@ -65,7 +65,7 @@ private UncompressElement(Operation operation) { ) public static UncompressElement create(Scope scope, Operand compressed, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("UncompressElement", scope.makeOpName("UncompressElement")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UncompressElement")); opBuilder.addInput(compressed.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java index a4e373cf734..a722cbf820d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java @@ -61,7 +61,7 @@ private UniqueDataset(Operation operation) { ) public static UniqueDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ExperimentalUniqueDataset", scope.makeOpName("UniqueDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UniqueDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java index 068892293e4..72fe63038f2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java @@ -63,7 +63,7 @@ private CheckNumerics(Operation operation) { ) public static CheckNumerics create(Scope scope, Operand tensor, String message) { - OperationBuilder opBuilder = scope.env().opBuilder("CheckNumericsV2", scope.makeOpName("CheckNumerics")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CheckNumerics")); opBuilder.addInput(tensor.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("message", message); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java index 3de60d9b847..43f60d63d72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java @@ -60,7 +60,7 @@ private DebugGradientIdentity(Operation operation) { describeByClass = true ) public static DebugGradientIdentity create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("DebugGradientIdentity", scope.makeOpName("DebugGradientIdentity")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DebugGradientIdentity")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new DebugGradientIdentity<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java index caab8f1eef7..7ebca44b76e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java @@ -61,7 +61,7 @@ private DebugGradientRefIdentity(Operation operation) { ) public static DebugGradientRefIdentity create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("DebugGradientRefIdentity", scope.makeOpName("DebugGradientRefIdentity")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DebugGradientRefIdentity")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new 
DebugGradientRefIdentity<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java index 2f563306c0b..cdba78a556c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java @@ -69,7 +69,7 @@ private DebugIdentity(Operation operation) { ) public static DebugIdentity create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DebugIdentityV2", scope.makeOpName("DebugIdentity")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DebugIdentity")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java index fc1c74982e6..a087e1fc778 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java @@ -60,7 +60,7 @@ private DebugNanCount(Operation operation) { ) public static DebugNanCount create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DebugNanCount", scope.makeOpName("DebugNanCount")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DebugNanCount")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java index d06285ff947..3a3fed5025f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java @@ -66,7 +66,7 @@ private DebugNumericsSummary(Operation operation) { ) public static DebugNumericsSummary create(Scope scope, Operand input, Class outputDtype, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DebugNumericSummaryV2", scope.makeOpName("DebugNumericsSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DebugNumericsSummary")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_dtype", Operands.toDataType(outputDtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java index 9fca1e2f8f9..430784dfd45 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java @@ -71,7 +71,7 @@ private NcclAllReduce(Operation operation) { ) public static NcclAllReduce create(Scope scope, Operand input, String reduction, Long numDevices, String sharedName) { - OperationBuilder opBuilder = scope.env().opBuilder("NcclAllReduce", scope.makeOpName("NcclAllReduce")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NcclAllReduce")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("reduction", reduction); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java index 3ec4061857d..da831716e48 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java @@ -66,7 +66,7 @@ private NcclBroadcast(Operation operation) { ) public static NcclBroadcast create(Scope scope, Operand input, Shape shape) { - OperationBuilder opBuilder = scope.env().opBuilder("NcclBroadcast", scope.makeOpName("NcclBroadcast")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NcclBroadcast")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java index 67a947860b0..a5b71f6c4dc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java @@ -66,7 +66,7 @@ private NcclReduce(Operation operation) { ) public static NcclReduce create(Scope scope, Iterable> input, String reduction) { - OperationBuilder opBuilder = scope.env().opBuilder("NcclReduce", scope.makeOpName("NcclReduce")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NcclReduce")); opBuilder.addInputList(Operands.asOutputs(input)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("reduction", reduction); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java index e1bfd6c3e28..7b0ef02045b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java @@ -75,7 +75,7 @@ private AsString(Operation operation) { describeByClass = true ) public static AsString create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("AsString", scope.makeOpName("AsString")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AsString")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java index 925e2fdf87a..9106b875824 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java @@ -65,7 +65,7 @@ private Cast(Operation operation) { ) public static Cast create(Scope scope, Operand x, Class DstT, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Cast", scope.makeOpName("Cast")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Cast")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("DstT", Operands.toDataType(DstT)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java index 2e9208f57a7..b56b0226d48 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java @@ -78,7 +78,7 @@ private Complex(Operation operation) { ) public static Complex create(Scope scope, Operand real, Operand imag, Class Tout) { - OperationBuilder opBuilder = scope.env().opBuilder("Complex", scope.makeOpName("Complex")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Complex")); opBuilder.addInput(real.asOutput()); opBuilder.addInput(imag.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java index bd8e87f20ab..d4f622e57df 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java @@ -70,7 +70,7 @@ private ToBool(Operation operation) { describeByClass = true ) public static ToBool create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("ToBool", scope.makeOpName("ToBool")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ToBool")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new ToBool(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java index 39ed87bb4a6..82d322bafbf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java @@ -63,7 +63,7 @@ private BoostedTreesAggregateStats(Operation operation) { public static BoostedTreesAggregateStats create(Scope scope, Operand nodeIds, Operand gradients, Operand hessians, Operand feature, Long maxSplits, Long numBuckets) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesAggregateStats", scope.makeOpName("BoostedTreesAggregateStats")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesAggregateStats")); opBuilder.addInput(nodeIds.asOutput()); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(hessians.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java index c6dbe81ae67..1cf6f719ebb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java @@ -67,7 +67,7 @@ private BoostedTreesBucketize(Operation operation) { ) public static BoostedTreesBucketize create(Scope scope, Iterable> floatValues, Iterable> bucketBoundaries) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesBucketize", scope.makeOpName("BoostedTreesBucketize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesBucketize")); opBuilder.addInputList(Operands.asOutputs(floatValues)); opBuilder.addInputList(Operands.asOutputs(bucketBoundaries)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java index 478abad9db1..f9b89d70e74 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java @@ -89,7 +89,7 @@ public static BoostedTreesCalculateBestFeatureSplit create(Scope scope, Operand nodeIdRange, Operand statsSummary, Operand l1, Operand l2, Operand treeComplexity, Operand minNodeWeight, Long logitsDimension, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesCalculateBestFeatureSplit", scope.makeOpName("BoostedTreesCalculateBestFeatureSplit")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesCalculateBestFeatureSplit")); opBuilder.addInput(nodeIdRange.asOutput()); opBuilder.addInput(statsSummary.asOutput()); opBuilder.addInput(l1.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java index 76d9c8142bd..7dd9b267651 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java @@ -95,7 +95,7 @@ public static BoostedTreesCalculateBestFeatureSplitV2 create(Scope scope, Operand splitTypes, Operand candidateFeatureIds, Operand l1, Operand l2, Operand treeComplexity, Operand minNodeWeight, Long logitsDimension) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesCalculateBestFeatureSplitV2", scope.makeOpName("BoostedTreesCalculateBestFeatureSplitV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesCalculateBestFeatureSplitV2")); opBuilder.addInput(nodeIdRange.asOutput()); opBuilder.addInputList(Operands.asOutputs(statsSummariesList)); opBuilder.addInput(splitTypes.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java index 9566a717e1b..d09a1859ca5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java @@ -95,7 +95,7 @@ public static BoostedTreesCalculateBestGainsPerFeature create(Scope scope, Operand nodeIdRange, Iterable> statsSummaryList, Operand l1, Operand l2, Operand treeComplexity, Operand minNodeWeight, Long maxSplits) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesCalculateBestGainsPerFeature", scope.makeOpName("BoostedTreesCalculateBestGainsPerFeature")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesCalculateBestGainsPerFeature")); opBuilder.addInput(nodeIdRange.asOutput()); opBuilder.addInputList(Operands.asOutputs(statsSummaryList)); opBuilder.addInput(l1.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java index d6c86fa7030..06732ac99e8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java @@ -62,7 +62,7 @@ private BoostedTreesCenterBias(Operation operation) { public static BoostedTreesCenterBias create(Scope scope, Operand treeEnsembleHandle, Operand meanGradients, Operand meanHessians, Operand l1, Operand 
l2) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesCenterBias", scope.makeOpName("BoostedTreesCenterBias")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesCenterBias")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder.addInput(meanGradients.asOutput()); opBuilder.addInput(meanHessians.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java index cf0705baa52..3e7eca3a7cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java @@ -55,7 +55,7 @@ private BoostedTreesCreateEnsemble(Operation operation) { public static BoostedTreesCreateEnsemble create(Scope scope, Operand treeEnsembleHandle, Operand stampToken, Operand treeEnsembleSerialized) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesCreateEnsemble", scope.makeOpName("BoostedTreesCreateEnsemble")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesCreateEnsemble")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder.addInput(stampToken.asOutput()); opBuilder.addInput(treeEnsembleSerialized.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java index 524620026f2..871f250c0ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java @@ -56,7 +56,7 @@ private BoostedTreesCreateQuantileStreamResource(Operation operation) { public static BoostedTreesCreateQuantileStreamResource create(Scope scope, Operand quantileStreamResourceHandle, Operand epsilon, Operand numStreams, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesCreateQuantileStreamResource", scope.makeOpName("BoostedTreesCreateQuantileStreamResource")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesCreateQuantileStreamResource")); opBuilder.addInput(quantileStreamResourceHandle.asOutput()); opBuilder.addInput(epsilon.asOutput()); opBuilder.addInput(numStreams.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java index dbd0fa5fde3..d24acd613a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java @@ -56,7 +56,7 @@ private BoostedTreesDeserializeEnsemble(Operation operation) { public static BoostedTreesDeserializeEnsemble create(Scope scope, Operand treeEnsembleHandle, Operand stampToken, Operand treeEnsembleSerialized) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesDeserializeEnsemble", scope.makeOpName("BoostedTreesDeserializeEnsemble")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesDeserializeEnsemble")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder.addInput(stampToken.asOutput()); opBuilder.addInput(treeEnsembleSerialized.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java index 5498299dd35..821bf890f39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java @@ -55,7 +55,7 @@ private BoostedTreesEnsembleResourceHandleOp(Operation operation) { describeByClass = true ) public static BoostedTreesEnsembleResourceHandleOp create(Scope scope, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesEnsembleResourceHandleOp", scope.makeOpName("BoostedTreesEnsembleResourceHandleOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesEnsembleResourceHandleOp")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java index 3fbe45ce51a..8115000d538 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java @@ -66,7 +66,7 @@ private BoostedTreesExampleDebugOutputs(Operation operation) { public static BoostedTreesExampleDebugOutputs create(Scope scope, Operand treeEnsembleHandle, Iterable> bucketizedFeatures, Long logitsDimension) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesExampleDebugOutputs", scope.makeOpName("BoostedTreesExampleDebugOutputs")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesExampleDebugOutputs")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder.addInputList(Operands.asOutputs(bucketizedFeatures)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java index 3bcdfd6f5d4..cb028d06e56 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java @@ -66,7 +66,7 @@ private BoostedTreesFlushQuantileSummaries(Operation operation) { ) public static BoostedTreesFlushQuantileSummaries create(Scope scope, Operand quantileStreamResourceHandle, Long numFeatures) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesFlushQuantileSummaries", scope.makeOpName("BoostedTreesFlushQuantileSummaries")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesFlushQuantileSummaries")); opBuilder.addInput(quantileStreamResourceHandle.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_features", numFeatures); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java index 91283cb4e91..3e87ddfda80 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java @@ -69,7 +69,7 @@ private BoostedTreesGetEnsembleStates(Operation operation) { ) public static BoostedTreesGetEnsembleStates create(Scope scope, Operand treeEnsembleHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesGetEnsembleStates", scope.makeOpName("BoostedTreesGetEnsembleStates")); + OperationBuilder opBuilder = 
scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesGetEnsembleStates")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new BoostedTreesGetEnsembleStates(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java index 9178217dba0..69975228820 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java @@ -67,7 +67,7 @@ private BoostedTreesMakeQuantileSummaries(Operation operation) { public static BoostedTreesMakeQuantileSummaries create(Scope scope, Iterable> floatValues, Operand exampleWeights, Operand epsilon) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesMakeQuantileSummaries", scope.makeOpName("BoostedTreesMakeQuantileSummaries")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesMakeQuantileSummaries")); opBuilder.addInputList(Operands.asOutputs(floatValues)); opBuilder.addInput(exampleWeights.asOutput()); opBuilder.addInput(epsilon.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java index 3552cb6e986..ea1744361ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java @@ -64,7 +64,7 @@ private BoostedTreesMakeStatsSummary(Operation operation) { public static BoostedTreesMakeStatsSummary create(Scope scope, Operand nodeIds, Operand gradients, Operand hessians, Iterable> bucketizedFeaturesList, Long maxSplits, Long numBuckets) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesMakeStatsSummary", scope.makeOpName("BoostedTreesMakeStatsSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesMakeStatsSummary")); opBuilder.addInput(nodeIds.asOutput()); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(hessians.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java index 98807d14ad3..e3b23e355e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java @@ -64,7 +64,7 @@ private BoostedTreesPredict(Operation operation) { ) public static BoostedTreesPredict create(Scope scope, Operand treeEnsembleHandle, Iterable> bucketizedFeatures, Long logitsDimension) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesPredict", scope.makeOpName("BoostedTreesPredict")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesPredict")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder.addInputList(Operands.asOutputs(bucketizedFeatures)); 
opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java index b0e678c8b69..f7db36bf019 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java @@ -57,7 +57,7 @@ private BoostedTreesQuantileStreamResourceAddSummaries(Operation operation) { public static BoostedTreesQuantileStreamResourceAddSummaries create(Scope scope, Operand quantileStreamResourceHandle, Iterable> summaries) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesQuantileStreamResourceAddSummaries", scope.makeOpName("BoostedTreesQuantileStreamResourceAddSummaries")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesQuantileStreamResourceAddSummaries")); opBuilder.addInput(quantileStreamResourceHandle.asOutput()); opBuilder.addInputList(Operands.asOutputs(summaries)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java index e7f43f229b5..616d5b5bb58 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java @@ -55,7 +55,7 @@ private BoostedTreesQuantileStreamResourceDeserialize(Operation operation) { public static BoostedTreesQuantileStreamResourceDeserialize create(Scope scope, Operand quantileStreamResourceHandle, Iterable> bucketBoundaries) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesQuantileStreamResourceDeserialize", scope.makeOpName("BoostedTreesQuantileStreamResourceDeserialize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesQuantileStreamResourceDeserialize")); opBuilder.addInput(quantileStreamResourceHandle.asOutput()); opBuilder.addInputList(Operands.asOutputs(bucketBoundaries)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java index 63e96f628ce..ce36cb3e366 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java @@ -55,7 +55,7 @@ private BoostedTreesQuantileStreamResourceFlush(Operation operation) { public static BoostedTreesQuantileStreamResourceFlush create(Scope scope, Operand quantileStreamResourceHandle, Operand numBuckets, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesQuantileStreamResourceFlush", scope.makeOpName("BoostedTreesQuantileStreamResourceFlush")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesQuantileStreamResourceFlush")); opBuilder.addInput(quantileStreamResourceHandle.asOutput()); opBuilder.addInput(numBuckets.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java index 68eb24ee4e1..a44db108e17 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java @@ -65,7 +65,7 @@ private BoostedTreesQuantileStreamResourceGetBucketBoundaries(Operation operatio ) public static BoostedTreesQuantileStreamResourceGetBucketBoundaries create(Scope scope, Operand quantileStreamResourceHandle, Long numFeatures) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesQuantileStreamResourceGetBucketBoundaries", scope.makeOpName("BoostedTreesQuantileStreamResourceGetBucketBoundaries")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesQuantileStreamResourceGetBucketBoundaries")); opBuilder.addInput(quantileStreamResourceHandle.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_features", numFeatures); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java index 0e0dcd74f2b..114ac3741ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java @@ -55,7 +55,7 @@ private BoostedTreesQuantileStreamResourceHandleOp(Operation operation) { describeByClass = true ) public static BoostedTreesQuantileStreamResourceHandleOp create(Scope scope, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesQuantileStreamResourceHandleOp", scope.makeOpName("BoostedTreesQuantileStreamResourceHandleOp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesQuantileStreamResourceHandleOp")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java index 6ee4ee2ebce..eb2f5193659 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java @@ -60,7 +60,7 @@ private BoostedTreesSerializeEnsemble(Operation operation) { ) public static BoostedTreesSerializeEnsemble create(Scope scope, Operand treeEnsembleHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesSerializeEnsemble", scope.makeOpName("BoostedTreesSerializeEnsemble")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesSerializeEnsemble")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new BoostedTreesSerializeEnsemble(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java index 7497e933574..7eb7971260f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java @@ -78,7 +78,7 @@ public static BoostedTreesSparseAggregateStats create(Scope scope, Operand gradients, Operand hessians, Operand featureIndices, Operand featureValues, Operand featureShape, Long maxSplits, Long numBuckets) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesSparseAggregateStats", scope.makeOpName("BoostedTreesSparseAggregateStats")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesSparseAggregateStats")); opBuilder.addInput(nodeIds.asOutput()); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(hessians.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java index 6e0bff134c2..d8541a15d9b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java @@ -92,7 +92,7 @@ public static BoostedTreesSparseCalculateBestFeatureSplit create(Scope scope, Operand statsSummaryValues, Operand statsSummaryShape, Operand l1, Operand l2, Operand treeComplexity, Operand minNodeWeight, Long logitsDimension, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesSparseCalculateBestFeatureSplit", scope.makeOpName("BoostedTreesSparseCalculateBestFeatureSplit")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesSparseCalculateBestFeatureSplit")); opBuilder.addInput(nodeIdRange.asOutput()); opBuilder.addInput(statsSummaryIndices.asOutput()); opBuilder.addInput(statsSummaryValues.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java index 7c762ab8092..55d400c9c0f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java @@ -77,7 +77,7 @@ public static BoostedTreesTrainingPredict create(Scope scope, Operand treeEnsembleHandle, Operand cachedTreeIds, Operand cachedNodeIds, Iterable> bucketizedFeatures, Long logitsDimension) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesTrainingPredict", scope.makeOpName("BoostedTreesTrainingPredict")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesTrainingPredict")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder.addInput(cachedTreeIds.asOutput()); opBuilder.addInput(cachedNodeIds.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java index 1a5cf40a62d..93310fd7e96 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java @@ -75,7 +75,7 @@ public static BoostedTreesUpdateEnsemble create(Scope scope, Iterable> thresholds, Iterable> leftNodeContribs, Iterable> rightNodeContribs, Operand maxDepth, Operand learningRate, Long pruningMode) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesUpdateEnsemble", scope.makeOpName("BoostedTreesUpdateEnsemble")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesUpdateEnsemble")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder.addInput(featureIds.asOutput()); opBuilder.addInputList(Operands.asOutputs(nodeIds)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java index 971af4f268c..ce10bb48d3c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java @@ -80,7 +80,7 @@ public static BoostedTreesUpdateEnsembleV2 create(Scope scope, Iterable> leftNodeContribs, Iterable> rightNodeContribs, Iterable> splitTypes, Operand maxDepth, Operand learningRate, Operand pruningMode, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BoostedTreesUpdateEnsembleV2", scope.makeOpName("BoostedTreesUpdateEnsembleV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BoostedTreesUpdateEnsembleV2")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder.addInputList(Operands.asOutputs(featureIds)); opBuilder.addInputList(Operands.asOutputs(dimensionIds)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java index df85bcd5ddb..b4a4bf79800 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java @@ -56,7 +56,7 @@ private IsBoostedTreesEnsembleInitialized(Operation operation) { ) public static IsBoostedTreesEnsembleInitialized create(Scope scope, Operand treeEnsembleHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("IsBoostedTreesEnsembleInitialized", scope.makeOpName("IsBoostedTreesEnsembleInitialized")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IsBoostedTreesEnsembleInitialized")); opBuilder.addInput(treeEnsembleHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new IsBoostedTreesEnsembleInitialized(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java index 3358263f345..12f6d5240db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java @@ -57,7 +57,7 @@ private IsBoostedTreesQuantileStreamResourceInitialized(Operation operation) { ) public static IsBoostedTreesQuantileStreamResourceInitialized create(Scope scope, Operand quantileStreamResourceHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("IsBoostedTreesQuantileStreamResourceInitialized", scope.makeOpName("IsBoostedTreesQuantileStreamResourceInitialized")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IsBoostedTreesQuantileStreamResourceInitialized")); opBuilder.addInput(quantileStreamResourceHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new IsBoostedTreesQuantileStreamResourceInitialized(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java index 86bbca9334a..6f928151576 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java @@ -71,7 +71,7 @@ private AdjustContrast(Operation operation) { ) public static AdjustContrast create(Scope scope, Operand images, Operand contrastFactor) { - OperationBuilder opBuilder = scope.env().opBuilder("AdjustContrastv2", scope.makeOpName("AdjustContrast")); + 
OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AdjustContrast")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(contrastFactor.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java index 64b03a7600f..744bcdbfbf1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java @@ -69,7 +69,7 @@ private AdjustHue(Operation operation) { ) public static AdjustHue create(Scope scope, Operand images, Operand delta) { - OperationBuilder opBuilder = scope.env().opBuilder("AdjustHue", scope.makeOpName("AdjustHue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AdjustHue")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(delta.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java index a87a80aef4c..5656c0748cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java @@ -69,7 +69,7 @@ private AdjustSaturation(Operation operation) { ) public static AdjustSaturation create(Scope scope, Operand images, Operand scale) { - OperationBuilder opBuilder = scope.env().opBuilder("AdjustSaturation", scope.makeOpName("AdjustSaturation")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AdjustSaturation")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(scale.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java index acf5dadb78e..9f33ea0c760 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java @@ -97,7 +97,7 @@ private CombinedNonMaxSuppression(Operation operation) { public static CombinedNonMaxSuppression create(Scope scope, Operand boxes, Operand scores, Operand maxOutputSizePerClass, Operand maxTotalSize, Operand iouThreshold, Operand scoreThreshold, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("CombinedNonMaxSuppression", scope.makeOpName("CombinedNonMaxSuppression")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CombinedNonMaxSuppression")); opBuilder.addInput(boxes.asOutput()); opBuilder.addInput(scores.asOutput()); opBuilder.addInput(maxOutputSizePerClass.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java index 85bca3e2d4e..5477322a6c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java @@ -94,7 +94,7 @@ private CropAndResize(Operation operation) { public static CropAndResize create(Scope scope, Operand image, Operand boxes, Operand boxInd, Operand cropSize, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CropAndResize", scope.makeOpName("CropAndResize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CropAndResize")); opBuilder.addInput(image.asOutput()); opBuilder.addInput(boxes.asOutput()); opBuilder.addInput(boxInd.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradBoxes.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradBoxes.java index 9eaddcb47ee..64eb73d46b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradBoxes.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradBoxes.java @@ -73,7 +73,7 @@ private CropAndResizeGradBoxes(Operation operation) { public static CropAndResizeGradBoxes create(Scope scope, Operand grads, Operand image, Operand boxes, Operand boxInd, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CropAndResizeGradBoxes", scope.makeOpName("CropAndResizeGradBoxes")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CropAndResizeGradBoxes")); opBuilder.addInput(grads.asOutput()); opBuilder.addInput(image.asOutput()); opBuilder.addInput(boxes.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java index 8091f63b647..971cfe70dc5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java @@ -79,7 +79,7 @@ private CropAndResizeGradImage(Operation operation) { public static CropAndResizeGradImage create(Scope scope, Operand grads, Operand boxes, Operand boxInd, Operand imageSize, Class T, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("CropAndResizeGradImage", scope.makeOpName("CropAndResizeGradImage")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CropAndResizeGradImage")); opBuilder.addInput(grads.asOutput()); opBuilder.addInput(boxes.asOutput()); opBuilder.addInput(boxInd.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java index ca839df34f5..550219265be 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java @@ -78,7 +78,7 @@ private DecodeAndCropJpeg(Operation operation) { ) public static DecodeAndCropJpeg create(Scope scope, Operand contents, Operand cropWindow, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeAndCropJpeg", scope.makeOpName("DecodeAndCropJpeg")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeAndCropJpeg")); opBuilder.addInput(contents.asOutput()); opBuilder.addInput(cropWindow.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeBmp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeBmp.java index e2f57633dc5..f2cfe2c86f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeBmp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeBmp.java @@ -68,7 +68,7 @@ private DecodeBmp(Operation operation) { describeByClass = true ) public static DecodeBmp create(Scope scope, Operand contents, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeBmp", scope.makeOpName("DecodeBmp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeBmp")); opBuilder.addInput(contents.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeGif.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeGif.java index f1aac55bf5d..85b936f7818 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeGif.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeGif.java @@ -67,7 +67,7 @@ private DecodeGif(Operation operation) { describeByClass = true ) public static DecodeGif create(Scope scope, Operand contents) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeGif", scope.makeOpName("DecodeGif")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeGif")); opBuilder.addInput(contents.asOutput()); opBuilder = scope.apply(opBuilder); return new DecodeGif(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java index 4b372bec4a6..8b6951f2da7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java @@ -82,7 +82,7 @@ private DecodeImage(Operation operation) { ) public static DecodeImage create(Scope scope, Operand contents, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeImage", scope.makeOpName("DecodeImage")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeImage")); opBuilder.addInput(contents.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java index 8cdbda731b5..5e11fe84aa0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java @@ -75,7 +75,7 @@ private DecodeJpeg(Operation operation) { describeByClass = true ) public static DecodeJpeg create(Scope scope, Operand contents, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeJpeg", scope.makeOpName("DecodeJpeg")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeJpeg")); opBuilder.addInput(contents.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java index edefdf308f7..2b3591fafcb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java @@ -80,7 +80,7 @@ private DecodePng(Operation operation) { ) public static DecodePng create(Scope scope, Operand contents, Class dtype, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodePng", scope.makeOpName("DecodePng")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodePng")); opBuilder.addInput(contents.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java index 660dad4034f..980487d0c3b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java @@ -75,7 +75,7 @@ private DrawBoundingBoxes(Operation operation) { ) public static DrawBoundingBoxes create(Scope scope, Operand images, Operand boxes, Operand colors) { - OperationBuilder opBuilder = scope.env().opBuilder("DrawBoundingBoxesV2", scope.makeOpName("DrawBoundingBoxes")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DrawBoundingBoxes")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(boxes.asOutput()); opBuilder.addInput(colors.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java index b03e10d8025..81ad610f790 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java @@ -76,7 +76,7 @@ private EncodeJpeg(Operation operation) { describeByClass = true ) public static EncodeJpeg create(Scope scope, Operand image, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("EncodeJpeg", scope.makeOpName("EncodeJpeg")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EncodeJpeg")); opBuilder.addInput(image.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpegVariableQuality.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpegVariableQuality.java index 5128843c1e1..732ca7bf7a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpegVariableQuality.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpegVariableQuality.java @@ -64,7 +64,7 @@ private EncodeJpegVariableQuality(Operation operation) { ) public static EncodeJpegVariableQuality create(Scope scope, Operand images, Operand quality) { - OperationBuilder opBuilder = scope.env().opBuilder("EncodeJpegVariableQuality", scope.makeOpName("EncodeJpegVariableQuality")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EncodeJpegVariableQuality")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(quality.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodePng.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodePng.java index 09f0b5b8b44..e9994f0dad6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodePng.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodePng.java @@ -72,7 +72,7 @@ private EncodePng(Operation operation) { ) public static EncodePng create(Scope scope, Operand image, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("EncodePng", scope.makeOpName("EncodePng")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EncodePng")); opBuilder.addInput(image.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java index 82b134b39d5..ac4a33f2e01 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java @@ -81,7 +81,7 @@ private ExtractGlimpse(Operation operation) { ) public static ExtractGlimpse create(Scope scope, Operand input, Operand sizeOutput, Operand offsets, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ExtractGlimpseV2", scope.makeOpName("ExtractGlimpse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ExtractGlimpse")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder.addInput(offsets.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java index 89a2979726b..7843008dbeb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java @@ -73,7 +73,7 @@ private ExtractImagePatches(Operation operation) { ) public static ExtractImagePatches create(Scope scope, Operand images, List ksizes, List strides, List rates, String padding) { - OperationBuilder opBuilder = scope.env().opBuilder("ExtractImagePatches", scope.makeOpName("ExtractImagePatches")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ExtractImagePatches")); opBuilder.addInput(images.asOutput()); opBuilder = scope.apply(opBuilder); long[] ksizesArray = new long[ksizes.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java index 5b1a97ac1dc..23a922277ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java @@ -68,7 +68,7 @@ private ExtractJpegShape(Operation operation) { ) public static ExtractJpegShape create(Scope scope, Operand contents, Class outputType) { - OperationBuilder opBuilder = scope.env().opBuilder("ExtractJpegShape", scope.makeOpName("ExtractJpegShape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ExtractJpegShape")); opBuilder.addInput(contents.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_type", Operands.toDataType(outputType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java index 895afd33b3b..54dd35813f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java @@ -82,7 +82,7 @@ public static GenerateBoundingBoxProposals create(Scope scope, Operand Operand bboxDeltas, Operand imageInfo, Operand anchors, Operand nmsThreshold, Operand preNmsTopn, Operand minSize, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("GenerateBoundingBoxProposals", scope.makeOpName("GenerateBoundingBoxProposals")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GenerateBoundingBoxProposals")); opBuilder.addInput(scores.asOutput()); opBuilder.addInput(bboxDeltas.asOutput()); opBuilder.addInput(imageInfo.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java index f87b0d696ad..0fdfc5daac0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java @@ -65,7 +65,7 @@ private HsvToRgb(Operation operation) { describeByClass = true ) public static HsvToRgb create(Scope scope, Operand images) { - OperationBuilder opBuilder = scope.env().opBuilder("HSVToRGB", scope.makeOpName("HsvToRgb")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("HsvToRgb")); opBuilder.addInput(images.asOutput()); opBuilder = scope.apply(opBuilder); return new HsvToRgb<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java index cfd556a54d8..97d3244bd27 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java @@ -72,7 +72,7 @@ private ImageProjectiveTransformV2(Operation operation) { public static ImageProjectiveTransformV2 create(Scope scope, Operand images, Operand transforms, Operand outputShape, String interpolation, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ImageProjectiveTransformV2", scope.makeOpName("ImageProjectiveTransformV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ImageProjectiveTransformV2")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(transforms.asOutput()); opBuilder.addInput(outputShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java index 143c787038e..434260d117c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java @@ -73,7 +73,7 @@ private ImageProjectiveTransformV3(Operation operation) { public static ImageProjectiveTransformV3 create(Scope scope, Operand images, Operand transforms, Operand outputShape, Operand fillValue, String interpolation, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ImageProjectiveTransformV3", scope.makeOpName("ImageProjectiveTransformV3")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ImageProjectiveTransformV3")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(transforms.asOutput()); opBuilder.addInput(outputShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NearestNeighbors.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NearestNeighbors.java index 4583d0b0f53..085e8e7ae05 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NearestNeighbors.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NearestNeighbors.java @@ -65,7 +65,7 @@ private NearestNeighbors(Operation operation) { ) public static NearestNeighbors create(Scope scope, Operand points, Operand centers, Operand k) { - OperationBuilder opBuilder = scope.env().opBuilder("NearestNeighbors", scope.makeOpName("NearestNeighbors")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NearestNeighbors")); opBuilder.addInput(points.asOutput()); opBuilder.addInput(centers.asOutput()); opBuilder.addInput(k.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java index 53126904212..82082ac8d74 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java @@ -104,7 +104,7 @@ private NonMaxSuppression(Operation operation) { public static NonMaxSuppression create(Scope scope, Operand boxes, Operand scores, Operand maxOutputSize, Operand iouThreshold, Operand scoreThreshold, Operand softNmsSigma, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("NonMaxSuppressionV5", scope.makeOpName("NonMaxSuppression")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NonMaxSuppression")); opBuilder.addInput(boxes.asOutput()); opBuilder.addInput(scores.asOutput()); opBuilder.addInput(maxOutputSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppressionWithOverlaps.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppressionWithOverlaps.java index 79518d3ea12..4c7fd59444c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppressionWithOverlaps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppressionWithOverlaps.java @@ -82,7 +82,7 @@ private NonMaxSuppressionWithOverlaps(Operation operation) { public static NonMaxSuppressionWithOverlaps create(Scope scope, Operand overlaps, Operand scores, Operand maxOutputSize, Operand overlapThreshold, Operand scoreThreshold) { - OperationBuilder opBuilder = scope.env().opBuilder("NonMaxSuppressionWithOverlaps", scope.makeOpName("NonMaxSuppressionWithOverlaps")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NonMaxSuppressionWithOverlaps")); opBuilder.addInput(overlaps.asOutput()); opBuilder.addInput(scores.asOutput()); opBuilder.addInput(maxOutputSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java index b22bae33b1d..1a08a0835d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java @@ -77,7 +77,7 @@ private QuantizedResizeBilinear(Operation operation) { public static QuantizedResizeBilinear create(Scope scope, Operand images, Operand sizeOutput, Operand min, Operand max, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedResizeBilinear", scope.makeOpName("QuantizedResizeBilinear")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedResizeBilinear")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder.addInput(min.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java index 6e3ea00d657..c8b3afda622 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java @@ -70,7 +70,7 @@ private RandomCrop(Operation operation) { ) public static RandomCrop create(Scope scope, Operand image, Operand sizeOutput, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RandomCrop", scope.makeOpName("RandomCrop")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomCrop")); opBuilder.addInput(image.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeArea.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeArea.java index 45f3428ac25..5358048f92c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeArea.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeArea.java @@ -73,7 +73,7 @@ private ResizeArea(Operation operation) { ) public static ResizeArea create(Scope scope, Operand images, Operand sizeOutput, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResizeArea", scope.makeOpName("ResizeArea")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResizeArea")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubic.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubic.java index 2d40c5ddb2c..a4814622c6e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubic.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubic.java @@ -65,7 +65,7 @@ private ResizeBicubic(Operation operation) { ) public static ResizeBicubic create(Scope scope, Operand images, Operand sizeOutput, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResizeBicubic", scope.makeOpName("ResizeBicubic")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResizeBicubic")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java index 9d4e3bb5eb8..9c5c786215f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java @@ -62,7 +62,7 @@ private ResizeBicubicGrad(Operation operation) { ) public static ResizeBicubicGrad create(Scope scope, Operand grads, Operand originalImage, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResizeBicubicGrad", scope.makeOpName("ResizeBicubicGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResizeBicubicGrad")); opBuilder.addInput(grads.asOutput()); opBuilder.addInput(originalImage.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinear.java index 77d6d1660e1..ab625f8fa43 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinear.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinear.java @@ -65,7 +65,7 @@ private ResizeBilinear(Operation operation) { ) public static ResizeBilinear create(Scope scope, Operand images, Operand sizeOutput, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResizeBilinear", scope.makeOpName("ResizeBilinear")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResizeBilinear")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java index 79e15102d2e..3f1324421d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java @@ -62,7 +62,7 @@ private ResizeBilinearGrad(Operation operation) { ) public static ResizeBilinearGrad create(Scope scope, Operand grads, Operand originalImage, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResizeBilinearGrad", scope.makeOpName("ResizeBilinearGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResizeBilinearGrad")); opBuilder.addInput(grads.asOutput()); opBuilder.addInput(originalImage.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java index 48ea170bfa3..2f3e188c9f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java @@ -66,7 +66,7 @@ private ResizeNearestNeighbor(Operation operation) { ) public static ResizeNearestNeighbor create(Scope scope, Operand images, Operand sizeOutput, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResizeNearestNeighbor", scope.makeOpName("ResizeNearestNeighbor")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResizeNearestNeighbor")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java index 3e25989b8d9..ea78ad2422c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java @@ -62,7 +62,7 @@ private ResizeNearestNeighborGrad(Operation operation) { ) public static ResizeNearestNeighborGrad create(Scope scope, Operand grads, Operand sizeOutput, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResizeNearestNeighborGrad", scope.makeOpName("ResizeNearestNeighborGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResizeNearestNeighborGrad")); opBuilder.addInput(grads.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java index 3c3b3bd0dcf..2a1a81c7543 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java @@ -82,7 +82,7 @@ private RgbToHsv(Operation operation) { describeByClass = true ) public static RgbToHsv create(Scope scope, Operand images) { - OperationBuilder opBuilder = scope.env().opBuilder("RGBToHSV", scope.makeOpName("RgbToHsv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RgbToHsv")); opBuilder.addInput(images.asOutput()); opBuilder = scope.apply(opBuilder); return new RgbToHsv<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java index 6f8756492dd..0148cf27973 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java @@ -112,7 +112,7 @@ private SampleDistortedBoundingBox(Operation operation) { public static SampleDistortedBoundingBox create(Scope scope, Operand imageSize, Operand boundingBoxes, Operand minObjectCovered, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SampleDistortedBoundingBoxV2", scope.makeOpName("SampleDistortedBoundingBox")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SampleDistortedBoundingBox")); opBuilder.addInput(imageSize.asOutput()); opBuilder.addInput(boundingBoxes.asOutput()); opBuilder.addInput(minObjectCovered.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslate.java index 2e221c62035..5569ff2873e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslate.java @@ -66,7 +66,7 @@ private ScaleAndTranslate(Operation operation) { public static ScaleAndTranslate create(Scope scope, Operand images, Operand sizeOutput, Operand scale, Operand translation, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScaleAndTranslate", scope.makeOpName("ScaleAndTranslate")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScaleAndTranslate")); opBuilder.addInput(images.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder.addInput(scale.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java index 7ab3f62e791..4afc53b6144 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java @@ -64,7 +64,7 @@ private ScaleAndTranslateGrad(Operation operation) { public static ScaleAndTranslateGrad create(Scope scope, Operand grads, Operand originalImage, Operand scale, Operand translation, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ScaleAndTranslateGrad", scope.makeOpName("ScaleAndTranslateGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScaleAndTranslateGrad")); opBuilder.addInput(grads.asOutput()); opBuilder.addInput(originalImage.asOutput()); opBuilder.addInput(scale.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java index cf5fd9ed0c7..cf52a0dd9ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java @@ -139,7 +139,7 @@ private StatelessSampleDistortedBoundingBox(Operation operation) { public static StatelessSampleDistortedBoundingBox create(Scope scope, Operand imageSize, Operand boundingBoxes, Operand minObjectCovered, Operand seed, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessSampleDistortedBoundingBox", scope.makeOpName("StatelessSampleDistortedBoundingBox")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessSampleDistortedBoundingBox")); opBuilder.addInput(imageSize.asOutput()); opBuilder.addInput(boundingBoxes.asOutput()); opBuilder.addInput(minObjectCovered.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java index 38c7a2c1b29..c8a01988603 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java @@ -60,7 +60,7 @@ private DecodeBase64(Operation operation) { describeByClass = true ) public static DecodeBase64 create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeBase64", scope.makeOpName("DecodeBase64")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeBase64")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new DecodeBase64(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCompressed.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCompressed.java index 314c1f74b43..d90bac56d9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCompressed.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCompressed.java @@ -64,7 +64,7 @@ private DecodeCompressed(Operation operation) { describeByClass = true ) public static DecodeCompressed create(Scope scope, Operand bytes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeCompressed", scope.makeOpName("DecodeCompressed")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeCompressed")); opBuilder.addInput(bytes.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCsv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCsv.java index bed943c4f66..f490599bb3b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCsv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCsv.java @@ -75,7 +75,7 @@ private DecodeCsv(Operation operation) { ) public static DecodeCsv create(Scope scope, Operand records, Iterable> recordDefaults, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeCSV", scope.makeOpName("DecodeCsv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeCsv")); opBuilder.addInput(records.asOutput()); opBuilder.addInputList(Operands.asOutputs(recordDefaults)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java index 3781c269b84..a139beb6e2d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java @@ -66,7 +66,7 @@ private DecodeJsonExample(Operation operation) { describeByClass = true ) public static DecodeJsonExample create(Scope scope, Operand jsonExamples) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeJSONExample", scope.makeOpName("DecodeJsonExample")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeJsonExample")); opBuilder.addInput(jsonExamples.asOutput()); opBuilder = scope.apply(opBuilder); return new DecodeJsonExample(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java index 8a50ac7beec..469742d7626 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java @@ -70,7 +70,7 @@ private DecodePaddedRaw(Operation operation) { public static DecodePaddedRaw create(Scope scope, Operand inputBytes, Operand fixedLength, Class outType, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodePaddedRaw", scope.makeOpName("DecodePaddedRaw")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodePaddedRaw")); opBuilder.addInput(inputBytes.asOutput()); opBuilder.addInput(fixedLength.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java index 654693bfe82..791c3e61b8a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java @@ -66,7 +66,7 @@ private DecodeRaw(Operation operation) { ) public static DecodeRaw create(Scope scope, Operand bytes, Class outType, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DecodeRaw", scope.makeOpName("DecodeRaw")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DecodeRaw")); opBuilder.addInput(bytes.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("out_type", Operands.toDataType(outType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java index 7effe9d1e06..a3780bff1c2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java @@ -112,7 +112,7 @@ private DeserializeManySparse(Operation operation) { ) public static DeserializeManySparse create(Scope scope, Operand serializedSparse, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("DeserializeManySparse", scope.makeOpName("DeserializeManySparse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeserializeManySparse")); opBuilder.addInput(serializedSparse.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java index 5fcdb5ee0ff..b3f42819398 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java @@ -64,7 +64,7 @@ private EncodeBase64(Operation operation) { describeByClass = true ) public static EncodeBase64 create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("EncodeBase64", scope.makeOpName("EncodeBase64")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EncodeBase64")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FifoQueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FifoQueue.java index ebe738189de..b83dffd8223 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FifoQueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FifoQueue.java @@ -65,7 +65,7 @@ private FifoQueue(Operation operation) { ) public static FifoQueue create(Scope scope, List> componentTypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("FIFOQueueV2", scope.makeOpName("FifoQueue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FifoQueue")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("component_types", Operands.toDataTypes(componentTypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FixedLengthRecordReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FixedLengthRecordReader.java index f36890bc20c..7236821a724 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FixedLengthRecordReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FixedLengthRecordReader.java @@ -60,7 +60,7 @@ private FixedLengthRecordReader(Operation operation) { describeByClass = true ) public static FixedLengthRecordReader create(Scope scope, Long recordBytes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("FixedLengthRecordReaderV2", scope.makeOpName("FixedLengthRecordReader")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FixedLengthRecordReader")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("record_bytes", recordBytes); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/IdentityReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/IdentityReader.java index 5e486af8d8c..9e319c77bb2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/IdentityReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/IdentityReader.java @@ -61,7 +61,7 @@ private IdentityReader(Operation operation) { describeByClass = true ) public static IdentityReader create(Scope scope, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("IdentityReaderV2", scope.makeOpName("IdentityReader")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IdentityReader")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/LmdbReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/LmdbReader.java index 809fa21ef06..cc136db7e30 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/LmdbReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/LmdbReader.java @@ -58,7 +58,7 @@ private LmdbReader(Operation operation) { describeByClass = true ) public static LmdbReader create(Scope scope, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LMDBReader", scope.makeOpName("LmdbReader")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LmdbReader")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/MatchingFiles.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/MatchingFiles.java index 43d1a16b193..79db5b7b67c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/MatchingFiles.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/MatchingFiles.java @@ -61,7 +61,7 @@ private MatchingFiles(Operation operation) { describeByClass = true ) public static MatchingFiles create(Scope scope, Operand pattern) { - OperationBuilder opBuilder = scope.env().opBuilder("MatchingFiles", scope.makeOpName("MatchingFiles")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatchingFiles")); opBuilder.addInput(pattern.asOutput()); opBuilder = scope.apply(opBuilder); return new MatchingFiles(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PaddingFifoQueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PaddingFifoQueue.java index 932a54aa542..c12c95bf0e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PaddingFifoQueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PaddingFifoQueue.java @@ -68,7 +68,7 @@ private PaddingFifoQueue(Operation operation) { ) public static PaddingFifoQueue create(Scope scope, List> componentTypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("PaddingFIFOQueueV2", scope.makeOpName("PaddingFifoQueue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PaddingFifoQueue")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("component_types", Operands.toDataTypes(componentTypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java index bd9226c2007..6013eb885d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java @@ -146,7 +146,7 @@ public static ParseExample create(Scope scope, Operand serialized, Operand raggedKeys, Iterable> denseDefaults, Long numSparse, List> sparseTypes, List> raggedValueTypes, List> raggedSplitTypes, List denseShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ParseExampleV2", scope.makeOpName("ParseExample")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ParseExample")); opBuilder.addInput(serialized.asOutput()); opBuilder.addInput(names.asOutput()); opBuilder.addInput(sparseKeys.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSequenceExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSequenceExample.java index 7dba9d90ce1..a56a0e66fae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSequenceExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSequenceExample.java @@ -186,7 +186,7 @@ public static ParseSequenceExample create(Scope scope, Operand serializ List> featureListSparseTypes, List> featureListRaggedValueTypes, List> featureListRaggedSplitTypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ParseSequenceExampleV2", scope.makeOpName("ParseSequenceExample")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ParseSequenceExample")); opBuilder.addInput(serialized.asOutput()); opBuilder.addInput(debugName.asOutput()); opBuilder.addInput(contextSparseKeys.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java index ecb09fca4a4..d4bb50938a5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java @@ -113,7 +113,7 @@ private ParseSingleExample(Operation operation) { public static ParseSingleExample create(Scope scope, Operand serialized, Iterable> denseDefaults, Long numSparse, List sparseKeys, List denseKeys, List> sparseTypes, List denseShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("ParseSingleExample", scope.makeOpName("ParseSingleExample")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ParseSingleExample")); opBuilder.addInput(serialized.asOutput()); opBuilder.addInputList(Operands.asOutputs(denseDefaults)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleSequenceExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleSequenceExample.java index 34347ecaf9d..e251c7dcdac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleSequenceExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleSequenceExample.java @@ -148,7 +148,7 @@ public static ParseSingleSequenceExample create(Scope scope, Operand se Operand debugName, List> contextSparseTypes, List> featureListDenseTypes, List> featureListSparseTypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ParseSingleSequenceExample", scope.makeOpName("ParseSingleSequenceExample")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ParseSingleSequenceExample")); opBuilder.addInput(serialized.asOutput()); opBuilder.addInput(featureListDenseMissingAssumedEmpty.asOutput()); opBuilder.addInputList(Operands.asOutputs(contextSparseKeys)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java index 52a94630841..ecabdeedd1d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java @@ -66,7 +66,7 @@ private ParseTensor(Operation operation) { ) public static ParseTensor create(Scope scope, Operand serialized, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("ParseTensor", scope.makeOpName("ParseTensor")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ParseTensor")); opBuilder.addInput(serialized.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("out_type", Operands.toDataType(outType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PriorityQueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PriorityQueue.java index 50f19104033..40494e12bbb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PriorityQueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PriorityQueue.java @@ -73,7 +73,7 @@ private PriorityQueue(Operation operation) { ) public static PriorityQueue create(Scope scope, List> componentTypes, List shapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("PriorityQueueV2", scope.makeOpName("PriorityQueue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PriorityQueue")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("component_types", Operands.toDataTypes(componentTypes)); Shape[] shapesArray = new Shape[shapes.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueClose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueClose.java index 6b8f52aa55f..6be7248ac76 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueClose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueClose.java @@ -60,7 +60,7 @@ private QueueClose(Operation operation) { ) public static QueueClose create(Scope scope, Operand handle, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QueueCloseV2", scope.makeOpName("QueueClose")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QueueClose")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeue.java index 58d59e2cd83..b5e34e4b502 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeue.java @@ -73,7 +73,7 @@ private QueueDequeue(Operation operation) { ) public static QueueDequeue create(Scope scope, Operand handle, List> componentTypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QueueDequeueV2", scope.makeOpName("QueueDequeue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QueueDequeue")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("component_types", Operands.toDataTypes(componentTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueMany.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueMany.java index 1e9f70e4cbf..0974a0a1123 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueMany.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueMany.java @@ -80,7 +80,7 @@ private QueueDequeueMany(Operation operation) { ) public static QueueDequeueMany create(Scope scope, Operand handle, Operand n, List> componentTypes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QueueDequeueManyV2", scope.makeOpName("QueueDequeueMany")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QueueDequeueMany")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(n.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueUpTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueUpTo.java index 55481610ae9..4a48ab57803 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueUpTo.java @@ -84,7 +84,7 @@ private QueueDequeueUpTo(Operation operation) { ) public static QueueDequeueUpTo create(Scope scope, Operand handle, Operand n, List> componentTypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QueueDequeueUpToV2", scope.makeOpName("QueueDequeueUpTo")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QueueDequeueUpTo")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(n.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueue.java index 69d45c14e4d..35dd60dbdcf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueue.java @@ -61,7 +61,7 @@ private QueueEnqueue(Operation operation) { ) public static QueueEnqueue create(Scope scope, Operand handle, Iterable> components, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QueueEnqueueV2", scope.makeOpName("QueueEnqueue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QueueEnqueue")); opBuilder.addInput(handle.asOutput()); opBuilder.addInputList(Operands.asOutputs(components)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueueMany.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueueMany.java index bb94e9135e2..e6cf9c9926e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueueMany.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueueMany.java @@ -65,7 +65,7 @@ private QueueEnqueueMany(Operation operation) { ) public static QueueEnqueueMany create(Scope scope, Operand handle, Iterable> components, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QueueEnqueueManyV2", scope.makeOpName("QueueEnqueueMany")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QueueEnqueueMany")); opBuilder.addInput(handle.asOutput()); opBuilder.addInputList(Operands.asOutputs(components)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueIsClosed.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueIsClosed.java index dc421adf0cb..9df0cef815e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueIsClosed.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueIsClosed.java @@ -61,7 +61,7 @@ private QueueIsClosed(Operation operation) { describeByClass = true ) public static QueueIsClosed create(Scope scope, Operand handle) { - OperationBuilder opBuilder = scope.env().opBuilder("QueueIsClosedV2", scope.makeOpName("QueueIsClosed")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QueueIsClosed")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); return new QueueIsClosed(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueSize.java index 660f30af385..ff803a50843 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueSize.java @@ -59,7 +59,7 @@ private QueueSize(Operation operation) { describeByClass = true ) public static QueueSize create(Scope scope, Operand handle) { - OperationBuilder opBuilder = scope.env().opBuilder("QueueSizeV2", scope.makeOpName("QueueSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QueueSize")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); return new QueueSize(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/RandomShuffleQueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/RandomShuffleQueue.java index 0cfb155868a..7a3cbd62c62 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/RandomShuffleQueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/RandomShuffleQueue.java @@ -65,7 +65,7 @@ private RandomShuffleQueue(Operation operation) { ) public static RandomShuffleQueue create(Scope scope, List> componentTypes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RandomShuffleQueueV2", scope.makeOpName("RandomShuffleQueue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomShuffleQueue")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("component_types", Operands.toDataTypes(componentTypes)); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReadFile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReadFile.java index 15ea7421431..5eafc6682ea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReadFile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReadFile.java @@ -58,7 +58,7 @@ private ReadFile(Operation operation) { describeByClass = true ) public static ReadFile create(Scope scope, Operand filename) { - OperationBuilder opBuilder = scope.env().opBuilder("ReadFile", scope.makeOpName("ReadFile")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReadFile")); opBuilder.addInput(filename.asOutput()); opBuilder = scope.apply(opBuilder); return new ReadFile(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumRecordsProduced.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumRecordsProduced.java index 6f59aa79e82..31952d11fcd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumRecordsProduced.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumRecordsProduced.java @@ -62,7 +62,7 @@ private ReaderNumRecordsProduced(Operation operation) { ) public static ReaderNumRecordsProduced create(Scope scope, Operand readerHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("ReaderNumRecordsProducedV2", scope.makeOpName("ReaderNumRecordsProduced")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReaderNumRecordsProduced")); opBuilder.addInput(readerHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new ReaderNumRecordsProduced(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumWorkUnitsCompleted.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumWorkUnitsCompleted.java index e2a9d63cf6f..7e2de97fd90 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumWorkUnitsCompleted.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumWorkUnitsCompleted.java @@ -60,7 +60,7 @@ private ReaderNumWorkUnitsCompleted(Operation operation) { ) public static ReaderNumWorkUnitsCompleted create(Scope scope, Operand readerHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("ReaderNumWorkUnitsCompletedV2", scope.makeOpName("ReaderNumWorkUnitsCompleted")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReaderNumWorkUnitsCompleted")); opBuilder.addInput(readerHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new ReaderNumWorkUnitsCompleted(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRead.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRead.java index 4d93e58b2af..d205d033ed2 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRead.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRead.java @@ -67,7 +67,7 @@ private ReaderRead(Operation operation) { ) public static ReaderRead create(Scope scope, Operand readerHandle, Operand queueHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("ReaderReadV2", scope.makeOpName("ReaderRead")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReaderRead")); opBuilder.addInput(readerHandle.asOutput()); opBuilder.addInput(queueHandle.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReadUpTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReadUpTo.java index 6ad36598c35..3e5b3100870 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReadUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReadUpTo.java @@ -70,7 +70,7 @@ private ReaderReadUpTo(Operation operation) { ) public static ReaderReadUpTo create(Scope scope, Operand readerHandle, Operand queueHandle, Operand numRecords) { - OperationBuilder opBuilder = scope.env().opBuilder("ReaderReadUpToV2", scope.makeOpName("ReaderReadUpTo")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReaderReadUpTo")); opBuilder.addInput(readerHandle.asOutput()); opBuilder.addInput(queueHandle.asOutput()); opBuilder.addInput(numRecords.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReset.java index fb4760c29c3..a96b6a75468 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReset.java @@ -53,7 +53,7 @@ private ReaderReset(Operation operation) { describeByClass = true ) public static ReaderReset create(Scope scope, Operand readerHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("ReaderResetV2", scope.makeOpName("ReaderReset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReaderReset")); opBuilder.addInput(readerHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new ReaderReset(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRestoreState.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRestoreState.java index 3bd856a0ea9..23f64953497 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRestoreState.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRestoreState.java @@ -59,7 +59,7 @@ private ReaderRestoreState(Operation operation) { ) public static ReaderRestoreState create(Scope scope, Operand readerHandle, Operand state) { - OperationBuilder opBuilder = scope.env().opBuilder("ReaderRestoreStateV2", scope.makeOpName("ReaderRestoreState")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReaderRestoreState")); opBuilder.addInput(readerHandle.asOutput()); opBuilder.addInput(state.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderSerializeState.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderSerializeState.java index 9f430108cee..c996d0d7d8d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderSerializeState.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderSerializeState.java @@ -61,7 +61,7 @@ private ReaderSerializeState(Operation operation) { describeByClass = true ) public static ReaderSerializeState create(Scope scope, Operand readerHandle) { - OperationBuilder opBuilder = scope.env().opBuilder("ReaderSerializeStateV2", scope.makeOpName("ReaderSerializeState")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReaderSerializeState")); opBuilder.addInput(readerHandle.asOutput()); opBuilder = scope.apply(opBuilder); return new ReaderSerializeState(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java index e0c0e3224e6..a47ed876db2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java @@ -76,7 +76,7 @@ private SerializeManySparse(Operation operation) { public static SerializeManySparse create(Scope scope, Operand sparseIndices, Operand sparseValues, Operand sparseShape, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("SerializeManySparse", scope.makeOpName("SerializeManySparse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SerializeManySparse")); opBuilder.addInput(sparseIndices.asOutput()); opBuilder.addInput(sparseValues.asOutput()); opBuilder.addInput(sparseShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java index a69add77a64..dec5fd1eeb9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java @@ -70,7 +70,7 @@ private SerializeSparse(Operation operation) { public static SerializeSparse create(Scope scope, Operand sparseIndices, Operand sparseValues, Operand sparseShape, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("SerializeSparse", scope.makeOpName("SerializeSparse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SerializeSparse")); opBuilder.addInput(sparseIndices.asOutput()); opBuilder.addInput(sparseValues.asOutput()); opBuilder.addInput(sparseShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeTensor.java index 85f7452f213..2d7dbecabee 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeTensor.java @@ -59,7 +59,7 @@ private SerializeTensor(Operation operation) { describeByClass = true ) public static SerializeTensor create(Scope scope, Operand tensor) { - OperationBuilder opBuilder = scope.env().opBuilder("SerializeTensor", scope.makeOpName("SerializeTensor")); + 
OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SerializeTensor")); opBuilder.addInput(tensor.asOutput()); opBuilder = scope.apply(opBuilder); return new SerializeTensor(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilename.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilename.java index 912f27b7bc3..744a34b1329 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilename.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilename.java @@ -63,7 +63,7 @@ private ShardedFilename(Operation operation) { ) public static ShardedFilename create(Scope scope, Operand basename, Operand shard, Operand numShards) { - OperationBuilder opBuilder = scope.env().opBuilder("ShardedFilename", scope.makeOpName("ShardedFilename")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ShardedFilename")); opBuilder.addInput(basename.asOutput()); opBuilder.addInput(shard.asOutput()); opBuilder.addInput(numShards.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilespec.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilespec.java index e164304a889..8b369b037c0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilespec.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilespec.java @@ -61,7 +61,7 @@ private ShardedFilespec(Operation operation) { ) public static ShardedFilespec create(Scope scope, Operand basename, Operand numShards) { - OperationBuilder opBuilder = scope.env().opBuilder("ShardedFilespec", scope.makeOpName("ShardedFilespec")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ShardedFilespec")); opBuilder.addInput(basename.asOutput()); opBuilder.addInput(numShards.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TextLineReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TextLineReader.java index 61d2afa4869..136a4b7364d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TextLineReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TextLineReader.java @@ -59,7 +59,7 @@ private TextLineReader(Operation operation) { describeByClass = true ) public static TextLineReader create(Scope scope, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TextLineReaderV2", scope.makeOpName("TextLineReader")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TextLineReader")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TfRecordReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TfRecordReader.java index 8d2c43efb03..8e8ca9daf9e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TfRecordReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TfRecordReader.java @@ -59,7 +59,7 @@ private TfRecordReader(Operation operation) { describeByClass = true ) public static TfRecordReader create(Scope scope, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("TFRecordReaderV2", scope.makeOpName("TfRecordReader")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TfRecordReader")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WholeFileReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WholeFileReader.java index d40b25906dd..8c14d1b103b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WholeFileReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WholeFileReader.java @@ -61,7 +61,7 @@ private WholeFileReader(Operation operation) { describeByClass = true ) public static WholeFileReader create(Scope scope, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("WholeFileReaderV2", scope.makeOpName("WholeFileReader")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WholeFileReader")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java index 4397e567d90..28c66faab20 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java @@ -56,7 +56,7 @@ private WriteFile(Operation operation) { ) public static WriteFile create(Scope scope, Operand filename, Operand contents) { - OperationBuilder opBuilder = scope.env().opBuilder("WriteFile", scope.makeOpName("WriteFile")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WriteFile")); opBuilder.addInput(filename.asOutput()); opBuilder.addInput(contents.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java index 4205f873deb..77f4ac33b9b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java @@ -97,7 +97,7 @@ private BandPart(Operation operation) { ) public static BandPart create(Scope scope, Operand input, Operand numLower, Operand numUpper) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixBandPart", scope.makeOpName("BandPart")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BandPart")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(numLower.asOutput()); opBuilder.addInput(numUpper.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java index cc7df0281c9..144660c2053 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java @@ -60,7 +60,7 @@ private BandedTriangularSolve(Operation operation) { ) public static BandedTriangularSolve create(Scope scope, Operand 
matrix, Operand rhs, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("BandedTriangularSolve", scope.makeOpName("BandedTriangularSolve")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BandedTriangularSolve")); opBuilder.addInput(matrix.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java index af22fb9c574..9be5326dbf3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java @@ -61,7 +61,7 @@ private BatchCholesky(Operation operation) { describeByClass = true ) public static BatchCholesky create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchCholesky", scope.makeOpName("BatchCholesky")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchCholesky")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new BatchCholesky<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java index 50ad2bae38a..52f1bef7273 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java @@ -63,7 +63,7 @@ private BatchCholeskyGrad(Operation operation) { ) public static BatchCholeskyGrad create(Scope scope, Operand l, Operand grad) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchCholeskyGrad", scope.makeOpName("BatchCholeskyGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchCholeskyGrad")); opBuilder.addInput(l.asOutput()); opBuilder.addInput(grad.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java index 4fc2f6f262a..bd756ae0cd9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java @@ -65,7 +65,7 @@ private BatchMatrixBandPart(Operation operation) { ) public static BatchMatrixBandPart create(Scope scope, Operand input, Operand numLower, Operand numUpper) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchMatrixBandPart", scope.makeOpName("BatchMatrixBandPart")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchMatrixBandPart")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(numLower.asOutput()); opBuilder.addInput(numUpper.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java index 72ff4326b11..f5f06c0557e 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java @@ -61,7 +61,7 @@ private BatchMatrixDeterminant(Operation operation) { describeByClass = true ) public static BatchMatrixDeterminant create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchMatrixDeterminant", scope.makeOpName("BatchMatrixDeterminant")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchMatrixDeterminant")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new BatchMatrixDeterminant<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java index 09395a2925a..e79c4e6359a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java @@ -61,7 +61,7 @@ private BatchMatrixDiag(Operation operation) { describeByClass = true ) public static BatchMatrixDiag create(Scope scope, Operand diagonal) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchMatrixDiag", scope.makeOpName("BatchMatrixDiag")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchMatrixDiag")); opBuilder.addInput(diagonal.asOutput()); opBuilder = scope.apply(opBuilder); return new BatchMatrixDiag<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java index f438faf728e..068e42fc02d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java @@ -61,7 +61,7 @@ private BatchMatrixDiagPart(Operation operation) { describeByClass = true ) public static BatchMatrixDiagPart create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchMatrixDiagPart", scope.makeOpName("BatchMatrixDiagPart")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchMatrixDiagPart")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new BatchMatrixDiagPart<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java index 6e99d4a575b..d50d411971a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java @@ -63,7 +63,7 @@ private BatchMatrixInverse(Operation operation) { ) public static BatchMatrixInverse create(Scope scope, Operand input, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchMatrixInverse", scope.makeOpName("BatchMatrixInverse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchMatrixInverse")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java index 87a80dddc78..dc02831a296 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java @@ -63,7 +63,7 @@ private BatchMatrixSetDiag(Operation operation) { ) public static BatchMatrixSetDiag create(Scope scope, Operand input, Operand diagonal) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchMatrixSetDiag", scope.makeOpName("BatchMatrixSetDiag")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchMatrixSetDiag")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(diagonal.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java index fc9ea69fc10..9170246b78f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java @@ -64,7 +64,7 @@ private BatchMatrixSolve(Operation operation) { ) public static BatchMatrixSolve create(Scope scope, Operand matrix, Operand rhs, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchMatrixSolve", scope.makeOpName("BatchMatrixSolve")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchMatrixSolve")); opBuilder.addInput(matrix.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java index a88d9999dd6..cd8011f1d83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java @@ -66,7 +66,7 @@ private BatchMatrixSolveLs(Operation operation) { ) public static BatchMatrixSolveLs create(Scope scope, Operand matrix, Operand rhs, Operand l2Regularizer, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchMatrixSolveLs", scope.makeOpName("BatchMatrixSolveLs")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchMatrixSolveLs")); opBuilder.addInput(matrix.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder.addInput(l2Regularizer.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java index beca75fc48c..ae3023e22a3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java @@ -64,7 +64,7 @@ private BatchMatrixTriangularSolve(Operation operation) { ) public static BatchMatrixTriangularSolve create(Scope scope, Operand matrix, Operand rhs, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchMatrixTriangularSolve", scope.makeOpName("BatchMatrixTriangularSolve")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchMatrixTriangularSolve")); opBuilder.addInput(matrix.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java index 16b2f162576..bf624b3668e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java @@ -66,7 +66,7 @@ private BatchSelfAdjointEig(Operation operation) { ) public static BatchSelfAdjointEig create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchSelfAdjointEigV2", scope.makeOpName("BatchSelfAdjointEig")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchSelfAdjointEig")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java index 95600f6f7e1..3ef2cb277df 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java @@ -69,7 +69,7 @@ private BatchSvd(Operation operation) { ) public static BatchSvd create(Scope scope, Operand input, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchSvd", scope.makeOpName("BatchSvd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchSvd")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java index 09ac253370d..ab4ee760183 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java @@ -71,7 +71,7 @@ private Cholesky(Operation operation) { describeByClass = true ) public static Cholesky create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("Cholesky", scope.makeOpName("Cholesky")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Cholesky")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Cholesky<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java index be0fcc6fe71..dcd90da3041 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java @@ -69,7 +69,7 @@ private CholeskyGrad(Operation operation) { ) public static CholeskyGrad create(Scope scope, Operand l, Operand grad) { - OperationBuilder opBuilder = scope.env().opBuilder("CholeskyGrad", scope.makeOpName("CholeskyGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CholeskyGrad")); opBuilder.addInput(l.asOutput()); opBuilder.addInput(grad.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java index 622f1e2469e..0bbcbbb88d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java @@ -67,7 +67,7 @@ private ConjugateTranspose(Operation operation) { ) public static ConjugateTranspose create(Scope scope, Operand x, Operand perm) { - OperationBuilder opBuilder = scope.env().opBuilder("ConjugateTranspose", scope.makeOpName("ConjugateTranspose")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ConjugateTranspose")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(perm.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java index 004c88358fd..3a61a137e51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java @@ -65,7 +65,7 @@ private Cross(Operation operation) { describeByClass = true ) public static Cross create(Scope scope, Operand a, Operand b) { - OperationBuilder opBuilder = 
scope.env().opBuilder("Cross", scope.makeOpName("Cross")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Cross")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java index 36f48f9383b..7047cb8d97f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java @@ -64,7 +64,7 @@ private Det(Operation operation) { describeByClass = true ) public static Det create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixDeterminant", scope.makeOpName("Det")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Det")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Det<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java index bb7ab87c4a0..b1cce520076 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java @@ -78,7 +78,7 @@ private Eig(Operation operation) { ) public static Eig create(Scope scope, Operand input, Class Tout, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Eig", scope.makeOpName("Eig")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Eig")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("Tout", Operands.toDataType(Tout)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java index b9b334441ec..51ff55b64a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java @@ -127,7 +127,7 @@ private Einsum(Operation operation) { ) public static Einsum create(Scope scope, Iterable> inputs, String equation) { - OperationBuilder opBuilder = scope.env().opBuilder("Einsum", scope.makeOpName("Einsum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Einsum")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("equation", equation); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java index befba75f3c2..911650b50ea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java @@ -70,7 +70,7 @@ private EuclideanNorm(Operation operation) { ) public static EuclideanNorm create(Scope scope, Operand input, Operand axis, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("EuclideanNorm", scope.makeOpName("EuclideanNorm")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EuclideanNorm")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java index ca8f65ed27f..67f6ab7895f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java @@ -69,7 +69,7 @@ private Inv(Operation operation) { describeByClass = true ) public static Inv create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixInverse", scope.makeOpName("Inv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Inv")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LoadAndRemapMatrix.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LoadAndRemapMatrix.java index 854ea4b2e51..f7e1d8214c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LoadAndRemapMatrix.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LoadAndRemapMatrix.java @@ -106,7 +106,7 @@ private LoadAndRemapMatrix(Operation operation) { public static LoadAndRemapMatrix create(Scope scope, Operand ckptPath, Operand oldTensorName, Operand rowRemapping, Operand colRemapping, Operand initializingValues, Long numRows, Long numCols, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadAndRemapMatrix", scope.makeOpName("LoadAndRemapMatrix")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadAndRemapMatrix")); opBuilder.addInput(ckptPath.asOutput()); opBuilder.addInput(oldTensorName.asOutput()); opBuilder.addInput(rowRemapping.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java index 26efa559281..a5374bb74f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java @@ -72,7 +72,7 @@ private LogMatrixDeterminant(Operation operation) { describeByClass = true ) public static LogMatrixDeterminant create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("LogMatrixDeterminant", scope.makeOpName("LogMatrixDeterminant")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LogMatrixDeterminant")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new LogMatrixDeterminant<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java index cc6adce3d1c..69be99d1411 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java @@ -86,7 +86,7 @@ private Lu(Operation operation) { ) public static Lu create(Scope scope, Operand input, Class outputIdxType) { - OperationBuilder opBuilder = scope.env().opBuilder("Lu", scope.makeOpName("Lu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Lu")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_idx_type", Operands.toDataType(outputIdxType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java index a82e456fa7c..5e67b7a3c4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java @@ -70,7 +70,7 @@ private MatMul(Operation operation) { ) public static MatMul create(Scope scope, Operand a, Operand b, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MatMul", scope.makeOpName("MatMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatMul")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java index b435a8f3ee3..cf9a8b9b4f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java @@ -156,7 +156,7 @@ private MatrixDiag(Operation operation) { public static MatrixDiag create(Scope scope, Operand diagonal, Operand k, Operand numRows, Operand numCols, Operand paddingValue) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixDiagV2", scope.makeOpName("MatrixDiag")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatrixDiag")); opBuilder.addInput(diagonal.asOutput()); opBuilder.addInput(k.asOutput()); opBuilder.addInput(numRows.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java index 7b05239e769..4e61470cebd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java @@ -129,7 +129,7 @@ private MatrixDiagPart(Operation operation) { ) public static MatrixDiagPart create(Scope scope, Operand input, Operand k, Operand paddingValue) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixDiagPartV2", scope.makeOpName("MatrixDiagPart")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatrixDiagPart")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(k.asOutput()); opBuilder.addInput(paddingValue.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java index fdf06598984..53c442dfb66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java @@ -160,7 +160,7 @@ private MatrixDiagPartV3(Operation operation) { ) public static MatrixDiagPartV3 create(Scope scope, Operand input, Operand k, Operand paddingValue, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixDiagPartV3", scope.makeOpName("MatrixDiagPartV3")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatrixDiagPartV3")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(k.asOutput()); opBuilder.addInput(paddingValue.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java index 3a118c142d4..22b39d750d9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java @@ -185,7 +185,7 @@ private MatrixDiagV3(Operation operation) { public static MatrixDiagV3 create(Scope scope, Operand diagonal, Operand k, Operand numRows, Operand numCols, Operand paddingValue, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixDiagV3", scope.makeOpName("MatrixDiagV3")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatrixDiagV3")); opBuilder.addInput(diagonal.asOutput()); opBuilder.addInput(k.asOutput()); opBuilder.addInput(numRows.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java index bcc4f514762..7d6f981d59c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java @@ -68,7 +68,7 @@ private MatrixLogarithm(Operation operation) { describeByClass = true ) public static MatrixLogarithm create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixLogarithm", scope.makeOpName("MatrixLogarithm")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatrixLogarithm")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new MatrixLogarithm<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java index 61142a0228a..041fc78abab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java @@ -166,7 +166,7 @@ private MatrixSetDiag(Operation operation) { ) public static MatrixSetDiag create(Scope scope, Operand input, Operand diagonal, Operand k, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixSetDiagV3", scope.makeOpName("MatrixSetDiag")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatrixSetDiag")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(diagonal.asOutput()); opBuilder.addInput(k.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java index c68a78bd3ee..c838da66ca4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java @@ -99,7 +99,7 @@ private MatrixSolveLs(Operation operation) { ) public static MatrixSolveLs create(Scope scope, Operand matrix, Operand rhs, Operand l2Regularizer, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixSolveLs", scope.makeOpName("MatrixSolveLs")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MatrixSolveLs")); opBuilder.addInput(matrix.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder.addInput(l2Regularizer.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java index 513f2d3baef..bb0276b4fef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java @@ -78,7 +78,7 @@ private Qr(Operation operation) { describeByClass = true ) public static Qr create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Qr", scope.makeOpName("Qr")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Qr")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java index 30b464c48b4..87131aada14 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java @@ -86,7 +86,7 @@ public static QuantizedMatMul create(S Operand a, Operand b, Operand minA, Operand maxA, Operand minB, Operand maxB, Class Toutput, Class Tactivation, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedMatMul", scope.makeOpName("QuantizedMatMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedMatMul")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder.addInput(minA.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java index b781b62d915..ae37f39c042 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java @@ -83,7 +83,7 @@ public static QuantizedMatMulWithBias create(Scope scope, Operand a, Operand b, Operand bias, Operand minA, Operand maxA, Operand minB, Operand maxB, Class Toutput, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedMatMulWithBias", scope.makeOpName("QuantizedMatMulWithBias")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedMatMulWithBias")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java index 8a45662ec0a..b5a9c0adb82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java @@ -84,7 +84,7 @@ public static QuantizedMatMulWithBiasAndRelu create(Scope Operand a, Operand b, Operand bias, Operand minA, Operand maxA, Operand minB, Operand maxB, Class Toutput, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedMatMulWithBiasAndRelu", scope.makeOpName("QuantizedMatMulWithBiasAndRelu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedMatMulWithBiasAndRelu")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java index 65415fbfd3c..4d53f20eab9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java @@ -88,7 +88,7 @@ public static QuantizedMatMulWithBiasAndReluAndRequantize Operand bias, Operand minA, Operand maxA, Operand minB, Operand maxB, Operand minFreezedOutput, Operand maxFreezedOutput, Class Toutput, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedMatMulWithBiasAndReluAndRequantize", scope.makeOpName("QuantizedMatMulWithBiasAndReluAndRequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedMatMulWithBiasAndReluAndRequantize")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java index d01b4f3b829..51415f87a2c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java @@ -76,7 +76,7 @@ private SelfAdjointEig(Operation operation) { ) public static SelfAdjointEig create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SelfAdjointEigV2", scope.makeOpName("SelfAdjointEig")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SelfAdjointEig")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java index 2fe29966804..b6eb29e4857 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java @@ -70,7 +70,7 @@ private Solve(Operation operation) { ) public static Solve create(Scope scope, Operand matrix, Operand rhs, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixSolve", scope.makeOpName("Solve")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Solve")); opBuilder.addInput(matrix.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java index 3b09ba5ce39..cfff7ae9494 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java @@ -73,7 +73,7 @@ private Sqrtm(Operation operation) { describeByClass = true ) public static Sqrtm create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixSquareRoot", scope.makeOpName("Sqrtm")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Sqrtm")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Sqrtm<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java index ed2ae52d604..5f1328bdfa7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java @@ -79,7 +79,7 @@ private Svd(Operation operation) { describeByClass = true ) public static Svd create(Scope scope, Operand input, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Svd", scope.makeOpName("Svd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Svd")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java index 29f5a142746..093c4c7e27a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java @@ -74,7 +74,7 @@ private TensorDiag(Operation operation) { describeByClass = true ) public static TensorDiag create(Scope scope, Operand diagonal) { - OperationBuilder opBuilder = scope.env().opBuilder("Diag", scope.makeOpName("TensorDiag")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorDiag")); opBuilder.addInput(diagonal.asOutput()); opBuilder = scope.apply(opBuilder); return new TensorDiag<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java index 0ec6ee1c92b..42dc603d1f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java @@ -75,7 +75,7 @@ private TensorDiagPart(Operation operation) { describeByClass = true ) public static TensorDiagPart create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("DiagPart", scope.makeOpName("TensorDiagPart")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorDiagPart")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new TensorDiagPart<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java index ecec93ee1b2..46703ae3a66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java @@ -66,7 +66,7 @@ private Transpose(Operation operation) { ) public static Transpose create(Scope scope, Operand x, Operand perm) { - OperationBuilder opBuilder = scope.env().opBuilder("Transpose", scope.makeOpName("Transpose")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Transpose")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(perm.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java index 63d24dd8554..b2a0047efbc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java @@ -106,7 +106,7 @@ private TriangularSolve(Operation operation) { ) public static TriangularSolve create(Scope scope, Operand matrix, Operand rhs, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MatrixTriangularSolve", scope.makeOpName("TriangularSolve")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TriangularSolve")); opBuilder.addInput(matrix.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java index c7e17d347e2..d15c3c7f7fe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java @@ -66,7 +66,7 @@ private TridiagonalMatMul(Operation operation) { ) public static TridiagonalMatMul create(Scope scope, Operand superdiag, Operand maindiag, Operand subdiag, Operand rhs) { - OperationBuilder opBuilder = scope.env().opBuilder("TridiagonalMatMul", scope.makeOpName("TridiagonalMatMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TridiagonalMatMul")); opBuilder.addInput(superdiag.asOutput()); opBuilder.addInput(maindiag.asOutput()); opBuilder.addInput(subdiag.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java index f2e37cd217b..04f5d95a4cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java @@ -71,7 +71,7 @@ private TridiagonalSolve(Operation operation) { ) public static TridiagonalSolve create(Scope scope, Operand diagonals, Operand rhs, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("TridiagonalSolve", scope.makeOpName("TridiagonalSolve")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TridiagonalSolve")); opBuilder.addInput(diagonals.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java index ea4b548550d..cab58a5bb71 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java @@ -70,7 +70,7 @@ private CSRSparseMatrixComponents(Operation operation) { ) public static CSRSparseMatrixComponents create(Scope scope, Operand csrSparseMatrix, Operand index, Class type) { - OperationBuilder opBuilder = scope.env().opBuilder("CSRSparseMatrixComponents", scope.makeOpName("CSRSparseMatrixComponents")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CSRSparseMatrixComponents")); opBuilder.addInput(csrSparseMatrix.asOutput()); opBuilder.addInput(index.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java index cc8ac477876..cb992b01b6f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java @@ -60,7 +60,7 @@ private CSRSparseMatrixToDense(Operation operation) { ) public static CSRSparseMatrixToDense create(Scope scope, Operand sparseInput, Class type) { - OperationBuilder opBuilder = scope.env().opBuilder("CSRSparseMatrixToDense", scope.makeOpName("CSRSparseMatrixToDense")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CSRSparseMatrixToDense")); opBuilder.addInput(sparseInput.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("type", Operands.toDataType(type)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java index 553480c9462..0fa5e61e95c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java @@ -67,7 +67,7 @@ private CSRSparseMatrixToSparseTensor(Operation operation) { ) public static CSRSparseMatrixToSparseTensor create(Scope scope, Operand sparseMatrix, Class type) { - OperationBuilder opBuilder = scope.env().opBuilder("CSRSparseMatrixToSparseTensor", scope.makeOpName("CSRSparseMatrixToSparseTensor")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CSRSparseMatrixToSparseTensor")); opBuilder.addInput(sparseMatrix.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("type", Operands.toDataType(type)); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java index 982450f9174..85698b15126 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java @@ -58,7 +58,7 @@ private DenseToCSRSparseMatrix(Operation operation) { ) public static DenseToCSRSparseMatrix create(Scope scope, Operand denseInput, Operand indices) { - OperationBuilder opBuilder = scope.env().opBuilder("DenseToCSRSparseMatrix", scope.makeOpName("DenseToCSRSparseMatrix")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DenseToCSRSparseMatrix")); opBuilder.addInput(denseInput.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java index 2aa740b9c1c..4a27f1c657e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java @@ -62,7 +62,7 @@ private SparseMatrixAdd(Operation operation) { ) public static SparseMatrixAdd create(Scope scope, Operand a, Operand b, Operand alpha, Operand beta) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixAdd", scope.makeOpName("SparseMatrixAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixAdd")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder.addInput(alpha.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java index 021e86bf4d1..dba8e84ea2e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java @@ -81,7 +81,7 @@ private SparseMatrixMatMul(Operation operation) { ) public static SparseMatrixMatMul create(Scope scope, Operand a, Operand b, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixMatMul", scope.makeOpName("SparseMatrixMatMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixMatMul")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMul.java index 4a9591186f0..1c276673459 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMul.java @@ -63,7 +63,7 @@ private SparseMatrixMul(Operation operation) { ) public static SparseMatrixMul create(Scope scope, Operand a, Operand b) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixMul", scope.makeOpName("SparseMatrixMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixMul")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java index 25bcb71d722..8522899df3e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java @@ -55,7 +55,7 @@ private SparseMatrixNNZ(Operation operation) { describeByClass = true ) public static SparseMatrixNNZ create(Scope scope, Operand sparseMatrix) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixNNZ", scope.makeOpName("SparseMatrixNNZ")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixNNZ")); opBuilder.addInput(sparseMatrix.asOutput()); opBuilder = scope.apply(opBuilder); return new SparseMatrixNNZ(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java index 2e3c95d2db8..7e267045538 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java @@ -96,7 +96,7 @@ private SparseMatrixOrderingAMD(Operation operation) { describeByClass = true ) public static SparseMatrixOrderingAMD create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixOrderingAMD", scope.makeOpName("SparseMatrixOrderingAMD")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixOrderingAMD")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new SparseMatrixOrderingAMD(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java index c7edeac6eca..339653ff4d2 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java @@ -64,7 +64,7 @@ private SparseMatrixSoftmax(Operation operation) { ) public static SparseMatrixSoftmax create(Scope scope, Operand logits, Class type) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixSoftmax", scope.makeOpName("SparseMatrixSoftmax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixSoftmax")); opBuilder.addInput(logits.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("type", Operands.toDataType(type)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java index b420c86cfa7..390e4b704f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java @@ -61,7 +61,7 @@ private SparseMatrixSoftmaxGrad(Operation operation) { ) public static SparseMatrixSoftmaxGrad create(Scope scope, Operand softmax, Operand gradSoftmax, Class type) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixSoftmaxGrad", scope.makeOpName("SparseMatrixSoftmaxGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixSoftmaxGrad")); opBuilder.addInput(softmax.asOutput()); opBuilder.addInput(gradSoftmax.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java index bfd4c8ad6a1..edaab7eb54b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java @@ -125,7 +125,7 @@ private SparseMatrixSparseCholesky(Operation operation) { ) public static SparseMatrixSparseCholesky create(Scope scope, Operand input, Operand permutation, Class type) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixSparseCholesky", scope.makeOpName("SparseMatrixSparseCholesky")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixSparseCholesky")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(permutation.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java index 22b6bac7a83..87995a7e78e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java @@ -127,7 +127,7 @@ private SparseMatrixSparseMatMul(Operation operation) { ) public static SparseMatrixSparseMatMul create(Scope scope, Operand a, Operand b, Class type, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixSparseMatMul", scope.makeOpName("SparseMatrixSparseMatMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixSparseMatMul")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java index 29432e58cf0..8ad12d3289e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java @@ -62,7 +62,7 @@ private SparseMatrixTranspose(Operation operation) { ) public static SparseMatrixTranspose create(Scope scope, Operand input, Class type, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixTranspose", scope.makeOpName("SparseMatrixTranspose")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixTranspose")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("type", Operands.toDataType(type)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java index 81651bcd0f6..e7f0a48e1aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java @@ -60,7 +60,7 @@ private SparseMatrixZeros(Operation operation) { ) public static SparseMatrixZeros create(Scope scope, Operand denseShape, Class type) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatrixZeros", scope.makeOpName("SparseMatrixZeros")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatrixZeros")); opBuilder.addInput(denseShape.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("type", Operands.toDataType(type)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java index 357549e364e..12f27f6ebea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java @@ -59,7 +59,7 @@ private SparseTensorToCSRSparseMatrix(Operation operation) { ) public static SparseTensorToCSRSparseMatrix create(Scope scope, Operand indices, Operand values, Operand denseShape) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseTensorToCSRSparseMatrix", scope.makeOpName("SparseTensorToCSRSparseMatrix")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseTensorToCSRSparseMatrix")); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder.addInput(denseShape.asOutput()); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java index f29c1aedd64..249b2567578 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java @@ -64,7 +64,7 @@ private Abs(Operation operation) { describeByClass = true ) public static Abs create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Abs", scope.makeOpName("Abs")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Abs")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Abs<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java index 11beca2b5a9..2e314a940b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java @@ -71,7 +71,7 @@ private AccumulateN(Operation operation) { ) public static AccumulateN create(Scope scope, Iterable> inputs, Shape shape) { - OperationBuilder opBuilder = scope.env().opBuilder("AccumulateNV2", scope.makeOpName("AccumulateN")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AccumulateN")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java index d25c76a840f..063b23c388a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java @@ -63,7 +63,7 @@ private Acos(Operation operation) { describeByClass = true ) public static Acos create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Acos", scope.makeOpName("Acos")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Acos")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Acos<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java index a6170975930..37594adbbf2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java @@ -67,7 +67,7 @@ private Acosh(Operation operation) { describeByClass = true ) public static Acosh create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Acosh", scope.makeOpName("Acosh")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Acosh")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Acosh<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java index a7072481217..a1269762893 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java @@ -66,7 +66,7 @@ private Add(Operation operation) { describeByClass = true ) public static Add create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Add", scope.makeOpName("Add")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Add")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java index 0519af46cb5..664e648d2ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java @@ -67,7 +67,7 @@ private AddN(Operation operation) { describeByClass = true ) public static AddN create(Scope scope, Iterable> inputs) { - OperationBuilder opBuilder = scope.env().opBuilder("AddN", scope.makeOpName("AddN")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AddN")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); return new AddN<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java index 7a7a059538c..7f251ea8f65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java @@ -79,7 +79,7 @@ private Angle(Operation operation) { ) public static Angle create(Scope scope, Operand input, Class Tout) { - OperationBuilder opBuilder = scope.env().opBuilder("Angle", scope.makeOpName("Angle")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Angle")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("Tout", Operands.toDataType(Tout)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ApproximateEqual.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ApproximateEqual.java index 5da4254ff88..43972eefec5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ApproximateEqual.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ApproximateEqual.java @@ -63,7 +63,7 @@ private ApproximateEqual(Operation operation) { ) public static ApproximateEqual create(Scope scope, Operand x, Operand y, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApproximateEqual", scope.makeOpName("ApproximateEqual")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApproximateEqual")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java index 3e9308f1a06..5e66b0e5077 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java @@ -79,7 +79,7 @@ private ArgMax(Operation operation) { ) public static ArgMax create(Scope scope, Operand input, Operand dimension, Class outputType) { - OperationBuilder opBuilder = scope.env().opBuilder("ArgMax", scope.makeOpName("ArgMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ArgMax")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(dimension.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java index d72605944fc..d477cc981a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java @@ -79,7 +79,7 @@ private ArgMin(Operation operation) { ) public static ArgMin create(Scope scope, Operand input, Operand dimension, Class outputType) { - OperationBuilder opBuilder = scope.env().opBuilder("ArgMin", scope.makeOpName("ArgMin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ArgMin")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(dimension.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java index 600d865afed..d3f30e93021 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java @@ -73,7 +73,7 @@ private Asin(Operation operation) { describeByClass = true ) public static Asin create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Asin", scope.makeOpName("Asin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Asin")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Asin<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java index f8b6fd84adc..b9e925d448a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java @@ -68,7 +68,7 @@ private Asinh(Operation operation) { describeByClass = true ) public static Asinh create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Asinh", scope.makeOpName("Asinh")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Asinh")); 
opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Asinh<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java index 85a6a0ef221..ca2794cd466 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java @@ -73,7 +73,7 @@ private Atan(Operation operation) { describeByClass = true ) public static Atan create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Atan", scope.makeOpName("Atan")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Atan")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Atan<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java index 822425ac47a..d89e5ab0e09 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java @@ -67,7 +67,7 @@ private Atan2(Operation operation) { describeByClass = true ) public static Atan2 create(Scope scope, Operand y, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Atan2", scope.makeOpName("Atan2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Atan2")); opBuilder.addInput(y.asOutput()); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java index 55bb3b613b4..69467ed3e57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java @@ -70,7 +70,7 @@ private Atanh(Operation operation) { describeByClass = true ) public static Atanh create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Atanh", scope.makeOpName("Atanh")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Atanh")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Atanh<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java index 3c906b94eb0..455e0d56b2c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java @@ -57,7 +57,7 @@ private BesselI0(Operation operation) { describeByClass = true ) public static BesselI0 create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselI0", scope.makeOpName("BesselI0")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselI0")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselI0<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java index be97c5a5715..f5d69b5ab87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java @@ -57,7 +57,7 @@ private BesselI0e(Operation operation) { describeByClass = true ) public static BesselI0e create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselI0e", scope.makeOpName("BesselI0e")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselI0e")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselI0e<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java index 9b5c2e38932..a31a2245a90 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java @@ -57,7 +57,7 @@ private BesselI1(Operation operation) { describeByClass = true ) public static BesselI1 create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselI1", scope.makeOpName("BesselI1")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselI1")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselI1<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java index e79790decfd..181f9738b63 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java @@ -57,7 +57,7 @@ private BesselI1e(Operation operation) { describeByClass = true ) public static BesselI1e create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselI1e", scope.makeOpName("BesselI1e")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselI1e")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselI1e<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java index c7ba55995a5..ca2b90c3976 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java @@ -70,7 +70,7 @@ private Betainc(Operation operation) { ) public static Betainc create(Scope scope, Operand a, Operand b, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Betainc", scope.makeOpName("Betainc")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Betainc")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder.addInput(x.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java index aba2671e133..ad4b7beca9b 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java @@ -73,7 +73,7 @@ private Bincount(Operation operation) { ) public static Bincount create(Scope scope, Operand arr, Operand sizeOutput, Operand weights) { - OperationBuilder opBuilder = scope.env().opBuilder("Bincount", scope.makeOpName("Bincount")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Bincount")); opBuilder.addInput(arr.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder.addInput(weights.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java index e77f5356233..0f2d592a623 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java @@ -61,7 +61,7 @@ private Ceil(Operation operation) { describeByClass = true ) public static Ceil create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Ceil", scope.makeOpName("Ceil")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Ceil")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Ceil<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CompareAndBitpack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CompareAndBitpack.java index 06c8f028453..f3330443424 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CompareAndBitpack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CompareAndBitpack.java @@ -81,7 +81,7 @@ private CompareAndBitpack(Operation operation) { ) public static CompareAndBitpack create(Scope scope, Operand input, Operand threshold) { - OperationBuilder opBuilder = scope.env().opBuilder("CompareAndBitpack", scope.makeOpName("CompareAndBitpack")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CompareAndBitpack")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(threshold.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java index 5f9dc2e62f6..ba0f1015f54 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java @@ -70,7 +70,7 @@ private ComplexAbs(Operation operation) { ) public static ComplexAbs create(Scope scope, Operand x, Class Tout) { - OperationBuilder opBuilder = scope.env().opBuilder("ComplexAbs", scope.makeOpName("ComplexAbs")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ComplexAbs")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("Tout", Operands.toDataType(Tout)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java index 3d009360a25..504d351b494 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java @@ -71,7 +71,7 @@ private Conj(Operation operation) { describeByClass = true ) public static Conj create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("Conj", scope.makeOpName("Conj")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Conj")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Conj<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java index 84a1665786b..d4e329e1cd6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java @@ -69,7 +69,7 @@ private Cos(Operation operation) { describeByClass = true ) public static Cos create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Cos", scope.makeOpName("Cos")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Cos")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Cos<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java index 92ff056d059..7ce9f8ec55a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java @@ -68,7 +68,7 @@ private Cosh(Operation operation) { describeByClass = true ) public static Cosh create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Cosh", scope.makeOpName("Cosh")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Cosh")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Cosh<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java index f4c6602ec56..39044e4d6bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java @@ -88,7 +88,7 @@ private Cumprod(Operation operation) { ) public static Cumprod create(Scope scope, Operand x, Operand axis, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Cumprod", scope.makeOpName("Cumprod")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Cumprod")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java index fed21ea095b..b1e69cd3416 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java @@ -88,7 +88,7 @@ private Cumsum(Operation operation) { ) public static Cumsum create(Scope scope, Operand x, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Cumsum", scope.makeOpName("Cumsum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Cumsum")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java index cb240aa3f7b..52953db453a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java @@ -77,7 +77,7 @@ private CumulativeLogsumexp(Operation operation) { ) public static CumulativeLogsumexp create(Scope scope, Operand x, Operand axis, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CumulativeLogsumexp", scope.makeOpName("CumulativeLogsumexp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CumulativeLogsumexp")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java index 542464e1daa..b58d5cc1c43 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java @@ -74,7 +74,7 @@ private DenseBincount(Operation operation) { ) public static DenseBincount create(Scope scope, Operand input, Operand sizeOutput, Operand weights, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DenseBincount", scope.makeOpName("DenseBincount")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DenseBincount")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder.addInput(weights.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java index 3b9bdcb8cc4..5903fefec40 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java @@ -62,7 +62,7 @@ private Digamma(Operation operation) { describeByClass = true ) public static Digamma create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Digamma", scope.makeOpName("Digamma")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Digamma")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Digamma<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java index 97ec0fb4ac8..3f62060e9be 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java @@ -64,7 +64,7 @@ private Div(Operation operation) { describeByClass = true ) public static Div create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Div", scope.makeOpName("Div")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Div")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java index 1fd6eb73e25..ba8d8501b54 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java @@ -64,7 +64,7 @@ private DivNoNan(Operation operation) { describeByClass = true ) public static DivNoNan create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("DivNoNan", scope.makeOpName("DivNoNan")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DivNoNan")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Equal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Equal.java index c36a207d014..91168cdab7f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Equal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Equal.java @@ -74,7 +74,7 @@ private Equal(Operation operation) { ) public static Equal create(Scope scope, Operand x, Operand y, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Equal", scope.makeOpName("Equal")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Equal")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java index 37a0cbba878..f43df0cf45e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java @@ -61,7 +61,7 @@ private Erf(Operation operation) { describeByClass = true ) public static Erf create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Erf", scope.makeOpName("Erf")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Erf")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Erf<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java index c36514488e2..050212be504 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java @@ -61,7 +61,7 @@ private Erfc(Operation operation) { describeByClass = true ) public static Erfc create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Erfc", scope.makeOpName("Erfc")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Erfc")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Erfc<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java index d983f91e8f0..e48ba3892ea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java @@ -82,7 +82,7 @@ private Exp(Operation operation) { describeByClass = true ) public static Exp create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Exp", scope.makeOpName("Exp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Exp")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Exp<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java index f8d20f833a8..1fe5bb210f3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java @@ -73,7 +73,7 @@ private Expm1(Operation operation) { describeByClass = true ) public static Expm1 create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Expm1", scope.makeOpName("Expm1")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Expm1")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Expm1<>(opBuilder.build()); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Fact.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Fact.java index a4e3fc96366..5c41d484491 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Fact.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Fact.java @@ -57,7 +57,7 @@ private Fact(Operation operation) { describeByClass = true ) public static Fact create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("Fact", scope.makeOpName("Fact")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Fact")); opBuilder = scope.apply(opBuilder); return new Fact(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java index fdca33b84eb..759e8ad4862 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java @@ -61,7 +61,7 @@ private Floor(Operation operation) { describeByClass = true ) public static Floor create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Floor", scope.makeOpName("Floor")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Floor")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Floor<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java index 4dfca58ce59..07da210d584 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java @@ -64,7 +64,7 @@ private FloorDiv(Operation operation) { describeByClass = true ) public static FloorDiv create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("FloorDiv", scope.makeOpName("FloorDiv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FloorDiv")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java index 43ac259ac8d..a3692d20df6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java @@ -66,7 +66,7 @@ private FloorMod(Operation operation) { describeByClass = true ) public static FloorMod create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("FloorMod", scope.makeOpName("FloorMod")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FloorMod")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Greater.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Greater.java index e79a13cbac4..9929569d99d 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Greater.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Greater.java @@ -73,7 +73,7 @@ private Greater(Operation operation) { describeByClass = true ) public static Greater create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Greater", scope.makeOpName("Greater")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Greater")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/GreaterEqual.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/GreaterEqual.java index 6c49da75fae..26ae03084b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/GreaterEqual.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/GreaterEqual.java @@ -73,7 +73,7 @@ private GreaterEqual(Operation operation) { describeByClass = true ) public static GreaterEqual create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("GreaterEqual", scope.makeOpName("GreaterEqual")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GreaterEqual")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java index e03507c1be5..0a2201229e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java @@ -69,7 +69,7 @@ private Igamma(Operation operation) { describeByClass = true ) public static Igamma create(Scope scope, Operand a, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Igamma", scope.makeOpName("Igamma")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Igamma")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java index fd31d8cd394..1b2a0401952 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java @@ -58,7 +58,7 @@ private IgammaGradA(Operation operation) { describeByClass = true ) public static IgammaGradA create(Scope scope, Operand a, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("IgammaGradA", scope.makeOpName("IgammaGradA")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IgammaGradA")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java index d2a517e65c2..17966df3b73 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java @@ -69,7 +69,7 @@ private Igammac(Operation operation) { describeByClass = true ) public static Igammac create(Scope scope, Operand a, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Igammac", scope.makeOpName("Igammac")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Igammac")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java index 6b873186138..6d1bf220734 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java @@ -75,7 +75,7 @@ private Imag(Operation operation) { ) public static Imag create(Scope scope, Operand input, Class Tout) { - OperationBuilder opBuilder = scope.env().opBuilder("Imag", scope.makeOpName("Imag")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Imag")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("Tout", Operands.toDataType(Tout)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java index 49a6a2574ba..ae6346a3bc3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java @@ -72,7 +72,7 @@ private InvertPermutation(Operation operation) { describeByClass = true ) public static InvertPermutation create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("InvertPermutation", scope.makeOpName("InvertPermutation")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InvertPermutation")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new InvertPermutation<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsFinite.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsFinite.java index ddd2f552f29..74fb4880e01 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsFinite.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsFinite.java @@ -67,7 +67,7 @@ private IsFinite(Operation operation) { describeByClass = true ) public static IsFinite create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("IsFinite", scope.makeOpName("IsFinite")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IsFinite")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new IsFinite(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsInf.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsInf.java index 9e77a33cfbe..bb51b1ade20 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsInf.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsInf.java @@ -67,7 +67,7 @@ 
private IsInf(Operation operation) { describeByClass = true ) public static IsInf create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("IsInf", scope.makeOpName("IsInf")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IsInf")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new IsInf(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsNan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsNan.java index d760997cce2..b158338a052 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsNan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsNan.java @@ -67,7 +67,7 @@ private IsNan(Operation operation) { describeByClass = true ) public static IsNan create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("IsNan", scope.makeOpName("IsNan")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IsNan")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new IsNan(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Less.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Less.java index 2526b390242..89b264d2a78 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Less.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Less.java @@ -73,7 +73,7 @@ private Less(Operation operation) { describeByClass = true ) public static Less create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Less", scope.makeOpName("Less")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Less")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LessEqual.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LessEqual.java index ea46f504b0d..c8861aeff31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LessEqual.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LessEqual.java @@ -73,7 +73,7 @@ private LessEqual(Operation operation) { describeByClass = true ) public static LessEqual create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("LessEqual", scope.makeOpName("LessEqual")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LessEqual")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java index 561f491b09d..8afb9c307fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java @@ -68,7 +68,7 @@ private Lgamma(Operation operation) { describeByClass = true ) public static Lgamma create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Lgamma", scope.makeOpName("Lgamma")); + OperationBuilder opBuilder = 
scope.env().opBuilder(OP_NAME, scope.makeOpName("Lgamma")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Lgamma<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java index 416279c57c6..a974b9f8478 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java @@ -67,7 +67,7 @@ private Log(Operation operation) { describeByClass = true ) public static Log create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Log", scope.makeOpName("Log")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Log")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Log<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java index 84a48b62611..bdccc675f90 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java @@ -67,7 +67,7 @@ private Log1p(Operation operation) { describeByClass = true ) public static Log1p create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Log1p", scope.makeOpName("Log1p")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Log1p")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Log1p<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalAnd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalAnd.java index dbbd7a94226..efa9c2a2379 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalAnd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalAnd.java @@ -61,7 +61,7 @@ private LogicalAnd(Operation operation) { describeByClass = true ) public static LogicalAnd create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("LogicalAnd", scope.makeOpName("LogicalAnd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LogicalAnd")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalNot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalNot.java index 286e24ed931..2d5388f1bf0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalNot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalNot.java @@ -58,7 +58,7 @@ private LogicalNot(Operation operation) { describeByClass = true ) public static LogicalNot create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("LogicalNot", scope.makeOpName("LogicalNot")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LogicalNot")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new LogicalNot(opBuilder.build()); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalOr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalOr.java index 016f936fe2e..ddd3fb8bd54 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalOr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalOr.java @@ -61,7 +61,7 @@ private LogicalOr(Operation operation) { describeByClass = true ) public static LogicalOr create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("LogicalOr", scope.makeOpName("LogicalOr")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LogicalOr")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java index 05123a062fa..ecd43416bf7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java @@ -64,7 +64,7 @@ private Maximum(Operation operation) { describeByClass = true ) public static Maximum create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Maximum", scope.makeOpName("Maximum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Maximum")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java index b0c3a6ae37d..0f45a4a702a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java @@ -70,7 +70,7 @@ private Mean(Operation operation) { ) public static Mean create(Scope scope, Operand input, Operand axis, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Mean", scope.makeOpName("Mean")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Mean")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java index d8bb23fc36e..5acf98b24f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java @@ -64,7 +64,7 @@ private Minimum(Operation operation) { describeByClass = true ) public static Minimum create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Minimum", scope.makeOpName("Minimum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Minimum")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java index e4c80f108a2..078b0e8f4c5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java @@ -66,7 +66,7 @@ private Mod(Operation operation) { describeByClass = true ) public static Mod create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Mod", scope.makeOpName("Mod")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Mod")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java index 3996906af1a..cbfaeb66151 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java @@ -64,7 +64,7 @@ private Mul(Operation operation) { describeByClass = true ) public static Mul create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Mul", scope.makeOpName("Mul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Mul")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java index 40d660ca27d..8c9e32db85e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java @@ -64,7 +64,7 @@ private MulNoNan(Operation operation) { describeByClass = true ) public static MulNoNan create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("MulNoNan", scope.makeOpName("MulNoNan")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MulNoNan")); opBuilder.addInput(x.asOutput()); 
opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java index 727c4f54ee0..925f0ab8db5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java @@ -61,7 +61,7 @@ private Ndtri(Operation operation) { describeByClass = true ) public static Ndtri create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Ndtri", scope.makeOpName("Ndtri")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Ndtri")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Ndtri<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java index dd0d280adec..085675cd65f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java @@ -62,7 +62,7 @@ private Neg(Operation operation) { describeByClass = true ) public static Neg create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Neg", scope.makeOpName("Neg")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Neg")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Neg<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java index 824652cd6d1..daf28601b65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java @@ -67,7 +67,7 @@ private NextAfter(Operation operation) { describeByClass = true ) public static NextAfter create(Scope scope, Operand x1, Operand x2) { - OperationBuilder opBuilder = scope.env().opBuilder("NextAfter", scope.makeOpName("NextAfter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NextAfter")); opBuilder.addInput(x1.asOutput()); opBuilder.addInput(x2.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NotEqual.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NotEqual.java index 1eb47231fd9..1291b1a8850 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NotEqual.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NotEqual.java @@ -65,7 +65,7 @@ private NotEqual(Operation operation) { ) public static NotEqual create(Scope scope, Operand x, Operand y, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("NotEqual", scope.makeOpName("NotEqual")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NotEqual")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java index 51755979c87..bbcd3bd9dad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java @@ -66,7 +66,7 @@ private Polygamma(Operation operation) { describeByClass = true ) public static Polygamma create(Scope scope, Operand a, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Polygamma", scope.makeOpName("Polygamma")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Polygamma")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/PopulationCount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/PopulationCount.java index a1bb46a9ec5..5c9f2c63137 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/PopulationCount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/PopulationCount.java @@ -64,7 +64,7 @@ private PopulationCount(Operation operation) { describeByClass = true ) public static PopulationCount create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("PopulationCount", scope.makeOpName("PopulationCount")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PopulationCount")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new PopulationCount(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java index 82ddfd03026..2f39d1da201 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java @@ -69,7 +69,7 @@ private Pow(Operation operation) { describeByClass = true ) public static Pow create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Pow", scope.makeOpName("Pow")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Pow")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java index bc1d4b07165..f6cdf0c8a13 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java @@ -77,7 +77,7 @@ private QuantizedAdd(Operation operation) { public static QuantizedAdd create(Scope scope, Operand x, Operand y, Operand minX, Operand maxX, Operand minY, Operand maxY, Class Toutput) { - OperationBuilder opBuilder = 
scope.env().opBuilder("QuantizedAdd", scope.makeOpName("QuantizedAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedAdd")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder.addInput(minX.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java index cb69740807e..14d2ef8f4b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java @@ -77,7 +77,7 @@ private QuantizedMul(Operation operation) { public static QuantizedMul create(Scope scope, Operand x, Operand y, Operand minX, Operand maxX, Operand minY, Operand maxY, Class Toutput) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedMul", scope.makeOpName("QuantizedMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedMul")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder.addInput(minX.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java index 8c6f5a8eb8d..ec019f2899f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java @@ -75,7 +75,7 @@ private Real(Operation operation) { ) public static Real create(Scope scope, Operand input, Class Tout) { - OperationBuilder opBuilder = scope.env().opBuilder("Real", scope.makeOpName("Real")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Real")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("Tout", Operands.toDataType(Tout)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java index 2bf500ef10a..6ca3a21c7f3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java @@ -65,7 +65,7 @@ private RealDiv(Operation operation) { describeByClass = true ) public static RealDiv create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RealDiv", scope.makeOpName("RealDiv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RealDiv")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java index 17caba787df..179c86160fe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java @@ -62,7 +62,7 @@ private Reciprocal(Operation operation) { describeByClass = true ) public static Reciprocal create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Reciprocal", scope.makeOpName("Reciprocal")); + OperationBuilder 
opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Reciprocal")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Reciprocal<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java index cc0ec84a0c5..bc95cef39c5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java @@ -61,7 +61,7 @@ private ReciprocalGrad(Operation operation) { ) public static ReciprocalGrad create(Scope scope, Operand y, Operand dy) { - OperationBuilder opBuilder = scope.env().opBuilder("ReciprocalGrad", scope.makeOpName("ReciprocalGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReciprocalGrad")); opBuilder.addInput(y.asOutput()); opBuilder.addInput(dy.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java index 4ccf6718696..6c1539cee41 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java @@ -63,7 +63,7 @@ private RequantizationRangePerChannel(Operation operation) { ) public static RequantizationRangePerChannel create(Scope scope, Operand input, Operand inputMin, Operand inputMax, Float clipValueMax) { - OperationBuilder opBuilder = scope.env().opBuilder("RequantizationRangePerChannel", scope.makeOpName("RequantizationRangePerChannel")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RequantizationRangePerChannel")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputMin.asOutput()); opBuilder.addInput(inputMax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java index fb21eb1e38a..76c28bd2b87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java @@ -73,7 +73,7 @@ public static RequantizePerChannel create(Scope scope, Operand input, Operand inputMin, Operand inputMax, Operand requestedOutputMin, Operand requestedOutputMax, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("RequantizePerChannel", scope.makeOpName("RequantizePerChannel")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RequantizePerChannel")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputMin.asOutput()); opBuilder.addInput(inputMax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java index 1b481b2b613..4d139c2aed8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java @@ 
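All of the hunks above make the same substitution: the generated math-op wrappers stop passing a hard-coded op-name string to opBuilder and pass the class's OP_NAME constant instead. A minimal sketch of one such generated class after the change, assuming the usual generated skeleton (the OP_NAME declaration, output accessor, and generic bounds are assumed here); only the opBuilder(OP_NAME, ...) call pattern is taken from the hunks themselves:

// Sketch of a generated unary math op after this patch; the skeleton and generics
// are assumed, the opBuilder(OP_NAME, ...) call is what the hunks above change.
package org.tensorflow.op.math;

import org.tensorflow.Operand;
import org.tensorflow.Operation;
import org.tensorflow.OperationBuilder;
import org.tensorflow.Output;
import org.tensorflow.op.RawOp;
import org.tensorflow.op.Scope;
import org.tensorflow.types.family.TNumber;

public final class Ndtri<T extends TNumber> extends RawOp implements Operand<T> {
  /** The name of this op as registered with the TensorFlow runtime. */
  public static final String OP_NAME = "Ndtri";

  private final Output<T> y;

  private Ndtri(Operation operation) {
    super(operation);
    y = operation.output(0);
  }

  public static <T extends TNumber> Ndtri<T> create(Scope scope, Operand<T> x) {
    // The registered op name now comes from the constant rather than a second,
    // repeated string literal; makeOpName still derives the node's display name.
    OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Ndtri"));
    opBuilder.addInput(x.asOutput());
    opBuilder = scope.apply(opBuilder);
    return new Ndtri<>(opBuilder.build());
  }

  public Output<T> y() {
    return y;
  }

  @Override
  public Output<T> asOutput() {
    return y;
  }
}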
-69,7 +69,7 @@ private Rint(Operation operation) { describeByClass = true ) public static Rint create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Rint", scope.makeOpName("Rint")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Rint")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Rint<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java index b1c9d56e6a4..6198340ce3f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java @@ -63,7 +63,7 @@ private Round(Operation operation) { describeByClass = true ) public static Round create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Round", scope.makeOpName("Round")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Round")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Round<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java index 664dd6722c8..c5414bba1f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java @@ -62,7 +62,7 @@ private Rsqrt(Operation operation) { describeByClass = true ) public static Rsqrt create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Rsqrt", scope.makeOpName("Rsqrt")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Rsqrt")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Rsqrt<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java index 9391283211d..4537e79ef60 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java @@ -60,7 +60,7 @@ private RsqrtGrad(Operation operation) { describeByClass = true ) public static RsqrtGrad create(Scope scope, Operand y, Operand dy) { - OperationBuilder opBuilder = scope.env().opBuilder("RsqrtGrad", scope.makeOpName("RsqrtGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RsqrtGrad")); opBuilder.addInput(y.asOutput()); opBuilder.addInput(dy.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java index e480323f02b..65e7b648769 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java @@ -81,7 +81,7 @@ private SegmentMax(Operation operation) { ) public static SegmentMax create(Scope scope, Operand data, Operand segmentIds) { - OperationBuilder opBuilder = scope.env().opBuilder("SegmentMax", 
scope.makeOpName("SegmentMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SegmentMax")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(segmentIds.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java index de3468012ad..ca2733c96c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java @@ -83,7 +83,7 @@ private SegmentMean(Operation operation) { ) public static SegmentMean create(Scope scope, Operand data, Operand segmentIds) { - OperationBuilder opBuilder = scope.env().opBuilder("SegmentMean", scope.makeOpName("SegmentMean")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SegmentMean")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(segmentIds.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java index eab68f6bccd..2c0da496029 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java @@ -81,7 +81,7 @@ private SegmentMin(Operation operation) { ) public static SegmentMin create(Scope scope, Operand data, Operand segmentIds) { - OperationBuilder opBuilder = scope.env().opBuilder("SegmentMin", scope.makeOpName("SegmentMin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SegmentMin")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(segmentIds.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java index a29b348a830..6c79d7cbb0f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java @@ -82,7 +82,7 @@ private SegmentProd(Operation operation) { ) public static SegmentProd create(Scope scope, Operand data, Operand segmentIds) { - OperationBuilder opBuilder = scope.env().opBuilder("SegmentProd", scope.makeOpName("SegmentProd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SegmentProd")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(segmentIds.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java index 0f726cc10e0..aae76b9dd85 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java @@ -82,7 +82,7 @@ private SegmentSum(Operation operation) { ) public static SegmentSum create(Scope scope, Operand data, Operand segmentIds) { - OperationBuilder opBuilder = scope.env().opBuilder("SegmentSum", scope.makeOpName("SegmentSum")); + OperationBuilder 
opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SegmentSum")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(segmentIds.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java index 752113b4947..d76210dc510 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java @@ -62,7 +62,7 @@ private Sigmoid(Operation operation) { describeByClass = true ) public static Sigmoid create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Sigmoid", scope.makeOpName("Sigmoid")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Sigmoid")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Sigmoid<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java index ee500922cd6..3e307c96161 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java @@ -60,7 +60,7 @@ private SigmoidGrad(Operation operation) { describeByClass = true ) public static SigmoidGrad create(Scope scope, Operand y, Operand dy) { - OperationBuilder opBuilder = scope.env().opBuilder("SigmoidGrad", scope.makeOpName("SigmoidGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SigmoidGrad")); opBuilder.addInput(y.asOutput()); opBuilder.addInput(dy.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java index a8b3940948b..0f7b1a02ac2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java @@ -72,7 +72,7 @@ private Sign(Operation operation) { describeByClass = true ) public static Sign create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Sign", scope.makeOpName("Sign")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Sign")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Sign<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java index 9e076a6a8fd..7f4fa0211bd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java @@ -68,7 +68,7 @@ private Sin(Operation operation) { describeByClass = true ) public static Sin create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Sin", scope.makeOpName("Sin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Sin")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Sin<>(opBuilder.build()); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java index 8365de53578..8d89458774a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java @@ -68,7 +68,7 @@ private Sinh(Operation operation) { describeByClass = true ) public static Sinh create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Sinh", scope.makeOpName("Sinh")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Sinh")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Sinh<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java index 88523a88cb0..ee8508fba07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java @@ -68,7 +68,7 @@ private SobolSample(Operation operation) { ) public static SobolSample create(Scope scope, Operand dim, Operand numResults, Operand skip, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("SobolSample", scope.makeOpName("SobolSample")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SobolSample")); opBuilder.addInput(dim.asOutput()); opBuilder.addInput(numResults.asOutput()); opBuilder.addInput(skip.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java index 00e59eadad2..41fb2fbbda1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java @@ -61,7 +61,7 @@ private Softplus(Operation operation) { describeByClass = true ) public static Softplus create(Scope scope, Operand features) { - OperationBuilder opBuilder = scope.env().opBuilder("Softplus", scope.makeOpName("Softplus")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Softplus")); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); return new Softplus<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java index 0cf9b9963bc..f9662185666 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java @@ -59,7 +59,7 @@ private SoftplusGrad(Operation operation) { ) public static SoftplusGrad create(Scope scope, Operand gradients, Operand features) { - OperationBuilder opBuilder = scope.env().opBuilder("SoftplusGrad", scope.makeOpName("SoftplusGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SoftplusGrad")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java index bcba1c9e2ef..43c436f2a1e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java @@ -62,7 +62,7 @@ private Sqrt(Operation operation) { describeByClass = true ) public static Sqrt create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Sqrt", scope.makeOpName("Sqrt")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Sqrt")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Sqrt<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java index 15ac765690a..4b941896b95 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java @@ -60,7 +60,7 @@ private SqrtGrad(Operation operation) { describeByClass = true ) public static SqrtGrad create(Scope scope, Operand y, Operand dy) { - OperationBuilder opBuilder = scope.env().opBuilder("SqrtGrad", scope.makeOpName("SqrtGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SqrtGrad")); opBuilder.addInput(y.asOutput()); opBuilder.addInput(dy.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java index 22321e62837..bd513b48b81 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java @@ -62,7 +62,7 @@ private Square(Operation operation) { describeByClass = true ) public static Square create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Square", scope.makeOpName("Square")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Square")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Square<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java index 0b01fa5a1f7..ff49e2a2be0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java @@ -65,7 +65,7 @@ private SquaredDifference(Operation operation) { ) public static SquaredDifference create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("SquaredDifference", scope.makeOpName("SquaredDifference")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SquaredDifference")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java 
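One practical effect of routing the build call through OP_NAME, as in the Sign, Sqrt, and Square hunks above, is that the op type of the built node and the constant can no longer drift apart. A small check of that property, assuming OP_NAME is exposed as a public constant on the generated class and using the eager Ops entry point:

// Sketch: verifies that the operation built by create() reports the same op type
// as the OP_NAME constant now passed to opBuilder. The OP_NAME field is assumed
// to be a public constant on the generated class.
import org.tensorflow.op.Ops;
import org.tensorflow.op.math.Sign;
import org.tensorflow.types.TFloat32;

public class OpNameConsistencyCheck {
  public static void main(String[] args) {
    Ops tf = Ops.create();                                 // eager execution environment
    Sign<TFloat32> sign = Sign.create(tf.scope(), tf.constant(-2.5f));
    System.out.println(sign.op().type());                  // "Sign"
    System.out.println(Sign.OP_NAME);                      // "Sign" -- same constant used by create()
  }
}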
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java index f1fae6af3d4..494e6a0a48e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java @@ -64,7 +64,7 @@ private Sub(Operation operation) { describeByClass = true ) public static Sub create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Sub", scope.makeOpName("Sub")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Sub")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java index 0511fd4c0a6..c12388ecf17 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java @@ -69,7 +69,7 @@ private Tan(Operation operation) { describeByClass = true ) public static Tan create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Tan", scope.makeOpName("Tan")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Tan")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Tan<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java index ce218e74fc9..9d3d1407e55 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java @@ -75,7 +75,7 @@ private Tanh(Operation operation) { describeByClass = true ) public static Tanh create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Tanh", scope.makeOpName("Tanh")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Tanh")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Tanh<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java index a46fb120cc6..9513223a61c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java @@ -60,7 +60,7 @@ private TanhGrad(Operation operation) { describeByClass = true ) public static TanhGrad create(Scope scope, Operand y, Operand dy) { - OperationBuilder opBuilder = scope.env().opBuilder("TanhGrad", scope.makeOpName("TanhGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TanhGrad")); opBuilder.addInput(y.asOutput()); opBuilder.addInput(dy.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java index 101422dc4c0..a5b7247eb9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java @@ -68,7 +68,7 @@ private TruncateDiv(Operation operation) { describeByClass = true ) public static TruncateDiv create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("TruncateDiv", scope.makeOpName("TruncateDiv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TruncateDiv")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java index 05e9b656cb1..afc4a569f0c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java @@ -65,7 +65,7 @@ private TruncateMod(Operation operation) { describeByClass = true ) public static TruncateMod create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("TruncateMod", scope.makeOpName("TruncateMod")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TruncateMod")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java index 9ae0ba7019d..5948bea7bb4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java @@ -87,7 +87,7 @@ private UnsortedSegmentMax(Operation operation) { ) public static UnsortedSegmentMax create(Scope scope, Operand data, Operand segmentIds, Operand numSegments) { - OperationBuilder opBuilder = scope.env().opBuilder("UnsortedSegmentMax", scope.makeOpName("UnsortedSegmentMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnsortedSegmentMax")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(segmentIds.asOutput()); opBuilder.addInput(numSegments.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java index 2d3497e61ae..4c924844b12 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java @@ -84,7 +84,7 @@ private UnsortedSegmentMin(Operation operation) { ) public static UnsortedSegmentMin create(Scope scope, Operand data, Operand segmentIds, Operand numSegments) { - OperationBuilder opBuilder = scope.env().opBuilder("UnsortedSegmentMin", scope.makeOpName("UnsortedSegmentMin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnsortedSegmentMin")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(segmentIds.asOutput()); opBuilder.addInput(numSegments.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java index 5677ce1943f..ad3b2afa22a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java @@ -84,7 +84,7 @@ private UnsortedSegmentProd(Operation operation) { ) public static UnsortedSegmentProd create(Scope scope, Operand data, Operand segmentIds, Operand numSegments) { - OperationBuilder opBuilder = scope.env().opBuilder("UnsortedSegmentProd", scope.makeOpName("UnsortedSegmentProd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnsortedSegmentProd")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(segmentIds.asOutput()); opBuilder.addInput(numSegments.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java index 9bf98201624..6a7d80d3bce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java @@ -86,7 +86,7 @@ private UnsortedSegmentSum(Operation operation) { ) public static UnsortedSegmentSum create(Scope scope, Operand data, Operand segmentIds, Operand numSegments) { - OperationBuilder opBuilder = scope.env().opBuilder("UnsortedSegmentSum", scope.makeOpName("UnsortedSegmentSum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnsortedSegmentSum")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(segmentIds.asOutput()); opBuilder.addInput(numSegments.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java index b27043121f1..51b01fcc145 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java @@ -62,7 +62,7 @@ private Xdivy(Operation operation) { describeByClass = true ) public static Xdivy create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Xdivy", scope.makeOpName("Xdivy")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Xdivy")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java index a266f4f7947..40a9ffde383 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java @@ -62,7 +62,7 @@ private Xlog1py(Operation operation) { describeByClass = true ) public static Xlog1py create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Xlog1py", scope.makeOpName("Xlog1py")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Xlog1py")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git 
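Caller-side behaviour is unaffected by the substitution: the constant resolves to exactly the string the removed literal carried, so the same registered kernel is looked up, and makeOpName still only controls the node's display name. A short usage sketch with one of the binary ops touched above (eager mode; the z() accessor and asTensor read-back are assumed to follow the usual eager API):

// Sketch: using Xdivy exactly as before the patch; only where the op-name string
// lives in the generated source has changed.
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.math.Xdivy;
import org.tensorflow.types.TFloat32;

public class XdivyExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();
    Operand<TFloat32> x = tf.constant(3.0f);
    Operand<TFloat32> y = tf.constant(4.0f);
    Xdivy<TFloat32> xdivy = Xdivy.create(tf.scope(), x, y); // builds op "Xdivy" via OP_NAME
    System.out.println(xdivy.z().asTensor().getFloat());    // prints 0.75 (x / y, since x != 0)
  }
}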
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java index a5227700166..c40480718a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java @@ -62,7 +62,7 @@ private Xlogy(Operation operation) { describeByClass = true ) public static Xlogy create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("Xlogy", scope.makeOpName("Xlogy")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Xlogy")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java index 40502468711..f137c7387fa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java @@ -64,7 +64,7 @@ private Zeta(Operation operation) { describeByClass = true ) public static Zeta create(Scope scope, Operand x, Operand q) { - OperationBuilder opBuilder = scope.env().opBuilder("Zeta", scope.makeOpName("Zeta")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Zeta")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(q.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java index 2f8af83f094..312289d1311 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java @@ -61,7 +61,7 @@ private erfinv(Operation operation) { describeByClass = true ) public static erfinv create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Erfinv", scope.makeOpName("erfinv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("erfinv")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new erfinv<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java index 42f0519eaa7..5a813d9bc63 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java @@ -57,7 +57,7 @@ private BesselJ0(Operation operation) { describeByClass = true ) public static BesselJ0 create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselJ0", scope.makeOpName("BesselJ0")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselJ0")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselJ0<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java index 
c288143289c..e5465ae883c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java @@ -57,7 +57,7 @@ private BesselJ1(Operation operation) { describeByClass = true ) public static BesselJ1 create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselJ1", scope.makeOpName("BesselJ1")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselJ1")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselJ1<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java index 7c13e1cb6fc..e0ece545227 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java @@ -57,7 +57,7 @@ private BesselK0(Operation operation) { describeByClass = true ) public static BesselK0 create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselK0", scope.makeOpName("BesselK0")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselK0")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselK0<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java index 1ec7c7cafca..66ea449ddc8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java @@ -57,7 +57,7 @@ private BesselK0e(Operation operation) { describeByClass = true ) public static BesselK0e create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselK0e", scope.makeOpName("BesselK0e")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselK0e")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselK0e<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java index 58163b4fff0..e0d1e82c578 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java @@ -57,7 +57,7 @@ private BesselK1(Operation operation) { describeByClass = true ) public static BesselK1 create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselK1", scope.makeOpName("BesselK1")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselK1")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselK1<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java index 
cc8e512c4b6..1137ceb5430 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java @@ -57,7 +57,7 @@ private BesselK1e(Operation operation) { describeByClass = true ) public static BesselK1e create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselK1e", scope.makeOpName("BesselK1e")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselK1e")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselK1e<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java index 4b6eb42de1e..5991a62d911 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java @@ -57,7 +57,7 @@ private BesselY0(Operation operation) { describeByClass = true ) public static BesselY0 create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselY0", scope.makeOpName("BesselY0")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselY0")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselY0<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java index 4965db1cd8c..87fe3b8b799 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java @@ -57,7 +57,7 @@ private BesselY1(Operation operation) { describeByClass = true ) public static BesselY1 create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("BesselY1", scope.makeOpName("BesselY1")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BesselY1")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new BesselY1<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java index ef5934fb7ce..b8fd4b7c4bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java @@ -57,7 +57,7 @@ private Dawsn(Operation operation) { describeByClass = true ) public static Dawsn create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Dawsn", scope.makeOpName("Dawsn")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Dawsn")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Dawsn<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java index 999dfee7900..a5459c2e49a 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java @@ -57,7 +57,7 @@ private Expint(Operation operation) { describeByClass = true ) public static Expint create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Expint", scope.makeOpName("Expint")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Expint")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Expint<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java index d590d1869b5..9ed978da85f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java @@ -57,7 +57,7 @@ private FresnelCos(Operation operation) { describeByClass = true ) public static FresnelCos create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("FresnelCos", scope.makeOpName("FresnelCos")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FresnelCos")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new FresnelCos<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java index 919b887cf29..4de11696395 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java @@ -57,7 +57,7 @@ private FresnelSin(Operation operation) { describeByClass = true ) public static FresnelSin create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("FresnelSin", scope.makeOpName("FresnelSin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FresnelSin")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new FresnelSin<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java index 8d6a983ffa5..18da2e750c2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java @@ -57,7 +57,7 @@ private Spence(Operation operation) { describeByClass = true ) public static Spence create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("Spence", scope.makeOpName("Spence")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Spence")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new Spence<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java index d9ff43a7bf0..13947a3f016 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java @@ -69,7 +69,7 @@ private AvgPool(Operation operation) { ) public static AvgPool create(Scope scope, Operand value, List ksize, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("AvgPool", scope.makeOpName("AvgPool")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AvgPool")); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); long[] ksizeArray = new long[ksize.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java index 97e608b32c8..2dbb98b2960 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java @@ -71,7 +71,7 @@ private AvgPool3d(Operation operation) { ) public static AvgPool3d create(Scope scope, Operand input, List ksize, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("AvgPool3D", scope.makeOpName("AvgPool3d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AvgPool3d")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); long[] ksizeArray = new long[ksize.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java index 4bdb6034816..af10950e7fb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java @@ -72,7 +72,7 @@ private AvgPool3dGrad(Operation operation) { public static AvgPool3dGrad create(Scope scope, Operand origInputShape, Operand grad, List ksize, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("AvgPool3DGrad", scope.makeOpName("AvgPool3dGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AvgPool3dGrad")); opBuilder.addInput(origInputShape.asOutput()); opBuilder.addInput(grad.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java index 039307cdbdd..813fd26b428 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java @@ -67,7 +67,7 @@ private AvgPoolGrad(Operation operation) { public static AvgPoolGrad create(Scope scope, Operand origInputShape, Operand grad, List ksize, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("AvgPoolGrad", scope.makeOpName("AvgPoolGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AvgPoolGrad")); opBuilder.addInput(origInputShape.asOutput()); opBuilder.addInput(grad.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java index 7c84dd5f26f..47cf236cad5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java @@ -78,7 +78,7 @@ private BatchNormWithGlobalNormalization(Operation operation) { public static BatchNormWithGlobalNormalization create(Scope scope, Operand t, Operand m, Operand v, Operand beta, Operand gamma, Float varianceEpsilon, Boolean scaleAfterNormalization) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchNormWithGlobalNormalization", scope.makeOpName("BatchNormWithGlobalNormalization")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchNormWithGlobalNormalization")); opBuilder.addInput(t.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(v.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java index 01189753828..0472d472dd6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java @@ -89,7 +89,7 @@ private BatchNormWithGlobalNormalizationGrad(Operation operation) { public static BatchNormWithGlobalNormalizationGrad create(Scope scope, Operand t, Operand m, Operand v, Operand gamma, Operand backprop, Float varianceEpsilon, Boolean scaleAfterNormalization) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchNormWithGlobalNormalizationGrad", scope.makeOpName("BatchNormWithGlobalNormalizationGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchNormWithGlobalNormalizationGrad")); opBuilder.addInput(t.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(v.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java index 26445fd21e5..fd477c86dc5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java @@ -66,7 +66,7 @@ private BiasAdd(Operation operation) { ) public static BiasAdd create(Scope scope, Operand value, Operand bias, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BiasAdd", scope.makeOpName("BiasAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BiasAdd")); opBuilder.addInput(value.asOutput()); opBuilder.addInput(bias.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java index 4ef33c75908..70204d02a5a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java @@ -66,7 +66,7 @@ private BiasAddGrad(Operation operation) { ) public static BiasAddGrad create(Scope scope, Operand outBackprop, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("BiasAddGrad", scope.makeOpName("BiasAddGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BiasAddGrad")); opBuilder.addInput(outBackprop.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java index 9e2adfd2b29..d6f0ead46ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java @@ -108,7 +108,7 @@ private BlockLSTM(Operation operation) { public static BlockLSTM create(Scope scope, Operand seqLenMax, Operand x, Operand csPrev, Operand hPrev, Operand w, Operand wci, Operand wcf, Operand wco, Operand b, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("BlockLSTMV2", scope.makeOpName("BlockLSTM")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BlockLSTM")); opBuilder.addInput(seqLenMax.asOutput()); opBuilder.addInput(x.asOutput()); opBuilder.addInput(csPrev.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java index ed7c7b79957..3727d150aac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java @@ -103,7 +103,7 @@ public static BlockLSTMGrad create(Scope scope, Operand wcf, Operand wco, Operand b, Operand i, Operand cs, Operand f, Operand o, Operand ci, Operand co, Operand h, Operand csGrad, Operand hGrad, Boolean usePeephole) { - OperationBuilder opBuilder = scope.env().opBuilder("BlockLSTMGradV2", scope.makeOpName("BlockLSTMGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BlockLSTMGrad")); opBuilder.addInput(seqLenMax.asOutput()); opBuilder.addInput(x.asOutput()); opBuilder.addInput(csPrev.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java index 17b187b0ec4..b8d3779d820 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java @@ -70,7 +70,7 @@ private CTCLossV2(Operation operation) { public static CTCLossV2 create(Scope scope, Operand inputs, Operand labelsIndices, Operand labelsValues, Operand sequenceLength, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CTCLossV2", scope.makeOpName("CTCLossV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CTCLossV2")); opBuilder.addInput(inputs.asOutput()); opBuilder.addInput(labelsIndices.asOutput()); opBuilder.addInput(labelsValues.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ComputeAccidentalHits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ComputeAccidentalHits.java index 9a7dbaf6080..594719f47ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ComputeAccidentalHits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ComputeAccidentalHits.java @@ -74,7 +74,7 @@ private ComputeAccidentalHits(Operation operation) { ) public static ComputeAccidentalHits create(Scope scope, Operand trueClasses, Operand sampledCandidates, Long numTrue, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ComputeAccidentalHits", scope.makeOpName("ComputeAccidentalHits")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ComputeAccidentalHits")); opBuilder.addInput(trueClasses.asOutput()); opBuilder.addInput(sampledCandidates.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java index cf5b02bc8bb..cdc3d27b72a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java @@ -92,7 +92,7 @@ private Conv2d(Operation operation) { ) public static Conv2d create(Scope scope, Operand input, Operand filter, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Conv2D", scope.makeOpName("Conv2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Conv2d")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java index b1fcfad96b8..a1d95bcbd54 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java @@ -76,7 +76,7 @@ private Conv2dBackpropFilter(Operation operation) { public static Conv2dBackpropFilter create(Scope scope, Operand input, Operand filterSizes, Operand outBackprop, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Conv2DBackpropFilter", scope.makeOpName("Conv2dBackpropFilter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Conv2dBackpropFilter")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filterSizes.asOutput()); opBuilder.addInput(outBackprop.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java index 5058b930786..334519fd5fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java @@ -76,7 +76,7 @@ private Conv2dBackpropInput(Operation operation) { public static Conv2dBackpropInput create(Scope scope, Operand inputSizes, Operand filter, Operand outBackprop, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Conv2DBackpropInput", scope.makeOpName("Conv2dBackpropInput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Conv2dBackpropInput")); opBuilder.addInput(inputSizes.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(outBackprop.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java index ed04f1095da..0fe70b05c62 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java @@ -73,7 +73,7 @@ private Conv3d(Operation operation) { ) public static Conv3d create(Scope scope, Operand input, Operand filter, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Conv3D", scope.makeOpName("Conv3d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Conv3d")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java index d0ea22719a6..456ee91437f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java @@ -75,7 +75,7 @@ private Conv3dBackpropFilter(Operation operation) { public static Conv3dBackpropFilter create(Scope scope, Operand input, Operand filterSizes, Operand outBackprop, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Conv3DBackpropFilterV2", scope.makeOpName("Conv3dBackpropFilter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Conv3dBackpropFilter")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filterSizes.asOutput()); opBuilder.addInput(outBackprop.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java index 919a06a6646..7f84fb3b019 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java @@ -74,7 +74,7 @@ private Conv3dBackpropInput(Operation operation) { public static Conv3dBackpropInput create(Scope scope, Operand inputSizes, Operand filter, Operand outBackprop, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Conv3DBackpropInputV2", scope.makeOpName("Conv3dBackpropInput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Conv3dBackpropInput")); opBuilder.addInput(inputSizes.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(outBackprop.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java index 88e3c05a648..22bbe1d8274 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java @@ -91,7 +91,7 @@ private CtcBeamSearchDecoder(Operation operation) { ) public static CtcBeamSearchDecoder create(Scope scope, Operand inputs, Operand sequenceLength, Long beamWidth, Long topPaths, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CTCBeamSearchDecoder", scope.makeOpName("CtcBeamSearchDecoder")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CtcBeamSearchDecoder")); opBuilder.addInput(inputs.asOutput()); opBuilder.addInput(sequenceLength.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java index 843715ec1c4..0630b40d3ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java @@ -83,7 +83,7 @@ private CtcGreedyDecoder(Operation operation) { ) public static CtcGreedyDecoder create(Scope scope, Operand inputs, Operand sequenceLength, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CTCGreedyDecoder", scope.makeOpName("CtcGreedyDecoder")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CtcGreedyDecoder")); opBuilder.addInput(inputs.asOutput()); opBuilder.addInput(sequenceLength.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java index 75bbbe7747c..15ce085e575 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java @@ -76,7 +76,7 @@ private CtcLoss(Operation operation) { public static CtcLoss create(Scope scope, Operand inputs, Operand labelsIndices, Operand labelsValues, Operand sequenceLength, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("CTCLoss", scope.makeOpName("CtcLoss")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CtcLoss")); opBuilder.addInput(inputs.asOutput()); opBuilder.addInput(labelsIndices.asOutput()); opBuilder.addInput(labelsValues.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java index 5817e7011cc..3c8adc1094e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java @@ -115,7 +115,7 @@ private CudnnRNN(Operation operation) { public static CudnnRNN create(Scope scope, Operand input, Operand inputH, Operand inputC, Operand params, Operand sequenceLengths, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CudnnRNNV3", scope.makeOpName("CudnnRNN")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CudnnRNN")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputH.asOutput()); opBuilder.addInput(inputC.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java index d7a50c029db..f8852914f34 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java @@ -131,7 +131,7 @@ public static CudnnRNNBackprop create(Scope scope, Operan Operand output, Operand outputH, Operand outputC, Operand outputBackprop, Operand outputHBackprop, Operand outputCBackprop, Operand reserveSpace, Operand hostReserved, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CudnnRNNBackpropV3", scope.makeOpName("CudnnRNNBackprop")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CudnnRNNBackprop")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputH.asOutput()); opBuilder.addInput(inputC.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java index c704ad9fe1f..d94d7201002 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java @@ -98,7 +98,7 @@ private CudnnRNNCanonicalToParams(Operation operation) { public static CudnnRNNCanonicalToParams create(Scope scope, Operand numLayers, Operand numUnits, Operand inputSize, Iterable> weights, Iterable> biases, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("CudnnRNNCanonicalToParamsV2", scope.makeOpName("CudnnRNNCanonicalToParams")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CudnnRNNCanonicalToParams")); opBuilder.addInput(numLayers.asOutput()); opBuilder.addInput(numUnits.asOutput()); opBuilder.addInput(inputSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java index f336e5bd553..70dc874dc27 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java @@ -108,7 +108,7 @@ private CudnnRNNParamsToCanonical(Operation operation) { public static CudnnRNNParamsToCanonical create(Scope scope, Operand numLayers, Operand numUnits, Operand inputSize, Operand params, Long numParamsWeights, Long numParamsBiases, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CudnnRNNParamsToCanonicalV2", scope.makeOpName("CudnnRNNParamsToCanonical")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CudnnRNNParamsToCanonical")); opBuilder.addInput(numLayers.asOutput()); opBuilder.addInput(numUnits.asOutput()); opBuilder.addInput(inputSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java index 642a79909fd..17114a95d9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java @@ -91,7 +91,7 @@ private CudnnRnnParamsSize(Operation operation) { public static CudnnRnnParamsSize create(Scope scope, Operand numLayers, Operand numUnits, Operand inputSize, Class T, Class S, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CudnnRNNParamsSize", scope.makeOpName("CudnnRnnParamsSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CudnnRnnParamsSize")); opBuilder.addInput(numLayers.asOutput()); opBuilder.addInput(numUnits.asOutput()); opBuilder.addInput(inputSize.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java index 991d5961a29..7ebfca31245 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java @@ -65,7 +65,7 @@ private DataFormatDimMap(Operation operation) { ) public static DataFormatDimMap create(Scope scope, Operand x, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DataFormatDimMap", scope.makeOpName("DataFormatDimMap")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DataFormatDimMap")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java index fcc7c3fe995..1f0740e51f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java @@ -82,7 +82,7 @@ private DataFormatVecPermute(Operation operation) { ) public static DataFormatVecPermute create(Scope scope, Operand x, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DataFormatVecPermute", scope.makeOpName("DataFormatVecPermute")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DataFormatVecPermute")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java index 11595c5d17e..7607d7abc44 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java @@ -138,7 +138,7 @@ private DepthToSpace(Operation operation) { ) public static DepthToSpace create(Scope scope, Operand input, Long blockSize, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DepthToSpace", scope.makeOpName("DepthToSpace")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DepthToSpace")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("block_size", blockSize); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java index 2062c1d2823..115df899a13 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java @@ -85,7 +85,7 @@ private DepthwiseConv2dNative(Operation operation) { ) public static DepthwiseConv2dNative create(Scope scope, Operand input, Operand filter, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DepthwiseConv2dNative", scope.makeOpName("DepthwiseConv2dNative")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DepthwiseConv2dNative")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java index cb53251b8bd..46039ed5364 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java @@ -78,7 +78,7 @@ private DepthwiseConv2dNativeBackpropFilter(Operation operation) { public static DepthwiseConv2dNativeBackpropFilter create(Scope scope, Operand input, Operand filterSizes, Operand outBackprop, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DepthwiseConv2dNativeBackpropFilter", scope.makeOpName("DepthwiseConv2dNativeBackpropFilter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DepthwiseConv2dNativeBackpropFilter")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filterSizes.asOutput()); opBuilder.addInput(outBackprop.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java index 6d514f599ec..84b917dda07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java @@ -78,7 +78,7 @@ private DepthwiseConv2dNativeBackpropInput(Operation operation) { public static DepthwiseConv2dNativeBackpropInput create(Scope scope, Operand inputSizes, Operand filter, Operand outBackprop, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DepthwiseConv2dNativeBackpropInput", scope.makeOpName("DepthwiseConv2dNativeBackpropInput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DepthwiseConv2dNativeBackpropInput")); opBuilder.addInput(inputSizes.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(outBackprop.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java index 0838b5f6eea..0ce416f962d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java @@ -90,7 +90,7 @@ private Dilation2d(Operation operation) { ) public static Dilation2d create(Scope scope, Operand input, Operand filter, List strides, List rates, String padding) { - OperationBuilder opBuilder = scope.env().opBuilder("Dilation2D", scope.makeOpName("Dilation2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Dilation2d")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java index fb07f81692a..f278fb3f223 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java @@ -71,7 +71,7 @@ private Dilation2dBackpropFilter(Operation operation) { public static Dilation2dBackpropFilter create(Scope scope, Operand input, Operand filter, Operand outBackprop, List strides, List rates, String padding) { - OperationBuilder opBuilder = scope.env().opBuilder("Dilation2DBackpropFilter", scope.makeOpName("Dilation2dBackpropFilter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Dilation2dBackpropFilter")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(outBackprop.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java index e7f407b7d50..def304dcbe7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java @@ -71,7 +71,7 @@ private Dilation2dBackpropInput(Operation operation) { public static Dilation2dBackpropInput create(Scope scope, Operand input, Operand filter, Operand outBackprop, List strides, List rates, String padding) { - OperationBuilder opBuilder = scope.env().opBuilder("Dilation2DBackpropInput", scope.makeOpName("Dilation2dBackpropInput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Dilation2dBackpropInput")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(outBackprop.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java index d7afc873a44..35656420d31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java @@ -81,7 +81,7 @@ private Elu(Operation operation) { describeByClass = true ) public static Elu create(Scope scope, Operand features) { - OperationBuilder opBuilder = scope.env().opBuilder("Elu", scope.makeOpName("Elu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Elu")); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); return new Elu<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java index 94c17656ca9..84b0f7a0232 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java @@ -59,7 +59,7 @@ private EluGrad(Operation operation) { ) public static EluGrad create(Scope scope, Operand gradients, Operand outputs) { - OperationBuilder opBuilder = scope.env().opBuilder("EluGrad", scope.makeOpName("EluGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EluGrad")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(outputs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FixedUnigramCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FixedUnigramCandidateSampler.java index 86aaf612abf..acfe0fe9a34 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FixedUnigramCandidateSampler.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FixedUnigramCandidateSampler.java @@ -87,7 +87,7 @@ private FixedUnigramCandidateSampler(Operation operation) { ) public static FixedUnigramCandidateSampler create(Scope scope, Operand trueClasses, Long numTrue, Long numSampled, Boolean unique, Long rangeMax, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("FixedUnigramCandidateSampler", scope.makeOpName("FixedUnigramCandidateSampler")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FixedUnigramCandidateSampler")); opBuilder.addInput(trueClasses.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_true", numTrue); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java index 5fddbf60bbe..291d2c22ec6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java @@ -81,7 +81,7 @@ private FractionalAvgPool(Operation operation) { ) public static FractionalAvgPool create(Scope scope, Operand value, List poolingRatio, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("FractionalAvgPool", scope.makeOpName("FractionalAvgPool")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FractionalAvgPool")); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); float[] poolingRatioArray = new float[poolingRatio.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java index 298f4309afe..b3b5cdc8e49 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java @@ -72,7 +72,7 @@ private FractionalAvgPoolGrad(Operation operation) { public static FractionalAvgPoolGrad create(Scope scope, Operand origInputTensorShape, Operand outBackprop, Operand rowPoolingSequence, Operand colPoolingSequence, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("FractionalAvgPoolGrad", scope.makeOpName("FractionalAvgPoolGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FractionalAvgPoolGrad")); opBuilder.addInput(origInputTensorShape.asOutput()); opBuilder.addInput(outBackprop.asOutput()); opBuilder.addInput(rowPoolingSequence.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java index ff58d050dfd..632f0ffab4b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java @@ -103,7 +103,7 @@ private FractionalMaxPool(Operation operation) { ) public static FractionalMaxPool create(Scope scope, Operand value, List poolingRatio, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("FractionalMaxPool", scope.makeOpName("FractionalMaxPool")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FractionalMaxPool")); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); float[] poolingRatioArray = new float[poolingRatio.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java index 2e1ec6d800d..9168aada03d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java @@ -68,7 +68,7 @@ private FractionalMaxPoolGrad(Operation operation) { public static FractionalMaxPoolGrad create(Scope scope, Operand origInput, Operand origOutput, Operand outBackprop, Operand rowPoolingSequence, Operand colPoolingSequence, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("FractionalMaxPoolGrad", scope.makeOpName("FractionalMaxPoolGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FractionalMaxPoolGrad")); opBuilder.addInput(origInput.asOutput()); opBuilder.addInput(origOutput.asOutput()); opBuilder.addInput(outBackprop.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java index 158d5f58709..be19069844a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java @@ -90,7 +90,7 @@ private FusedBatchNorm(Operation operation) { public static FusedBatchNorm create(Scope scope, Operand x, Operand scale, Operand offset, Operand mean, Operand variance, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("FusedBatchNormV3", scope.makeOpName("FusedBatchNorm")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FusedBatchNorm")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(scale.asOutput()); opBuilder.addInput(offset.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java index 081bc93da40..8f70d25a40c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java @@ -96,7 +96,7 @@ private FusedBatchNormGrad(Operation operation) { public static FusedBatchNormGrad create(Scope scope, Operand yBackprop, Operand x, Operand scale, Operand reserveSpace1, Operand reserveSpace2, Operand reserveSpace3, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("FusedBatchNormGradV3", scope.makeOpName("FusedBatchNormGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FusedBatchNormGrad")); opBuilder.addInput(yBackprop.asOutput()); opBuilder.addInput(x.asOutput()); opBuilder.addInput(scale.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java index 8e1d840740f..7ffb05c66d7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java @@ -84,7 +84,7 @@ private FusedPadConv2d(Operation operation) { public static FusedPadConv2d create(Scope scope, Operand input, Operand paddings, Operand filter, String mode, List strides, String padding) { - OperationBuilder opBuilder = scope.env().opBuilder("FusedPadConv2D", scope.makeOpName("FusedPadConv2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FusedPadConv2d")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(paddings.asOutput()); opBuilder.addInput(filter.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java index c7e368d1042..1ec36de171c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java @@ -86,7 +86,7 @@ private FusedResizeAndPadConv2d(Operation operation) { public static FusedResizeAndPadConv2d create(Scope scope, Operand input, Operand sizeOutput, Operand paddings, Operand filter, String mode, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("FusedResizeAndPadConv2D", scope.makeOpName("FusedResizeAndPadConv2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FusedResizeAndPadConv2d")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); opBuilder.addInput(paddings.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java index cf6598621f0..dc096503547 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java @@ -110,7 +110,7 @@ private GRUBlockCell(Operation operation) { ) public static GRUBlockCell create(Scope scope, Operand x, Operand hPrev, Operand wRu, Operand wC, Operand bRu, Operand bC) { - OperationBuilder opBuilder = scope.env().opBuilder("GRUBlockCell", scope.makeOpName("GRUBlockCell")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GRUBlockCell")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(hPrev.asOutput()); opBuilder.addInput(wRu.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java index 094b7d46f6e..12503ae477d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java @@ -150,7 +150,7 @@ private GRUBlockCellGrad(Operation operation) { public static GRUBlockCellGrad create(Scope scope, Operand x, Operand hPrev, Operand wRu, Operand wC, Operand bRu, Operand bC, Operand r, Operand u, Operand c, Operand dH) { - OperationBuilder opBuilder = scope.env().opBuilder("GRUBlockCellGrad", scope.makeOpName("GRUBlockCellGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GRUBlockCellGrad")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(hPrev.asOutput()); opBuilder.addInput(wRu.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InTopK.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InTopK.java index bd307645aed..2d87109548b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InTopK.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InTopK.java @@ -75,7 +75,7 @@ private InTopK(Operation operation) { ) public static InTopK create(Scope scope, Operand predictions, Operand targets, Operand k) { - OperationBuilder opBuilder = scope.env().opBuilder("InTopKV2", scope.makeOpName("InTopK")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InTopK")); opBuilder.addInput(predictions.asOutput()); opBuilder.addInput(targets.asOutput()); opBuilder.addInput(k.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java index 2673b2e225d..a65d21a2ca0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java @@ -60,7 +60,7 @@ private 
InvGrad(Operation operation) { describeByClass = true ) public static InvGrad create(Scope scope, Operand y, Operand dy) { - OperationBuilder opBuilder = scope.env().opBuilder("InvGrad", scope.makeOpName("InvGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InvGrad")); opBuilder.addInput(y.asOutput()); opBuilder.addInput(dy.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java index 7e3642b9f5f..9810a19768b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java @@ -65,7 +65,7 @@ private IsotonicRegression(Operation operation) { ) public static IsotonicRegression create(Scope scope, Operand input, Class outputDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("IsotonicRegression", scope.makeOpName("IsotonicRegression")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("IsotonicRegression")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_dtype", Operands.toDataType(outputDtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java index 8fb95490147..a85f32110f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java @@ -65,7 +65,7 @@ private L2Loss(Operation operation) { describeByClass = true ) public static L2Loss create(Scope scope, Operand t) { - OperationBuilder opBuilder = scope.env().opBuilder("L2Loss", scope.makeOpName("L2Loss")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("L2Loss")); opBuilder.addInput(t.asOutput()); opBuilder = scope.apply(opBuilder); return new L2Loss<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java index 9f9084b8007..f910a98be46 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java @@ -107,7 +107,7 @@ private LSTMBlockCell(Operation operation) { public static LSTMBlockCell create(Scope scope, Operand x, Operand csPrev, Operand hPrev, Operand w, Operand wci, Operand wcf, Operand wco, Operand b, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LSTMBlockCell", scope.makeOpName("LSTMBlockCell")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LSTMBlockCell")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(csPrev.asOutput()); opBuilder.addInput(hPrev.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java index 401ba8a0dd6..1c5be476677 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java @@ -89,7 +89,7 @@ public static LSTMBlockCellGrad create(Scope scope, Opera Operand csPrev, Operand hPrev, Operand w, Operand wci, Operand wcf, Operand wco, Operand b, Operand i, Operand cs, Operand f, Operand o, Operand ci, Operand co, Operand csGrad, Operand hGrad, Boolean usePeephole) { - OperationBuilder opBuilder = scope.env().opBuilder("LSTMBlockCellGrad", scope.makeOpName("LSTMBlockCellGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LSTMBlockCellGrad")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(csPrev.asOutput()); opBuilder.addInput(hPrev.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java index 1a0679d979c..102e339294a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java @@ -63,7 +63,7 @@ private LeakyRelu(Operation operation) { ) public static LeakyRelu create(Scope scope, Operand features, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LeakyRelu", scope.makeOpName("LeakyRelu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LeakyRelu")); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LearnedUnigramCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LearnedUnigramCandidateSampler.java index e7fba301202..a4e0e02ac56 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LearnedUnigramCandidateSampler.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LearnedUnigramCandidateSampler.java @@ -81,7 +81,7 @@ private LearnedUnigramCandidateSampler(Operation operation) { ) public static LearnedUnigramCandidateSampler create(Scope scope, Operand trueClasses, Long numTrue, Long numSampled, Boolean unique, Long rangeMax, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LearnedUnigramCandidateSampler", scope.makeOpName("LearnedUnigramCandidateSampler")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LearnedUnigramCandidateSampler")); opBuilder.addInput(trueClasses.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_true", numTrue); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java index 2da47d2cecc..aafc6033ad4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java @@ -74,7 +74,7 @@ private LocalResponseNormalization(Operation operation) { ) public static LocalResponseNormalization create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LRN", scope.makeOpName("LocalResponseNormalization")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LocalResponseNormalization")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java index bbd4a38dee0..ef0017a4c90 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java @@ -61,7 +61,7 @@ private LocalResponseNormalizationGrad(Operation operation) { ) public static LocalResponseNormalizationGrad create(Scope scope, Operand inputGrads, Operand inputImage, Operand outputImage, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LRNGrad", scope.makeOpName("LocalResponseNormalizationGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LocalResponseNormalizationGrad")); opBuilder.addInput(inputGrads.asOutput()); opBuilder.addInput(inputImage.asOutput()); opBuilder.addInput(outputImage.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java index e39f5686016..699922585a8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java @@ -65,7 +65,7 @@ private LogSoftmax(Operation operation) { describeByClass = true ) public static LogSoftmax create(Scope scope, Operand logits) { - OperationBuilder opBuilder = scope.env().opBuilder("LogSoftmax", scope.makeOpName("LogSoftmax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LogSoftmax")); opBuilder.addInput(logits.asOutput()); opBuilder = scope.apply(opBuilder); return new LogSoftmax<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java index 9eda868c92f..9079b0bd366 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java @@ -68,7 +68,7 @@ private MaxPool(Operation operation) { ) public static MaxPool create(Scope scope, Operand input, Operand ksize, Operand strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MaxPoolV2", scope.makeOpName("MaxPool")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxPool")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(ksize.asOutput()); opBuilder.addInput(strides.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java index 9488cba0214..31cdd55de62 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java @@ -69,7 +69,7 @@ private MaxPool3d(Operation operation) { ) public static MaxPool3d create(Scope scope, Operand input, List ksize, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MaxPool3D", scope.makeOpName("MaxPool3d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxPool3d")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); long[] ksizeArray = new long[ksize.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java index 2d38c68cdbd..096e4b4369e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java @@ -73,7 +73,7 @@ private MaxPool3dGrad(Operation operation) { public static MaxPool3dGrad create(Scope scope, Operand origInput, Operand origOutput, Operand grad, List ksize, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MaxPool3DGrad", scope.makeOpName("MaxPool3dGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxPool3dGrad")); opBuilder.addInput(origInput.asOutput()); opBuilder.addInput(origOutput.asOutput()); opBuilder.addInput(grad.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java index dba59ac6923..3c5bbf30708 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java @@ -72,7 +72,7 @@ private MaxPool3dGradGrad(Operation operation) { public static MaxPool3dGradGrad create(Scope scope, Operand origInput, Operand origOutput, Operand grad, List ksize, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MaxPool3DGradGrad", scope.makeOpName("MaxPool3dGradGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxPool3dGradGrad")); opBuilder.addInput(origInput.asOutput()); opBuilder.addInput(origOutput.asOutput()); opBuilder.addInput(grad.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java index 3323aeed090..e73b77b308c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java @@ -71,7 +71,7 @@ private MaxPoolGrad(Operation operation) { public static MaxPoolGrad create(Scope scope, Operand origInput, Operand origOutput, Operand grad, Operand ksize, Operand strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MaxPoolGradV2", scope.makeOpName("MaxPoolGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxPoolGrad")); opBuilder.addInput(origInput.asOutput()); opBuilder.addInput(origOutput.asOutput()); opBuilder.addInput(grad.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java index e254b70bf7f..d0bfcc9956a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java @@ -71,7 +71,7 @@ private MaxPoolGradGrad(Operation operation) { public static MaxPoolGradGrad create(Scope scope, Operand origInput, Operand origOutput, Operand grad, Operand ksize, Operand strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MaxPoolGradGradV2", scope.makeOpName("MaxPoolGradGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxPoolGradGrad")); opBuilder.addInput(origInput.asOutput()); opBuilder.addInput(origOutput.asOutput()); opBuilder.addInput(grad.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java index 7751bca3d64..6a4d68fcab3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java @@ -72,7 +72,7 @@ private MaxPoolGradGradWithArgmax(Operation operation) { public static MaxPoolGradGradWithArgmax create(Scope scope, Operand input, Operand grad, Operand argmax, List ksize, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MaxPoolGradGradWithArgmax", scope.makeOpName("MaxPoolGradGradWithArgmax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxPoolGradGradWithArgmax")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(grad.asOutput()); opBuilder.addInput(argmax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java index e4a93a781a0..ca8153f86da 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java @@ -68,7 +68,7 @@ private MaxPoolGradWithArgmax(Operation operation) { public static MaxPoolGradWithArgmax create(Scope scope, Operand input, Operand grad, Operand argmax, List ksize, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MaxPoolGradWithArgmax", scope.makeOpName("MaxPoolGradWithArgmax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxPoolGradWithArgmax")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(grad.asOutput()); opBuilder.addInput(argmax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java index bc705020ec7..d8682b8e4bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java @@ -86,7 +86,7 @@ private MaxPoolWithArgmax(Operation operation) { public static MaxPoolWithArgmax create(Scope scope, Operand input, List ksize, List strides, Class Targmax, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("MaxPoolWithArgmax", scope.makeOpName("MaxPoolWithArgmax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MaxPoolWithArgmax")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); long[] ksizeArray = new long[ksize.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java index 33dbf662b9d..7ed8fee52d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java @@ -73,7 +73,7 @@ private NthElement(Operation operation) { ) public static NthElement create(Scope scope, Operand input, Operand n, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("NthElement", scope.makeOpName("NthElement")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NthElement")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(n.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java index 34101f042f4..b33fc36e608 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java @@ -78,7 +78,7 @@ private QuantizedAvgPool(Operation operation) { public static QuantizedAvgPool create(Scope scope, Operand input, Operand minInput, Operand maxInput, List ksize, List strides, String padding) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedAvgPool", scope.makeOpName("QuantizedAvgPool")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedAvgPool")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(minInput.asOutput()); opBuilder.addInput(maxInput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java index 058947effea..ddc13f90224 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java @@ -102,7 +102,7 @@ public static QuantizedBatchNormWithGloba Operand vMax, Operand beta, Operand betaMin, Operand betaMax, Operand gamma, Operand gammaMin, Operand gammaMax, Class outType, Float varianceEpsilon, Boolean scaleAfterNormalization) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedBatchNormWithGlobalNormalization", scope.makeOpName("QuantizedBatchNormWithGlobalNormalization")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedBatchNormWithGlobalNormalization")); opBuilder.addInput(t.asOutput()); opBuilder.addInput(tMin.asOutput()); opBuilder.addInput(tMax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java index a5857e1ae76..cb1e586f257 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java @@ -79,7 +79,7 @@ public static QuantizedBiasAdd create(Scope scope, Operand input, Operand bias, Operand minInput, Operand maxInput, Operand minBias, Operand maxBias, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedBiasAdd", scope.makeOpName("QuantizedBiasAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedBiasAdd")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(bias.asOutput()); opBuilder.addInput(minInput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java index 1548df84847..17df774e266 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java @@ -80,7 +80,7 @@ public static QuantizedConv2DAndRelu create(Scope scope, Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DAndRelu", scope.makeOpName("QuantizedConv2DAndRelu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DAndRelu")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(minInput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java index cb0ed49c2e9..aff2a5fa3ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java @@ -83,7 +83,7 @@ public static QuantizedConv2DAndReluAndRequantize create( Operand maxFilter, Operand minFreezedOutput, Operand maxFreezedOutput, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DAndReluAndRequantize", scope.makeOpName("QuantizedConv2DAndReluAndRequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DAndReluAndRequantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(minInput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java index ebb408f7434..ff848ffe752 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java @@ -83,7 +83,7 @@ public static QuantizedConv2DAndRequantize create(Scope s Operand maxFilter, Operand minFreezedOutput, Operand maxFreezedOutput, Class outType, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DAndRequantize", scope.makeOpName("QuantizedConv2DAndRequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DAndRequantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(minInput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java index 5fb9a7ccae3..db50edce61d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java @@ -80,7 +80,7 @@ public static QuantizedConv2DPerChannel create(Scope scop Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DPerChannel", scope.makeOpName("QuantizedConv2DPerChannel")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DPerChannel")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(minInput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java index 4988f86b878..bab9a335a71 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java @@ -81,7 +81,7 @@ public static QuantizedConv2DWithBias create(Scope scope, Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DWithBias", scope.makeOpName("QuantizedConv2DWithBias")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DWithBias")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java index 4dcee0cfa02..2fd266645ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java @@ -81,7 +81,7 @@ public static QuantizedConv2DWithBiasAndRelu create(Scope Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Class outType, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DWithBiasAndRelu", scope.makeOpName("QuantizedConv2DWithBiasAndRelu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DWithBiasAndRelu")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java index de6b10dfc0a..f2675f3d14b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java @@ -84,7 +84,7 @@ public static QuantizedConv2DWithBiasAndReluAndRequantize Operand minFilter, Operand maxFilter, Operand minFreezedOutput, Operand maxFreezedOutput, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DWithBiasAndReluAndRequantize", scope.makeOpName("QuantizedConv2DWithBiasAndReluAndRequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DWithBiasAndReluAndRequantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java index 8ff06313fca..87c648079e2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java @@ -84,7 +84,7 @@ public static QuantizedConv2DWithBiasAndRequantize create Operand minFilter, Operand maxFilter, Operand minFreezedOutput, Operand maxFreezedOutput, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DWithBiasAndRequantize", scope.makeOpName("QuantizedConv2DWithBiasAndRequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DWithBiasAndRequantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java index 06cde9bad75..f8db22536e2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java @@ -88,7 +88,7 @@ public static QuantizedConv2DWithBiasSignedSumAndReluAndRequ Operand maxFreezedOutput, Operand summand, Operand minSummand, Operand maxSummand, Class outType, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", scope.makeOpName("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java index 7cd443db36d..90d4172060d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java @@ -82,7 +82,7 @@ public static QuantizedConv2DWithBiasSumAndRelu create(Sc Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Operand summand, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DWithBiasSumAndRelu", scope.makeOpName("QuantizedConv2DWithBiasSumAndRelu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DWithBiasSumAndRelu")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java index 1081e876325..fb302d2676a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java @@ -88,7 +88,7 @@ public static QuantizedConv2DWithBiasSumAndReluAndRequantize Operand maxFreezedOutput, Operand summand, Operand minSummand, Operand maxSummand, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2DWithBiasSumAndReluAndRequantize", scope.makeOpName("QuantizedConv2DWithBiasSumAndReluAndRequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2DWithBiasSumAndReluAndRequantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java index 634dcbf13eb..1074cf9f002 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java @@ -89,7 +89,7 @@ public static QuantizedConv2d create(Scope scope, Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Class outType, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConv2D", scope.makeOpName("QuantizedConv2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConv2d")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(minInput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java index 36acaca3067..126a89aa6b7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java @@ -80,7 +80,7 @@ public static QuantizedDepthwiseConv2D create(Scope scope Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedDepthwiseConv2D", scope.makeOpName("QuantizedDepthwiseConv2D")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedDepthwiseConv2D")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(minInput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java index 40298bf6d2a..e8672cdcd8e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java @@ -81,7 +81,7 @@ public static QuantizedDepthwiseConv2DWithBias create(Sco Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedDepthwiseConv2DWithBias", scope.makeOpName("QuantizedDepthwiseConv2DWithBias")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedDepthwiseConv2DWithBias")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java index d9105ad94a4..43163c51c44 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java @@ -81,7 +81,7 @@ public static QuantizedDepthwiseConv2DWithBiasAndRelu cre Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Class outType, List strides, String padding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedDepthwiseConv2DWithBiasAndRelu", scope.makeOpName("QuantizedDepthwiseConv2DWithBiasAndRelu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedDepthwiseConv2DWithBiasAndRelu")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java index dc801c317f0..b330f7609c0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java @@ -84,7 +84,7 @@ public static QuantizedDepthwiseConv2DWithBiasAndReluAndRequ Operand minFilter, Operand maxFilter, Operand minFreezedOutput, Operand maxFreezedOutput, Class outType, List strides, String padding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", scope.makeOpName("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java index 28336b47159..b357e42f390 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java @@ -72,7 +72,7 @@ private QuantizedInstanceNorm(Operation operation) { ) public static QuantizedInstanceNorm create(Scope scope, Operand x, Operand xMin, Operand xMax, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedInstanceNorm", scope.makeOpName("QuantizedInstanceNorm")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedInstanceNorm")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(xMin.asOutput()); opBuilder.addInput(xMax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java index fef1e38a3d8..e37245fdd42 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java @@ -78,7 +78,7 @@ private QuantizedMaxPool(Operation operation) { public static QuantizedMaxPool create(Scope scope, Operand input, Operand minInput, Operand maxInput, List ksize, List strides, String padding) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedMaxPool", scope.makeOpName("QuantizedMaxPool")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedMaxPool")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(minInput.asOutput()); opBuilder.addInput(maxInput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java index 82b87d4f38c..b060f82abe6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java @@ -74,7 +74,7 @@ private QuantizedRelu(Operation operation) { public static QuantizedRelu create(Scope scope, Operand features, Operand minFeatures, Operand maxFeatures, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedRelu", scope.makeOpName("QuantizedRelu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedRelu")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(minFeatures.asOutput()); opBuilder.addInput(maxFeatures.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java index 79034e99177..2f865eb4ad1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java @@ -74,7 +74,7 @@ private QuantizedRelu6(Operation operation) { public static QuantizedRelu6 create(Scope scope, Operand features, Operand minFeatures, Operand maxFeatures, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedRelu6", scope.makeOpName("QuantizedRelu6")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedRelu6")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(minFeatures.asOutput()); opBuilder.addInput(maxFeatures.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java index c529a8851bc..36809492c69 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java @@ -75,7 +75,7 @@ private QuantizedReluX(Operation operation) { public static QuantizedReluX create(Scope scope, Operand features, Operand maxValue, Operand minFeatures, Operand maxFeatures, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedReluX", scope.makeOpName("QuantizedReluX")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedReluX")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(maxValue.asOutput()); opBuilder.addInput(minFeatures.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java index 6963a4905cb..4bae9944d20 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java @@ -71,7 +71,7 @@ private Relu(Operation operation) { describeByClass = true ) public static Relu create(Scope scope, Operand features) { - OperationBuilder opBuilder = scope.env().opBuilder("Relu", scope.makeOpName("Relu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Relu")); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); return new Relu<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java index 7fd261997e1..b710f942761 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java @@ -61,7 +61,7 @@ private Relu6(Operation operation) { describeByClass = true ) public static Relu6 create(Scope scope, Operand features) { - OperationBuilder opBuilder = scope.env().opBuilder("Relu6", scope.makeOpName("Relu6")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Relu6")); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); return new Relu6<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java index ef74b7153e2..660dcb7085a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java @@ -60,7 +60,7 @@ private Relu6Grad(Operation operation) { ) public static Relu6Grad create(Scope scope, Operand gradients, Operand features) { - OperationBuilder opBuilder = scope.env().opBuilder("Relu6Grad", scope.makeOpName("Relu6Grad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Relu6Grad")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java index 9d332cb5f9a..92788e886ae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java @@ -60,7 +60,7 @@ 
private ReluGrad(Operation operation) { ) public static ReluGrad create(Scope scope, Operand gradients, Operand features) { - OperationBuilder opBuilder = scope.env().opBuilder("ReluGrad", scope.makeOpName("ReluGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReluGrad")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java index 4958748b10c..ca3357fdff1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java @@ -66,7 +66,7 @@ private Selu(Operation operation) { describeByClass = true ) public static Selu create(Scope scope, Operand features) { - OperationBuilder opBuilder = scope.env().opBuilder("Selu", scope.makeOpName("Selu")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Selu")); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); return new Selu<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java index 5c54a3f109b..66a4c89806f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java @@ -59,7 +59,7 @@ private SeluGrad(Operation operation) { ) public static SeluGrad create(Scope scope, Operand gradients, Operand outputs) { - OperationBuilder opBuilder = scope.env().opBuilder("SeluGrad", scope.makeOpName("SeluGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SeluGrad")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(outputs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java index fa3f0b32214..bd7f8c0f9c0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java @@ -65,7 +65,7 @@ private Softmax(Operation operation) { describeByClass = true ) public static Softmax create(Scope scope, Operand logits) { - OperationBuilder opBuilder = scope.env().opBuilder("Softmax", scope.makeOpName("Softmax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Softmax")); opBuilder.addInput(logits.asOutput()); opBuilder = scope.apply(opBuilder); return new Softmax<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java index 5eeb25a3be3..0fe94d6659e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java @@ -61,7 +61,7 @@ private Softsign(Operation operation) { describeByClass = true ) public static Softsign create(Scope scope, Operand features) { - OperationBuilder opBuilder = scope.env().opBuilder("Softsign", 
scope.makeOpName("Softsign")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Softsign")); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); return new Softsign<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java index c60c25a60cd..45de76bc54f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java @@ -59,7 +59,7 @@ private SoftsignGrad(Operation operation) { ) public static SoftsignGrad create(Scope scope, Operand gradients, Operand features) { - OperationBuilder opBuilder = scope.env().opBuilder("SoftsignGrad", scope.makeOpName("SoftsignGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SoftsignGrad")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(features.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java index b3cd1fb0c60..0b29e7b9afe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java @@ -138,7 +138,7 @@ private SpaceToBatch(Operation operation) { ) public static SpaceToBatch create(Scope scope, Operand input, Operand paddings, Long blockSize) { - OperationBuilder opBuilder = scope.env().opBuilder("SpaceToBatch", scope.makeOpName("SpaceToBatch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SpaceToBatch")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(paddings.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java index c592597f077..e1fa58d4ff1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java @@ -132,7 +132,7 @@ private SpaceToDepth(Operation operation) { ) public static SpaceToDepth create(Scope scope, Operand input, Long blockSize, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SpaceToDepth", scope.makeOpName("SpaceToDepth")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SpaceToDepth")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("block_size", blockSize); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java index 99d156bba87..6da2e8816a8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java @@ -78,7 +78,7 @@ private TopK(Operation operation) { ) public static TopK create(Scope scope, Operand input, Operand k, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("TopKV2", scope.makeOpName("TopK")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TopK")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(k.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java index 331933979c7..0f2d50289b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java @@ -69,7 +69,7 @@ private SoftmaxCrossEntropyWithLogits(Operation operation) { ) public static SoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { - OperationBuilder opBuilder = scope.env().opBuilder("SoftmaxCrossEntropyWithLogits", scope.makeOpName("SoftmaxCrossEntropyWithLogits")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java index 8c48cd0db4d..30eec4c3c05 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java @@ -72,7 +72,7 @@ private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { ) public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSoftmaxCrossEntropyWithLogits", scope.makeOpName("SparseSoftmaxCrossEntropyWithLogits")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java index 134856e714e..faab6105ce4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java @@ -113,7 +113,7 @@ private Dequantize(Operation operation) { public static Dequantize create(Scope scope, Operand input, Operand minRange, Operand maxRange, Class dtype, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Dequantize", scope.makeOpName("Dequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Dequantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(minRange.asOutput()); opBuilder.addInput(maxRange.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgs.java index 8f0e3a3cd5d..b56ea653c7a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgs.java @@ -80,7 +80,7 @@ private FakeQuantWithMinMaxArgs(Operation operation) { ) public static FakeQuantWithMinMaxArgs create(Scope scope, Operand inputs, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("FakeQuantWithMinMaxArgs", scope.makeOpName("FakeQuantWithMinMaxArgs")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FakeQuantWithMinMaxArgs")); opBuilder.addInput(inputs.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java index 05da8e5d06f..e7cdd673eda 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java @@ -61,7 +61,7 @@ private FakeQuantWithMinMaxArgsGradient(Operation operation) { ) public static FakeQuantWithMinMaxArgsGradient create(Scope scope, Operand gradients, Operand inputs, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("FakeQuantWithMinMaxArgsGradient", scope.makeOpName("FakeQuantWithMinMaxArgsGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FakeQuantWithMinMaxArgsGradient")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(inputs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java index e411e8e93d3..81ac8d7d52a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java @@ -85,7 +85,7 @@ private FakeQuantWithMinMaxVars(Operation operation) { ) public static FakeQuantWithMinMaxVars create(Scope scope, Operand inputs, Operand min, Operand max, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("FakeQuantWithMinMaxVars", scope.makeOpName("FakeQuantWithMinMaxVars")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FakeQuantWithMinMaxVars")); opBuilder.addInput(inputs.asOutput()); opBuilder.addInput(min.asOutput()); opBuilder.addInput(max.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsGradient.java index d5809fa3abc..66a5e18efe0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsGradient.java @@ -70,7 +70,7 @@ private FakeQuantWithMinMaxVarsGradient(Operation operation) { ) public static FakeQuantWithMinMaxVarsGradient create(Scope scope, Operand gradients, Operand inputs, Operand min, Operand max, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("FakeQuantWithMinMaxVarsGradient", scope.makeOpName("FakeQuantWithMinMaxVarsGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FakeQuantWithMinMaxVarsGradient")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(inputs.asOutput()); opBuilder.addInput(min.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannel.java index 953271d9129..61c59c23a0d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannel.java @@ -86,7 +86,7 @@ private FakeQuantWithMinMaxVarsPerChannel(Operation operation) { ) public static FakeQuantWithMinMaxVarsPerChannel create(Scope scope, Operand inputs, Operand min, Operand max, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("FakeQuantWithMinMaxVarsPerChannel", scope.makeOpName("FakeQuantWithMinMaxVarsPerChannel")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FakeQuantWithMinMaxVarsPerChannel")); opBuilder.addInput(inputs.asOutput()); opBuilder.addInput(min.asOutput()); opBuilder.addInput(max.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannelGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannelGradient.java index 0f5748301cf..e07a811035b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannelGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannelGradient.java @@ -73,7 +73,7 @@ private FakeQuantWithMinMaxVarsPerChannelGradient(Operation operation) { public static FakeQuantWithMinMaxVarsPerChannelGradient create(Scope scope, Operand gradients, Operand inputs, Operand min, Operand max, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("FakeQuantWithMinMaxVarsPerChannelGradient", scope.makeOpName("FakeQuantWithMinMaxVarsPerChannelGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FakeQuantWithMinMaxVarsPerChannelGradient")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(inputs.asOutput()); opBuilder.addInput(min.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java index 2302b6530c5..6c63f3ec77d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java @@ -171,7 +171,7 @@ private Quantize(Operation operation) { ) public static Quantize create(Scope scope, Operand input, Operand minRange, Operand maxRange, Class T, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizeV2", scope.makeOpName("Quantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Quantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(minRange.asOutput()); opBuilder.addInput(maxRange.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java index 4fa1a9a0021..314b577d96d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java @@ -69,7 +69,7 @@ private QuantizeAndDequantize(Operation operation) { ) public static QuantizeAndDequantize create(Scope scope, Operand input, Operand inputMin, Operand inputMax, Operand numBits, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizeAndDequantizeV3", scope.makeOpName("QuantizeAndDequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizeAndDequantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputMin.asOutput()); opBuilder.addInput(inputMax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java index 6cb3d26c426..ad04f64d628 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java @@ -69,7 +69,7 @@ private QuantizeAndDequantizeV3(Operation operation) { ) public static QuantizeAndDequantizeV3 create(Scope scope, Operand input, Operand inputMin, Operand inputMax, Operand numBits, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizeAndDequantizeV3", scope.makeOpName("QuantizeAndDequantizeV3")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizeAndDequantizeV3")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputMin.asOutput()); opBuilder.addInput(inputMax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java index 00f10f48024..0d51a0020b2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java @@ -67,7 +67,7 @@ private QuantizeAndDequantizeV4(Operation operation) { ) public static QuantizeAndDequantizeV4 create(Scope scope, Operand input, Operand inputMin, Operand inputMax, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizeAndDequantizeV4", scope.makeOpName("QuantizeAndDequantizeV4")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizeAndDequantizeV4")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputMin.asOutput()); opBuilder.addInput(inputMax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java index 31ee0a84208..84274a6e9d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java @@ -75,7 +75,7 @@ private QuantizeAndDequantizeV4Grad(Operation operation) { public static QuantizeAndDequantizeV4Grad create(Scope scope, Operand gradients, Operand input, Operand inputMin, Operand inputMax, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizeAndDequantizeV4Grad", scope.makeOpName("QuantizeAndDequantizeV4Grad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizeAndDequantizeV4Grad")); opBuilder.addInput(gradients.asOutput()); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputMin.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java index 3de5b5c3f34..4e98bdcaa26 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java @@ -93,7 +93,7 @@ private QuantizeDownAndShrinkRange(Operation operation) { public static QuantizeDownAndShrinkRange create(Scope scope, Operand input, Operand inputMin, Operand inputMax, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizeDownAndShrinkRange", scope.makeOpName("QuantizeDownAndShrinkRange")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizeDownAndShrinkRange")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputMin.asOutput()); opBuilder.addInput(inputMax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java index 602bd900291..0ef565d38db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java @@ -77,7 +77,7 @@ private QuantizedConcat(Operation operation) { public static QuantizedConcat create(Scope scope, Operand concatDim, Iterable> values, Iterable> inputMins, Iterable> inputMaxes) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedConcat", scope.makeOpName("QuantizedConcat")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedConcat")); opBuilder.addInput(concatDim.asOutput()); opBuilder.addInputList(Operands.asOutputs(values)); opBuilder.addInputList(Operands.asOutputs(inputMins)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java index 1fc826d66d7..233af64fc95 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java @@ -73,7 +73,7 @@ public static QuantizedMatMulWithBiasAndDequantize create Operand minA, Operand maxA, Operand minB, Operand maxB, Operand minFreezedOutput, Operand maxFreezedOutput, Class Toutput, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedMatMulWithBiasAndDequantize", scope.makeOpName("QuantizedMatMulWithBiasAndDequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedMatMulWithBiasAndDequantize")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java index a5b19eedc25..b235db81014 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java @@ -79,7 +79,7 @@ public static QuantizedMatMulWithBiasAndRequantize create Operand minA, Operand maxA, Operand minB, Operand maxB, Operand minFreezedOutput, Operand maxFreezedOutput, Class Toutput, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("QuantizedMatMulWithBiasAndRequantize", scope.makeOpName("QuantizedMatMulWithBiasAndRequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("QuantizedMatMulWithBiasAndRequantize")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder.addInput(bias.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/RequantizationRange.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/RequantizationRange.java index e226adfe73a..0a8d62cab19 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/RequantizationRange.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/RequantizationRange.java @@ -69,7 +69,7 @@ private RequantizationRange(Operation operation) { ) public static RequantizationRange create(Scope scope, Operand input, Operand inputMin, Operand inputMax) { - OperationBuilder opBuilder = scope.env().opBuilder("RequantizationRange", scope.makeOpName("RequantizationRange")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RequantizationRange")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputMin.asOutput()); opBuilder.addInput(inputMax.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java index f990b3548f0..f7a4cb8fe11 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java @@ -83,7 +83,7 @@ public static Requantize create(Scope scope, Operand input, Operand inputMin, Operand inputMax, Operand requestedOutputMin, Operand requestedOutputMax, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("Requantize", scope.makeOpName("Requantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Requantize")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(inputMin.asOutput()); opBuilder.addInput(inputMax.asOutput()); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java index 6a3aaa298c4..3441decff8d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java @@ -77,7 +77,7 @@ private RaggedBincount(Operation operation) { public static RaggedBincount create(Scope scope, Operand splits, Operand values, Operand sizeOutput, Operand weights, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RaggedBincount", scope.makeOpName("RaggedBincount")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RaggedBincount")); opBuilder.addInput(splits.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java index 20ec23bdafc..925d4a3a505 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java @@ -72,7 +72,7 @@ private RaggedCountSparseOutput(Operation operation) { public static RaggedCountSparseOutput create(Scope scope, Operand splits, Operand values, Operand weights, Boolean binaryOutput, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RaggedCountSparseOutput", scope.makeOpName("RaggedCountSparseOutput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RaggedCountSparseOutput")); opBuilder.addInput(splits.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder.addInput(weights.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java index 7f388bb3212..976f9aa9c1f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java @@ -86,7 +86,7 @@ public static RaggedCross create(Scop Iterable> sparseShape, Iterable> denseInputs, String inputOrder, Boolean hashedOutput, Long numBuckets, Long hashKey, Class outValuesType, Class outRowSplitsType) { - OperationBuilder opBuilder = scope.env().opBuilder("RaggedCross", scope.makeOpName("RaggedCross")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RaggedCross")); opBuilder.addInputList(Operands.asOutputs(raggedValues)); opBuilder.addInputList(Operands.asOutputs(raggedRowSplits)); opBuilder.addInputList(Operands.asOutputs(sparseIndices)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java index 036a5f252f2..4744fb613a3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java @@ -99,7 +99,7 @@ private RaggedGather(Operation operation) { public static 
RaggedGather create(Scope scope, Iterable> paramsNestedSplits, Operand paramsDenseValues, Operand indices, Long OUTPUTRAGGEDRANK) { - OperationBuilder opBuilder = scope.env().opBuilder("RaggedGather", scope.makeOpName("RaggedGather")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RaggedGather")); opBuilder.addInputList(Operands.asOutputs(paramsNestedSplits)); opBuilder.addInput(paramsDenseValues.asOutput()); opBuilder.addInput(indices.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java index d842da7dd53..715d8771f7f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java @@ -82,7 +82,7 @@ private RaggedRange(Operation operation) { ) public static RaggedRange create(Scope scope, Operand starts, Operand limits, Operand deltas, Class Tsplits) { - OperationBuilder opBuilder = scope.env().opBuilder("RaggedRange", scope.makeOpName("RaggedRange")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RaggedRange")); opBuilder.addInput(starts.asOutput()); opBuilder.addInput(limits.asOutput()); opBuilder.addInput(deltas.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java index 830b73899f2..f293efe3754 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java @@ -90,7 +90,7 @@ private RaggedTensorFromVariant(Operation operation) { public static RaggedTensorFromVariant create( Scope scope, Operand encodedRagged, Long inputRaggedRank, Long outputRaggedRank, Class Tvalues, Class Tsplits) { - OperationBuilder opBuilder = scope.env().opBuilder("RaggedTensorFromVariant", scope.makeOpName("RaggedTensorFromVariant")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RaggedTensorFromVariant")); opBuilder.addInput(encodedRagged.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("input_ragged_rank", inputRaggedRank); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java index 8b0b91bbcf1..fdf111286a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java @@ -71,7 +71,7 @@ private RaggedTensorToSparse(Operation operation) { ) public static RaggedTensorToSparse create(Scope scope, Iterable> rtNestedSplits, Operand rtDenseValues) { - OperationBuilder opBuilder = scope.env().opBuilder("RaggedTensorToSparse", scope.makeOpName("RaggedTensorToSparse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RaggedTensorToSparse")); opBuilder.addInputList(Operands.asOutputs(rtNestedSplits)); opBuilder.addInput(rtDenseValues.asOutput()); opBuilder = scope.apply(opBuilder); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java index 6b269656c5d..94970bfcd57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java @@ -108,7 +108,7 @@ private RaggedTensorToTensor(Operation operation) { public static RaggedTensorToTensor create(Scope scope, Operand shape, Operand values, Operand defaultValue, Iterable> rowPartitionTensors, List rowPartitionTypes) { - OperationBuilder opBuilder = scope.env().opBuilder("RaggedTensorToTensor", scope.makeOpName("RaggedTensorToTensor")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RaggedTensorToTensor")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder.addInput(defaultValue.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java index 52385b1cf29..6324dcf933f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java @@ -72,7 +72,7 @@ private RaggedTensorToVariant(Operation operation) { public static RaggedTensorToVariant create(Scope scope, Iterable> rtNestedSplits, Operand rtDenseValues, Boolean batchedInput) { - OperationBuilder opBuilder = scope.env().opBuilder("RaggedTensorToVariant", scope.makeOpName("RaggedTensorToVariant")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RaggedTensorToVariant")); opBuilder.addInputList(Operands.asOutputs(rtNestedSplits)); opBuilder.addInput(rtDenseValues.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java index ae33a6e778a..f090fb01bdf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java @@ -70,7 +70,7 @@ private RaggedTensorToVariantGradient(Operation operation) { public static RaggedTensorToVariantGradient create(Scope scope, Operand encodedRaggedGrad, Operand rowSplits, Operand denseValuesShape, Class Tvalues) { - OperationBuilder opBuilder = scope.env().opBuilder("RaggedTensorToVariantGradient", scope.makeOpName("RaggedTensorToVariantGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RaggedTensorToVariantGradient")); opBuilder.addInput(encodedRaggedGrad.asOutput()); opBuilder.addInput(rowSplits.asOutput()); opBuilder.addInput(denseValuesShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AllCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AllCandidateSampler.java index ec4700fa695..73197facfd8 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AllCandidateSampler.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AllCandidateSampler.java @@ -80,7 +80,7 @@ private AllCandidateSampler(Operation operation) { ) public static AllCandidateSampler create(Scope scope, Operand trueClasses, Long numTrue, Long numSampled, Boolean unique, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("AllCandidateSampler", scope.makeOpName("AllCandidateSampler")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AllCandidateSampler")); opBuilder.addInput(trueClasses.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_true", numTrue); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java index 7081ec18ef8..9ee7de7e0aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java @@ -61,7 +61,7 @@ private AnonymousRandomSeedGenerator(Operation operation) { ) public static AnonymousRandomSeedGenerator create(Scope scope, Operand seed, Operand seed2) { - OperationBuilder opBuilder = scope.env().opBuilder("AnonymousRandomSeedGenerator", scope.makeOpName("AnonymousRandomSeedGenerator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AnonymousRandomSeedGenerator")); opBuilder.addInput(seed.asOutput()); opBuilder.addInput(seed2.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java index 11a128fb80a..e4a21446bbc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java @@ -63,7 +63,7 @@ private AnonymousSeedGenerator(Operation operation) { ) public static AnonymousSeedGenerator create(Scope scope, Operand seed, Operand seed2, Operand reshuffle) { - OperationBuilder opBuilder = scope.env().opBuilder("AnonymousSeedGenerator", scope.makeOpName("AnonymousSeedGenerator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AnonymousSeedGenerator")); opBuilder.addInput(seed.asOutput()); opBuilder.addInput(seed2.asOutput()); opBuilder.addInput(reshuffle.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java index 60600eab717..6d777113e8b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java @@ -51,7 +51,7 @@ private DeleteRandomSeedGenerator(Operation operation) { ) public static DeleteRandomSeedGenerator create(Scope scope, Operand handle, Operand deleter) { - OperationBuilder opBuilder = scope.env().opBuilder("DeleteRandomSeedGenerator", 
scope.makeOpName("DeleteRandomSeedGenerator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeleteRandomSeedGenerator")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(deleter.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java index 663529c7ee3..4e32e957220 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java @@ -51,7 +51,7 @@ private DeleteSeedGenerator(Operation operation) { ) public static DeleteSeedGenerator create(Scope scope, Operand handle, Operand deleter) { - OperationBuilder opBuilder = scope.env().opBuilder("DeleteSeedGenerator", scope.makeOpName("DeleteSeedGenerator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeleteSeedGenerator")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(deleter.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/LogUniformCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/LogUniformCandidateSampler.java index 617175b4152..03e07249afb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/LogUniformCandidateSampler.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/LogUniformCandidateSampler.java @@ -81,7 +81,7 @@ private LogUniformCandidateSampler(Operation operation) { ) public static LogUniformCandidateSampler create(Scope scope, Operand trueClasses, Long numTrue, Long numSampled, Boolean unique, Long rangeMax, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LogUniformCandidateSampler", scope.makeOpName("LogUniformCandidateSampler")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LogUniformCandidateSampler")); opBuilder.addInput(trueClasses.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_true", numTrue); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java index 123a5de9e16..1e6cd745615 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java @@ -70,7 +70,7 @@ private Multinomial(Operation operation) { public static Multinomial create(Scope scope, Operand logits, Operand numSamples, Class outputDtype, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Multinomial", scope.makeOpName("Multinomial")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Multinomial")); opBuilder.addInput(logits.asOutput()); opBuilder.addInput(numSamples.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java index 9c1fa52000f..3d6636de0fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java @@ -62,7 +62,7 @@ private NonDeterministicInts(Operation operation) { ) public static NonDeterministicInts create(Scope scope, Operand shape, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("NonDeterministicInts", scope.makeOpName("NonDeterministicInts")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NonDeterministicInts")); opBuilder.addInput(shape.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java index 3187c28447c..606d62a32a5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java @@ -71,7 +71,7 @@ private ParameterizedTruncatedNormal(Operation operation) { public static ParameterizedTruncatedNormal create(Scope scope, Operand shape, Operand means, Operand stdevs, Operand minvals, Operand maxvals, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ParameterizedTruncatedNormal", scope.makeOpName("ParameterizedTruncatedNormal")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ParameterizedTruncatedNormal")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(means.asOutput()); opBuilder.addInput(stdevs.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java index da7fa82b70c..8cb32cd1c30 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java @@ -69,7 +69,7 @@ private RandomGamma(Operation operation) { ) public static RandomGamma create(Scope scope, Operand shape, Operand alpha, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RandomGamma", scope.makeOpName("RandomGamma")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomGamma")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(alpha.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java index 6daf06d2e7f..802b9fdd757 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java @@ -59,7 +59,7 @@ private RandomGammaGrad(Operation operation) { ) public static RandomGammaGrad create(Scope scope, Operand alpha, Operand sample) { - OperationBuilder opBuilder = scope.env().opBuilder("RandomGammaGrad", scope.makeOpName("RandomGammaGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomGammaGrad")); opBuilder.addInput(alpha.asOutput()); opBuilder.addInput(sample.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java index 914ab2f3a44..4922b13255e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java @@ -78,7 +78,7 @@ private RandomPoisson(Operation operation) { public static RandomPoisson create(Scope scope, Operand shape, Operand rate, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RandomPoissonV2", scope.makeOpName("RandomPoisson")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomPoisson")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(rate.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java index bb2a282ad00..32aeeb36e34 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java @@ -71,7 +71,7 @@ private RandomShuffle(Operation operation) { ) public static RandomShuffle create(Scope scope, Operand value, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RandomShuffle", scope.makeOpName("RandomShuffle")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomShuffle")); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java index 1cdea4ae17f..fae2ff235c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java @@ -66,7 +66,7 @@ private RandomStandardNormal(Operation operation) { ) public static RandomStandardNormal create(Scope scope, Operand shape, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RandomStandardNormal", scope.makeOpName("RandomStandardNormal")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomStandardNormal")); opBuilder.addInput(shape.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java index f5ad7fcc271..58e9944512b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java @@ -67,7 +67,7 @@ private RandomUniform(Operation operation) { ) public static RandomUniform create(Scope scope, Operand shape, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RandomUniform", scope.makeOpName("RandomUniform")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomUniform")); opBuilder.addInput(shape.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java index d08e70a0baf..fa87d13c8ae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java @@ -71,7 +71,7 @@ private RandomUniformInt(Operation operation) { ) public static RandomUniformInt create(Scope scope, Operand shape, Operand minval, Operand maxval, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RandomUniformInt", scope.makeOpName("RandomUniformInt")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RandomUniformInt")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(minval.asOutput()); opBuilder.addInput(maxval.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RecordInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RecordInput.java index 9469070c11a..0ab52ae834c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RecordInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RecordInput.java @@ -59,7 +59,7 @@ private RecordInput(Operation operation) { describeByClass = true ) public static RecordInput create(Scope scope, String filePattern, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RecordInput", scope.makeOpName("RecordInput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RecordInput")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("file_pattern", filePattern); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java index d86074997b0..0803ccfc026 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java @@ -63,7 +63,7 @@ private RngReadAndSkip(Operation operation) { ) public static RngReadAndSkip create(Scope scope, Operand resource, Operand alg, Operand delta) { - OperationBuilder opBuilder = scope.env().opBuilder("RngReadAndSkip", scope.makeOpName("RngReadAndSkip")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RngReadAndSkip")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(alg.asOutput()); opBuilder.addInput(delta.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java index fea4eed3b24..5e027c1ea8f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java @@ -57,7 +57,7 @@ private RngSkip(Operation operation) { ) public static RngSkip create(Scope scope, Operand resource, Operand algorithm, Operand delta) { - OperationBuilder opBuilder = scope.env().opBuilder("RngSkip", scope.makeOpName("RngSkip")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RngSkip")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(algorithm.asOutput()); opBuilder.addInput(delta.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java index 405f3be4ed5..4bcb8219373 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java @@ -72,7 +72,7 @@ private 
StatefulRandomBinomial(Operation operation) { public static StatefulRandomBinomial create(Scope scope, Operand resource, Operand algorithm, Operand shape, Operand counts, Operand probs, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatefulRandomBinomial", scope.makeOpName("StatefulRandomBinomial")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatefulRandomBinomial")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(algorithm.asOutput()); opBuilder.addInput(shape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java index 920b132283c..8cde26f1306 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java @@ -70,7 +70,7 @@ private StatefulStandardNormal(Operation operation) { public static StatefulStandardNormal create(Scope scope, Operand resource, Operand algorithm, Operand shape, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatefulStandardNormalV2", scope.makeOpName("StatefulStandardNormal")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatefulStandardNormal")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(algorithm.asOutput()); opBuilder.addInput(shape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java index 29f4c827a56..c97237166a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java @@ -68,7 +68,7 @@ private StatefulTruncatedNormal(Operation operation) { public static StatefulTruncatedNormal create(Scope scope, Operand resource, Operand algorithm, Operand shape, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatefulTruncatedNormal", scope.makeOpName("StatefulTruncatedNormal")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatefulTruncatedNormal")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(algorithm.asOutput()); opBuilder.addInput(shape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java index 525ec5d6db3..54530a52e9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java @@ -67,7 +67,7 @@ private StatefulUniform(Operation operation) { public static StatefulUniform create(Scope scope, Operand resource, Operand algorithm, Operand shape, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatefulUniform", scope.makeOpName("StatefulUniform")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatefulUniform")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(algorithm.asOutput()); 
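// Editorial note on the pattern in this hunk and the surrounding generated files: every
// create(...) method now passes the class's OP_NAME constant to opBuilder(...) instead of
// repeating the op-name string literal. A minimal sketch of the resulting shape, assuming the
// generated class declares such a constant (the StatefulUniform names below mirror this hunk;
// the sketch is illustrative, not a verbatim copy of the generated file):
//
//   /** The registered name of this op in the TensorFlow runtime. */
//   public static final String OP_NAME = "StatefulUniform";
//
//   public static <U extends TType> StatefulUniform<U> create(Scope scope, ...) {
//     // The constant replaces the former "StatefulUniform" string literal; the readable
//     // node name in the graph still comes from scope.makeOpName(...).
//     OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatefulUniform"));
//     ...
//   }
//
// Where the wrapper class name differs from the registered op (e.g. RandomPoisson,
// StatefulStandardNormal, StatelessRandomGamma elsewhere in this patch), the removed literals
// show that OP_NAME carries the versioned graph-op name ("RandomPoissonV2",
// "StatefulStandardNormalV2", "StatelessRandomGammaV2") while makeOpName(...) keeps the
// un-versioned class name for the node label.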
opBuilder.addInput(shape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java index 17c36ba762d..de83124e355 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java @@ -65,7 +65,7 @@ private StatefulUniformFullInt(Operation operation) { public static StatefulUniformFullInt create(Scope scope, Operand resource, Operand algorithm, Operand shape, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatefulUniformFullInt", scope.makeOpName("StatefulUniformFullInt")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatefulUniformFullInt")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(algorithm.asOutput()); opBuilder.addInput(shape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java index 34b12f3c48f..d51b3ca0f11 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java @@ -70,7 +70,7 @@ private StatefulUniformInt(Operation operation) { public static StatefulUniformInt create(Scope scope, Operand resource, Operand algorithm, Operand shape, Operand minval, Operand maxval) { - OperationBuilder opBuilder = scope.env().opBuilder("StatefulUniformInt", scope.makeOpName("StatefulUniformInt")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatefulUniformInt")); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(algorithm.asOutput()); opBuilder.addInput(shape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java index d1cb7dd4542..661327137f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java @@ -70,7 +70,7 @@ private StatelessMultinomial(Operation operation) { public static StatelessMultinomial create(Scope scope, Operand logits, Operand numSamples, Operand seed, Class outputDtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessMultinomial", scope.makeOpName("StatelessMultinomial")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessMultinomial")); opBuilder.addInput(logits.asOutput()); opBuilder.addInput(numSamples.asOutput()); opBuilder.addInput(seed.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java index 6cfee4854e1..65561c38db9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java @@ -65,7 +65,7 @@ private StatelessParameterizedTruncatedNormal(Operation operation) { public static StatelessParameterizedTruncatedNormal create(Scope scope, Operand shape, Operand seed, Operand means, Operand stddevs, Operand minvals, Operand maxvals) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessParameterizedTruncatedNormal", scope.makeOpName("StatelessParameterizedTruncatedNormal")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessParameterizedTruncatedNormal")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(seed.asOutput()); opBuilder.addInput(means.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java index f2f8548aa7a..857c1898ee2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java @@ -70,7 +70,7 @@ private StatelessRandomBinomial(Operation operation) { public static StatelessRandomBinomial create( Scope scope, Operand shape, Operand seed, Operand counts, Operand probs, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomBinomial", scope.makeOpName("StatelessRandomBinomial")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomBinomial")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(seed.asOutput()); opBuilder.addInput(counts.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java index b19c41fe4eb..6a254a07415 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java @@ -63,7 +63,7 @@ private StatelessRandomGamma(Operation operation) { ) public static StatelessRandomGamma create(Scope scope, Operand shape, Operand seed, Operand alpha) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomGammaV2", scope.makeOpName("StatelessRandomGamma")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomGamma")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(seed.asOutput()); opBuilder.addInput(alpha.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java index 9432aa91e76..28f5edd810d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java @@ -65,7 +65,7 @@ private StatelessRandomGetKeyCounterAlg(Operation operation) { ) public static StatelessRandomGetKeyCounterAlg create(Scope scope, Operand seed) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomGetKeyCounterAlg", 
scope.makeOpName("StatelessRandomGetKeyCounterAlg")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomGetKeyCounterAlg")); opBuilder.addInput(seed.asOutput()); opBuilder = scope.apply(opBuilder); return new StatelessRandomGetKeyCounterAlg(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java index 03402d96c48..abd2b668fd3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java @@ -68,7 +68,7 @@ private StatelessRandomNormal(Operation operation) { ) public static StatelessRandomNormal create(Scope scope, Operand shape, Operand seed, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomNormal", scope.makeOpName("StatelessRandomNormal")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomNormal")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(seed.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java index beae49bbea2..853b40eb33c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java @@ -69,7 +69,7 @@ private StatelessRandomNormalV2(Operation operation) { public static StatelessRandomNormalV2 create(Scope scope, Operand shape, Operand key, Operand counter, Operand alg, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomNormalV2", scope.makeOpName("StatelessRandomNormalV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomNormalV2")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(key.asOutput()); opBuilder.addInput(counter.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java index 024b387165b..f11d14cd4fb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java @@ -66,7 +66,7 @@ private StatelessRandomPoisson(Operation operation) { public static StatelessRandomPoisson create(Scope scope, Operand shape, Operand seed, Operand lam, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomPoisson", scope.makeOpName("StatelessRandomPoisson")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomPoisson")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(seed.asOutput()); opBuilder.addInput(lam.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java index 
862fd3cdd30..e6f8732083a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java @@ -69,7 +69,7 @@ private StatelessRandomUniform(Operation operation) { ) public static StatelessRandomUniform create(Scope scope, Operand shape, Operand seed, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomUniform", scope.makeOpName("StatelessRandomUniform")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomUniform")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(seed.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java index 7b3b02d9677..519adfae636 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java @@ -63,7 +63,7 @@ private StatelessRandomUniformFullInt(Operation operation) { ) public static StatelessRandomUniformFullInt create(Scope scope, Operand shape, Operand seed, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomUniformFullInt", scope.makeOpName("StatelessRandomUniformFullInt")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomUniformFullInt")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(seed.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java index b2334470ce2..d85c334d713 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java @@ -68,7 +68,7 @@ private StatelessRandomUniformFullIntV2(Operation operation) { public static StatelessRandomUniformFullIntV2 create(Scope scope, Operand shape, Operand key, Operand counter, Operand alg, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomUniformFullIntV2", scope.makeOpName("StatelessRandomUniformFullIntV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomUniformFullIntV2")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(key.asOutput()); opBuilder.addInput(counter.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java index 4233ab669e0..b7a31b2f180 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java @@ -64,7 +64,7 @@ private StatelessRandomUniformInt(Operation operation) { public static StatelessRandomUniformInt create(Scope scope, Operand shape, 
Operand seed, Operand minval, Operand maxval) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomUniformInt", scope.makeOpName("StatelessRandomUniformInt")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomUniformInt")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(seed.asOutput()); opBuilder.addInput(minval.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java index ad8440df49b..3b1086869b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java @@ -68,7 +68,7 @@ private StatelessRandomUniformIntV2(Operation operation) { public static StatelessRandomUniformIntV2 create(Scope scope, Operand shape, Operand key, Operand counter, Operand alg, Operand minval, Operand maxval) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomUniformIntV2", scope.makeOpName("StatelessRandomUniformIntV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomUniformIntV2")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(key.asOutput()); opBuilder.addInput(counter.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java index 2764bbbb81c..1f6b6339720 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java @@ -70,7 +70,7 @@ private StatelessRandomUniformV2(Operation operation) { public static StatelessRandomUniformV2 create(Scope scope, Operand shape, Operand key, Operand counter, Operand alg, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomUniformV2", scope.makeOpName("StatelessRandomUniformV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomUniformV2")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(key.asOutput()); opBuilder.addInput(counter.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java index 7e69cacd5d4..20228830150 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java @@ -70,7 +70,7 @@ private StatelessTruncatedNormal(Operation operation) { ) public static StatelessTruncatedNormal create(Scope scope, Operand shape, Operand seed, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessTruncatedNormal", scope.makeOpName("StatelessTruncatedNormal")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessTruncatedNormal")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(seed.asOutput()); opBuilder = scope.apply(opBuilder); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java index 24515a7739f..51de197be08 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java @@ -71,7 +71,7 @@ private StatelessTruncatedNormalV2(Operation operation) { public static StatelessTruncatedNormalV2 create(Scope scope, Operand shape, Operand key, Operand counter, Operand alg, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessTruncatedNormalV2", scope.makeOpName("StatelessTruncatedNormalV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessTruncatedNormalV2")); opBuilder.addInput(shape.asOutput()); opBuilder.addInput(key.asOutput()); opBuilder.addInput(counter.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java index 064c00c8eaf..bb61bb3fc28 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java @@ -68,7 +68,7 @@ private TruncatedNormal(Operation operation) { ) public static TruncatedNormal create(Scope scope, Operand shape, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TruncatedNormal", scope.makeOpName("TruncatedNormal")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TruncatedNormal")); opBuilder.addInput(shape.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/UniformCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/UniformCandidateSampler.java index acd4e26b450..4b726580c1c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/UniformCandidateSampler.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/UniformCandidateSampler.java @@ -81,7 +81,7 @@ private UniformCandidateSampler(Operation operation) { ) public static UniformCandidateSampler create(Scope scope, Operand trueClasses, Long numTrue, Long numSampled, Boolean unique, Long rangeMax, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("UniformCandidateSampler", scope.makeOpName("UniformCandidateSampler")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UniformCandidateSampler")); opBuilder.addInput(trueClasses.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_true", numTrue); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/DummySeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/DummySeedGenerator.java index 5a9a6d8b9ff..b5f8baccd92 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/DummySeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/DummySeedGenerator.java @@ -54,7 +54,7 @@ private DummySeedGenerator(Operation operation) { describeByClass = true ) public static DummySeedGenerator create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("DummySeedGenerator", scope.makeOpName("DummySeedGenerator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DummySeedGenerator")); opBuilder = scope.apply(opBuilder); return new DummySeedGenerator(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java index aba0663c0d1..20337970fdd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java @@ -67,7 +67,7 @@ private CollectiveBcastRecvV2(Operation operation) { public static CollectiveBcastRecvV2 create(Scope scope, Operand groupSize, Operand groupKey, Operand instanceKey, Operand shape, Class T, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectiveBcastRecvV2", scope.makeOpName("CollectiveBcastRecvV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CollectiveBcastRecvV2")); opBuilder.addInput(groupSize.asOutput()); opBuilder.addInput(groupKey.asOutput()); opBuilder.addInput(instanceKey.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java index 28a8a714995..137f5f2d3ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java @@ -64,7 +64,7 @@ private CollectiveBcastSendV2(Operation operation) { public static CollectiveBcastSendV2 create(Scope scope, Operand input, Operand groupSize, Operand groupKey, Operand instanceKey, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectiveBcastSendV2", scope.makeOpName("CollectiveBcastSendV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CollectiveBcastSendV2")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(groupSize.asOutput()); opBuilder.addInput(groupKey.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/DataServiceDatasetV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/DataServiceDatasetV2.java index ebd5f4220b8..96059a264e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/DataServiceDatasetV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/DataServiceDatasetV2.java @@ -75,7 +75,7 @@ public static DataServiceDatasetV2 create(Scope scope, Operand datasetId Operand jobName, Operand consumerIndex, Operand numConsumers, Operand maxOutstandingRequests, Operand iterationCounter, List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DataServiceDatasetV2", scope.makeOpName("DataServiceDatasetV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DataServiceDatasetV2")); opBuilder.addInput(datasetId.asOutput()); opBuilder.addInput(processingMode.asOutput()); opBuilder.addInput(address.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/FinalizeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/FinalizeDataset.java index 9ef62abad9c..6b37412d717 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/FinalizeDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/FinalizeDataset.java @@ -62,7 +62,7 @@ private FinalizeDataset(Operation operation) { ) public static FinalizeDataset create(Scope scope, Operand inputDataset, List> outputTypes, List outputShapes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("FinalizeDataset", scope.makeOpName("FinalizeDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FinalizeDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java index ad7ec23eb00..dbf716fd32d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java @@ -55,7 +55,7 @@ private GetOptions(Operation operation) { describeByClass = true ) public static GetOptions create(Scope scope, Operand inputDataset) { - OperationBuilder opBuilder = scope.env().opBuilder("GetOptions", scope.makeOpName("GetOptions")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GetOptions")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); return new GetOptions(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java index be8583f352c..a74c575add4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java @@ -60,7 +60,7 @@ private LoadTPUEmbeddingFrequencyEstimatorParameters(Operation operation) { public static LoadTPUEmbeddingFrequencyEstimatorParameters create(Scope scope, Operand parameters, Operand lastHitStep, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingFrequencyEstimatorParameters", scope.makeOpName("LoadTPUEmbeddingFrequencyEstimatorParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingFrequencyEstimatorParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(lastHitStep.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java index cb5dcd87eef..eca33b34407 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java @@ -62,7 +62,7 @@ private LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(Operation ope public static LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug create(Scope scope, Operand parameters, Operand lastHitStep, Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(lastHitStep.asOutput()); opBuilder.addInput(gradientAccumulators.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/OptionsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/OptionsDataset.java index a8a2bea05a3..ec3bf2560c2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/OptionsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/OptionsDataset.java @@ -63,7 +63,7 @@ private OptionsDataset(Operation operation) { public static OptionsDataset create(Scope scope, Operand inputDataset, String serializedOptions, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.env().opBuilder("OptionsDataset", scope.makeOpName("OptionsDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OptionsDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("serialized_options", serializedOptions); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/ParallelBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/ParallelBatchDataset.java index 49b2b56b1c6..23fe867b5ae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/ParallelBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/ParallelBatchDataset.java @@ -68,7 +68,7 @@ private ParallelBatchDataset(Operation operation) { public static ParallelBatchDataset create(Scope scope, Operand inputDataset, Operand batchSize, Operand numParallelCalls, Operand dropRemainder, List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ParallelBatchDataset", scope.makeOpName("ParallelBatchDataset")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ParallelBatchDataset")); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(batchSize.asOutput()); opBuilder.addInput(numParallelCalls.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java index 56a60afbde8..cef412fb0e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java @@ -63,7 +63,7 @@ private RetrieveTPUEmbeddingFrequencyEstimatorParameters(Operation operation) { ) public static RetrieveTPUEmbeddingFrequencyEstimatorParameters create(Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingFrequencyEstimatorParameters", scope.makeOpName("RetrieveTPUEmbeddingFrequencyEstimatorParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingFrequencyEstimatorParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java index dfa83fc4ee2..a2911a8b4bd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java @@ -66,7 +66,7 @@ private RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(Operation ) public static RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java index f2fae0ede92..60545e4ee6c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java @@ -54,7 +54,7 @@ private StatelessRandomGetAlg(Operation operation) { describeByClass = true ) public static StatelessRandomGetAlg create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomGetAlg", scope.makeOpName("StatelessRandomGetAlg")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomGetAlg")); opBuilder = scope.apply(opBuilder); return new StatelessRandomGetAlg(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java index 96e06befdc7..cf481a59478 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java @@ -60,7 +60,7 @@ private StatelessRandomGetKeyCounter(Operation operation) { describeByClass = true ) public static StatelessRandomGetKeyCounter create(Scope scope, Operand seed) { - OperationBuilder opBuilder = scope.env().opBuilder("StatelessRandomGetKeyCounter", scope.makeOpName("StatelessRandomGetKeyCounter")); + 
OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatelessRandomGetKeyCounter")); opBuilder.addInput(seed.asOutput()); opBuilder = scope.apply(opBuilder); return new StatelessRandomGetKeyCounter(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java index 19b042663bd..29a7dcdd9c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java @@ -57,7 +57,7 @@ private RiscAbs(Operation operation) { describeByClass = true ) public static RiscAbs create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscAbs", scope.makeOpName("RiscAbs")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscAbs")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscAbs<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java index 33a9471e18b..406b45a4711 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java @@ -61,7 +61,7 @@ private RiscAdd(Operation operation) { describeByClass = true ) public static RiscAdd create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscAdd", scope.makeOpName("RiscAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscAdd")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java index 94c493dcea1..62213143018 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java @@ -60,7 +60,7 @@ private RiscBinaryArithmetic(Operation operation) { ) public static RiscBinaryArithmetic create(Scope scope, Operand x, Operand y, String opType) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscBinaryArithmetic", scope.makeOpName("RiscBinaryArithmetic")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscBinaryArithmetic")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java index 8ef2fcd4e79..4409d5229d2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java @@ -59,7 +59,7 @@ private RiscBinaryComparison(Operation operation) { ) public static RiscBinaryComparison create(Scope scope, Operand x, Operand y, String opType) { - OperationBuilder opBuilder = 
scope.env().opBuilder("RiscBinaryComparison", scope.makeOpName("RiscBinaryComparison")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscBinaryComparison")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java index 9692477dfd4..ed3ea2df3b6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java @@ -60,7 +60,7 @@ private RiscBitcast(Operation operation) { ) public static RiscBitcast create(Scope scope, Operand x, Class DstT) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscBitcast", scope.makeOpName("RiscBitcast")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscBitcast")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("DstT", Operands.toDataType(DstT)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java index b8f1bfe7086..54f7c313839 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java @@ -60,7 +60,7 @@ private RiscBroadcast(Operation operation) { ) public static RiscBroadcast create(Scope scope, Operand input, Operand shape) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscBroadcast", scope.makeOpName("RiscBroadcast")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscBroadcast")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(shape.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java index fcdf9c65520..c998c319f04 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java @@ -60,7 +60,7 @@ private RiscCast(Operation operation) { ) public static RiscCast create(Scope scope, Operand x, Class DstT) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscCast", scope.makeOpName("RiscCast")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscCast")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("DstT", Operands.toDataType(DstT)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java index cff47bfb7aa..b42a4c7fc20 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java @@ -57,7 +57,7 @@ private RiscCeil(Operation operation) { describeByClass = true ) public static RiscCeil create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscCeil", 
scope.makeOpName("RiscCeil")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscCeil")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscCeil<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java index a862bfda954..01fd5eda433 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java @@ -57,7 +57,7 @@ private RiscCholesky(Operation operation) { describeByClass = true ) public static RiscCholesky create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscCholesky", scope.makeOpName("RiscCholesky")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscCholesky")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscCholesky<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java index c5432006fcd..60b42cbd7a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java @@ -61,7 +61,7 @@ private RiscConcat(Operation operation) { ) public static RiscConcat create(Scope scope, Iterable> values, Operand axis) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscConcat", scope.makeOpName("RiscConcat")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscConcat")); opBuilder.addInputList(Operands.asOutputs(values)); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java index 7f320f37311..327010ddf5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java @@ -63,7 +63,7 @@ private RiscConv(Operation operation) { ) public static RiscConv create(Scope scope, Operand input, Operand filter, List strides, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscConv", scope.makeOpName("RiscConv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscConv")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(filter.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java index f81bc3b4040..69ce9934490 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java @@ -57,7 +57,7 @@ private RiscCos(Operation operation) { describeByClass = true ) public static RiscCos create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscCos", scope.makeOpName("RiscCos")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscCos")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscCos<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java index 621e4213f56..abaa5fb8866 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java @@ -58,7 +58,7 @@ private RiscDiv(Operation operation) { describeByClass = true ) public static RiscDiv create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscDiv", scope.makeOpName("RiscDiv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscDiv")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java index 650d2db5157..f644c8aaf65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java @@ -60,7 +60,7 @@ private RiscDot(Operation operation) { ) public static RiscDot create(Scope scope, Operand a, Operand b, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscDot", scope.makeOpName("RiscDot")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscDot")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java index ab6b1388b52..0d1ab7791ea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java @@ -57,7 +57,7 @@ private RiscExp(Operation operation) { describeByClass = true ) public static RiscExp create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscExp", scope.makeOpName("RiscExp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscExp")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscExp<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java index 9096bcb2f33..020983224bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java @@ -57,7 +57,7 @@ private RiscFft(Operation operation) { describeByClass = true ) public static RiscFft create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscFft", scope.makeOpName("RiscFft")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscFft")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscFft<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java index ceac25a2609..8b0c26e14a3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java @@ -57,7 +57,7 @@ private RiscFloor(Operation operation) { describeByClass = true ) public static RiscFloor create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscFloor", scope.makeOpName("RiscFloor")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscFloor")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscFloor<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java index 9b6f39aa2ac..f28efc507df 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java @@ -62,7 +62,7 @@ private RiscGather(Operation operation) { ) public static RiscGather create(Scope scope, Operand params, Operand indices, Operand axis, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscGather", scope.makeOpName("RiscGather")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscGather")); opBuilder.addInput(params.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(axis.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java index ba7c79dd7e7..a6ba50a85c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java @@ -62,7 +62,7 @@ private RiscImag(Operation operation) { ) public static RiscImag create(Scope scope, Operand input, Class Tout) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscImag", scope.makeOpName("RiscImag")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscImag")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("Tout", Operands.toDataType(Tout)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java index d9f88c6006a..be1c544f909 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java @@ -55,7 +55,7 @@ private RiscIsFinite(Operation operation) { describeByClass = true ) public static RiscIsFinite create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscIsFinite", scope.makeOpName("RiscIsFinite")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscIsFinite")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscIsFinite(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java index 97fac206298..c089cf21012 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java @@ -57,7 +57,7 @@ private RiscLog(Operation operation) { describeByClass = true ) public static RiscLog create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscLog", scope.makeOpName("RiscLog")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscLog")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscLog<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java index 71ea8855546..ee63434558c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java @@ -55,7 +55,7 @@ private RiscLogicalAnd(Operation operation) { describeByClass = true ) public static RiscLogicalAnd create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = 
scope.env().opBuilder("RiscLogicalAnd", scope.makeOpName("RiscLogicalAnd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscLogicalAnd")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java index 053a64376a5..478c6cf0ca7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java @@ -54,7 +54,7 @@ private RiscLogicalNot(Operation operation) { describeByClass = true ) public static RiscLogicalNot create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscLogicalNot", scope.makeOpName("RiscLogicalNot")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscLogicalNot")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscLogicalNot(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java index e3ffe0141f2..0b59472b63c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java @@ -55,7 +55,7 @@ private RiscLogicalOr(Operation operation) { describeByClass = true ) public static RiscLogicalOr create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscLogicalOr", scope.makeOpName("RiscLogicalOr")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscLogicalOr")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java index f036460b85e..7c04002ff3c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java @@ -60,7 +60,7 @@ private RiscMax(Operation operation) { describeByClass = true ) public static RiscMax create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscMax", scope.makeOpName("RiscMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscMax")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java index 259bc140e93..a2ad19c04bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java @@ -58,7 +58,7 @@ private RiscMin(Operation operation) { describeByClass = true ) public static RiscMin create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscMin", 
scope.makeOpName("RiscMin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscMin")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java index 16518324a8d..d72af9fceec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java @@ -58,7 +58,7 @@ private RiscMul(Operation operation) { describeByClass = true ) public static RiscMul create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscMul", scope.makeOpName("RiscMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscMul")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java index 8b2833592c1..4fd5d508102 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java @@ -57,7 +57,7 @@ private RiscNeg(Operation operation) { describeByClass = true ) public static RiscNeg create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscNeg", scope.makeOpName("RiscNeg")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscNeg")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscNeg<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java index 34dfbd72afe..ddf537f8788 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java @@ -60,7 +60,7 @@ private RiscPad(Operation operation) { ) public static RiscPad create(Scope scope, Operand input, Operand paddings, Operand constantValues) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscPad", scope.makeOpName("RiscPad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscPad")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(paddings.asOutput()); opBuilder.addInput(constantValues.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java index 83762058d15..2f9a14c0ed2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java @@ -63,7 +63,7 @@ private RiscPool(Operation operation) { ) public static RiscPool create(Scope scope, Operand value, List ksize, List strides, String poolingType, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscPool", scope.makeOpName("RiscPool")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscPool")); opBuilder.addInput(value.asOutput()); opBuilder = scope.apply(opBuilder); long[] ksizeArray = new long[ksize.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java index 657dc0dd80a..7b7a991c662 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java @@ -58,7 +58,7 @@ private RiscPow(Operation operation) { describeByClass = true ) public static RiscPow create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscPow", scope.makeOpName("RiscPow")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscPow")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java index af8f26b7802..4dfdc4d9f8e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java @@ -57,7 +57,7 @@ private RiscRandomUniform(Operation operation) { ) public static RiscRandomUniform create(Scope scope, Operand shape, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscRandomUniform", scope.makeOpName("RiscRandomUniform")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscRandomUniform")); opBuilder.addInput(shape.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java index 962dde9f7c2..0e15a35ff5d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java @@ -62,7 +62,7 @@ private RiscReal(Operation operation) { ) public static RiscReal create(Scope scope, Operand input, Class Tout) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscReal", scope.makeOpName("RiscReal")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscReal")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("Tout", Operands.toDataType(Tout)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java index f8a66eff454..813bd04c4bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java @@ -60,7 +60,7 @@ private RiscReduce(Operation operation) { ) public static RiscReduce create(Scope scope, Operand tensor, Operand axis, String reduceType) { - OperationBuilder opBuilder = 
scope.env().opBuilder("RiscReduce", scope.makeOpName("RiscReduce")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscReduce")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java index 3cd94a30933..8efdc93dd16 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java @@ -58,7 +58,7 @@ private RiscRem(Operation operation) { describeByClass = true ) public static RiscRem create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscRem", scope.makeOpName("RiscRem")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscRem")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java index 8e0690bf7c1..e102911a274 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java @@ -59,7 +59,7 @@ private RiscReshape(Operation operation) { ) public static RiscReshape create(Scope scope, Operand tensor, Operand shape) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscReshape", scope.makeOpName("RiscReshape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscReshape")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(shape.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java index faacbbc2e0d..1717c78dea0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java @@ -59,7 +59,7 @@ private RiscReverse(Operation operation) { ) public static RiscReverse create(Scope scope, Operand tensor, Operand axis) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscReverse", scope.makeOpName("RiscReverse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscReverse")); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java index 6b412dc25d4..caa17adc322 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java @@ -61,7 +61,7 @@ private RiscScatter(Operation operation) { ) public static RiscScatter create(Scope scope, Operand indices, Operand updates, Operand shape) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscScatter", scope.makeOpName("RiscScatter")); + 
OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscScatter")); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); opBuilder.addInput(shape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java index 0075fbabc2e..6e9fb8238eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java @@ -61,7 +61,7 @@ private RiscShape(Operation operation) { ) public static RiscShape create(Scope scope, Operand input, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscShape", scope.makeOpName("RiscShape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscShape")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("out_type", Operands.toDataType(outType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java index c4fbabe08ff..4cd3a6031b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java @@ -57,7 +57,7 @@ private RiscSign(Operation operation) { describeByClass = true ) public static RiscSign create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscSign", scope.makeOpName("RiscSign")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscSign")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); return new RiscSign<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java index cbeb1e65a9d..7d6fdea814c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java @@ -61,7 +61,7 @@ private RiscSlice(Operation operation) { ) public static RiscSlice create(Scope scope, Operand input, Operand begin, Operand sizeOutput) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscSlice", scope.makeOpName("RiscSlice")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscSlice")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(begin.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java index 54ed2a79e69..3739958cf2c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java @@ -60,7 +60,7 @@ private RiscSort(Operation operation) { ) public static RiscSort create(Scope scope, Operand input, Operand axis, String direction) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscSort", scope.makeOpName("RiscSort")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, 
scope.makeOpName("RiscSort")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(axis.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java index 1e14f2ea36d..4f40fdb43e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java @@ -61,7 +61,7 @@ private RiscSqueeze(Operation operation) { ) public static RiscSqueeze create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscSqueeze", scope.makeOpName("RiscSqueeze")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscSqueeze")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java index 4f1a9515914..d83078cf01d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java @@ -58,7 +58,7 @@ private RiscSub(Operation operation) { describeByClass = true ) public static RiscSub create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscSub", scope.makeOpName("RiscSub")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscSub")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java index 27a0a2879fc..a993d61e780 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java @@ -60,7 +60,7 @@ private RiscTranspose(Operation operation) { ) public static RiscTranspose create(Scope scope, Operand x, Operand perm) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscTranspose", scope.makeOpName("RiscTranspose")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscTranspose")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(perm.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java index 000b5c22b66..839392b2112 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java @@ -60,7 +60,7 @@ private RiscTriangularSolve(Operation operation) { ) public static RiscTriangularSolve create(Scope scope, Operand matrix, Operand rhs, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscTriangularSolve", scope.makeOpName("RiscTriangularSolve")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscTriangularSolve")); opBuilder.addInput(matrix.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java index ef530d473a4..2931d94605a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java @@ -58,7 +58,7 @@ private RiscUnary(Operation operation) { describeByClass = true ) public static RiscUnary create(Scope scope, Operand x, String opType) { - OperationBuilder opBuilder = scope.env().opBuilder("RiscUnary", scope.makeOpName("RiscUnary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RiscUnary")); opBuilder.addInput(x.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("op_type", opType); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft.java index 4aa82a29d13..15b4c6d4c15 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft.java @@ -59,7 +59,7 @@ private BatchFft(Operation operation) { describeByClass = true ) public static BatchFft create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchFFT", scope.makeOpName("BatchFft")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchFft")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new BatchFft(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft2d.java index d35c420a4fc..86eeb226d49 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft2d.java @@ -59,7 +59,7 @@ private BatchFft2d(Operation operation) { describeByClass = true ) public static BatchFft2d create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchFFT2D", scope.makeOpName("BatchFft2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchFft2d")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new BatchFft2d(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft3d.java index cec014617f6..3282b91163e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft3d.java @@ -59,7 +59,7 @@ private BatchFft3d(Operation operation) { describeByClass = true ) public static BatchFft3d create(Scope scope, Operand input) { - OperationBuilder 
opBuilder = scope.env().opBuilder("BatchFFT3D", scope.makeOpName("BatchFft3d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchFft3d")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new BatchFft3d(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft.java index ddd64123e1f..b14da1814ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft.java @@ -59,7 +59,7 @@ private BatchIfft(Operation operation) { describeByClass = true ) public static BatchIfft create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchIFFT", scope.makeOpName("BatchIfft")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchIfft")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new BatchIfft(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft2d.java index 162d4111ec7..5b660ab4717 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft2d.java @@ -59,7 +59,7 @@ private BatchIfft2d(Operation operation) { describeByClass = true ) public static BatchIfft2d create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchIFFT2D", scope.makeOpName("BatchIfft2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchIfft2d")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new BatchIfft2d(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft3d.java index a3b30fcfa99..de17e51457d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft3d.java @@ -59,7 +59,7 @@ private BatchIfft3d(Operation operation) { describeByClass = true ) public static BatchIfft3d create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchIFFT3D", scope.makeOpName("BatchIfft3d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchIfft3d")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new BatchIfft3d(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java index 70f3d26624f..70a615edc54 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java @@ -63,7 +63,7 @@ private Fft(Operation operation) { describeByClass = true ) public static Fft create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("FFT", 
scope.makeOpName("Fft")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Fft")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Fft<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java index 11269998d38..2a9e532fed5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java @@ -63,7 +63,7 @@ private Fft2d(Operation operation) { describeByClass = true ) public static Fft2d create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("FFT2D", scope.makeOpName("Fft2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Fft2d")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Fft2d<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java index 034937793d3..6dffbd658d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java @@ -63,7 +63,7 @@ private Fft3d(Operation operation) { describeByClass = true ) public static Fft3d create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("FFT3D", scope.makeOpName("Fft3d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Fft3d")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Fft3d<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java index aee45785d2b..fe2a1572a49 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java @@ -63,7 +63,7 @@ private Ifft(Operation operation) { describeByClass = true ) public static Ifft create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("IFFT", scope.makeOpName("Ifft")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Ifft")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Ifft<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java index 3e16e93f3b4..34a73ec59f8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java @@ -63,7 +63,7 @@ private Ifft2d(Operation operation) { describeByClass = true ) public static Ifft2d create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("IFFT2D", scope.makeOpName("Ifft2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Ifft2d")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new 
Ifft2d<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java index 518b8123129..b126e65bb71 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java @@ -63,7 +63,7 @@ private Ifft3d(Operation operation) { describeByClass = true ) public static Ifft3d create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("IFFT3D", scope.makeOpName("Ifft3d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Ifft3d")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Ifft3d<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java index d60e3507876..1246e75df6f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java @@ -79,7 +79,7 @@ private Irfft(Operation operation) { ) public static Irfft create(Scope scope, Operand input, Operand fftLength, Class Treal) { - OperationBuilder opBuilder = scope.env().opBuilder("IRFFT", scope.makeOpName("Irfft")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Irfft")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(fftLength.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java index 944cb75feda..bd7239a5398 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java @@ -80,7 +80,7 @@ private Irfft2d(Operation operation) { ) public static Irfft2d create(Scope scope, Operand input, Operand fftLength, Class Treal) { - OperationBuilder opBuilder = scope.env().opBuilder("IRFFT2D", scope.makeOpName("Irfft2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Irfft2d")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(fftLength.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java index 1e09ac9f401..9dde73296c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java @@ -80,7 +80,7 @@ private Irfft3d(Operation operation) { ) public static Irfft3d create(Scope scope, Operand input, Operand fftLength, Class Treal) { - OperationBuilder opBuilder = scope.env().opBuilder("IRFFT3D", scope.makeOpName("Irfft3d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Irfft3d")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(fftLength.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java index 36651f98d4a..08284d88051 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java @@ -75,7 +75,7 @@ private Rfft(Operation operation) { ) public static Rfft create(Scope scope, Operand input, Operand fftLength, Class Tcomplex) { - OperationBuilder opBuilder = scope.env().opBuilder("RFFT", scope.makeOpName("Rfft")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Rfft")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(fftLength.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java index 04fb89f3ec8..e48596b4329 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java @@ -76,7 +76,7 @@ private Rfft2d(Operation operation) { ) public static Rfft2d create(Scope scope, Operand input, Operand fftLength, Class Tcomplex) { - OperationBuilder opBuilder = scope.env().opBuilder("RFFT2D", scope.makeOpName("Rfft2d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Rfft2d")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(fftLength.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java index 7786b8003ad..028fdf53c7b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java @@ -76,7 +76,7 @@ private Rfft3d(Operation operation) { ) public static Rfft3d create(Scope scope, Operand input, Operand fftLength, Class Tcomplex) { - OperationBuilder opBuilder = scope.env().opBuilder("RFFT3D", scope.makeOpName("Rfft3d")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Rfft3d")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(fftLength.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java index 69cdfdc804f..3763cc869ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java @@ -83,7 +83,7 @@ private AddManySparseToTensorsMap(Operation operation) { ) public static AddManySparseToTensorsMap create(Scope scope, Operand sparseIndices, Operand sparseValues, Operand sparseShape, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("AddManySparseToTensorsMap", scope.makeOpName("AddManySparseToTensorsMap")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AddManySparseToTensorsMap")); opBuilder.addInput(sparseIndices.asOutput()); opBuilder.addInput(sparseValues.asOutput()); opBuilder.addInput(sparseShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java index f8e7db00640..3a3e7a36687 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java @@ -75,7 +75,7 @@ private AddSparseToTensorsMap(Operation operation) { ) public static AddSparseToTensorsMap create(Scope scope, Operand sparseIndices, Operand sparseValues, Operand sparseShape, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("AddSparseToTensorsMap", scope.makeOpName("AddSparseToTensorsMap")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AddSparseToTensorsMap")); opBuilder.addInput(sparseIndices.asOutput()); opBuilder.addInput(sparseValues.asOutput()); opBuilder.addInput(sparseShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java index b388165cf92..50c007410a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java @@ -71,7 +71,7 @@ private DenseCountSparseOutput(Operation operation) { public static DenseCountSparseOutput create(Scope scope, Operand values, Operand weights, Boolean binaryOutput, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DenseCountSparseOutput", scope.makeOpName("DenseCountSparseOutput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DenseCountSparseOutput")); opBuilder.addInput(values.asOutput()); opBuilder.addInput(weights.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java index 56c3e4f1fb4..3bdf87ac4cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java @@ -80,7 +80,7 @@ private DenseToDenseSetOperation(Operation operation) { ) public static DenseToDenseSetOperation create(Scope scope, Operand set1, Operand set2, String setOperation, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("DenseToDenseSetOperation", scope.makeOpName("DenseToDenseSetOperation")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DenseToDenseSetOperation")); opBuilder.addInput(set1.asOutput()); opBuilder.addInput(set2.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java index a960a1b9217..e00e1ca536a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java @@ -92,7 +92,7 @@ private DenseToSparseSetOperation(Operation operation) { public static DenseToSparseSetOperation create(Scope scope, Operand set1, Operand set2Indices, Operand set2Values, Operand set2Shape, String setOperation, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("DenseToSparseSetOperation", scope.makeOpName("DenseToSparseSetOperation")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DenseToSparseSetOperation")); opBuilder.addInput(set1.asOutput()); opBuilder.addInput(set2Indices.asOutput()); opBuilder.addInput(set2Values.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java index 710cee3d76a..fb60e31778d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java @@ -111,7 +111,7 @@ private DeserializeSparse(Operation operation) { ) public static DeserializeSparse create(Scope scope, Operand serializedSparse, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("DeserializeSparse", scope.makeOpName("DeserializeSparse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DeserializeSparse")); opBuilder.addInput(serializedSparse.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorApplyGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorApplyGradient.java index 993fcbf4bcf..d224c39509f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorApplyGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorApplyGradient.java @@ -69,7 +69,7 @@ public static SparseAccumulatorApplyGradient create(Scope scope, Operand localStep, Operand gradientIndices, Operand gradientValues, Operand gradientShape, Boolean hasKnownShape) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseAccumulatorApplyGradient", scope.makeOpName("SparseAccumulatorApplyGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseAccumulatorApplyGradient")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(localStep.asOutput()); opBuilder.addInput(gradientIndices.asOutput()); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java index ffa4e52430f..896d1ebd74a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java @@ -81,7 +81,7 @@ private SparseAccumulatorTakeGradient(Operation operation) { ) public static SparseAccumulatorTakeGradient create(Scope scope, Operand handle, Operand numRequired, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseAccumulatorTakeGradient", scope.makeOpName("SparseAccumulatorTakeGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseAccumulatorTakeGradient")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(numRequired.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java index d7f3a908a6f..fca9b69819f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java @@ -89,7 +89,7 @@ private SparseAdd(Operation operation) { public static SparseAdd create(Scope scope, Operand aIndices, Operand aValues, Operand aShape, Operand bIndices, Operand bValues, Operand bShape, Operand thresh) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseAdd", scope.makeOpName("SparseAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseAdd")); opBuilder.addInput(aIndices.asOutput()); opBuilder.addInput(aValues.asOutput()); opBuilder.addInput(aShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java index 33c0d798313..248cecd3126 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java @@ -75,7 +75,7 @@ private SparseAddGrad(Operation operation) { ) public static SparseAddGrad create(Scope scope, Operand backpropValGrad, Operand aIndices, Operand bIndices, Operand sumIndices) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseAddGrad", scope.makeOpName("SparseAddGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseAddGrad")); opBuilder.addInput(backpropValGrad.asOutput()); opBuilder.addInput(aIndices.asOutput()); opBuilder.addInput(bIndices.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java index f9eb554f0fb..c30dacda188 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java @@ -78,7 +78,7 @@ private SparseBincount(Operation operation) { public static SparseBincount create(Scope scope, Operand indices, 
Operand values, Operand denseShape, Operand sizeOutput, Operand weights, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseBincount", scope.makeOpName("SparseBincount")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseBincount")); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder.addInput(denseShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java index d9dd3a19d20..152e475368f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java @@ -112,7 +112,7 @@ private SparseConcat(Operation operation) { public static SparseConcat create(Scope scope, Iterable> indices, Iterable> values, Iterable> shapes, Long concatDim) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseConcat", scope.makeOpName("SparseConcat")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseConcat")); opBuilder.addInputList(Operands.asOutputs(indices)); opBuilder.addInputList(Operands.asOutputs(values)); opBuilder.addInputList(Operands.asOutputs(shapes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConditionalAccumulator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConditionalAccumulator.java index 908b8a67212..2d2b5509fbc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConditionalAccumulator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConditionalAccumulator.java @@ -71,7 +71,7 @@ private SparseConditionalAccumulator(Operation operation) { ) public static SparseConditionalAccumulator create(Scope scope, Class dtype, Shape shape, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseConditionalAccumulator", scope.makeOpName("SparseConditionalAccumulator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseConditionalAccumulator")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java index 945f357b277..509f91943cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java @@ -73,7 +73,7 @@ private SparseCountSparseOutput(Operation operation) { public static SparseCountSparseOutput create(Scope scope, Operand indices, Operand values, Operand denseShape, Operand weights, Boolean binaryOutput, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseCountSparseOutput", scope.makeOpName("SparseCountSparseOutput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseCountSparseOutput")); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder.addInput(denseShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCross.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCross.java index 24c2cb32e9c..8e48ce89324 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCross.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCross.java @@ -108,7 +108,7 @@ private SparseCross(Operation operation) { public static SparseCross create(Scope scope, Iterable> indices, Iterable> values, Iterable> shapes, Iterable> denseInputs, Operand sep) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseCrossV2", scope.makeOpName("SparseCross")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseCross")); opBuilder.addInputList(Operands.asOutputs(indices)); opBuilder.addInputList(Operands.asOutputs(values)); opBuilder.addInputList(Operands.asOutputs(shapes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCrossHashed.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCrossHashed.java index cf1f10e8a77..39373955fde 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCrossHashed.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCrossHashed.java @@ -112,7 +112,7 @@ public static SparseCrossHashed create(Scope scope, Iterable> in Iterable> values, Iterable> shapes, Iterable> denseInputs, Operand numBuckets, Operand strongHash, Operand salt) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseCrossHashed", scope.makeOpName("SparseCrossHashed")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseCrossHashed")); opBuilder.addInputList(Operands.asOutputs(indices)); opBuilder.addInputList(Operands.asOutputs(values)); opBuilder.addInputList(Operands.asOutputs(shapes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java index 9056e35cc1d..5b90d60145f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java @@ -74,7 +74,7 @@ private SparseDenseCwiseAdd(Operation operation) { ) public static SparseDenseCwiseAdd create(Scope scope, Operand spIndices, Operand spValues, Operand spShape, Operand dense) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseDenseCwiseAdd", scope.makeOpName("SparseDenseCwiseAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseDenseCwiseAdd")); opBuilder.addInput(spIndices.asOutput()); opBuilder.addInput(spValues.asOutput()); opBuilder.addInput(spShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java index 10e57e9d2ef..26276a5729f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java @@ -69,7 +69,7 @@ private SparseDenseCwiseDiv(Operation operation) { ) public static SparseDenseCwiseDiv create(Scope scope, Operand spIndices, Operand spValues, Operand spShape, Operand dense) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseDenseCwiseDiv", scope.makeOpName("SparseDenseCwiseDiv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseDenseCwiseDiv")); opBuilder.addInput(spIndices.asOutput()); opBuilder.addInput(spValues.asOutput()); opBuilder.addInput(spShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java index 87fe9de55e0..bdd20f9da24 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java @@ -72,7 +72,7 @@ private SparseDenseCwiseMul(Operation operation) { ) public static SparseDenseCwiseMul create(Scope scope, Operand spIndices, Operand spValues, Operand spShape, Operand dense) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseDenseCwiseMul", scope.makeOpName("SparseDenseCwiseMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseDenseCwiseMul")); opBuilder.addInput(spIndices.asOutput()); opBuilder.addInput(spValues.asOutput()); opBuilder.addInput(spShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java index b175002e7e0..e63b03b97ea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java @@ -113,7 +113,7 @@ private SparseFillEmptyRows(Operation operation) { public static SparseFillEmptyRows create(Scope scope, Operand indices, Operand values, Operand denseShape, Operand defaultValue) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseFillEmptyRows", scope.makeOpName("SparseFillEmptyRows")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseFillEmptyRows")); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder.addInput(denseShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java index 4e4056ad334..3733bf3eeab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java @@ -74,7 +74,7 @@ private SparseFillEmptyRowsGrad(Operation operation) { ) public static SparseFillEmptyRowsGrad create(Scope scope, Operand reverseIndexMap, Operand gradValues) 
{ - OperationBuilder opBuilder = scope.env().opBuilder("SparseFillEmptyRowsGrad", scope.makeOpName("SparseFillEmptyRowsGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseFillEmptyRowsGrad")); opBuilder.addInput(reverseIndexMap.asOutput()); opBuilder.addInput(gradValues.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseMatMul.java index 143221bf47d..a7b3ea3aa7f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseMatMul.java @@ -70,7 +70,7 @@ private SparseMatMul(Operation operation) { ) public static SparseMatMul create(Scope scope, Operand a, Operand b, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseMatMul", scope.makeOpName("SparseMatMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseMatMul")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java index e903970abdf..e76ad7af8e2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java @@ -80,7 +80,7 @@ private SparseReduceMax(Operation operation) { public static SparseReduceMax create(Scope scope, Operand inputIndices, Operand inputValues, Operand inputShape, Operand reductionAxes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseReduceMax", scope.makeOpName("SparseReduceMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseReduceMax")); opBuilder.addInput(inputIndices.asOutput()); opBuilder.addInput(inputValues.asOutput()); opBuilder.addInput(inputShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java index 7861f41c263..85626a38039 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java @@ -86,7 +86,7 @@ private SparseReduceMaxSparse(Operation operation) { public static SparseReduceMaxSparse create(Scope scope, Operand inputIndices, Operand inputValues, Operand inputShape, Operand reductionAxes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseReduceMaxSparse", scope.makeOpName("SparseReduceMaxSparse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseReduceMaxSparse")); opBuilder.addInput(inputIndices.asOutput()); opBuilder.addInput(inputValues.asOutput()); opBuilder.addInput(inputShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java index 60edd502866..a047dc998ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java @@ -80,7 +80,7 @@ private SparseReduceSum(Operation operation) { public static SparseReduceSum create(Scope scope, Operand inputIndices, Operand inputValues, Operand inputShape, Operand reductionAxes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseReduceSum", scope.makeOpName("SparseReduceSum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseReduceSum")); opBuilder.addInput(inputIndices.asOutput()); opBuilder.addInput(inputValues.asOutput()); opBuilder.addInput(inputShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java index 5aa1b160680..ebc2f1889fc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java @@ -86,7 +86,7 @@ private SparseReduceSumSparse(Operation operation) { public static SparseReduceSumSparse create(Scope scope, Operand inputIndices, Operand inputValues, Operand inputShape, Operand reductionAxes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseReduceSumSparse", scope.makeOpName("SparseReduceSumSparse")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseReduceSumSparse")); opBuilder.addInput(inputIndices.asOutput()); opBuilder.addInput(inputValues.asOutput()); opBuilder.addInput(inputShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java index bb63b87263d..dba473df550 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java @@ -75,7 +75,7 @@ private SparseReorder(Operation operation) { ) public static SparseReorder create(Scope scope, Operand inputIndices, Operand inputValues, Operand inputShape) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseReorder", scope.makeOpName("SparseReorder")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseReorder")); opBuilder.addInput(inputIndices.asOutput()); opBuilder.addInput(inputValues.asOutput()); opBuilder.addInput(inputShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReshape.java index 06e3b505ae5..1ac246fd9e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReshape.java @@ -77,7 +77,7 @@ private SparseReshape(Operation operation) { ) public static SparseReshape create(Scope scope, Operand inputIndices, Operand inputShape, Operand newShape) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseReshape", scope.makeOpName("SparseReshape")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseReshape")); opBuilder.addInput(inputIndices.asOutput()); opBuilder.addInput(inputShape.asOutput()); opBuilder.addInput(newShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java index 6a47dd27947..85b840b16e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java @@ -67,7 +67,7 @@ private SparseSegmentMean(Operation operation) { ) public static SparseSegmentMean create(Scope scope, Operand data, Operand indices, Operand segmentIds) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSegmentMean", scope.makeOpName("SparseSegmentMean")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSegmentMean")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(segmentIds.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java index f6b1f87a832..d071f487d35 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java @@ -69,7 +69,7 @@ private SparseSegmentMeanGrad(Operation operation) { public static SparseSegmentMeanGrad create(Scope scope, Operand grad, Operand indices, Operand segmentIds, Operand outputDim0) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSegmentMeanGrad", scope.makeOpName("SparseSegmentMeanGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSegmentMeanGrad")); opBuilder.addInput(grad.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(segmentIds.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java index b39c32e74f3..154c324b5bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java @@ -71,7 +71,7 @@ private SparseSegmentMeanWithNumSegments(Operation operation) { public static SparseSegmentMeanWithNumSegments create(Scope scope, Operand data, Operand indices, Operand segmentIds, Operand numSegments) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSegmentMeanWithNumSegments", scope.makeOpName("SparseSegmentMeanWithNumSegments")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSegmentMeanWithNumSegments")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(segmentIds.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java index ed7eae4bcce..0e984d8faa7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java @@ -66,7 +66,7 @@ private SparseSegmentSqrtN(Operation operation) { ) public static SparseSegmentSqrtN create(Scope scope, Operand data, Operand indices, Operand segmentIds) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSegmentSqrtN", scope.makeOpName("SparseSegmentSqrtN")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSegmentSqrtN")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(segmentIds.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java index 7322722e355..db99f27f376 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java @@ -69,7 +69,7 @@ private SparseSegmentSqrtNGrad(Operation operation) { public static SparseSegmentSqrtNGrad create(Scope scope, Operand grad, Operand indices, Operand segmentIds, Operand outputDim0) { - OperationBuilder opBuilder = 
scope.env().opBuilder("SparseSegmentSqrtNGrad", scope.makeOpName("SparseSegmentSqrtNGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSegmentSqrtNGrad")); opBuilder.addInput(grad.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(segmentIds.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java index f3d8174ea8b..2f936493b98 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java @@ -72,7 +72,7 @@ private SparseSegmentSqrtNWithNumSegments(Operation operation) { public static SparseSegmentSqrtNWithNumSegments create(Scope scope, Operand data, Operand indices, Operand segmentIds, Operand numSegments) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSegmentSqrtNWithNumSegments", scope.makeOpName("SparseSegmentSqrtNWithNumSegments")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSegmentSqrtNWithNumSegments")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(segmentIds.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java index 80a224d44b1..2df68c339df 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java @@ -90,7 +90,7 @@ private SparseSegmentSum(Operation operation) { ) public static SparseSegmentSum create(Scope scope, Operand data, Operand indices, Operand segmentIds) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSegmentSum", scope.makeOpName("SparseSegmentSum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSegmentSum")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(segmentIds.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java index 5a704742473..8cc379497d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java @@ -90,7 +90,7 @@ private SparseSegmentSumWithNumSegments(Operation operation) { public static SparseSegmentSumWithNumSegments create(Scope scope, Operand data, Operand indices, Operand segmentIds, Operand numSegments) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSegmentSumWithNumSegments", scope.makeOpName("SparseSegmentSumWithNumSegments")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSegmentSumWithNumSegments")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(segmentIds.asOutput()); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java index 0c4d117518b..6e026b471fc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java @@ -91,7 +91,7 @@ private SparseSlice(Operation operation) { ) public static SparseSlice create(Scope scope, Operand indices, Operand values, Operand shape, Operand start, Operand sizeOutput) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSlice", scope.makeOpName("SparseSlice")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSlice")); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder.addInput(shape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java index f66672e967a..06fae6bca46 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java @@ -70,7 +70,7 @@ private SparseSliceGrad(Operation operation) { ) public static SparseSliceGrad create(Scope scope, Operand backpropValGrad, Operand inputIndices, Operand inputStart, Operand outputIndices) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSliceGrad", scope.makeOpName("SparseSliceGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSliceGrad")); opBuilder.addInput(backpropValGrad.asOutput()); opBuilder.addInput(inputIndices.asOutput()); opBuilder.addInput(inputStart.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java index 8278006e830..75fc6fa5b76 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java @@ -78,7 +78,7 @@ private SparseSoftmax(Operation operation) { ) public static SparseSoftmax create(Scope scope, Operand spIndices, Operand spValues, Operand spShape) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSoftmax", scope.makeOpName("SparseSoftmax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSoftmax")); opBuilder.addInput(spIndices.asOutput()); opBuilder.addInput(spValues.asOutput()); opBuilder.addInput(spShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java index ec699a3a53c..8b26b4d31c3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java @@ -74,7 +74,7 @@ private SparseSparseMaximum(Operation operation) { public static SparseSparseMaximum create(Scope scope, Operand aIndices, Operand aValues, Operand aShape, Operand bIndices, Operand bValues, Operand bShape) { - 
OperationBuilder opBuilder = scope.env().opBuilder("SparseSparseMaximum", scope.makeOpName("SparseSparseMaximum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSparseMaximum")); opBuilder.addInput(aIndices.asOutput()); opBuilder.addInput(aValues.asOutput()); opBuilder.addInput(aShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java index 8483cbae9c4..d4b4102b8b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java @@ -74,7 +74,7 @@ private SparseSparseMinimum(Operation operation) { public static SparseSparseMinimum create(Scope scope, Operand aIndices, Operand aValues, Operand aShape, Operand bIndices, Operand bValues, Operand bShape) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSparseMinimum", scope.makeOpName("SparseSparseMinimum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSparseMinimum")); opBuilder.addInput(aIndices.asOutput()); opBuilder.addInput(aValues.asOutput()); opBuilder.addInput(aShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java index bc7eccb7dbf..35da11942be 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java @@ -103,7 +103,7 @@ private SparseSplit(Operation operation) { ) public static SparseSplit create(Scope scope, Operand splitDim, Operand indices, Operand values, Operand shape, Long numSplit) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSplit", scope.makeOpName("SparseSplit")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSplit")); opBuilder.addInput(splitDim.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(values.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java index 3131109d49f..81261298396 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java @@ -68,7 +68,7 @@ private SparseTensorDenseAdd(Operation operation) { ) public static SparseTensorDenseAdd create(Scope scope, Operand aIndices, Operand aValues, Operand aShape, Operand b) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseTensorDenseAdd", scope.makeOpName("SparseTensorDenseAdd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseTensorDenseAdd")); opBuilder.addInput(aIndices.asOutput()); opBuilder.addInput(aValues.asOutput()); opBuilder.addInput(aShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java index 
e11260f527e..4c89fcc00d9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java @@ -77,7 +77,7 @@ private SparseTensorDenseMatMul(Operation operation) { public static SparseTensorDenseMatMul create(Scope scope, Operand aIndices, Operand aValues, Operand aShape, Operand b, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseTensorDenseMatMul", scope.makeOpName("SparseTensorDenseMatMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseTensorDenseMatMul")); opBuilder.addInput(aIndices.asOutput()); opBuilder.addInput(aValues.asOutput()); opBuilder.addInput(aShape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java index 559fe3b7819..6d24b010ace 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java @@ -88,7 +88,7 @@ private SparseToDense(Operation operation) { public static SparseToDense create(Scope scope, Operand sparseIndices, Operand outputShape, Operand sparseValues, Operand defaultValue, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseToDense", scope.makeOpName("SparseToDense")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseToDense")); opBuilder.addInput(sparseIndices.asOutput()); opBuilder.addInput(outputShape.asOutput()); opBuilder.addInput(sparseValues.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java index 8e1ee3ffd54..ac968d5dce7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java @@ -104,7 +104,7 @@ public static SparseToSparseSetOperation create(Scope scope Operand set1Indices, Operand set1Values, Operand set1Shape, Operand set2Indices, Operand set2Values, Operand set2Shape, String setOperation, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseToSparseSetOperation", scope.makeOpName("SparseToSparseSetOperation")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseToSparseSetOperation")); opBuilder.addInput(set1Indices.asOutput()); opBuilder.addInput(set1Values.asOutput()); opBuilder.addInput(set1Shape.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java index 92a51d8e3c3..bc913bd875d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java @@ -114,7 +114,7 @@ private TakeManySparseFromTensorsMap(Operation operation) { ) public static TakeManySparseFromTensorsMap create(Scope scope, Operand sparseHandles, Class dtype, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TakeManySparseFromTensorsMap", scope.makeOpName("TakeManySparseFromTensorsMap")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TakeManySparseFromTensorsMap")); opBuilder.addInput(sparseHandles.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Join.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Join.java index 8e0f658be2a..a654b7428b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Join.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Join.java @@ -73,7 +73,7 @@ private Join(Operation operation) { describeByClass = true ) public static Join create(Scope scope, Iterable> inputs, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("StringJoin", scope.makeOpName("Join")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Join")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java index aca6e8ed033..e3e322c9a22 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java @@ -68,7 +68,7 @@ private Lower(Operation operation) { describeByClass = true ) public static Lower create(Scope scope, Operand input, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("StringLower", scope.makeOpName("Lower")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Lower")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java index f468b2937bc..b7d16e40739 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java @@ -85,7 +85,7 @@ private ReduceJoin(Operation operation) { ) public static ReduceJoin create(Scope scope, Operand inputs, Operand reductionIndices, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ReduceJoin", scope.makeOpName("ReduceJoin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReduceJoin")); opBuilder.addInput(inputs.asOutput()); opBuilder.addInput(reductionIndices.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexFullMatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexFullMatch.java index 91f0c94c7cd..e12d78a14fe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexFullMatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexFullMatch.java @@ -77,7 +77,7 @@ private RegexFullMatch(Operation operation) { ) public static RegexFullMatch create(Scope scope, Operand input, Operand pattern) { - OperationBuilder opBuilder = scope.env().opBuilder("RegexFullMatch", scope.makeOpName("RegexFullMatch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RegexFullMatch")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(pattern.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java index c87bdc150fc..f3c822e6665 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java @@ -65,7 +65,7 @@ private RegexReplace(Operation operation) { ) public static RegexReplace create(Scope scope, Operand input, Operand pattern, Operand rewrite, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RegexReplace", scope.makeOpName("RegexReplace")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RegexReplace")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(pattern.asOutput()); opBuilder.addInput(rewrite.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java index e54ef367f2c..02380fe50d7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java @@ -61,7 +61,7 @@ private StaticRegexFullMatch(Operation operation) { describeByClass = true ) public static StaticRegexFullMatch create(Scope scope, Operand input, String pattern) { - OperationBuilder opBuilder = scope.env().opBuilder("StaticRegexFullMatch", scope.makeOpName("StaticRegexFullMatch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StaticRegexFullMatch")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("pattern", pattern); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java index 230d9da7e8e..30d9eea653f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java @@ -59,7 +59,7 @@ private StaticRegexReplace(Operation operation) { ) public static StaticRegexReplace create(Scope scope, Operand input, String pattern, String rewrite, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("StaticRegexReplace", scope.makeOpName("StaticRegexReplace")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StaticRegexReplace")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("pattern", pattern); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringFormat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringFormat.java index 14df380f308..22de96a3b4f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringFormat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringFormat.java @@ -61,7 +61,7 @@ private StringFormat(Operation operation) { describeByClass = true ) public static StringFormat create(Scope scope, Iterable> inputs, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("StringFormat", scope.makeOpName("StringFormat")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StringFormat")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java index 39f5a62c604..5f2984ffcd4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java @@ -72,7 +72,7 @@ private StringLength(Operation operation) { describeByClass = true ) public static StringLength create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("StringLength", scope.makeOpName("StringLength")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StringLength")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java index ee33d1cd20f..5c8b8d5142d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java @@ -84,7 +84,7 @@ private StringNGrams(Operation operation) { public static StringNGrams create(Scope scope, Operand data, Operand dataSplits, String separator, List ngramWidths, String leftPad, String rightPad, Long padWidth, Boolean preserveShortSequences) { - OperationBuilder opBuilder = scope.env().opBuilder("StringNGrams", scope.makeOpName("StringNGrams")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StringNGrams")); opBuilder.addInput(data.asOutput()); opBuilder.addInput(dataSplits.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringSplit.java index 1edadbaa03d..cf3975f9db5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringSplit.java @@ -89,7 +89,7 @@ private StringSplit(Operation operation) { ) public static StringSplit create(Scope scope, Operand input, Operand sep, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("StringSplitV2", scope.makeOpName("StringSplit")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StringSplit")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(sep.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Strip.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Strip.java index 3506f8f0329..6cfd6d3ab58 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Strip.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Strip.java @@ -58,7 +58,7 @@ private Strip(Operation operation) { describeByClass = true ) public static Strip create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("StringStrip", scope.makeOpName("Strip")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Strip")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Strip(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java index a1562de32bf..4cad2a56b3a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java @@ -131,7 +131,7 @@ private Substr(Operation operation) { ) public static Substr create(Scope scope, Operand input, Operand pos, Operand len, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("Substr", scope.makeOpName("Substr")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Substr")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(pos.asOutput()); opBuilder.addInput(len.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucket.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucket.java index 484de33eb41..02e3d2455a9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucket.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucket.java @@ -65,7 +65,7 @@ private ToHashBucket(Operation operation) { describeByClass = true ) public static ToHashBucket create(Scope scope, Operand stringTensor, Long numBuckets) { - OperationBuilder opBuilder = scope.env().opBuilder("StringToHashBucket", scope.makeOpName("ToHashBucket")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ToHashBucket")); opBuilder.addInput(stringTensor.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_buckets", numBuckets); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketFast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketFast.java index 7f5c34dcef0..baa4b880028 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketFast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketFast.java @@ -75,7 +75,7 @@ private ToHashBucketFast(Operation operation) { describeByClass = true ) public static ToHashBucketFast 
create(Scope scope, Operand input, Long numBuckets) { - OperationBuilder opBuilder = scope.env().opBuilder("StringToHashBucketFast", scope.makeOpName("ToHashBucketFast")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ToHashBucketFast")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_buckets", numBuckets); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketStrong.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketStrong.java index ad02539dce0..93f3ba45fab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketStrong.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketStrong.java @@ -84,7 +84,7 @@ private ToHashBucketStrong(Operation operation) { ) public static ToHashBucketStrong create(Scope scope, Operand input, Long numBuckets, List key) { - OperationBuilder opBuilder = scope.env().opBuilder("StringToHashBucketStrong", scope.makeOpName("ToHashBucketStrong")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ToHashBucketStrong")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_buckets", numBuckets); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java index 8c1fb9d77fd..76eb22c44d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java @@ -78,7 +78,7 @@ private ToNumber(Operation operation) { ) public static ToNumber create(Scope scope, Operand stringTensor, Class outType) { - OperationBuilder opBuilder = scope.env().opBuilder("StringToNumber", scope.makeOpName("ToNumber")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ToNumber")); opBuilder.addInput(stringTensor.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("out_type", Operands.toDataType(outType)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java index 04854c17c67..aace664a5f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java @@ -83,7 +83,7 @@ private UnicodeDecode(Operation operation) { ) public static UnicodeDecode create(Scope scope, Operand input, String inputEncoding, Class Tsplits, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("UnicodeDecode", scope.makeOpName("UnicodeDecode")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnicodeDecode")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("input_encoding", inputEncoding); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java index dd0911027d2..c852eefe39f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java @@ -90,7 +90,7 @@ private UnicodeDecodeWithOffsets(Operation operation) { ) public static UnicodeDecodeWithOffsets create(Scope scope, Operand input, String inputEncoding, Class Tsplits, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("UnicodeDecodeWithOffsets", scope.makeOpName("UnicodeDecodeWithOffsets")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnicodeDecodeWithOffsets")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("input_encoding", inputEncoding); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java index 6b10afd7927..e6c9682af5f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java @@ -74,7 +74,7 @@ private UnicodeEncode(Operation operation) { ) public static UnicodeEncode create(Scope scope, Operand inputValues, Operand inputSplits, String outputEncoding, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("UnicodeEncode", scope.makeOpName("UnicodeEncode")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnicodeEncode")); opBuilder.addInput(inputValues.asOutput()); opBuilder.addInput(inputSplits.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeScript.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeScript.java index 7a2d6456e15..c700724b2f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeScript.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeScript.java @@ -77,7 +77,7 @@ private UnicodeScript(Operation operation) { describeByClass = true ) public static UnicodeScript create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("UnicodeScript", scope.makeOpName("UnicodeScript")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnicodeScript")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new UnicodeScript(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java index eea9ba6cfcd..6a296716025 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java @@ -99,7 +99,7 @@ private UnicodeTranscode(Operation operation) { ) public static UnicodeTranscode create(Scope scope, Operand input, String inputEncoding, String outputEncoding, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("UnicodeTranscode", scope.makeOpName("UnicodeTranscode")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnicodeTranscode")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("input_encoding", inputEncoding); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java index 27fd7951863..1a756c7c70b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java @@ -89,7 +89,7 @@ private UnsortedSegmentJoin(Operation operation) { public static UnsortedSegmentJoin create(Scope scope, Operand inputs, Operand segmentIds, Operand numSegments, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("UnsortedSegmentJoin", scope.makeOpName("UnsortedSegmentJoin")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("UnsortedSegmentJoin")); opBuilder.addInput(inputs.asOutput()); opBuilder.addInput(segmentIds.asOutput()); opBuilder.addInput(numSegments.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java index f25344e697d..b92a1b2c23d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java @@ -68,7 +68,7 @@ private Upper(Operation operation) { describeByClass = true ) public static Upper create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("StringUpper", scope.makeOpName("Upper")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Upper")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/AudioSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/AudioSummary.java index d5642d0fac6..0bfef4f10d8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/AudioSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/AudioSummary.java @@ -73,7 +73,7 @@ private AudioSummary(Operation operation) { ) public static AudioSummary create(Scope scope, Operand tag, Operand tensor, Operand sampleRate, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("AudioSummaryV2", scope.makeOpName("AudioSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AudioSummary")); opBuilder.addInput(tag.asOutput()); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(sampleRate.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java index f9a3abf4ad0..607d303ca66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java @@ -49,7 +49,7 @@ private CloseSummaryWriter(Operation operation) { describeByClass = true ) public static CloseSummaryWriter create(Scope scope, Operand writer) { - OperationBuilder opBuilder = scope.env().opBuilder("CloseSummaryWriter", scope.makeOpName("CloseSummaryWriter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CloseSummaryWriter")); opBuilder.addInput(writer.asOutput()); opBuilder = scope.apply(opBuilder); return new CloseSummaryWriter(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java index b8af67b0fd7..d65c5be39c6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java @@ -56,7 +56,7 @@ private CreateSummaryDbWriter(Operation operation) { public static CreateSummaryDbWriter create(Scope scope, Operand writer, Operand dbUri, Operand experimentName, Operand runName, Operand userName) { - OperationBuilder opBuilder = scope.env().opBuilder("CreateSummaryDbWriter", scope.makeOpName("CreateSummaryDbWriter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CreateSummaryDbWriter")); opBuilder.addInput(writer.asOutput()); opBuilder.addInput(dbUri.asOutput()); opBuilder.addInput(experimentName.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java index bea08375663..12624b660bd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java @@ -57,7 +57,7 @@ private CreateSummaryFileWriter(Operation operation) { public static CreateSummaryFileWriter create(Scope scope, Operand writer, Operand logdir, Operand maxQueue, Operand flushMillis, Operand filenameSuffix) { - OperationBuilder opBuilder = scope.env().opBuilder("CreateSummaryFileWriter", scope.makeOpName("CreateSummaryFileWriter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CreateSummaryFileWriter")); opBuilder.addInput(writer.asOutput()); opBuilder.addInput(logdir.asOutput()); opBuilder.addInput(maxQueue.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java index c939ad76457..f84a9a7eb4e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java @@ -49,7 +49,7 @@ private FlushSummaryWriter(Operation operation) { describeByClass = true ) public static FlushSummaryWriter create(Scope scope, Operand writer) { - OperationBuilder opBuilder = scope.env().opBuilder("FlushSummaryWriter", scope.makeOpName("FlushSummaryWriter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("FlushSummaryWriter")); opBuilder.addInput(writer.asOutput()); opBuilder = scope.apply(opBuilder); return new FlushSummaryWriter(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/HistogramSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/HistogramSummary.java index 118292c93aa..c9ecf68ef74 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/HistogramSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/HistogramSummary.java @@ -65,7 +65,7 @@ private HistogramSummary(Operation operation) { ) public static HistogramSummary create(Scope scope, Operand tag, Operand values) { - OperationBuilder opBuilder = scope.env().opBuilder("HistogramSummary", scope.makeOpName("HistogramSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("HistogramSummary")); opBuilder.addInput(tag.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImageSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImageSummary.java index deba3e6cf00..468a708ed83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImageSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImageSummary.java @@ -99,7 +99,7 @@ private ImageSummary(Operation operation) { ) public static ImageSummary create(Scope scope, Operand tag, Operand tensor, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ImageSummary", scope.makeOpName("ImageSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ImageSummary")); opBuilder.addInput(tag.asOutput()); opBuilder.addInput(tensor.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java index 8b2e7a46176..0ad13d03818 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java @@ -52,7 +52,7 @@ private ImportEvent(Operation operation) { ) public static ImportEvent create(Scope scope, Operand writer, Operand event) { - OperationBuilder opBuilder = scope.env().opBuilder("ImportEvent", scope.makeOpName("ImportEvent")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ImportEvent")); opBuilder.addInput(writer.asOutput()); opBuilder.addInput(event.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/MergeSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/MergeSummary.java index f20f1b14f5c..ac2517267d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/MergeSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/MergeSummary.java @@ -66,7 +66,7 @@ private MergeSummary(Operation operation) { describeByClass = true ) public static MergeSummary create(Scope scope, Iterable> inputs) { - OperationBuilder opBuilder = scope.env().opBuilder("MergeSummary", scope.makeOpName("MergeSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MergeSummary")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); return new MergeSummary(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ScalarSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ScalarSummary.java index 6f697366e74..80c065a4e5f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ScalarSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ScalarSummary.java @@ -63,7 +63,7 @@ private ScalarSummary(Operation operation) { ) public static ScalarSummary create(Scope scope, Operand tags, Operand values) { - OperationBuilder opBuilder = scope.env().opBuilder("ScalarSummary", scope.makeOpName("ScalarSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ScalarSummary")); opBuilder.addInput(tags.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java index c60f467b0d4..4a60bfc3b67 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java @@ -55,7 +55,7 @@ private 
StatsAggregatorSummary(Operation operation) { describeByClass = true ) public static StatsAggregatorSummary create(Scope scope, Operand iterator) { - OperationBuilder opBuilder = scope.env().opBuilder("StatsAggregatorSummary", scope.makeOpName("StatsAggregatorSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("StatsAggregatorSummary")); opBuilder.addInput(iterator.asOutput()); opBuilder = scope.apply(opBuilder); return new StatsAggregatorSummary(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java index 98832a74fbf..7a96cfb9d61 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java @@ -55,7 +55,7 @@ private SummaryWriter(Operation operation) { describeByClass = true ) public static SummaryWriter create(Scope scope, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SummaryWriter", scope.makeOpName("SummaryWriter")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SummaryWriter")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/TensorSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/TensorSummary.java index 3b46aa41fb6..7c303f23449 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/TensorSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/TensorSummary.java @@ -63,7 +63,7 @@ private TensorSummary(Operation operation) { ) public static TensorSummary create(Scope scope, Operand tag, Operand tensor, Operand serializedSummaryMetadata) { - OperationBuilder opBuilder = scope.env().opBuilder("TensorSummaryV2", scope.makeOpName("TensorSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TensorSummary")); opBuilder.addInput(tag.asOutput()); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(serializedSummaryMetadata.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java index 19dac002652..34c5c37773b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java @@ -61,7 +61,7 @@ private WriteAudioSummary(Operation operation) { public static WriteAudioSummary create(Scope scope, Operand writer, Operand step, Operand tag, Operand tensor, Operand sampleRate, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("WriteAudioSummary", scope.makeOpName("WriteAudioSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WriteAudioSummary")); opBuilder.addInput(writer.asOutput()); opBuilder.addInput(step.asOutput()); opBuilder.addInput(tag.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java index 8cfb7b8142e..039430491ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java @@ -55,7 +55,7 @@ private WriteGraphSummary(Operation operation) { ) public static WriteGraphSummary create(Scope scope, Operand writer, Operand step, Operand tensor) { - OperationBuilder opBuilder = scope.env().opBuilder("WriteGraphSummary", scope.makeOpName("WriteGraphSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WriteGraphSummary")); opBuilder.addInput(writer.asOutput()); opBuilder.addInput(step.asOutput()); opBuilder.addInput(tensor.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java index 6ec7407312c..19a50b23491 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java @@ -57,7 +57,7 @@ private WriteHistogramSummary(Operation operation) { ) public static WriteHistogramSummary create(Scope scope, Operand writer, Operand step, Operand tag, Operand values) { - OperationBuilder opBuilder = scope.env().opBuilder("WriteHistogramSummary", scope.makeOpName("WriteHistogramSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WriteHistogramSummary")); opBuilder.addInput(writer.asOutput()); opBuilder.addInput(step.asOutput()); opBuilder.addInput(tag.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java index 09cf9aeeb03..1f00bdaa30c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java @@ -62,7 +62,7 @@ private WriteImageSummary(Operation operation) { public static WriteImageSummary create(Scope scope, Operand writer, Operand step, Operand tag, Operand tensor, Operand badColor, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("WriteImageSummary", scope.makeOpName("WriteImageSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WriteImageSummary")); opBuilder.addInput(writer.asOutput()); opBuilder.addInput(step.asOutput()); opBuilder.addInput(tag.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java index 6d63c1f6125..21d7c92bb5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java @@ -55,7 +55,7 @@ private WriteRawProtoSummary(Operation operation) { ) public static WriteRawProtoSummary create(Scope scope, Operand writer, Operand step, Operand tensor) { - OperationBuilder opBuilder = scope.env().opBuilder("WriteRawProtoSummary", scope.makeOpName("WriteRawProtoSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WriteRawProtoSummary")); opBuilder.addInput(writer.asOutput()); opBuilder.addInput(step.asOutput()); opBuilder.addInput(tensor.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java index ef4c70ad165..2721b1b1f9a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java @@ -57,7 +57,7 @@ private WriteScalarSummary(Operation operation) { ) public static WriteScalarSummary create(Scope scope, Operand writer, Operand step, Operand tag, Operand value) { - OperationBuilder opBuilder = scope.env().opBuilder("WriteScalarSummary", scope.makeOpName("WriteScalarSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WriteScalarSummary")); opBuilder.addInput(writer.asOutput()); opBuilder.addInput(step.asOutput()); opBuilder.addInput(tag.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java index 5ec67f7c2cc..be3a6ee4c9e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java @@ -58,7 +58,7 @@ private WriteSummary(Operation operation) { public static WriteSummary create(Scope scope, Operand writer, Operand step, Operand tensor, Operand tag, Operand summaryMetadata) { - OperationBuilder opBuilder = scope.env().opBuilder("WriteSummary", scope.makeOpName("WriteSummary")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WriteSummary")); opBuilder.addInput(writer.asOutput()); opBuilder.addInput(step.asOutput()); opBuilder.addInput(tensor.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java index bbc6439c9a9..e43c65b899a 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java @@ -79,7 +79,7 @@ private AllToAll(Operation operation) { ) public static AllToAll create(Scope scope, Operand input, Operand groupAssignment, Long concatDimension, Long splitDimension, Long splitCount) { - OperationBuilder opBuilder = scope.env().opBuilder("AllToAll", scope.makeOpName("AllToAll")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AllToAll")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(groupAssignment.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java index a0a26ad01cc..12ff7ad72f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java @@ -65,7 +65,7 @@ private CollectivePermute(Operation operation) { ) public static CollectivePermute create(Scope scope, Operand input, Operand sourceTargetPairs) { - OperationBuilder opBuilder = scope.env().opBuilder("CollectivePermute", scope.makeOpName("CollectivePermute")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CollectivePermute")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(sourceTargetPairs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java index 64eba9c5f65..5cf2ef9cf74 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java @@ -56,7 +56,7 @@ private CompilationResult(Operation operation) { describeByClass = true ) public static CompilationResult create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUCompilationResult", scope.makeOpName("CompilationResult")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CompilationResult")); opBuilder = scope.apply(opBuilder); return new CompilationResult(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompileSucceededAssert.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompileSucceededAssert.java index 1eea28d492e..b002f863531 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompileSucceededAssert.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompileSucceededAssert.java @@ -55,7 +55,7 @@ private CompileSucceededAssert(Operation operation) { describeByClass = true ) public static CompileSucceededAssert create(Scope scope, Operand compilationStatus) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUCompileSucceededAssert", scope.makeOpName("CompileSucceededAssert")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CompileSucceededAssert")); opBuilder.addInput(compilationStatus.asOutput()); opBuilder = scope.apply(opBuilder); return new CompileSucceededAssert(opBuilder.build()); 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java index 4f2c97ca5ad..0edbc20997b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java @@ -54,7 +54,7 @@ private ConfigureDistributedTPU(Operation operation) { describeByClass = true ) public static ConfigureDistributedTPU create(Scope scope, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ConfigureDistributedTPU", scope.makeOpName("ConfigureDistributedTPU")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ConfigureDistributedTPU")); opBuilder = scope.apply(opBuilder); if (options != null) { for (Options opts : options) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java index 485bd0a7ec7..8adbdbfca5d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java @@ -48,7 +48,7 @@ private ConfigureTPUEmbedding(Operation operation) { describeByClass = true ) public static ConfigureTPUEmbedding create(Scope scope, String config) { - OperationBuilder opBuilder = scope.env().opBuilder("ConfigureTPUEmbedding", scope.makeOpName("ConfigureTPUEmbedding")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ConfigureTPUEmbedding")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("config", config); return new ConfigureTPUEmbedding(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java index 6ac49cfc2f3..f372af713b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java @@ -67,7 +67,7 @@ private CrossReplicaSum(Operation operation) { ) public static CrossReplicaSum create(Scope scope, Operand input, Operand groupAssignment) { - OperationBuilder opBuilder = scope.env().opBuilder("CrossReplicaSum", scope.makeOpName("CrossReplicaSum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("CrossReplicaSum")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(groupAssignment.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java index 664d1bb6d3f..f334ee32906 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java @@ -65,7 +65,7 @@ private EmbeddingActivations(Operation operation) { ) public static EmbeddingActivations create(Scope scope, Operand embeddingVariable, Operand slicedActivations, Long tableId, Long lookupId) { - OperationBuilder 
opBuilder = scope.env().opBuilder("TPUEmbeddingActivations", scope.makeOpName("EmbeddingActivations")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EmbeddingActivations")); opBuilder.addInput(embeddingVariable.asOutput()); opBuilder.addInput(slicedActivations.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java index c7108ae1d94..5856d48053f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java @@ -58,7 +58,7 @@ private EnqueueTPUEmbeddingIntegerBatch(Operation operation) { ) public static EnqueueTPUEmbeddingIntegerBatch create(Scope scope, Iterable> batch, Operand modeOverride, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("EnqueueTPUEmbeddingIntegerBatch", scope.makeOpName("EnqueueTPUEmbeddingIntegerBatch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EnqueueTPUEmbeddingIntegerBatch")); opBuilder.addInputList(Operands.asOutputs(batch)); opBuilder.addInput(modeOverride.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java index 7ad19e303bb..16db6381f07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java @@ -83,7 +83,7 @@ public static EnqueueTPUEmbeddingRaggedTensorBatch create(Scope scope, Iterable> embeddingIndices, Iterable> aggregationWeights, Operand modeOverride, List tableIds, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("EnqueueTPUEmbeddingRaggedTensorBatch", scope.makeOpName("EnqueueTPUEmbeddingRaggedTensorBatch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EnqueueTPUEmbeddingRaggedTensorBatch")); opBuilder.addInputList(Operands.asOutputs(sampleSplits)); opBuilder.addInputList(Operands.asOutputs(embeddingIndices)); opBuilder.addInputList(Operands.asOutputs(aggregationWeights)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java index 2f33704b828..6c28d685060 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java @@ -77,7 +77,7 @@ public static EnqueueTPUEmbeddingSparseBatch create(Scope scope, Iterable> embeddingIndices, Iterable> aggregationWeights, Operand modeOverride, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("EnqueueTPUEmbeddingSparseBatch", scope.makeOpName("EnqueueTPUEmbeddingSparseBatch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EnqueueTPUEmbeddingSparseBatch")); opBuilder.addInputList(Operands.asOutputs(sampleIndices)); opBuilder.addInputList(Operands.asOutputs(embeddingIndices)); opBuilder.addInputList(Operands.asOutputs(aggregationWeights)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java index 42f1a114864..0eb292670a2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java @@ -81,7 +81,7 @@ public static EnqueueTPUEmbeddingSparseTensorBatch create(Scope scope, Iterable> embeddingIndices, Iterable> aggregationWeights, Operand modeOverride, List tableIds, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("EnqueueTPUEmbeddingSparseTensorBatch", scope.makeOpName("EnqueueTPUEmbeddingSparseTensorBatch")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("EnqueueTPUEmbeddingSparseTensorBatch")); opBuilder.addInputList(Operands.asOutputs(sampleIndices)); opBuilder.addInputList(Operands.asOutputs(embeddingIndices)); opBuilder.addInputList(Operands.asOutputs(aggregationWeights)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Execute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Execute.java index 6b19286c2a5..d1f340b993c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Execute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Execute.java @@ -70,7 +70,7 @@ private Execute(Operation operation) { ) public static Execute create(Scope scope, Iterable> args, Operand key, List> Tresults) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUExecute", scope.makeOpName("Execute")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Execute")); opBuilder.addInputList(Operands.asOutputs(args)); opBuilder.addInput(key.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ExecuteAndUpdateVariables.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ExecuteAndUpdateVariables.java index 529aaca1b98..d5ead20179b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ExecuteAndUpdateVariables.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ExecuteAndUpdateVariables.java @@ -79,7 +79,7 @@ private ExecuteAndUpdateVariables(Operation operation) { public static ExecuteAndUpdateVariables create(Scope scope, Iterable> args, Operand key, List> Tresults, List deviceVarReadsIndices, List deviceVarUpdatesIndices) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUExecuteAndUpdateVariables", scope.makeOpName("ExecuteAndUpdateVariables")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ExecuteAndUpdateVariables")); opBuilder.addInputList(Operands.asOutputs(args)); opBuilder.addInput(key.asOutput()); opBuilder = 
scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java index ae20158f41e..753afadb6d8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java @@ -61,7 +61,7 @@ private InfeedDequeue(Operation operation) { ) public static InfeedDequeue create(Scope scope, Class dtype, Shape shape) { - OperationBuilder opBuilder = scope.env().opBuilder("InfeedDequeue", scope.makeOpName("InfeedDequeue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InfeedDequeue")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java index 256cd8e687f..cc138f3e1fc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java @@ -64,7 +64,7 @@ private InfeedDequeueTuple(Operation operation) { ) public static InfeedDequeueTuple create(Scope scope, List> dtypes, List shapes) { - OperationBuilder opBuilder = scope.env().opBuilder("InfeedDequeueTuple", scope.makeOpName("InfeedDequeueTuple")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InfeedDequeueTuple")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); Shape[] shapesArray = new Shape[shapes.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java index 059bcacccfe..8a578770116 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java @@ -54,7 +54,7 @@ private InfeedEnqueue(Operation operation) { ) public static InfeedEnqueue create(Scope scope, Operand input, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("InfeedEnqueue", scope.makeOpName("InfeedEnqueue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InfeedEnqueue")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java index 6883770b646..ce94c83e3cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java @@ -51,7 +51,7 @@ private InfeedEnqueuePrelinearizedBuffer(Operation operation) { ) public static InfeedEnqueuePrelinearizedBuffer create(Scope scope, Operand input, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("InfeedEnqueuePrelinearizedBuffer", scope.makeOpName("InfeedEnqueuePrelinearizedBuffer")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InfeedEnqueuePrelinearizedBuffer")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java index d72e70b184e..498c1a56d37 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java @@ -55,7 +55,7 @@ private InfeedEnqueueTuple(Operation operation) { ) public static InfeedEnqueueTuple create(Scope scope, Iterable> inputs, List shapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("InfeedEnqueueTuple", scope.makeOpName("InfeedEnqueueTuple")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("InfeedEnqueueTuple")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); Shape[] shapesArray = new Shape[shapes.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java index 7a496e02efd..5abbb44190b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java @@ -61,7 +61,7 @@ private LoadTPUEmbeddingADAMParameters(Operation operation) { public static LoadTPUEmbeddingADAMParameters create(Scope scope, Operand parameters, Operand momenta, Operand velocities, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingADAMParameters", scope.makeOpName("LoadTPUEmbeddingADAMParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingADAMParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(momenta.asOutput()); opBuilder.addInput(velocities.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java index 98fd9566f14..6e22d829faf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java @@ -62,7 +62,7 @@ private LoadTPUEmbeddingADAMParametersGradAccumDebug(Operation operation) { public static LoadTPUEmbeddingADAMParametersGradAccumDebug create(Scope scope, Operand parameters, Operand momenta, Operand velocities, Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingADAMParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingADAMParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingADAMParametersGradAccumDebug")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(momenta.asOutput()); opBuilder.addInput(velocities.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java index 4369c4feca6..98dd747d864 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java @@ -61,7 +61,7 @@ private LoadTPUEmbeddingAdadeltaParameters(Operation operation) { public static LoadTPUEmbeddingAdadeltaParameters create(Scope scope, Operand parameters, Operand accumulators, Operand updates, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingAdadeltaParameters", scope.makeOpName("LoadTPUEmbeddingAdadeltaParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingAdadeltaParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(accumulators.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java index 09fc85b0cec..be53836e037 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java @@ -62,7 +62,7 @@ private LoadTPUEmbeddingAdadeltaParametersGradAccumDebug(Operation operation) { public static LoadTPUEmbeddingAdadeltaParametersGradAccumDebug create(Scope scope, Operand parameters, Operand accumulators, Operand updates, Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(accumulators.asOutput()); opBuilder.addInput(updates.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java index 947c20df3a1..95108793139 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java @@ -59,7 +59,7 @@ private LoadTPUEmbeddingAdagradParameters(Operation operation) { ) public static LoadTPUEmbeddingAdagradParameters create(Scope scope, Operand parameters, Operand accumulators, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingAdagradParameters", scope.makeOpName("LoadTPUEmbeddingAdagradParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingAdagradParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(accumulators.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java index 1c3e783315c..3b995b8cb36 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java @@ -61,7 +61,7 @@ private LoadTPUEmbeddingAdagradParametersGradAccumDebug(Operation operation) { public static LoadTPUEmbeddingAdagradParametersGradAccumDebug create(Scope scope, Operand parameters, Operand accumulators, Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingAdagradParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingAdagradParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingAdagradParametersGradAccumDebug")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(accumulators.asOutput()); opBuilder.addInput(gradientAccumulators.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java index 5eb28d70048..9bbac55427b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java @@ -62,7 +62,7 @@ private LoadTPUEmbeddingCenteredRMSPropParameters(Operation operation) { public static LoadTPUEmbeddingCenteredRMSPropParameters create(Scope scope, Operand parameters, Operand ms, Operand mom, Operand mg, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingCenteredRMSPropParameters", scope.makeOpName("LoadTPUEmbeddingCenteredRMSPropParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingCenteredRMSPropParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(ms.asOutput()); opBuilder.addInput(mom.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java index 2716c7a612a..ea3aa91041b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java @@ -61,7 +61,7 @@ private LoadTPUEmbeddingFTRLParameters(Operation operation) { public static LoadTPUEmbeddingFTRLParameters create(Scope scope, Operand parameters, Operand accumulators, Operand linears, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingFTRLParameters", scope.makeOpName("LoadTPUEmbeddingFTRLParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingFTRLParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(accumulators.asOutput()); opBuilder.addInput(linears.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java index f192a59158b..ea9b4e6b4c3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java @@ -62,7 +62,7 @@ private LoadTPUEmbeddingFTRLParametersGradAccumDebug(Operation operation) { public static LoadTPUEmbeddingFTRLParametersGradAccumDebug create(Scope scope, Operand parameters, Operand accumulators, Operand linears, Operand gradientAccumulators, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingFTRLParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingFTRLParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingFTRLParametersGradAccumDebug")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(accumulators.asOutput()); opBuilder.addInput(linears.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java index faa63392307..f4516f0b19a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java @@ -62,7 +62,7 @@ private LoadTPUEmbeddingMDLAdagradLightParameters(Operation operation) { public static LoadTPUEmbeddingMDLAdagradLightParameters create(Scope scope, Operand parameters, Operand accumulators, Operand weights, Operand benefits, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingMDLAdagradLightParameters", scope.makeOpName("LoadTPUEmbeddingMDLAdagradLightParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingMDLAdagradLightParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(accumulators.asOutput()); opBuilder.addInput(weights.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java index 73a429740b8..370d5702a67 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java @@ -59,7 +59,7 @@ private LoadTPUEmbeddingMomentumParameters(Operation operation) { ) public static LoadTPUEmbeddingMomentumParameters create(Scope scope, Operand parameters, Operand momenta, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingMomentumParameters", scope.makeOpName("LoadTPUEmbeddingMomentumParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingMomentumParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(momenta.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java index fc53f223334..07d265b9bb0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java @@ -61,7 +61,7 @@ private LoadTPUEmbeddingMomentumParametersGradAccumDebug(Operation operation) { public static LoadTPUEmbeddingMomentumParametersGradAccumDebug create(Scope scope, Operand parameters, Operand momenta, Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingMomentumParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingMomentumParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingMomentumParametersGradAccumDebug")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(momenta.asOutput()); opBuilder.addInput(gradientAccumulators.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java index ce4ce4ddb6a..0aff4832087 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java @@ -60,7 +60,7 @@ private LoadTPUEmbeddingProximalAdagradParameters(Operation operation) { public static LoadTPUEmbeddingProximalAdagradParameters create(Scope scope, Operand parameters, Operand accumulators, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingProximalAdagradParameters", scope.makeOpName("LoadTPUEmbeddingProximalAdagradParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingProximalAdagradParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(accumulators.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java index 858ee5dbf0a..01a15d09e59 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java @@ -61,7 +61,7 @@ private LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug(Operation operat public static LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug create(Scope scope, Operand parameters, Operand accumulators, Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(accumulators.asOutput()); opBuilder.addInput(gradientAccumulators.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java index 179616df417..40981674d7b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java @@ -56,7 +56,7 @@ private LoadTPUEmbeddingProximalYogiParameters(Operation operation) { public static LoadTPUEmbeddingProximalYogiParameters create(Scope scope, Operand parameters, Operand v, Operand m, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingProximalYogiParameters", scope.makeOpName("LoadTPUEmbeddingProximalYogiParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingProximalYogiParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(v.asOutput()); opBuilder.addInput(m.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java index dc999e0b256..cd9dbf74078 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java @@ -57,7 +57,7 @@ private LoadTPUEmbeddingProximalYogiParametersGradAccumDebug(Operation operation public static LoadTPUEmbeddingProximalYogiParametersGradAccumDebug create(Scope scope, Operand parameters, Operand v, Operand m, Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingProximalYogiParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingProximalYogiParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingProximalYogiParametersGradAccumDebug")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(v.asOutput()); opBuilder.addInput(m.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java index 98492be993a..b3fca976f57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java @@ -61,7 +61,7 @@ private LoadTPUEmbeddingRMSPropParameters(Operation operation) { public static LoadTPUEmbeddingRMSPropParameters create(Scope scope, Operand parameters, Operand ms, Operand mom, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingRMSPropParameters", scope.makeOpName("LoadTPUEmbeddingRMSPropParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingRMSPropParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(ms.asOutput()); opBuilder.addInput(mom.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java index a610de483b1..60c13d0163d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java @@ -62,7 +62,7 @@ private LoadTPUEmbeddingRMSPropParametersGradAccumDebug(Operation operation) { public static LoadTPUEmbeddingRMSPropParametersGradAccumDebug create(Scope scope, Operand parameters, Operand ms, Operand mom, Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingRMSPropParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingRMSPropParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingRMSPropParametersGradAccumDebug")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(ms.asOutput()); opBuilder.addInput(mom.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java index 98edf6c04da..362186da66d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java @@ -58,7 +58,7 @@ private LoadTPUEmbeddingStochasticGradientDescentParameters(Operation operation) ) public static LoadTPUEmbeddingStochasticGradientDescentParameters create(Scope scope, Operand parameters, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingStochasticGradientDescentParameters", scope.makeOpName("LoadTPUEmbeddingStochasticGradientDescentParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingStochasticGradientDescentParameters")); opBuilder.addInput(parameters.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java index ec195d07ca1..e43d7866f75 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java @@ -60,7 +60,7 @@ private LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(Operat public static LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug create( Scope scope, Operand parameters, Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug", scope.makeOpName("LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug")); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(gradientAccumulators.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java index a3b286fd850..e47274b97f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java @@ -56,7 +56,7 @@ private OrdinalSelector(Operation operation) { describeByClass = true ) public static OrdinalSelector create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUOrdinalSelector", scope.makeOpName("OrdinalSelector")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OrdinalSelector")); opBuilder = scope.apply(opBuilder); return new OrdinalSelector(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java index a2aab40fad4..691d6030e01 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java @@ -63,7 +63,7 @@ private OutfeedDequeue(Operation operation) { ) public static OutfeedDequeue create(Scope scope, Class dtype, Shape shape, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("OutfeedDequeue", scope.makeOpName("OutfeedDequeue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OutfeedDequeue")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java index 51ece781c08..cfcf0a470d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java @@ -67,7 +67,7 @@ private OutfeedDequeueTuple(Operation operation) { ) public static OutfeedDequeueTuple create(Scope scope, List> dtypes, List shapes, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("OutfeedDequeueTuple", scope.makeOpName("OutfeedDequeueTuple")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OutfeedDequeueTuple")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); Shape[] shapesArray = new Shape[shapes.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java index e6e5f3dfaf4..b83a2d2989d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java @@ -71,7 +71,7 @@ private OutfeedDequeueTupleV2(Operation operation) { ) public static OutfeedDequeueTupleV2 create(Scope scope, Operand deviceOrdinal, List> dtypes, List shapes) { - OperationBuilder opBuilder = scope.env().opBuilder("OutfeedDequeueTupleV2", scope.makeOpName("OutfeedDequeueTupleV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OutfeedDequeueTupleV2")); opBuilder.addInput(deviceOrdinal.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtypes", Operands.toDataTypes(dtypes)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java index 1f176f35da8..71b6be5fa75 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java @@ -67,7 +67,7 @@ private OutfeedDequeueV2(Operation operation) { ) public static OutfeedDequeueV2 create(Scope scope, Operand deviceOrdinal, Class dtype, Shape shape) { - OperationBuilder opBuilder = scope.env().opBuilder("OutfeedDequeueV2", scope.makeOpName("OutfeedDequeueV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OutfeedDequeueV2")); opBuilder.addInput(deviceOrdinal.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java index 0f418ed124b..0563d432c56 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java @@ -49,7 +49,7 @@ private OutfeedEnqueue(Operation operation) { describeByClass = true ) public static OutfeedEnqueue create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("OutfeedEnqueue", scope.makeOpName("OutfeedEnqueue")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OutfeedEnqueue")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new OutfeedEnqueue(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java index e5d407a6560..f8cb68f203e 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java @@ -50,7 +50,7 @@ private OutfeedEnqueueTuple(Operation operation) { describeByClass = true ) public static OutfeedEnqueueTuple create(Scope scope, Iterable> inputs) { - OperationBuilder opBuilder = scope.env().opBuilder("OutfeedEnqueueTuple", scope.makeOpName("OutfeedEnqueueTuple")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("OutfeedEnqueueTuple")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); return new OutfeedEnqueueTuple(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java index 4ecdd4eac74..1ccb4ee19a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java @@ -64,7 +64,7 @@ private PartitionedInput(Operation operation) { ) public static PartitionedInput create(Scope scope, Iterable> inputs, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUPartitionedInput", scope.makeOpName("PartitionedInput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PartitionedInput")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java index 3211bcd3faf..425a009730a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java @@ -71,7 +71,7 @@ private PartitionedOutput(Operation operation) { ) public static PartitionedOutput create(Scope scope, Operand inputs, Long numSplits, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUPartitionedOutput", scope.makeOpName("PartitionedOutput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PartitionedOutput")); opBuilder.addInput(inputs.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_splits", numSplits); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java index 473b898697d..70d1fa85507 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java @@ -60,7 +60,7 @@ private Prelinearize(Operation operation) { ) public static Prelinearize create(Scope scope, Operand input, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("Prelinearize", scope.makeOpName("Prelinearize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Prelinearize")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java index f8d5b308265..1d4c3fd6e34 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java @@ -62,7 +62,7 @@ private PrelinearizeTuple(Operation operation) { ) public static PrelinearizeTuple create(Scope scope, Iterable> inputs, List shapes, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("PrelinearizeTuple", scope.makeOpName("PrelinearizeTuple")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PrelinearizeTuple")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); Shape[] shapesArray = new Shape[shapes.size()]; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java index 059ea15ff30..9605e67d8b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java @@ -68,7 +68,7 @@ private RecvTPUEmbeddingActivations(Operation operation) { describeByClass = true ) public static RecvTPUEmbeddingActivations create(Scope scope, Long numOutputs, String config) { - OperationBuilder opBuilder = scope.env().opBuilder("RecvTPUEmbeddingActivations", scope.makeOpName("RecvTPUEmbeddingActivations")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RecvTPUEmbeddingActivations")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_outputs", numOutputs); opBuilder.setAttr("config", config); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java index 06d5c6a8ce8..7f9f50de1da 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java @@ -51,7 +51,7 @@ private ReplicateMetadata(Operation operation) { describeByClass = true ) public static ReplicateMetadata create(Scope scope, Long numReplicas, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUReplicateMetadata", scope.makeOpName("ReplicateMetadata")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReplicateMetadata")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_replicas", numReplicas); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java index e581ecf060d..45ef66fe475 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java @@ -70,7 +70,7 @@ private ReplicatedInput(Operation operation) { ) public static ReplicatedInput create(Scope scope, Iterable> inputs, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUReplicatedInput", scope.makeOpName("ReplicatedInput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReplicatedInput")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java index 53cbf8b412e..18030fbc955 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java @@ -73,7 +73,7 @@ private ReplicatedOutput(Operation operation) { ) public static ReplicatedOutput create(Scope scope, Operand input, Long numReplicas) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUReplicatedOutput", scope.makeOpName("ReplicatedOutput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReplicatedOutput")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_replicas", numReplicas); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java index acea126fd17..6ef132a4a6e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java @@ -66,7 +66,7 @@ private RetrieveTPUEmbeddingADAMParameters(Operation operation) { ) public static RetrieveTPUEmbeddingADAMParameters create(Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingADAMParameters", scope.makeOpName("RetrieveTPUEmbeddingADAMParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingADAMParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java index ba381888271..6812e2c8a75 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java @@ -69,7 +69,7 @@ private RetrieveTPUEmbeddingADAMParametersGradAccumDebug(Operation operation) { ) public static RetrieveTPUEmbeddingADAMParametersGradAccumDebug create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingADAMParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingADAMParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingADAMParametersGradAccumDebug")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java index 17f05b8bd5e..2afc96e2677 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java @@ -66,7 +66,7 @@ private RetrieveTPUEmbeddingAdadeltaParameters(Operation operation) { ) public static RetrieveTPUEmbeddingAdadeltaParameters create(Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingAdadeltaParameters", scope.makeOpName("RetrieveTPUEmbeddingAdadeltaParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingAdadeltaParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java index c96c22897c1..1a2bf517d4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java @@ -69,7 +69,7 @@ private RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug(Operation operation ) public static RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java index 1f7e6064936..07f0ce6cc67 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java @@ -63,7 +63,7 @@ private RetrieveTPUEmbeddingAdagradParameters(Operation operation) { ) public static RetrieveTPUEmbeddingAdagradParameters create(Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingAdagradParameters", scope.makeOpName("RetrieveTPUEmbeddingAdagradParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingAdagradParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java index bec9e018904..71d118edab7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java @@ -66,7 +66,7 @@ private RetrieveTPUEmbeddingAdagradParametersGradAccumDebug(Operation operation) ) public static RetrieveTPUEmbeddingAdagradParametersGradAccumDebug create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java index e5be5990a3a..02054dd4fd6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java @@ -69,7 +69,7 @@ private RetrieveTPUEmbeddingCenteredRMSPropParameters(Operation operation) { ) public static RetrieveTPUEmbeddingCenteredRMSPropParameters create(Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingCenteredRMSPropParameters", scope.makeOpName("RetrieveTPUEmbeddingCenteredRMSPropParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingCenteredRMSPropParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java index 491e59bdc38..972f0531eb6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java @@ -66,7 +66,7 @@ private RetrieveTPUEmbeddingFTRLParameters(Operation operation) { ) public static RetrieveTPUEmbeddingFTRLParameters create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingFTRLParameters", scope.makeOpName("RetrieveTPUEmbeddingFTRLParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingFTRLParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.java index 156e5e18788..2b8d87cba31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.java @@ -69,7 +69,7 @@ private RetrieveTPUEmbeddingFTRLParametersGradAccumDebug(Operation operation) { ) public static RetrieveTPUEmbeddingFTRLParametersGradAccumDebug create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java index 5b23031e101..2909a342468 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java @@ -69,7 +69,7 @@ private RetrieveTPUEmbeddingMDLAdagradLightParameters(Operation operation) { ) public static RetrieveTPUEmbeddingMDLAdagradLightParameters create(Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingMDLAdagradLightParameters", scope.makeOpName("RetrieveTPUEmbeddingMDLAdagradLightParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingMDLAdagradLightParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java index ea7b81b8166..f7e9669168c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java @@ -63,7 +63,7 @@ private RetrieveTPUEmbeddingMomentumParameters(Operation operation) { ) public static RetrieveTPUEmbeddingMomentumParameters create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingMomentumParameters", scope.makeOpName("RetrieveTPUEmbeddingMomentumParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingMomentumParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java index 444ecb16153..bfc04d550a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java @@ -66,7 +66,7 @@ private RetrieveTPUEmbeddingMomentumParametersGradAccumDebug(Operation operation ) public static RetrieveTPUEmbeddingMomentumParametersGradAccumDebug create(Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java index 15ac2e4bbc4..ca4c8a6e2f8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java @@ -63,7 +63,7 @@ private RetrieveTPUEmbeddingProximalAdagradParameters(Operation operation) { ) public static RetrieveTPUEmbeddingProximalAdagradParameters create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingProximalAdagradParameters", scope.makeOpName("RetrieveTPUEmbeddingProximalAdagradParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingProximalAdagradParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java index bbf763d123e..4caa040b0d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java @@ -66,7 +66,7 @@ private RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug(Operation op ) public static RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug create(Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java index 46c27514430..b9ae4dfcaf3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java @@ -62,7 +62,7 @@ private RetrieveTPUEmbeddingProximalYogiParameters(Operation operation) { ) public static RetrieveTPUEmbeddingProximalYogiParameters create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingProximalYogiParameters", scope.makeOpName("RetrieveTPUEmbeddingProximalYogiParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingProximalYogiParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java index c3b827522d4..09e2a534790 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java @@ -65,7 +65,7 @@ private RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug(Operation opera ) public static RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug create(Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java index bcad29d5d9e..ff79ff6fc51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java @@ -66,7 +66,7 @@ private RetrieveTPUEmbeddingRMSPropParameters(Operation operation) { ) public static RetrieveTPUEmbeddingRMSPropParameters create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingRMSPropParameters", scope.makeOpName("RetrieveTPUEmbeddingRMSPropParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingRMSPropParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java index b3e83f8ab13..6d7af19a075 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java @@ -69,7 +69,7 @@ private RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug(Operation operation) ) public static RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug create(Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java index 71ae453a189..64d953b3695 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java @@ -61,7 +61,7 @@ private RetrieveTPUEmbeddingStochasticGradientDescentParameters(Operation operat ) public static RetrieveTPUEmbeddingStochasticGradientDescentParameters create(Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingStochasticGradientDescentParameters", scope.makeOpName("RetrieveTPUEmbeddingStochasticGradientDescentParameters")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingStochasticGradientDescentParameters")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java index 95a4620110d..ee38afd6690 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java @@ -64,7 +64,7 @@ private RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug( ) public static RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug create( Scope scope, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug", scope.makeOpName("RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java index dd2d1fa6992..1c8ba14034d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java @@ -64,7 +64,7 @@ private SendTPUEmbeddingGradients(Operation operation) { ) public static SendTPUEmbeddingGradients create(Scope scope, Iterable> inputs, Iterable> learningRates, String config, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SendTPUEmbeddingGradients", scope.makeOpName("SendTPUEmbeddingGradients")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SendTPUEmbeddingGradients")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder.addInputList(Operands.asOutputs(learningRates)); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java index c710c9617e6..843e31349b4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java @@ -47,7 +47,7 @@ private ShutdownDistributedTPU(Operation operation) { describeByClass = true ) public static ShutdownDistributedTPU create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("ShutdownDistributedTPU", scope.makeOpName("ShutdownDistributedTPU")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ShutdownDistributedTPU")); opBuilder = scope.apply(opBuilder); return new ShutdownDistributedTPU(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java index 760196069c4..1d83fb87d7e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java @@ -59,7 +59,7 @@ private TPUCompilationResult(Operation operation) { describeByClass = true ) public static TPUCompilationResult create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUCompilationResult", scope.makeOpName("TPUCompilationResult")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TPUCompilationResult")); opBuilder = scope.apply(opBuilder); return new TPUCompilationResult(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java index 21b7820aca7..f30c3ecb796 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java @@ -68,7 +68,7 @@ private TPUEmbeddingActivations(Operation operation) { ) public static TPUEmbeddingActivations create(Scope scope, Operand embeddingVariable, Operand slicedActivations, Long tableId, Long lookupId) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUEmbeddingActivations", scope.makeOpName("TPUEmbeddingActivations")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TPUEmbeddingActivations")); opBuilder.addInput(embeddingVariable.asOutput()); opBuilder.addInput(slicedActivations.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java index fd9853a0e28..9baebd25a0b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java @@ -54,7 +54,7 @@ private TPUReplicateMetadata(Operation operation) { describeByClass = true ) public static TPUReplicateMetadata create(Scope scope, Long numReplicas, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUReplicateMetadata", scope.makeOpName("TPUReplicateMetadata")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TPUReplicateMetadata")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_replicas", numReplicas); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java index 2480ab6c427..193aefad52c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java @@ -73,7 +73,7 @@ private TPUReplicatedInput(Operation operation) { ) public static TPUReplicatedInput create(Scope scope, Iterable> inputs, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUReplicatedInput", scope.makeOpName("TPUReplicatedInput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TPUReplicatedInput")); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java index 7c2ab316647..45ded441fa6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java @@ -76,7 +76,7 @@ private TPUReplicatedOutput(Operation operation) { ) public static TPUReplicatedOutput create(Scope scope, Operand input, Long numReplicas) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUReplicatedOutput", scope.makeOpName("TPUReplicatedOutput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TPUReplicatedOutput")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("num_replicas", numReplicas); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java index 55f908d985b..77e2ec5ee98 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java @@ -59,7 +59,7 @@ private TPUReshardVariables(Operation operation) { ) public static TPUReshardVariables create(Scope scope, Iterable> vars, Operand newFormatKey, Operand formatStateVar) { - OperationBuilder opBuilder = scope.env().opBuilder("TPUReshardVariables", scope.makeOpName("TPUReshardVariables")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TPUReshardVariables")); opBuilder.addInputList(Operands.asOutputs(vars)); opBuilder.addInput(newFormatKey.asOutput()); opBuilder.addInput(formatStateVar.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java index 37657c395dd..8f5f20fb9bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java @@ -56,7 +56,7 @@ private WorkerHeartbeat(Operation operation) { describeByClass = true ) public static WorkerHeartbeat create(Scope scope, Operand request) { - OperationBuilder opBuilder = scope.env().opBuilder("WorkerHeartbeat", scope.makeOpName("WorkerHeartbeat")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("WorkerHeartbeat")); opBuilder.addInput(request.asOutput()); opBuilder = scope.apply(opBuilder); return new WorkerHeartbeat(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorApplyGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorApplyGradient.java index a8e61a8ae00..9e6e97c2368 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorApplyGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorApplyGradient.java @@ -59,7 +59,7 @@ private AccumulatorApplyGradient(Operation operation) { ) public static AccumulatorApplyGradient create(Scope scope, Operand handle, Operand localStep, Operand gradient) { - OperationBuilder opBuilder = scope.env().opBuilder("AccumulatorApplyGradient", scope.makeOpName("AccumulatorApplyGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AccumulatorApplyGradient")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(localStep.asOutput()); opBuilder.addInput(gradient.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorNumAccumulated.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorNumAccumulated.java index 9accba8bb08..7eb2131fe46 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorNumAccumulated.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorNumAccumulated.java @@ -59,7 +59,7 @@ private AccumulatorNumAccumulated(Operation operation) { describeByClass = true ) public static AccumulatorNumAccumulated create(Scope scope, Operand handle) { - OperationBuilder opBuilder = scope.env().opBuilder("AccumulatorNumAccumulated", scope.makeOpName("AccumulatorNumAccumulated")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AccumulatorNumAccumulated")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); return new AccumulatorNumAccumulated(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorSetGlobalStep.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorSetGlobalStep.java index a8ea01bf654..a15cb032c33 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorSetGlobalStep.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorSetGlobalStep.java @@ -58,7 +58,7 @@ private AccumulatorSetGlobalStep(Operation operation) { ) public static AccumulatorSetGlobalStep create(Scope scope, Operand handle, Operand newGlobalStep) { - OperationBuilder opBuilder = scope.env().opBuilder("AccumulatorSetGlobalStep", scope.makeOpName("AccumulatorSetGlobalStep")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AccumulatorSetGlobalStep")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(newGlobalStep.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java index b34ca228cbe..8ef2930e5a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java @@ -73,7 +73,7 @@ private AccumulatorTakeGradient(Operation operation) { ) public static AccumulatorTakeGradient create(Scope scope, Operand handle, Operand numRequired, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("AccumulatorTakeGradient", 
scope.makeOpName("AccumulatorTakeGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("AccumulatorTakeGradient")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(numRequired.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java index 2456c6bdef6..eb480cc02cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java @@ -71,7 +71,7 @@ private ApplyAdaMax(Operation operation) { public static ApplyAdaMax create(Scope scope, Operand var, Operand m, Operand v, Operand beta1Power, Operand lr, Operand beta1, Operand beta2, Operand epsilon, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyAdaMax", scope.makeOpName("ApplyAdaMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyAdaMax")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(v.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java index 5148311bfac..59e1e535f02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java @@ -74,7 +74,7 @@ private ApplyAdadelta(Operation operation) { public static ApplyAdadelta create(Scope scope, Operand var, Operand accum, Operand accumUpdate, Operand lr, Operand rho, Operand epsilon, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyAdadelta", scope.makeOpName("ApplyAdadelta")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyAdadelta")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(accumUpdate.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java index 63903e9cebe..f300a77b4ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java @@ -68,7 +68,7 @@ private ApplyAdagrad(Operation operation) { ) public static ApplyAdagrad create(Scope scope, Operand var, Operand accum, Operand lr, Operand grad, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyAdagrad", scope.makeOpName("ApplyAdagrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyAdagrad")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java index 5bf5c640275..981a2c987e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java @@ -72,7 +72,7 @@ private ApplyAdagradDa(Operation operation) { public static ApplyAdagradDa create(Scope scope, Operand var, Operand gradientAccumulator, Operand gradientSquaredAccumulator, Operand grad, Operand lr, Operand l1, Operand l2, Operand globalStep, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyAdagradDA", scope.makeOpName("ApplyAdagradDa")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyAdagradDa")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(gradientAccumulator.asOutput()); opBuilder.addInput(gradientSquaredAccumulator.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java index f35c5edd6e0..fb1f1ddd3ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java @@ -65,7 +65,7 @@ private ApplyAdagradV2(Operation operation) { ) public static ApplyAdagradV2 create(Scope scope, Operand var, Operand accum, Operand lr, Operand epsilon, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyAdagradV2", scope.makeOpName("ApplyAdagradV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyAdagradV2")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java index 5340dda8697..79a6e198389 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java @@ -77,7 +77,7 @@ private ApplyAdam(Operation operation) { public static ApplyAdam create(Scope scope, Operand var, Operand m, Operand v, Operand beta1Power, Operand beta2Power, Operand lr, Operand beta1, Operand beta2, Operand epsilon, Operand grad, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyAdam", scope.makeOpName("ApplyAdam")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyAdam")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(v.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java index cd78906708e..16c44e61fc0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java @@ -73,7 +73,7 @@ private ApplyAddSign(Operation operation) { public static ApplyAddSign create(Scope scope, Operand var, Operand m, Operand lr, Operand alpha, Operand signDecay, Operand beta, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyAddSign", scope.makeOpName("ApplyAddSign")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyAddSign")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java index 8b5c4862b70..56dbb1cd320 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java @@ -86,7 +86,7 @@ private ApplyCenteredRmsProp(Operation operation) { public static ApplyCenteredRmsProp create(Scope scope, Operand var, Operand mg, Operand ms, Operand mom, Operand lr, Operand rho, Operand momentum, Operand epsilon, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyCenteredRMSProp", scope.makeOpName("ApplyCenteredRmsProp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyCenteredRmsProp")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(mg.asOutput()); opBuilder.addInput(ms.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java index 2c8153a2480..65021524609 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java @@ -79,7 +79,7 @@ private ApplyFtrl(Operation operation) { public static ApplyFtrl create(Scope scope, Operand var, Operand accum, Operand linear, Operand grad, Operand lr, Operand l1, Operand l2, Operand l2Shrinkage, Operand lrPower, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyFtrlV2", scope.makeOpName("ApplyFtrl")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyFtrl")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(linear.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java index 3f35716e98d..7a39052bd9d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java @@ -65,7 +65,7 @@ private ApplyGradientDescent(Operation operation) { ) public static ApplyGradientDescent create(Scope scope, Operand var, Operand alpha, Operand delta, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyGradientDescent", scope.makeOpName("ApplyGradientDescent")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyGradientDescent")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(alpha.asOutput()); opBuilder.addInput(delta.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java index baf8e69a794..b9f2efe52e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java @@ -70,7 +70,7 @@ private ApplyMomentum(Operation operation) { ) public static ApplyMomentum create(Scope scope, Operand var, Operand accum, Operand lr, Operand grad, Operand momentum, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyMomentum", scope.makeOpName("ApplyMomentum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyMomentum")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java index 93563a4ba4a..a84b790af40 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java @@ -73,7 +73,7 @@ private ApplyPowerSign(Operation operation) { public static ApplyPowerSign create(Scope scope, Operand var, Operand m, Operand lr, Operand logbase, Operand signDecay, Operand beta, Operand grad, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyPowerSign", scope.makeOpName("ApplyPowerSign")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyPowerSign")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java index 38ac095bdce..70707c6787b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java @@ -72,7 +72,7 @@ private ApplyProximalAdagrad(Operation operation) { public static ApplyProximalAdagrad create(Scope scope, Operand var, Operand accum, Operand lr, Operand l1, Operand l2, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyProximalAdagrad", scope.makeOpName("ApplyProximalAdagrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyProximalAdagrad")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java index 727f50ef9e3..43f9cdcd622 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java @@ -70,7 +70,7 @@ private ApplyProximalGradientDescent(Operation operation) { public static ApplyProximalGradientDescent create(Scope scope, Operand var, Operand alpha, Operand l1, Operand l2, Operand delta, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyProximalGradientDescent", scope.makeOpName("ApplyProximalGradientDescent")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyProximalGradientDescent")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(alpha.asOutput()); opBuilder.addInput(l1.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java index 44bc56ec3a7..4646318e109 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java @@ -79,7 +79,7 @@ private ApplyRmsProp(Operation operation) { public static ApplyRmsProp create(Scope scope, Operand var, Operand ms, Operand mom, Operand lr, Operand rho, Operand momentum, Operand epsilon, Operand grad, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ApplyRMSProp", scope.makeOpName("ApplyRmsProp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ApplyRmsProp")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(ms.asOutput()); opBuilder.addInput(mom.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java index 6fd46605550..ea1035986e8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java @@ -84,7 +84,7 @@ private BatchMatMul(Operation operation) { ) public static BatchMatMul create(Scope scope, Operand x, Operand y, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("BatchMatMulV2", scope.makeOpName("BatchMatMul")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BatchMatMul")); opBuilder.addInput(x.asOutput()); opBuilder.addInput(y.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java index a0e4e4906f4..1eb33aba66e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java @@ -55,7 +55,7 @@ private ComputeBatchSize(Operation operation) { describeByClass = true ) public static ComputeBatchSize create(Scope scope, Operand inputDataset) { - OperationBuilder opBuilder = scope.env().opBuilder("ComputeBatchSize", scope.makeOpName("ComputeBatchSize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ComputeBatchSize")); opBuilder.addInput(inputDataset.asOutput()); opBuilder = scope.apply(opBuilder); return new ComputeBatchSize(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ConditionalAccumulator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ConditionalAccumulator.java index a075065c686..c66e06d387b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ConditionalAccumulator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ConditionalAccumulator.java @@ -71,7 +71,7 @@ private ConditionalAccumulator(Operation operation) { ) public static ConditionalAccumulator create(Scope scope, Class dtype, Shape shape, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ConditionalAccumulator", scope.makeOpName("ConditionalAccumulator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ConditionalAccumulator")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/GenerateVocabRemapping.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/GenerateVocabRemapping.java index 425698f9b52..e10213af654 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/GenerateVocabRemapping.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/GenerateVocabRemapping.java @@ -92,7 +92,7 @@ private GenerateVocabRemapping(Operation operation) { ) public static GenerateVocabRemapping create(Scope scope, Operand newVocabFile, Operand oldVocabFile, Long newVocabOffset, Long numNewVocab, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("GenerateVocabRemapping", scope.makeOpName("GenerateVocabRemapping")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("GenerateVocabRemapping")); opBuilder.addInput(newVocabFile.asOutput()); opBuilder.addInput(oldVocabFile.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/MergeV2Checkpoints.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/MergeV2Checkpoints.java index 8ced390a7d8..68afca884da 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/MergeV2Checkpoints.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/MergeV2Checkpoints.java @@ -63,7 +63,7 @@ private MergeV2Checkpoints(Operation operation) { ) public static MergeV2Checkpoints create(Scope scope, Operand checkpointPrefixes, Operand destinationPrefix, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("MergeV2Checkpoints", scope.makeOpName("MergeV2Checkpoints")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("MergeV2Checkpoints")); opBuilder.addInput(checkpointPrefixes.asOutput()); opBuilder.addInput(destinationPrefix.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/NegTrain.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/NegTrain.java index e1c553f72af..0581d6a3f90 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/NegTrain.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/NegTrain.java @@ -63,7 +63,7 @@ private NegTrain(Operation operation) { public static NegTrain create(Scope scope, Operand wIn, Operand wOut, Operand examples, Operand labels, Operand lr, List vocabCount, Long numNegativeSamples) { - OperationBuilder opBuilder = scope.env().opBuilder("NegTrain", scope.makeOpName("NegTrain")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("NegTrain")); opBuilder.addInput(wIn.asOutput()); opBuilder.addInput(wOut.asOutput()); opBuilder.addInput(examples.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java index 401bd65dfc8..89be9949afb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java @@ -69,7 +69,7 @@ private PreventGradient(Operation operation) { ) public static PreventGradient create(Scope scope, Operand input, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("PreventGradient", scope.makeOpName("PreventGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("PreventGradient")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java index b9a2fafc101..fbe5b7be7d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java @@ -55,7 +55,7 @@ private ResourceAccumulatorApplyGradient(Operation operation) { public static ResourceAccumulatorApplyGradient create(Scope scope, Operand handle, Operand localStep, Operand gradient) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceAccumulatorApplyGradient", scope.makeOpName("ResourceAccumulatorApplyGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceAccumulatorApplyGradient")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(localStep.asOutput()); opBuilder.addInput(gradient.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java index 4ab175b74ef..54d8f670440 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java @@ -56,7 +56,7 @@ private ResourceAccumulatorNumAccumulated(Operation operation) { ) public static ResourceAccumulatorNumAccumulated create(Scope scope, Operand handle) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceAccumulatorNumAccumulated", scope.makeOpName("ResourceAccumulatorNumAccumulated")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceAccumulatorNumAccumulated")); opBuilder.addInput(handle.asOutput()); opBuilder = scope.apply(opBuilder); return new ResourceAccumulatorNumAccumulated(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java index 40567653231..893337bfdb4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java @@ -54,7 +54,7 @@ private ResourceAccumulatorSetGlobalStep(Operation operation) { ) public static ResourceAccumulatorSetGlobalStep create(Scope scope, Operand handle, Operand newGlobalStep) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceAccumulatorSetGlobalStep", scope.makeOpName("ResourceAccumulatorSetGlobalStep")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceAccumulatorSetGlobalStep")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(newGlobalStep.asOutput()); 
opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java index 7186d58b07e..48901d511ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java @@ -68,7 +68,7 @@ private ResourceAccumulatorTakeGradient(Operation operation) { ) public static ResourceAccumulatorTakeGradient create(Scope scope, Operand handle, Operand numRequired, Class dtype) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceAccumulatorTakeGradient", scope.makeOpName("ResourceAccumulatorTakeGradient")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceAccumulatorTakeGradient")); opBuilder.addInput(handle.asOutput()); opBuilder.addInput(numRequired.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java index c473ae1fd1e..c98a3596b83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java @@ -65,7 +65,7 @@ public static ResourceApplyAdaMax create(Scope scope, Operand var, Operand m, Operand v, Operand beta1Power, Operand lr, Operand beta1, Operand beta2, Operand epsilon, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyAdaMax", scope.makeOpName("ResourceApplyAdaMax")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyAdaMax")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(v.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdadelta.java index 30a3d40b670..075f4d1dfb2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdadelta.java @@ -68,7 +68,7 @@ public static ResourceApplyAdadelta create(Scope scope, Operand var, Operand accum, Operand accumUpdate, Operand lr, Operand rho, Operand epsilon, Operand grad, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyAdadelta", scope.makeOpName("ResourceApplyAdadelta")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyAdadelta")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(accumUpdate.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagrad.java index 9c579f6b8c4..74d8df3dd39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagrad.java @@ -59,7 +59,7 @@ private ResourceApplyAdagrad(Operation operation) { public static ResourceApplyAdagrad create(Scope scope, Operand var, Operand accum, Operand lr, Operand epsilon, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyAdagradV2", scope.makeOpName("ResourceApplyAdagrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyAdagrad")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagradDa.java index 1566e1d3219..c356bbe4721 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagradDa.java @@ -66,7 +66,7 @@ public static ResourceApplyAdagradDa create(Scope scope, Operand var, Operand gradientAccumulator, Operand gradientSquaredAccumulator, Operand grad, Operand lr, Operand l1, Operand l2, Operand globalStep, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyAdagradDA", scope.makeOpName("ResourceApplyAdagradDa")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyAdagradDa")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(gradientAccumulator.asOutput()); opBuilder.addInput(gradientSquaredAccumulator.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java index 927023ea74c..2d11f7de847 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java @@ -71,7 +71,7 @@ public static ResourceApplyAdam create(Scope scope, Operand var, Operand m, Operand v, Operand beta1Power, Operand beta2Power, Operand lr, Operand beta1, Operand beta2, Operand epsilon, Operand grad, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyAdam", scope.makeOpName("ResourceApplyAdam")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyAdam")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(v.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdamWithAmsgrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdamWithAmsgrad.java index b1a5400470b..585bd064b24 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdamWithAmsgrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdamWithAmsgrad.java @@ -73,7 +73,7 @@ public static ResourceApplyAdamWithAmsgrad create(Scope scope, Operand var, Operand m, Operand v, Operand vhat, Operand beta1Power, Operand beta2Power, Operand lr, Operand beta1, Operand beta2, Operand epsilon, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyAdamWithAmsgrad", scope.makeOpName("ResourceApplyAdamWithAmsgrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyAdamWithAmsgrad")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(v.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAddSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAddSign.java index 67742f1c152..67d32d5ce5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAddSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAddSign.java @@ -66,7 +66,7 @@ private ResourceApplyAddSign(Operation operation) { public static ResourceApplyAddSign create(Scope scope, Operand var, Operand m, Operand lr, Operand alpha, Operand signDecay, Operand beta, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyAddSign", scope.makeOpName("ResourceApplyAddSign")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyAddSign")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyCenteredRmsProp.java index 0b6b7f6edc0..80097dad1a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyCenteredRmsProp.java @@ -80,7 +80,7 @@ public static ResourceApplyCenteredRmsProp create(Scope scope, Operand var, Operand mg, Operand ms, Operand mom, Operand lr, Operand rho, Operand momentum, Operand epsilon, Operand grad, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyCenteredRMSProp", scope.makeOpName("ResourceApplyCenteredRmsProp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyCenteredRmsProp")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(mg.asOutput()); opBuilder.addInput(ms.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyFtrl.java index 82f325c9873..d9c9234275b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyFtrl.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyFtrl.java @@ -73,7 +73,7 @@ public static ResourceApplyFtrl create(Scope scope, Operand var, Operand accum, Operand linear, Operand grad, Operand lr, Operand l1, Operand l2, Operand l2Shrinkage, Operand lrPower, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyFtrlV2", scope.makeOpName("ResourceApplyFtrl")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyFtrl")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(linear.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyGradientDescent.java index bb055fc76c6..75ecda0e653 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyGradientDescent.java @@ -58,7 +58,7 @@ private ResourceApplyGradientDescent(Operation operation) { ) public static ResourceApplyGradientDescent create(Scope scope, Operand var, Operand alpha, Operand delta, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyGradientDescent", scope.makeOpName("ResourceApplyGradientDescent")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyGradientDescent")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(alpha.asOutput()); opBuilder.addInput(delta.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyKerasMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyKerasMomentum.java index 0e947d6fe90..8047862ad11 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyKerasMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyKerasMomentum.java @@ -64,7 +64,7 @@ private ResourceApplyKerasMomentum(Operation operation) { public static ResourceApplyKerasMomentum create(Scope scope, Operand var, Operand accum, Operand lr, Operand grad, Operand momentum, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyKerasMomentum", scope.makeOpName("ResourceApplyKerasMomentum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyKerasMomentum")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyMomentum.java index 51f14af776f..b996a588e1e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyMomentum.java @@ -64,7 +64,7 @@ private ResourceApplyMomentum(Operation operation) { public static ResourceApplyMomentum create(Scope scope, Operand var, Operand accum, Operand lr, Operand grad, Operand momentum, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyMomentum", scope.makeOpName("ResourceApplyMomentum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyMomentum")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyPowerSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyPowerSign.java index fa3567e9580..54eed271806 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyPowerSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyPowerSign.java @@ -66,7 +66,7 @@ private ResourceApplyPowerSign(Operation operation) { public static ResourceApplyPowerSign create(Scope scope, Operand var, Operand m, Operand lr, Operand logbase, Operand signDecay, Operand beta, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyPowerSign", scope.makeOpName("ResourceApplyPowerSign")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyPowerSign")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(m.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalAdagrad.java index 95ccd7906fe..35aca767c42 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalAdagrad.java @@ -65,7 +65,7 @@ private ResourceApplyProximalAdagrad(Operation operation) { public static ResourceApplyProximalAdagrad create(Scope scope, Operand var, Operand accum, Operand lr, Operand l1, Operand l2, Operand grad, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyProximalAdagrad", scope.makeOpName("ResourceApplyProximalAdagrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyProximalAdagrad")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalGradientDescent.java index c870479aa0a..b3dfa606bb3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalGradientDescent.java @@ -63,7 +63,7 @@ private ResourceApplyProximalGradientDescent(Operation operation) { public static ResourceApplyProximalGradientDescent create(Scope scope, Operand var, Operand alpha, Operand l1, Operand l2, Operand delta, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyProximalGradientDescent", scope.makeOpName("ResourceApplyProximalGradientDescent")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyProximalGradientDescent")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(alpha.asOutput()); opBuilder.addInput(l1.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyRmsProp.java index 6b59b3896a4..af8279533aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyRmsProp.java @@ -73,7 +73,7 @@ public static ResourceApplyRmsProp create(Scope scope, Operand var, Operand ms, Operand mom, Operand lr, Operand rho, Operand momentum, Operand epsilon, Operand grad, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceApplyRMSProp", scope.makeOpName("ResourceApplyRmsProp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceApplyRmsProp")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(ms.asOutput()); opBuilder.addInput(mom.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceConditionalAccumulator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceConditionalAccumulator.java index 6596c2c7b59..4c9bbc9249c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceConditionalAccumulator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceConditionalAccumulator.java @@ -69,7 +69,7 @@ private ResourceConditionalAccumulator(Operation operation) { ) public static ResourceConditionalAccumulator create(Scope scope, Class dtype, Shape shape, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceConditionalAccumulator", scope.makeOpName("ResourceConditionalAccumulator")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceConditionalAccumulator")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdadelta.java index dd35cff9bc7..3c8b09cb025 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdadelta.java @@ -66,7 +66,7 @@ public static ResourceSparseApplyAdadelta create(Scope scope, Operand var, Operand accum, Operand accumUpdate, Operand lr, Operand rho, Operand epsilon, Operand grad, Operand indices, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyAdadelta", scope.makeOpName("ResourceSparseApplyAdadelta")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyAdadelta")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(accumUpdate.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java index 34225335edf..321aeab094b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java @@ -65,7 +65,7 @@ private ResourceSparseApplyAdagrad(Operation operation) { public static ResourceSparseApplyAdagrad create(Scope scope, Operand var, Operand accum, Operand lr, Operand grad, Operand indices, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyAdagrad", scope.makeOpName("ResourceSparseApplyAdagrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyAdagrad")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradDa.java index a704cffcfef..39363dfe817 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradDa.java @@ -69,7 +69,7 @@ public static ResourceSparseApplyAdagradDa create(Scope scope, Operand gradientSquaredAccumulator, Operand grad, Operand indices, Operand lr, Operand l1, Operand l2, Operand globalStep, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyAdagradDA", scope.makeOpName("ResourceSparseApplyAdagradDa")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyAdagradDa")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(gradientAccumulator.asOutput()); opBuilder.addInput(gradientSquaredAccumulator.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradV2.java index e5d730c9fe0..5ab7b56ae68 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradV2.java @@ -62,7 +62,7 @@ private ResourceSparseApplyAdagradV2(Operation operation) { public static ResourceSparseApplyAdagradV2 create(Scope scope, Operand var, Operand accum, Operand lr, Operand epsilon, Operand grad, Operand indices, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyAdagradV2", scope.makeOpName("ResourceSparseApplyAdagradV2")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyAdagradV2")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyCenteredRmsProp.java index dc6bb2f8533..edc5d692e0c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyCenteredRmsProp.java @@ -81,7 +81,7 @@ public static ResourceSparseApplyCenteredRmsProp create(Scope Operand var, Operand mg, Operand ms, Operand mom, Operand lr, Operand rho, Operand momentum, Operand epsilon, Operand grad, Operand indices, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyCenteredRMSProp", scope.makeOpName("ResourceSparseApplyCenteredRmsProp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyCenteredRmsProp")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(mg.asOutput()); opBuilder.addInput(ms.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyFtrl.java index b0864f79e68..1193b2dab3c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyFtrl.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyFtrl.java @@ -76,7 +76,7 @@ public static ResourceSparseApplyFtrl create(Scope scope, Operand var, Operand accum, Operand linear, Operand grad, Operand indices, Operand lr, Operand l1, Operand l2, Operand l2Shrinkage, Operand lrPower, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyFtrlV2", scope.makeOpName("ResourceSparseApplyFtrl")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyFtrl")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(linear.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyKerasMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyKerasMomentum.java index 6c4a59a3ee5..a4559f906bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyKerasMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyKerasMomentum.java @@ -67,7 +67,7 @@ private ResourceSparseApplyKerasMomentum(Operation operation) { public static ResourceSparseApplyKerasMomentum create(Scope scope, Operand var, Operand accum, Operand lr, Operand grad, Operand indices, Operand momentum, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyKerasMomentum", scope.makeOpName("ResourceSparseApplyKerasMomentum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyKerasMomentum")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyMomentum.java index 7731995c245..29805679b05 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyMomentum.java @@ -67,7 +67,7 @@ private ResourceSparseApplyMomentum(Operation operation) { public static ResourceSparseApplyMomentum create(Scope scope, Operand var, Operand accum, Operand lr, Operand grad, Operand indices, Operand momentum, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyMomentum", scope.makeOpName("ResourceSparseApplyMomentum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyMomentum")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalAdagrad.java index b1cb878b436..5f1fd5b5969 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalAdagrad.java @@ -69,7 +69,7 @@ private ResourceSparseApplyProximalAdagrad(Operation operation) { public static ResourceSparseApplyProximalAdagrad create(Scope scope, Operand var, Operand accum, Operand lr, Operand l1, Operand l2, Operand grad, Operand indices, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyProximalAdagrad", scope.makeOpName("ResourceSparseApplyProximalAdagrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyProximalAdagrad")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalGradientDescent.java index eb6df20428d..59c7f23f814 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalGradientDescent.java @@ -66,7 +66,7 @@ private ResourceSparseApplyProximalGradientDescent(Operation operation) { public static ResourceSparseApplyProximalGradientDescent create(Scope scope, Operand var, Operand alpha, Operand l1, Operand l2, Operand grad, Operand indices, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyProximalGradientDescent", scope.makeOpName("ResourceSparseApplyProximalGradientDescent")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyProximalGradientDescent")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(alpha.asOutput()); opBuilder.addInput(l1.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyRmsProp.java index 434d1c6c042..ce8cd1552f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyRmsProp.java @@ -75,7 +75,7 @@ public static ResourceSparseApplyRmsProp create(Scope scope, Operand var, Operand ms, Operand mom, Operand lr, Operand rho, Operand momentum, Operand epsilon, Operand grad, Operand indices, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("ResourceSparseApplyRMSProp", scope.makeOpName("ResourceSparseApplyRmsProp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ResourceSparseApplyRmsProp")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(ms.asOutput()); opBuilder.addInput(mom.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Restore.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Restore.java index 21b540b041b..d08a97c9f7b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Restore.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Restore.java @@ -85,7 +85,7 @@ private Restore(Operation operation) { ) public static Restore create(Scope scope, Operand prefix, Operand tensorNames, Operand shapeAndSlices, List> dtypes) { - OperationBuilder opBuilder = scope.env().opBuilder("RestoreV2", scope.makeOpName("Restore")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Restore")); opBuilder.addInput(prefix.asOutput()); opBuilder.addInput(tensorNames.asOutput()); opBuilder.addInput(shapeAndSlices.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java index 4710481547c..75fad7a8d67 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java @@ -77,7 +77,7 @@ private RestoreSlice(Operation operation) { public static RestoreSlice create(Scope scope, Operand filePattern, Operand tensorName, Operand shapeAndSlice, Class dt, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("RestoreSlice", scope.makeOpName("RestoreSlice")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("RestoreSlice")); opBuilder.addInput(filePattern.asOutput()); opBuilder.addInput(tensorName.asOutput()); opBuilder.addInput(shapeAndSlice.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Save.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Save.java index 9bd622da54f..377803d42dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Save.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Save.java @@ -63,7 +63,7 @@ private Save(Operation operation) { ) public static Save create(Scope scope, Operand prefix, Operand tensorNames, Operand shapeAndSlices, Iterable> tensors) { - OperationBuilder opBuilder = scope.env().opBuilder("SaveV2", scope.makeOpName("Save")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Save")); opBuilder.addInput(prefix.asOutput()); opBuilder.addInput(tensorNames.asOutput()); opBuilder.addInput(shapeAndSlices.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SaveSlices.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SaveSlices.java index e1db978c5d1..47155cdfafc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SaveSlices.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SaveSlices.java @@ -80,7 +80,7 @@ private SaveSlices(Operation operation) { ) public static SaveSlices create(Scope scope, Operand filename, Operand tensorNames, Operand shapesAndSlices, Iterable> data) { - OperationBuilder opBuilder = scope.env().opBuilder("SaveSlices", scope.makeOpName("SaveSlices")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SaveSlices")); opBuilder.addInput(filename.asOutput()); opBuilder.addInput(tensorNames.asOutput()); opBuilder.addInput(shapesAndSlices.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaFprint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaFprint.java index 047615b901f..a0e4b452faa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaFprint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaFprint.java @@ -59,7 +59,7 @@ private SdcaFprint(Operation operation) { describeByClass = true ) public static SdcaFprint create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("SdcaFprint", scope.makeOpName("SdcaFprint")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SdcaFprint")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new SdcaFprint(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java index e309842a2b0..45f17fe0497 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java @@ -111,7 +111,7 @@ public static SdcaOptimizer create(Scope scope, 
Iterable> sparse Iterable> sparseIndices, Iterable> sparseWeights, Iterable> denseWeights, Operand exampleStateData, String lossType, Float l1, Float l2, Long numLossPartitions, Long numInnerIterations, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SdcaOptimizerV2", scope.makeOpName("SdcaOptimizer")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SdcaOptimizer")); opBuilder.addInputList(Operands.asOutputs(sparseExampleIndices)); opBuilder.addInputList(Operands.asOutputs(sparseFeatureIndices)); opBuilder.addInputList(Operands.asOutputs(sparseFeatureValues)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaShrinkL1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaShrinkL1.java index ae6713ee6f0..1c68ed4e07c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaShrinkL1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaShrinkL1.java @@ -58,7 +58,7 @@ private SdcaShrinkL1(Operation operation) { ) public static SdcaShrinkL1 create(Scope scope, Iterable> weights, Float l1, Float l2) { - OperationBuilder opBuilder = scope.env().opBuilder("SdcaShrinkL1", scope.makeOpName("SdcaShrinkL1")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SdcaShrinkL1")); opBuilder.addInputList(Operands.asOutputs(weights)); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("l1", l1); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java index 92b8c8d1152..0102cfcb1d6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java @@ -72,7 +72,7 @@ private SparseApplyAdadelta(Operation operation) { public static SparseApplyAdadelta create(Scope scope, Operand var, Operand accum, Operand accumUpdate, Operand lr, Operand rho, Operand epsilon, Operand grad, Operand indices, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseApplyAdadelta", scope.makeOpName("SparseApplyAdadelta")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseApplyAdadelta")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(accumUpdate.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java index da285feddfb..3b38f3694a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java @@ -69,7 +69,7 @@ private SparseApplyAdagrad(Operation operation) { public static SparseApplyAdagrad create(Scope scope, Operand var, Operand accum, Operand lr, Operand epsilon, Operand grad, Operand indices, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseApplyAdagradV2", scope.makeOpName("SparseApplyAdagrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseApplyAdagrad")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java index 2152c4bcabe..89b5a446e78 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java @@ -75,7 +75,7 @@ public static SparseApplyAdagradDa create(Scope scope, Oper Operand gradientAccumulator, Operand gradientSquaredAccumulator, Operand grad, Operand indices, Operand lr, Operand l1, Operand l2, Operand globalStep, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseApplyAdagradDA", scope.makeOpName("SparseApplyAdagradDa")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseApplyAdagradDa")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(gradientAccumulator.asOutput()); opBuilder.addInput(gradientSquaredAccumulator.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java index 0fc5b4dadf4..15c6d62c956 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java @@ -88,7 +88,7 @@ public static SparseApplyCenteredRmsProp create(Scope scope Operand mg, Operand ms, Operand mom, Operand lr, Operand rho, Operand momentum, Operand epsilon, Operand grad, Operand indices, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseApplyCenteredRMSProp", scope.makeOpName("SparseApplyCenteredRmsProp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseApplyCenteredRmsProp")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(mg.asOutput()); opBuilder.addInput(ms.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java index 18d3a370873..165e3ac72dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java @@ -83,7 +83,7 @@ public static SparseApplyFtrl create(Scope scope, Operand accum, Operand linear, Operand grad, Operand indices, Operand lr, Operand l1, Operand l2, Operand l2Shrinkage, Operand lrPower, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseApplyFtrlV2", scope.makeOpName("SparseApplyFtrl")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseApplyFtrl")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(linear.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java index 79263d46e0f..5abaf6bd277 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java @@ -74,7 +74,7 @@ private SparseApplyMomentum(Operation operation) { public static SparseApplyMomentum create(Scope scope, Operand var, Operand accum, Operand lr, Operand grad, Operand indices, Operand momentum, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseApplyMomentum", scope.makeOpName("SparseApplyMomentum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseApplyMomentum")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java index 85631664327..4cb6d7ce765 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java @@ -76,7 +76,7 @@ private SparseApplyProximalAdagrad(Operation operation) { public static SparseApplyProximalAdagrad create(Scope scope, Operand var, Operand accum, Operand lr, Operand l1, Operand l2, Operand grad, Operand indices, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseApplyProximalAdagrad", scope.makeOpName("SparseApplyProximalAdagrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseApplyProximalAdagrad")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(accum.asOutput()); opBuilder.addInput(lr.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java index 712dbebad5e..2086701eb0c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java @@ -73,7 +73,7 @@ private SparseApplyProximalGradientDescent(Operation operation) { public static SparseApplyProximalGradientDescent create(Scope scope, Operand var, Operand alpha, Operand l1, Operand l2, Operand grad, Operand indices, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseApplyProximalGradientDescent", scope.makeOpName("SparseApplyProximalGradientDescent")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseApplyProximalGradientDescent")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(alpha.asOutput()); opBuilder.addInput(l1.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java index f175008ef44..717e40da997 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java @@ -81,7 +81,7 @@ private SparseApplyRmsProp(Operation operation) { public static SparseApplyRmsProp create(Scope scope, Operand var, Operand ms, Operand mom, Operand lr, Operand rho, Operand momentum, Operand epsilon, Operand grad, Operand indices, Options... options) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseApplyRMSProp", scope.makeOpName("SparseApplyRmsProp")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseApplyRmsProp")); opBuilder.addInput(var.asOutput()); opBuilder.addInput(ms.asOutput()); opBuilder.addInput(mom.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java index 6bb4ef8dea5..4c1370d811b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java @@ -67,7 +67,7 @@ private TileGrad(Operation operation) { ) public static TileGrad create(Scope scope, Operand input, Operand multiples) { - OperationBuilder opBuilder = scope.env().opBuilder("TileGrad", scope.makeOpName("TileGrad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("TileGrad")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(multiples.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/BroadcastHelper.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/BroadcastHelper.java index 4e474a0360b..353db60eb03 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/BroadcastHelper.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/BroadcastHelper.java @@ -71,7 +71,7 @@ private BroadcastHelper(Operation operation) { ) public static BroadcastHelper create(Scope scope, Operand lhs, Operand rhs, Operand broadcastDims) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaBroadcastHelper", scope.makeOpName("BroadcastHelper")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("BroadcastHelper")); opBuilder.addInput(lhs.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder.addInput(broadcastDims.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ClusterOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ClusterOutput.java index 17a5adef536..b2dcd06538d 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ClusterOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ClusterOutput.java @@ -61,7 +61,7 @@ private ClusterOutput(Operation operation) { describeByClass = true ) public static ClusterOutput create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaClusterOutput", scope.makeOpName("ClusterOutput")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ClusterOutput")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new ClusterOutput<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java index b352153faee..1fe493c4223 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java @@ -76,7 +76,7 @@ public static Conv create(Scope scope, O Operand rhs, Operand windowStrides, Operand padding, Operand lhsDilation, Operand rhsDilation, Operand featureGroupCount, String dimensionNumbers, String precisionConfig) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaConv", scope.makeOpName("Conv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Conv")); opBuilder.addInput(lhs.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder.addInput(windowStrides.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dequantize.java index 5f3f1b625c9..6ced962c063 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dequantize.java @@ -66,7 +66,7 @@ private Dequantize(Operation operation) { ) public static Dequantize create(Scope scope, Operand input, Float minRange, Float maxRange, String mode, Boolean transposeOutput) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaDequantize", scope.makeOpName("Dequantize")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Dequantize")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("min_range", minRange); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dot.java index db734e6ef69..3de0491e718 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dot.java @@ -67,7 +67,7 @@ private Dot(Operation operation) { ) public static Dot create(Scope scope, Operand lhs, Operand rhs, String dimensionNumbers, String precisionConfig) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaDot", scope.makeOpName("Dot")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Dot")); opBuilder.addInput(lhs.asOutput()); opBuilder.addInput(rhs.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicSlice.java index 
c7f7407d6e0..52b632adcff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicSlice.java @@ -76,7 +76,7 @@ private DynamicSlice(Operation operation) { ) public static DynamicSlice create(Scope scope, Operand input, Operand startIndices, Operand sizeIndices) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaDynamicSlice", scope.makeOpName("DynamicSlice")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DynamicSlice")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(startIndices.asOutput()); opBuilder.addInput(sizeIndices.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicUpdateSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicUpdateSlice.java index 8dee468d397..7beece641f8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicUpdateSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicUpdateSlice.java @@ -73,7 +73,7 @@ private DynamicUpdateSlice(Operation operation) { ) public static DynamicUpdateSlice create(Scope scope, Operand input, Operand update, Operand indices) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaDynamicUpdateSlice", scope.makeOpName("DynamicUpdateSlice")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("DynamicUpdateSlice")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(update.asOutput()); opBuilder.addInput(indices.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Einsum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Einsum.java index eed296226c9..b9f68a068f8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Einsum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Einsum.java @@ -66,7 +66,7 @@ private Einsum(Operation operation) { ) public static Einsum create(Scope scope, Operand a, Operand b, String equation) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaEinsum", scope.makeOpName("Einsum")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Einsum")); opBuilder.addInput(a.asOutput()); opBuilder.addInput(b.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Gather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Gather.java index 626cebdf108..152020b8b01 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Gather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Gather.java @@ -70,7 +70,7 @@ private Gather(Operation operation) { public static Gather create(Scope scope, Operand operand, Operand startIndices, Operand sliceSizes, String dimensionNumbers, Boolean indicesAreSorted) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaGather", scope.makeOpName("Gather")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Gather")); opBuilder.addInput(operand.asOutput()); opBuilder.addInput(startIndices.asOutput()); opBuilder.addInput(sliceSizes.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/KeyValueSort.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/KeyValueSort.java index 41ed8f23428..a6572ae3b5e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/KeyValueSort.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/KeyValueSort.java @@ -73,7 +73,7 @@ private KeyValueSort(Operation operation) { ) public static KeyValueSort create(Scope scope, Operand keys, Operand values) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaKeyValueSort", scope.makeOpName("KeyValueSort")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("KeyValueSort")); opBuilder.addInput(keys.asOutput()); opBuilder.addInput(values.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java index 78bf07d3a42..f0122134568 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java @@ -75,7 +75,7 @@ private Pad(Operation operation) { public static Pad create(Scope scope, Operand input, Operand paddingValue, Operand paddingLow, Operand paddingHigh, Operand paddingInterior) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaPad", scope.makeOpName("Pad")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Pad")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(paddingValue.asOutput()); opBuilder.addInput(paddingLow.asOutput()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Recv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Recv.java index bfd24e0a20d..c81c6994186 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Recv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Recv.java @@ -68,7 +68,7 @@ private Recv(Operation operation) { ) public static Recv create(Scope scope, Class dtype, String tensorName, Shape shape) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaRecv", scope.makeOpName("Recv")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Recv")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("dtype", Operands.toDataType(dtype)); opBuilder.setAttr("tensor_name", tensorName); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReplicaId.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReplicaId.java index 22cf5cdb5c8..7b990fd7c3b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReplicaId.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReplicaId.java @@ -57,7 +57,7 @@ private ReplicaId(Operation operation) { describeByClass = true ) public static ReplicaId create(Scope scope) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaReplicaId", scope.makeOpName("ReplicaId")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("ReplicaId")); opBuilder = scope.apply(opBuilder); return new ReplicaId(opBuilder.build()); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelfAdjointEig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelfAdjointEig.java index 7e552eb5bc4..c9f43898c11 
100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelfAdjointEig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelfAdjointEig.java @@ -76,7 +76,7 @@ private SelfAdjointEig(Operation operation) { ) public static SelfAdjointEig create(Scope scope, Operand a, Boolean lower, Long maxIter, Float epsilon) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaSelfAdjointEig", scope.makeOpName("SelfAdjointEig")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SelfAdjointEig")); opBuilder.addInput(a.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("lower", lower); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Send.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Send.java index 55e766b9cd8..8ad5e93eb8f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Send.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Send.java @@ -56,7 +56,7 @@ private Send(Operation operation) { describeByClass = true ) public static Send create(Scope scope, Operand tensor, String tensorName) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaSend", scope.makeOpName("Send")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Send")); opBuilder.addInput(tensor.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("tensor_name", tensorName); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java index bf5e754d5a7..90921b3a594 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java @@ -63,7 +63,7 @@ private Sharding(Operation operation) { ) public static Sharding create(Scope scope, Operand input, Options... 
options) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaSharding", scope.makeOpName("Sharding")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Sharding")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); if (options != null) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sort.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sort.java index e04b43bdbfa..62fccc9d61d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sort.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sort.java @@ -64,7 +64,7 @@ private Sort(Operation operation) { describeByClass = true ) public static Sort create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaSort", scope.makeOpName("Sort")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Sort")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); return new Sort<>(opBuilder.build()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Svd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Svd.java index ddcb4f9c2d7..f124d84e379 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Svd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Svd.java @@ -77,7 +77,7 @@ private Svd(Operation operation) { ) public static Svd create(Scope scope, Operand a, Long maxIter, Float epsilon, String precisionConfig) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaSvd", scope.makeOpName("Svd")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("Svd")); opBuilder.addInput(a.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("max_iter", maxIter); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java index 17064728c67..aa5e274885c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java @@ -70,7 +70,7 @@ private XlaRecvFromHost(Operation operation) { ) public static XlaRecvFromHost create(Scope scope, Class Toutput, Shape shape, String key) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaRecvFromHost", scope.makeOpName("XlaRecvFromHost")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("XlaRecvFromHost")); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("Toutput", Operands.toDataType(Toutput)); opBuilder.setAttr("shape", shape); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSendToHost.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSendToHost.java index d996d1e9e95..6ddcf5b977b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSendToHost.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSendToHost.java @@ -57,7 +57,7 @@ private XlaSendToHost(Operation operation) { describeByClass = true ) public static XlaSendToHost create(Scope scope, Operand input, String key) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaSendToHost", 
scope.makeOpName("XlaSendToHost")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("XlaSendToHost")); opBuilder.addInput(input.asOutput()); opBuilder = scope.apply(opBuilder); opBuilder.setAttr("key", key); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSetBound.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSetBound.java index 35262eb9f3a..6303be12289 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSetBound.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSetBound.java @@ -62,7 +62,7 @@ private XlaSetBound(Operation operation) { describeByClass = true ) public static XlaSetBound create(Scope scope, Operand input, Operand bound) { - OperationBuilder opBuilder = scope.env().opBuilder("XlaSetBound", scope.makeOpName("XlaSetBound")); + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("XlaSetBound")); opBuilder.addInput(input.asOutput()); opBuilder.addInput(bound.asOutput()); opBuilder = scope.apply(opBuilder); diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/op/generator/ClassGenerator.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/op/generator/ClassGenerator.java index 20e5edf4989..be6d0a32392 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/op/generator/ClassGenerator.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/op/generator/ClassGenerator.java @@ -1,19 +1,18 @@ -/* - Copyright 2021 The TensorFlow Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ============================================================================== - */ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= +*/ package org.tensorflow.op.generator; import static org.tensorflow.op.generator.GeneratorUtils.javaizeMemberName; @@ -52,106 +51,85 @@ import org.tensorflow.proto.framework.OpDef.ArgDef; import org.tensorflow.proto.framework.OpDef.AttrDef; -/** - * A generator to generate a op class - */ +/** A generator to generate a op class */ final class ClassGenerator { - /** - * Return true if we can generate the operation class for {@code op}. - */ + /** Return true if we can generate the operation class for {@code op}. 
*/ static boolean canGenerateOp(OpDef op, ApiDef apiDef) { return apiDef.getVisibility() != Visibility.SKIP && !op.getAttrList().stream().anyMatch(x -> x.getType().contains("func")) - && !op.getName().startsWith("_"); //TODO do I want this? Some interesting ops like _XlaCompile + && !op.getName() + .startsWith("_"); // TODO do I want this? Some interesting ops like _XlaCompile } + private static final String OP_NAME_FIELD_NAME = "OP_NAME"; + enum RenderMode { - DEFAULT, LIST_OPERAND, OPERAND; + DEFAULT, + LIST_OPERAND, + OPERAND; } - /** - * The in-progress class builder for the top level op class. - */ + /** The in-progress class builder for the top level op class. */ private final TypeSpec.Builder builder; - /** - * The op to build. - */ + /** The op to build. */ private final OpDef op; - /** - * The api definition for the current op. - */ + /** The api definition for the current op. */ private final ApiDef apiDef; - /** - * A type resolver for the current op. - */ + /** A type resolver for the current op. */ private final TypeResolver resolver; - /** - * The full package of the class. - */ + /** The full package of the class. */ private final String fullPackage; - /** - * The base package for this op generation run. - */ + /** The base package for this op generation run. */ private final String basePackage; - /** - * The group of this op. - */ + /** The group of this op. */ private final String group; - /** - * The class name for this op. - */ + /** The class name for this op. */ private final String className; - /** - * The endpoint being generated in this class. - */ + /** The endpoint being generated in this class. */ private final Endpoint endpoint; /** - * The generated options class, or null if it doesn't have one or {@link #buildOptionsClass()} has not been ran. + * The generated options class, or null if it doesn't have one or {@link #buildOptionsClass()} has + * not been ran. */ private TypeSpec optionsClass = null; - /** - * What type of op this is. - */ + /** What type of op this is. */ private RenderMode mode = RenderMode.DEFAULT; - /** - * The required attributes of this op. - */ + /** The required attributes of this op. */ private final List requiredAttributes = new ArrayList<>(); - /** - * The optional attributes of this op. - */ + /** The optional attributes of this op. */ private final List optionalAttributes = new ArrayList<>(); - /** - * The class's type parameters, initialized in {@link #buildClass()}. - */ + /** The class's type parameters, initialized in {@link #buildClass()}. */ private final Set typeParams = new LinkedHashSet<>(); - /** - * The api defs for the arguments. - */ + /** The api defs for the arguments. */ private final Map argApis = new HashMap<>(); - /** - * The api defs for the attributes. - */ + /** The api defs for the attributes. 
*/ private final Map attrApis = new HashMap<>(); - ClassGenerator(Builder builder, OpDef op, ApiDef apiDef, - String basePackage, String fullPackage, String group, String className, Endpoint endpoint) { + ClassGenerator( + Builder builder, + OpDef op, + ApiDef apiDef, + String basePackage, + String fullPackage, + String group, + String className, + Endpoint endpoint) { this.builder = builder; this.op = op; @@ -163,32 +141,44 @@ enum RenderMode { this.className = className; this.endpoint = endpoint; - op.getAttrList().forEach(attr -> { - if (attr.hasDefaultValue() && !attr.getType().contains("type")) { - optionalAttributes.add(attr); - } else { - requiredAttributes.add(attr); - } - }); + op.getAttrList() + .forEach( + attr -> { + if (attr.hasDefaultValue() && !attr.getType().contains("type")) { + optionalAttributes.add(attr); + } else { + requiredAttributes.add(attr); + } + }); for (AttrDef attr : op.getAttrList()) { - ApiDef.Attr api = apiDef.getAttrList().stream().filter(x -> x.getName().equals(attr.getName())).findFirst().get(); + ApiDef.Attr api = + apiDef.getAttrList().stream() + .filter(x -> x.getName().equals(attr.getName())) + .findFirst() + .get(); attrApis.put(attr, api); } for (ArgDef arg : op.getInputArgList()) { - ApiDef.Arg api = apiDef.getInArgList().stream().filter(x -> x.getName().equals(arg.getName())).findFirst().get(); + ApiDef.Arg api = + apiDef.getInArgList().stream() + .filter(x -> x.getName().equals(arg.getName())) + .findFirst() + .get(); argApis.put(arg, api); } for (ArgDef arg : op.getOutputArgList()) { - ApiDef.Arg api = apiDef.getOutArgList().stream().filter(x -> x.getName().equals(arg.getName())).findFirst().get(); + ApiDef.Arg api = + apiDef.getOutArgList().stream() + .filter(x -> x.getName().equals(arg.getName())) + .findFirst() + .get(); argApis.put(arg, api); } } - /** - * Get the Java variable name for an argument. - */ + /** Get the Java variable name for an argument. */ private String getJavaName(ArgDef arg) { String name = arg.getName(); String rename = argApis.get(arg).getRenameTo(); @@ -199,9 +189,7 @@ private String getJavaName(ArgDef arg) { } } - /** - * Get the Java variable name for an attribute. - */ + /** Get the Java variable name for an attribute. */ private String getJavaName(AttrDef arg) { String name = arg.getName(); String rename = attrApis.get(arg).getRenameTo(); @@ -212,16 +200,12 @@ private String getJavaName(AttrDef arg) { } } - /** - * Get the fully qualified name of the class being generated. - */ + /** Get the fully qualified name of the class being generated. */ private String fullClassName() { return fullPackage + "." + className; } - /** - * Build the class. - */ + /** Build the class. */ void buildClass() { builder.addModifiers(Modifier.PUBLIC, Modifier.FINAL); builder.superclass(Names.RawOp); @@ -244,13 +228,13 @@ void buildClass() { ResolvedType rType = resolver.typeOf(output); TypeName type = rType.unwrapArg(); boolean iterable = rType.iterable; - TypeName operandTypeParam = - type instanceof WildcardTypeName ? Names.TType : type; + TypeName operandTypeParam = type instanceof WildcardTypeName ? 
Names.TType : type; TypeName operandType = ParameterizedTypeName.get(Names.Operand, operandTypeParam); if (iterable) { mode = RenderMode.LIST_OPERAND; - builder.addSuperinterface(ParameterizedTypeName.get(ClassName.get(Iterable.class), operandType)); + builder.addSuperinterface( + ParameterizedTypeName.get(ClassName.get(Iterable.class), operandType)); } else { mode = RenderMode.OPERAND; builder.addSuperinterface(operandType); @@ -304,8 +288,13 @@ void buildClass() { } // add op name field - builder - .addField(FieldSpec.builder(TypeResolver.STRING, "OP_NAME", Modifier.PUBLIC, Modifier.STATIC, Modifier.FINAL) + builder.addField( + FieldSpec.builder( + TypeResolver.STRING, + OP_NAME_FIELD_NAME, + Modifier.PUBLIC, + Modifier.STATIC, + Modifier.FINAL) .addJavadoc("$L", "The name of this op, as known by TensorFlow core engine") .initializer("$S", op.getName()) .build()); @@ -313,24 +302,25 @@ void buildClass() { // add output fields if (op.getOutputArgCount() > 0) { for (ArgDef output : op.getOutputArgList()) { - builder - .addField(resolver.typeOf(output).listIfIterable().javaType, getJavaName(output), Modifier.PRIVATE); + builder.addField( + resolver.typeOf(output).listIfIterable().javaType, + getJavaName(output), + Modifier.PRIVATE); } } buildConstructor(); } - /** - * Add a nested class for Options - */ + /** Add a nested class for Options */ private void buildOptionsClass() { if (optionalAttributes.isEmpty()) { return; } - TypeSpec.Builder optionsBuilder = TypeSpec.classBuilder("Options").addModifiers(Modifier.PUBLIC, Modifier.STATIC); + TypeSpec.Builder optionsBuilder = + TypeSpec.classBuilder("Options").addModifiers(Modifier.PUBLIC, Modifier.STATIC); optionsBuilder.addJavadoc("$L", "Optional attributes for {@link " + fullClassName() + "}"); ClassName optionsClassName = ClassName.get(fullPackage, className, "Options"); @@ -387,37 +377,46 @@ private void buildOptionsClass() { } // add the field - optionsBuilder.addField(type.classIfGeneric().listIfIterable().javaType, name, Modifier.PRIVATE); + optionsBuilder.addField( + type.classIfGeneric().listIfIterable().javaType, name, Modifier.PRIVATE); } // add a private constructor - optionsBuilder.addMethod(MethodSpec.constructorBuilder().addModifiers(Modifier.PRIVATE).build()); + optionsBuilder.addMethod( + MethodSpec.constructorBuilder().addModifiers(Modifier.PRIVATE).build()); optionsClass = optionsBuilder.build(); builder.addType(optionsClass); } /** - * Write statements to set an attribute in an OperationBuilder. Meant to be used in {@link #buildFactoryMethods()} + * Write statements to set an attribute in an OperationBuilder. Meant to be used in {@link + * #buildFactoryMethods()} * * @param body the body to write to * @param attr the attribute to set * @param type the type of the attribute, or null to get it ourselves * @param optional whether the attribute is optional */ - private void writeSetAttr(CodeBlock.Builder body, AttrDef attr, ResolvedType type, boolean optional) { + private void writeSetAttr( + CodeBlock.Builder body, AttrDef attr, ResolvedType type, boolean optional) { String varName = optional ? "opts." + getJavaName(attr) : getJavaName(attr); if (type == null) { type = resolver.typeOf(attr); } if (type.jniType.equals(ClassName.get(DataType.class))) { - body.addStatement("opBuilder.setAttr($S, $T.$L($L))", attr.getName(), - Names.Operands, type.iterable ? "toDataTypes" : "toDataType", varName); + body.addStatement( + "opBuilder.setAttr($S, $T.$L($L))", + attr.getName(), + Names.Operands, + type.iterable ? 
"toDataTypes" : "toDataType", + varName); } else { if (type.iterable) { String arrayName = javaizeMemberName(attr.getName()) + "Array"; - body.addStatement("$T[] $L = new $T[$L.size()]", type.jniType, arrayName, type.jniType, varName); + body.addStatement( + "$T[] $L = new $T[$L.size()]", type.jniType, arrayName, type.jniType, varName); body.beginControlFlow("for (int i = 0 ; i < $L.length ; i++)", arrayName); @@ -432,39 +431,39 @@ private void writeSetAttr(CodeBlock.Builder body, AttrDef attr, ResolvedType typ } } - /** - * Add the {@code create} factory methods. - */ + /** Add the {@code create} factory methods. */ private void buildFactoryMethods() { - MethodSpec.Builder factoryBuilder = MethodSpec.methodBuilder("create") - .addModifiers(Modifier.PUBLIC, Modifier.STATIC); + MethodSpec.Builder factoryBuilder = + MethodSpec.methodBuilder("create").addModifiers(Modifier.PUBLIC, Modifier.STATIC); // the main creator will inherit any class type params TypeName returnType = ClassName.get(fullPackage, className); if (!typeParams.isEmpty()) { - returnType = ParameterizedTypeName.get((ClassName) returnType, typeParams.toArray(new TypeName[0])); + returnType = + ParameterizedTypeName.get((ClassName) returnType, typeParams.toArray(new TypeName[0])); } factoryBuilder.returns(returnType); factoryBuilder.addAnnotation( - AnnotationSpec.builder(Names.Endpoint).addMember("describeByClass", "true") - .build()); + AnnotationSpec.builder(Names.Endpoint).addMember("describeByClass", "true").build()); - factoryBuilder.addJavadoc("Factory method to create a class wrapping a new $L operation.\n", op.getName()); + factoryBuilder.addJavadoc( + "Factory method to create a class wrapping a new $L operation.\n", op.getName()); // we're going to build the body as add arguments CodeBlock.Builder body = CodeBlock.builder(); Map paramTags = new LinkedHashMap<>(); - factoryBuilder - .addParameter(ParameterSpec.builder(Names.Scope, "scope").build()); + factoryBuilder.addParameter(ParameterSpec.builder(Names.Scope, "scope").build()); paramTags.put("scope", CodeBlock.of("current scope")); Set typeVars = new LinkedHashSet<>(typeParams); - body.addStatement("$T opBuilder = scope.env().opBuilder($S, scope.makeOpName($S))", - Names.OperationBuilder, op.getName(), + body.addStatement( + "$T opBuilder = scope.env().opBuilder($L, scope.makeOpName($S))", + Names.OperationBuilder, + OP_NAME_FIELD_NAME, className); // add the inputs as parameters, and add them to the op builder @@ -488,12 +487,12 @@ private void buildFactoryMethods() { } else { body.addStatement("opBuilder.addInput($L.asOutput())", name); } - } body.addStatement("opBuilder = scope.apply(opBuilder)"); - // add the required attribute params, and build the default type maps for use in the secondary factory + // add the required attribute params, and build the default type maps for use in the secondary + // factory Map defaultTypes = new HashMap<>(); Map defaultTypeVars = new HashMap<>(); for (AttrDef attr : requiredAttributes) { @@ -504,8 +503,8 @@ private void buildFactoryMethods() { ResolvedType type = resolver.typeOf(attr); ApiDef.Attr apiAttr = attrApis.get(attr); - ParameterSpec.Builder builder = ParameterSpec - .builder(type.classIfGeneric().listIfIterable().javaType, getJavaName(attr)); + ParameterSpec.Builder builder = + ParameterSpec.builder(type.classIfGeneric().listIfIterable().javaType, getJavaName(attr)); String javaName = getJavaName(attr); String description = @@ -533,7 +532,9 @@ private void buildFactoryMethods() { // add optional attributes if 
(optionsClass != null) { factoryBuilder.addParameter( - ParameterSpec.builder(ArrayTypeName.of(ClassName.get(fullPackage, className, "Options")), "options").build()); + ParameterSpec.builder( + ArrayTypeName.of(ClassName.get(fullPackage, className, "Options")), "options") + .build()); paramTags.put("options", CodeBlock.of("$L", "carries optional attribute values")); factoryBuilder.varargs(); @@ -551,18 +552,17 @@ private void buildFactoryMethods() { body.endControlFlow(); body.endControlFlow(); - } - body.addStatement("return new $L(opBuilder.build())", typeParams.isEmpty() ? className : (className + "<>")); + body.addStatement( + "return new $L(opBuilder.build())", typeParams.isEmpty() ? className : (className + "<>")); factoryBuilder.addCode(body.build()); paramTags.forEach( (param, doc) -> { String description = doc.toString(); if (description.isEmpty() || description.equals("\n")) { - factoryBuilder.addJavadoc( - "\n@param $L the $L property", param, param); + factoryBuilder.addJavadoc("\n@param $L the $L property", param, param); } else { factoryBuilder.addJavadoc("\n@param $L $L", param, doc); } @@ -586,20 +586,26 @@ private void buildFactoryMethods() { } } - /** - * Add a secondary factory method with the provided default type maps - */ - private void buildSecondaryFactory(Map defaultTypes, Map defaultTypeVars, - MethodSpec mainFactory, Map paramTags) { - MethodSpec.Builder factoryBuilder = MethodSpec.methodBuilder(mainFactory.name) - .addModifiers(mainFactory.modifiers) - .returns(ParameterizedTypeName.get(ClassName.get(fullPackage, className), typeParams.stream() - .map(x -> defaultTypeVars.getOrDefault(x.name, x)).toArray(TypeName[]::new))); + /** Add a secondary factory method with the provided default type maps */ + private void buildSecondaryFactory( + Map defaultTypes, + Map defaultTypeVars, + MethodSpec mainFactory, + Map paramTags) { + MethodSpec.Builder factoryBuilder = + MethodSpec.methodBuilder(mainFactory.name) + .addModifiers(mainFactory.modifiers) + .returns( + ParameterizedTypeName.get( + ClassName.get(fullPackage, className), + typeParams.stream() + .map(x -> defaultTypeVars.getOrDefault(x.name, x)) + .toArray(TypeName[]::new))); factoryBuilder.addAnnotations(mainFactory.annotations); - factoryBuilder - .addJavadoc("Factory method to create a class wrapping a new $L operation, with the default output types.\n", - op.getName()); + factoryBuilder.addJavadoc( + "Factory method to create a class wrapping a new $L operation, with the default output types.\n", + op.getName()); CodeBlock.Builder body = CodeBlock.builder(); body.add("return create("); @@ -614,9 +620,14 @@ private void buildSecondaryFactory(Map defaultTypes, Map getJavaName(x).equals(param.name)).findFirst() - .orElse(null); - if (attr != null && resolver.typeOf(attr).shouldWrapInClass() && defaultTypes.containsKey(attr)) { + AttrDef attr = + op.getAttrList().stream() + .filter(x -> getJavaName(x).equals(param.name)) + .findFirst() + .orElse(null); + if (attr != null + && resolver.typeOf(attr).shouldWrapInClass() + && defaultTypes.containsKey(attr)) { body.add("$T.class", defaultTypes.get(attr)); } else { factoryBuilder.addParameter(param); @@ -638,7 +649,8 @@ private void buildSecondaryFactory(Map defaultTypes, Map Date: Fri, 26 Mar 2021 17:54:15 -0400 Subject: [PATCH 40/60] Moved high level tf.nn ops to framework. Moved tf.raw.nn Ops to tf.nn. Changed generation to generate SoftmaxCrossEntropyWithLogits and SparseSoftmaxCrossEntropyWithLogits to core NNOps (tf.nn). 
--- ...pi_def_SoftmaxCrossEntropyWithLogits.pbtxt | 2 +- ..._SparseSoftmaxCrossEntropyWithLogits.pbtxt | 2 +- .../annotations/org/tensorflow/op/NnOps.java | 194 +++--------------- .../org/tensorflow/op/NnRawOps.java | 83 -------- .../SoftmaxCrossEntropyWithLogits.java | 61 +++--- .../SparseSoftmaxCrossEntropyWithLogits.java | 66 +++--- .../op/nn/SigmoidCrossEntropyWithLogits.java | 14 +- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 44 ++-- .../SparseSoftmaxCrossEntropyWithLogits.java | 47 +++-- 9 files changed, 154 insertions(+), 359 deletions(-) delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/{raw => }/SoftmaxCrossEntropyWithLogits.java (79%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/{raw => }/SparseSoftmaxCrossEntropyWithLogits.java (75%) rename {tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow => tensorflow-framework/src/main/java/org/tensorflow/framework}/op/nn/SigmoidCrossEntropyWithLogits.java (91%) rename {tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow => tensorflow-framework/src/main/java/org/tensorflow/framework}/op/nn/SoftmaxCrossEntropyWithLogits.java (87%) rename {tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow => tensorflow-framework/src/main/java/org/tensorflow/framework}/op/nn/SparseSoftmaxCrossEntropyWithLogits.java (83%) diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt index 5dba2164cd6..e064562c0f2 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SoftmaxCrossEntropyWithLogits.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "SoftmaxCrossEntropyWithLogits" endpoint { - name: "nn.raw.SoftmaxCrossEntropyWithLogits" + name: "nn.SoftmaxCrossEntropyWithLogits" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt index cf80ff77565..7627d5f6074 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "SparseSoftmaxCrossEntropyWithLogits" endpoint { - name: "nn.raw.SparseSoftmaxCrossEntropyWithLogits" + name: "nn.SparseSoftmaxCrossEntropyWithLogits" } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 8b25a15522f..1cf8b910297 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -83,7 +83,6 @@ import org.tensorflow.op.nn.Relu; import org.tensorflow.op.nn.Relu6; import org.tensorflow.op.nn.Selu; -import org.tensorflow.op.nn.SigmoidCrossEntropyWithLogits; import org.tensorflow.op.nn.Softmax; import org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits; import org.tensorflow.op.nn.Softsign; @@ -103,8 +102,6 @@ * @see {@link Ops} */ public 
final class NnOps { - public final NnRawOps raw; - private final Scope scope; private final Ops ops; @@ -112,7 +109,6 @@ public final class NnOps { NnOps(Ops ops) { this.scope = ops.scope(); this.ops = ops; - raw = new NnRawOps(ops); } /** @@ -954,26 +950,8 @@ public Dilation2dBackpropInput dilation2dBackpropInput(Op } /** - * Computes the exponential linear function. - * The ELU function is defined as: - *

-   * <ul>
-   *   <li>$ e ^ x - 1 $ if $ x < 0 $
-   *   <li>$ x $ if $ x >= 0 $
-   * </ul>
-   *
-   * <p>Examples:
-   * <pre>
-   * tf.nn.elu(1.0)
-   * <tf.Tensor: shape=(), dtype=float32, numpy=1.0>
-   * tf.nn.elu(0.0)
-   * <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
-   * tf.nn.elu(-1000.0)
-   * <tf.Tensor: shape=(), dtype=float32, numpy=-1.0>
-   * </pre>
-   *
-   * <p>See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
+   * Computes exponential linear: {@code exp(features) - 1} if < 0, {@code features} otherwise.
+   * See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
    *
    * @param data type for {@code activations} output
@@ -1771,8 +1749,8 @@ public QuantizedReluX quantizedReluX(Operand
    *

    *
-   *
-   * tf.nn.relu([-2., 0., 3.]).numpy()
-   * array([0., 0., 3.], dtype=float32)
+   *
+   * tf.nn.relu([-2., 0., -0., 3.]).numpy()
+   * array([ 0., 0., -0., 3.], dtype=float32)
    *
    *
@@ -1815,55 +1793,6 @@ public Selu selu(Operand features) {
     return Selu.create(scope, features);
   }

-  /**
-   * Computes sigmoid cross entropy given logits.
-   *

-   * Measures the probability error in discrete classification tasks in which each class is
-   * independent and not mutually exclusive. For instance, one could perform multilabel
-   * classification where a picture can contain both an elephant and a dog at the same time.
-   *
-   * For brevity, let x = logits, z = labels. The logistic loss in pseudo-code is
-   *
-   *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
-   *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
-   *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
-   *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
-   *   = (1 - z) * x + log(1 + exp(-x))
-   *   = x - x * z + log(1 + exp(-x))
-   *
-   * For x < 0, to avoid overflow in exp(-x), we reformulate the above
-   *
-   *  x - x * z + log(1 + exp(-x))
-   *   = log(exp(x)) - x * z + log(1 + exp(-x))
-   *   = - x * z + log(1 + exp(x))
-   *
-   * Hence, to ensure stability and avoid overflow, the implementation uses this equivalent
-   * formulation
-   *
-   *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
-   *
      logits and labels must have the same type and shape. - * - *
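For reference, the stable formulation above maps directly onto the core math ops. The sketch below is illustrative only (the eager-session setup, class name, and sample values are assumptions, not part of this patch) and is not the framework implementation being moved here:

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class StableSigmoidCrossEntropySketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      // Illustrative values; any two same-shaped float operands work.
      Operand<TFloat32> x = tf.constant(new float[] {-2.0f, 0.0f, 3.0f}); // logits
      Operand<TFloat32> z = tf.constant(new float[] {0.0f, 1.0f, 1.0f});  // labels
      // max(x, 0) - x * z + log(1 + exp(-abs(x)))
      Operand<TFloat32> loss =
          tf.math.add(
              tf.math.sub(tf.math.maximum(x, tf.zerosLike(x)), tf.math.mul(x, z)),
              tf.math.log1p(tf.math.exp(tf.math.neg(tf.math.abs(x)))));
      System.out.println(loss.asTensor().getFloat(0)); // component-wise logistic loss
    }
  }
}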

      - * - * @param labels the labels - * @param logits the logits of type float32 or float64 - * @param the type of labels and logits - * @return the component-wise logistic losses. - * @throws IllegalArgumentException if logits' and labels' do not have the same shape - */ - public Operand sigmoidCrossEntropyWithLogits(Operand labels, - Operand logits) { - return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); - } - /** * Computes softmax activations. * For each batch {@code i} and class {@code j} we have @@ -1881,53 +1810,20 @@ public Softmax softmax(Operand logits) { } /** - * Computes softmax cross entropy between logits and labels. - * - *

-   * Measures the probability error in discrete classification tasks in which the classes are
-   * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is
-   * labeled with one and only one label: an image can be a dog or a truck, but not both.
-   *
-   * NOTE:
-   *
-   * While the classes are mutually exclusive, their probabilities need not be. All that is
-   * required is that each row of labels is a valid probability distribution. If they
-   * are not, the computation of the gradient will be incorrect.
-   *
-   * If using exclusive labels (wherein one and only one class is true at a time),
-   * see {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits}
-   *
-   * Usage:
-   *
      -   *    Operand<TFloat32> logits =
      -   *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
      -   *    Operand<TFloat32> labels =
      -   *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
      -   *    Operand<TFloat32> output =
      -   *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
      -   *    // output Shape = [2]
      -   *    // dataType = FLOAT (1)
      -   *    // values { 0.169846, 0.824745 }
      -   *  
      - * - *
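With this change the helper shown in the removed usage example moves to the framework package (the FrameworkOps entry point added by a later patch in this series), while tf.nn keeps the raw kernel that returns both loss and backprop. The sketch below shows the two call paths; it assumes an Ops instance and TFloat32 operands, and the surrounding class and method names are illustrative, not part of this patch:

import org.tensorflow.Operand;
import org.tensorflow.framework.op.FrameworkOps;
import org.tensorflow.op.Ops;
import org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits;
import org.tensorflow.types.TFloat32;

class SoftmaxCrossEntropyCallPaths {
  // High-level helper: labels first, then logits, then the class axis.
  static Operand<TFloat32> frameworkLoss(
      Ops tf, Operand<TFloat32> labels, Operand<TFloat32> logits) {
    FrameworkOps fops = FrameworkOps.create(tf);
    return fops.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
  }

  // Raw kernel on tf.nn: features (logits) first, then labels; exposes loss() and backprop().
  static Operand<TFloat32> rawLoss(
      Ops tf, Operand<TFloat32> labels, Operand<TFloat32> logits) {
    SoftmaxCrossEntropyWithLogits<TFloat32> raw =
        tf.nn.softmaxCrossEntropyWithLogits(logits, labels);
    return raw.loss();
  }
}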

      Backpropagation will happen into both logits and labels. To - * disallow backpropagation into labels, pass label tensors through - * tf.stopGradient before feeding it to this function. + * Computes softmax cross entropy cost and gradients to backpropagate. + *

      + * Inputs are the logits, not probabilities. * - * @param labels Each vector along the class dimension should hold a valid probability - * distribution e.g. for the case in which labels are of shape [batch_size, num_classes] - * , each row of labels[i] must be a valid probability distribution. - * @param logits Per-label activations, typically a linear output. These activation energies are - * interpreted as unnormalized log probabilities. - * @param axis The class dimension. -1 is the last dimension. - * @param the number type of the operands - * @return the softmax cross entropy loss. Its type is the same as logits and its - * shape is the same as labels except that it does not have the last dimension of - * labels. + * @param data type for {@code loss()} output + * @param features batch_size x num_classes matrix + * @param labels batch_size x num_classes matrix + * The caller must ensure that each batch of labels represents a valid + * probability distribution. + * @return a new instance of SoftmaxCrossEntropyWithLogits */ - public Operand softmaxCrossEntropyWithLogits( - Operand labels, Operand logits, int axis) { - return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); + public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyWithLogits( + Operand features, Operand labels) { + return SoftmaxCrossEntropyWithLogits.create(scope, features, labels); } /** @@ -2114,50 +2010,24 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo } /** - * Computes sparse softmax cross entropy between logits and labels. - * - *

-   * Measures the probability error in discrete classification tasks in which the classes are
-   * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is
-   * labeled with one and only one label: an image can be a dog or a truck, but not both.
-   *
-   * NOTE:
-   *
-   * For this operation, the probability of a given label is considered exclusive. That is, soft
-   * classes are not allowed, and the labels vector must provide a single specific
-   * index for the true class for each row of logits (each minibatch entry). For soft
-   * softmax classification with a probability distribution for each entry, see {@link
-   * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}.
-   *
-   * WARNING:
-   *
-   * This op expects unscaled logits, since it performs a softmax on logits
-   * internally for efficiency. Do not call this op with the output of softmax,
-   * as it will produce incorrect results.
-   *
-   * A common use case is to have logits of shape [batchSize, numClasses] and have
-   * labels of shape [batchSize], but higher dimensions are supported, in which case
-   * the dim-th dimension is assumed to be of size numClasses.
-   * logits must have the dataType of TFloat16, TFloat32,
-   * or TFloat64, and labels must have the dtype of TInt32
-   * or TInt64.
+   * Computes softmax cross entropy cost and gradients to backpropagate.
+   *
+   * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
+   * a matrix of label probabilities, but rather a single label per row
+   * of features. This label is considered to have probability 1.0 for the
+   * given row.
+   *
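Unlike the dense variant above it, this op takes one class index per row of features rather than a probability row. A minimal eager sketch of the relocated raw op follows (the label indices, eager setup, and surrounding class are illustrative assumptions; the logits reuse the values from the removed usage example):

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

public class SparseSoftmaxCrossEntropySketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      // Two examples, three classes: unscaled logits and one class index per example.
      Operand<TFloat32> logits =
          tf.constant(new float[][] {{4.0f, 2.0f, 1.0f}, {0.0f, 5.0f, 1.0f}});
      Operand<TInt32> labels = tf.constant(new int[] {0, 1});
      SparseSoftmaxCrossEntropyWithLogits<TFloat32> op =
          tf.nn.sparseSoftmaxCrossEntropyWithLogits(logits, labels);
      System.out.println(op.loss().asTensor().getFloat(0)); // per-example loss
    }
  }
}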

      + * Inputs are the logits, not probabilities. * - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log - * probabilities. - * @return A Tensor of the same shape as labels and of the same type as - * logits with the softmax cross entropy loss. - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank - * of the labels is not equal to the rank of the logits minus one. + * @param data type for {@code loss()} output + * @param features batch_size x num_classes matrix + * @param labels batch_size vector with values in [0, num_classes). + * This is the label for the given minibatch entry. + * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ - public Operand sparseSoftmaxCrossEntropyWithLogits( - Operand labels, Operand logits) { - return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits(scope, labels, logits); + public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( + Operand features, Operand labels) { + return SparseSoftmaxCrossEntropyWithLogits.create(scope, features, labels); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java deleted file mode 100644 index c287459c460..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnRawOps.java +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2020 The TensorFlow Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// ============================================================================== -// -// This class has been generated, DO NOT EDIT! -// -package org.tensorflow.op; - -import org.tensorflow.Operand; -import org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits; -import org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits; -import org.tensorflow.types.family.TNumber; - -/** - * An API for building {@code nn.raw} operations as {@link Op Op}s - * - * @see {@link Ops} - */ -public final class NnRawOps { - private final Scope scope; - - private final Ops ops; - - NnRawOps(Ops ops) { - this.scope = ops.scope(); - this.ops = ops; - } - - /** - * Computes softmax cross entropy cost and gradients to backpropagate. - * Inputs are the logits, not probabilities. 
- * - * @param data type for {@code loss} output - * @param features batch_size x num_classes matrix - * @param labels batch_size x num_classes matrix - * The caller must ensure that each batch of labels represents a valid - * probability distribution. - * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands - * @return a new instance of SoftmaxCrossEntropyWithLogits - */ - public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyWithLogits( - Operand features, Operand labels) { - return SoftmaxCrossEntropyWithLogits.create(scope, features, labels); - } - - /** - * Computes softmax cross entropy cost and gradients to backpropagate. - * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept - * a matrix of label probabilities, but rather a single label per row - * of features. This label is considered to have probability 1.0 for the - * given row. - *

      Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output - * @param features batch_size x num_classes matrix - * @param labels batch_size vector with values in [0, num_classes). - * This is the label for the given minibatch entry. - * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands - * @return a new instance of SparseSoftmaxCrossEntropyWithLogits - */ - public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( - Operand features, Operand labels) { - return SparseSoftmaxCrossEntropyWithLogits.create(scope, features, labels); - } - - /** - * Get the parent {@link Ops} object. - */ - public final Ops ops() { - return ops; - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java similarity index 79% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java index 0f2d50289b9..5d3ab3c1100 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.nn.raw; +package org.tensorflow.op.nn; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -29,68 +29,57 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. + *

      * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output + * + * @param data type for {@code loss()} output */ -@Operator( - group = "nn.raw" -) +@Operator(group = "nn") public final class SoftmaxCrossEntropyWithLogits extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; - - private Output loss; - - private Output backprop; - - private SoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx++); - } - + /** * Factory method to create a class wrapping a new SoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. - * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits */ - @Endpoint( - describeByClass = true - ) - public static SoftmaxCrossEntropyWithLogits create(Scope scope, - Operand features, Operand labels) { - OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SoftmaxCrossEntropyWithLogits")); + @Endpoint(describeByClass = true) + public static SoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { + OperationBuilder opBuilder = scope.env().opBuilder("SoftmaxCrossEntropyWithLogits", scope.makeOpName("SoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** - * Gets loss. * Per example loss (batch_size vector). - * @return loss. */ public Output loss() { return loss; } - + /** - * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). - * @return backprop. */ public Output backprop() { return backprop; } + + /** The name of this op, as known by TensorFlow core engine */ + public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; + + private Output loss; + private Output backprop; + + private SoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java similarity index 75% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 30eec4c3c05..794beab4ded 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/raw/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
-package org.tensorflow.op.nn.raw; +package org.tensorflow.op.nn; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -29,71 +29,61 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. - * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept + *

      + * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - *

      Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output + *

      + * Inputs are the logits, not probabilities. + * + * @param data type for {@code loss()} output */ -@Operator( - group = "nn.raw" -) +@Operator(group = "nn") public final class SparseSoftmaxCrossEntropyWithLogits extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; - - private Output loss; - - private Output backprop; - - private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx++); - } - + /** * Factory method to create a class wrapping a new SparseSoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. - * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ - @Endpoint( - describeByClass = true - ) - public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, - Operand features, Operand labels) { - OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSoftmaxCrossEntropyWithLogits")); + @Endpoint(describeByClass = true) + public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { + OperationBuilder opBuilder = scope.env().opBuilder("SparseSoftmaxCrossEntropyWithLogits", scope.makeOpName("SparseSoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SparseSoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** - * Gets loss. * Per example loss (batch_size vector). - * @return loss. */ public Output loss() { return loss; } - + /** - * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). - * @return backprop. 
*/ public Output backprop() { return backprop; } + + /** The name of this op, as known by TensorFlow core engine */ + public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; + + private Output loss; + private Output backprop; + + private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java similarity index 91% rename from tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SigmoidCrossEntropyWithLogits.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index 92c413f7e52..b55385839d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -1,4 +1,4 @@ -package org.tensorflow.op.nn; +package org.tensorflow.framework.op.nn; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; @@ -8,11 +8,17 @@ import org.tensorflow.op.core.Select; import org.tensorflow.op.core.ZerosLike; import org.tensorflow.op.dtypes.Cast; -import org.tensorflow.op.math.*; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.Exp; +import org.tensorflow.op.math.GreaterEqual; +import org.tensorflow.op.math.Log1p; +import org.tensorflow.op.math.Mul; +import org.tensorflow.op.math.Neg; +import org.tensorflow.op.math.Sub; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -@Operator(group = "nn") +//@Operator(group = "nn") public class SigmoidCrossEntropyWithLogits { /** @@ -60,7 +66,7 @@ public class SigmoidCrossEntropyWithLogits { * @return the component-wise logistic losses. 
* @throws IllegalArgumentException if logits' and labels' do not have the same shape */ - @Endpoint(name = "sigmoidCrossEntropyWithLogits") + //@Endpoint(name = "sigmoidCrossEntropyWithLogits") public static Operand sigmoidCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits) { if (!isCompatible(labels.shape(), logits.shape())) { diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java similarity index 87% rename from tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java index ddeacbea4d4..0f5b8197f1e 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -1,11 +1,15 @@ -package org.tensorflow.op.nn; +package org.tensorflow.framework.op.nn; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; -import org.tensorflow.op.core.*; +import org.tensorflow.op.core.Concat; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Range; +import org.tensorflow.op.core.Rank; +import org.tensorflow.op.core.Reshape; +import org.tensorflow.op.core.Slice; import org.tensorflow.op.dtypes.Cast; import org.tensorflow.op.linalg.Transpose; import org.tensorflow.op.math.Sub; @@ -14,12 +18,11 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; -import org.tensorflow.types.family.TType; import java.util.Arrays; import java.util.List; -@Operator(group = "nn") +// @Operator(group = "nn") public class SoftmaxCrossEntropyWithLogits { /** @@ -68,6 +71,7 @@ public class SoftmaxCrossEntropyWithLogits { * shape is the same as labels except that it does not have the last dimension of * labels. */ + @SuppressWarnings("unchecked") @Endpoint(name = "softmaxCrossEntropyWithLogits") public static Operand softmaxCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits, int axis) { @@ -78,7 +82,9 @@ public static Operand softmaxCrossEntr } if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { - Operand result = softmaxCrossEntropyWithLogits(scope, + Operand result = + softmaxCrossEntropyWithLogits( + scope, Cast.create(scope, labels, TFloat32.class), Cast.create(scope, logits, TFloat32.class), axis); @@ -86,10 +92,8 @@ public static Operand softmaxCrossEntr } if (logits.asOutput().type() != labels.asOutput().type()) { - return softmaxCrossEntropyWithLogits(scope, - Cast.create(scope, labels, logits.asOutput().type()), - logits, - axis); + return softmaxCrossEntropyWithLogits( + scope, Cast.create(scope, labels, logits.asOutput().type()), logits, axis); } Operand inputRank = Cast.create(scope, Rank.create(scope, logits), TInt64.class); @@ -101,13 +105,20 @@ public static Operand softmaxCrossEntr labels = moveDimToEnd(scope, labels, axis, inputRank); } + Operand tLabels; + if (labels.type() != logits.type()) { + tLabels = Cast.create(scope, labels, logits.type()); + } else { + // Unchecked warning checked in if statement. 
+ tLabels = (Operand) labels; + } + Shape inputShape = logits.shape(); logits = flattenOuterDims(scope, logits); - labels = flattenOuterDims(scope, labels); + tLabels = flattenOuterDims(scope, tLabels); - org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits smax = - org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits.create( - scope, logits, (Operand)labels); + org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits smax = + org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits.create(scope, logits, tLabels); /* cannot use generic on cost, because cost may be recast later. */ Operand cost = smax.loss(); Operand outputShape = @@ -119,6 +130,9 @@ public static Operand softmaxCrossEntr cost = Reshape.create(scope, cost, outputShape); if (scope.env().isGraph() && !shape.hasUnknownDimension()) { long[] array = shape.asArray(); + if (array == null) { + array = new long[0]; + } long[] newArray = new long[array.length - 1]; if (axis < 0) { axis = shape.numDimensions() + axis; @@ -153,7 +167,7 @@ private static Operand flattenOuterDims(Scope scope, Oper boolean productValid = true; for (int i = ndims - 2; i >= 0; i--) { long d = shape.size(i); - if (d == org.tensorflow.ndarray.Shape.UNKNOWN_SIZE) { + if (d == Shape.UNKNOWN_SIZE) { productValid = false; break; } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java similarity index 83% rename from tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 54b32bb5c63..64faa7c5d70 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -1,11 +1,10 @@ -package org.tensorflow.op.nn; +package org.tensorflow.framework.op.nn; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; import org.tensorflow.op.core.AssertThat; import org.tensorflow.op.core.Constant; import org.tensorflow.op.core.Reshape; @@ -22,7 +21,7 @@ import java.util.Collections; import java.util.List; -@Operator(group = "nn") +// @Operator(group = "nn") public class SparseSoftmaxCrossEntropyWithLogits { /** @@ -63,19 +62,24 @@ public class SparseSoftmaxCrossEntropyWithLogits { * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, * or TFloat64. These activation energies are interpreted as unnormalized log * probabilities. + * @param the data type for the labels + * @param the data tyoe for the loss and logits. * @return A Tensor of the same shape as labels and of the same type as * logits with the softmax cross entropy loss. * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank * of the labels is not equal to the rank of the logits minus one. 
*/ + @SuppressWarnings("unchecked") @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits") - public static Operand sparseSoftmaxCrossEntropyWithLogits( - Scope scope, Operand labels, Operand logits) { + public static + Operand sparseSoftmaxCrossEntropyWithLogits( + Scope scope, Operand labels, Operand logits) { scope = scope.withSubScope("SparseSoftmaxCrossEntropyWithLogits"); - /** cannot use generics on preciseLogits as it may be recast later */ - Operand preciseLogits = logits; + Operand preciseLogits; if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { preciseLogits = Cast.create(scope, logits, TFloat32.class); + } else { + preciseLogits = logits; } Shape labelsStaticShape = labels.shape(); org.tensorflow.op.core.Shape labelsShape = @@ -108,14 +112,16 @@ public static Operand sparseSoftmaxCrossE } // Check if no reshapes are required. if (logitsShape.numDimensions() == 2) { - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits smax = - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits.create( + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( scope, preciseLogits, labels); - Operand loss = smax.loss(); - if (logits.asOutput().type() == TFloat16.class) { - loss = Cast.create(scope, loss, TFloat16.class); + Operand cost = smax.loss(); + if (cost.type() != logits.type()) { + return Cast.create(scope, cost, logits.type()); + } else { + // Unchecked cast already checked with previous if + return (Operand) cost; } - return loss; } List shapeChecks = new ArrayList<>(); @@ -145,14 +151,17 @@ public static Operand sparseSoftmaxCrossE preciseLogits = Reshape.create(scope, preciseLogits, Constant.arrayOf(scope, -1L, numClassses)); labels = Reshape.create(scope, labels, Constant.scalarOf(scope, -1)); scope.withControlDependencies(shapeChecks); - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits smax = - org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits.create( + // call raw op + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = + org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( scope, preciseLogits, labels); - Operand cost = smax.loss(); + Operand cost = smax.loss(); cost = Reshape.create(scope, cost, labelsShape); - if (logits.asOutput().type() == TFloat16.class) { - cost = Cast.create(scope, cost, TFloat16.class); + if (cost.type() != logits.type()) { + return Cast.create(scope, cost, logits.type()); + } else { + // Unchecked cast already checked with previous if + return (Operand) cost; } - return cost; } } From e4837929eebf482a0824ca4839d5be6ca93f0efd Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Fri, 26 Mar 2021 18:02:55 -0400 Subject: [PATCH 41/60] Added FrameworkOps analogous to Ops. Added NnOps and SetOps as groups. Fixed MetricsHelper and Losses to use the bew FrameworkOps. Moved SetsOps to framework.op. 
--- .../tensorflow/framework/losses/Losses.java | 17 +- .../framework/metrics/impl/MetricsHelper.java | 28 ++- .../framework/metrics/impl/SetsOps.java | 147 ------------- .../tensorflow/framework/op/FrameworkOps.java | 136 ++++++++++++ .../org/tensorflow/framework/op/NnOps.java | 197 ++++++++++++++++++ .../org/tensorflow/framework/op/SetsOps.java | 161 ++++++++++++++ .../SparseSoftmaxCrossEntropyWithLogits.java | 3 +- .../{SetsOpsTest.java => SetOpsTest.java} | 18 +- 8 files changed, 539 insertions(+), 168 deletions(-) delete mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java rename tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/{SetsOpsTest.java => SetOpsTest.java} (86%) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index 9aa94cf7fcf..aa5fa4ada6d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -19,6 +19,7 @@ import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.op.core.ReduceAll; import org.tensorflow.op.core.ReduceMax; import org.tensorflow.op.core.ReduceSum; @@ -181,7 +182,8 @@ public static Operand binaryCrossentropy( */ private static Operand binaryCrossentropyHelper( Ops tf, Operand target, Operand output, boolean fromLogits) { - if (fromLogits) return tf.nn.sigmoidCrossEntropyWithLogits(target, output); + FrameworkOps fop = FrameworkOps.create(tf); + if (fromLogits) { return fop.nn.sigmoidCrossEntropyWithLogits(target, output);} /* TODO - skip this logic for now. 
It requires walking back the inputs which is not yet possible if (!(output instanceof Variable) && (!tf.scope().env().isEager())) { @@ -191,7 +193,7 @@ private static Operand binaryCrossentropyHelper( // TODO if (output.op().numInputess() != 1) // TODO throw new IllegalArgumentException("output can only have 1 output"); // TODO output = output.op().inout(0); - // TODO return tf.nn.sigmoidCrossEntropyWithLogits(target, output); + // TODO return fop.nn.sigmoidCrossEntropyWithLogits(target, output); // TODO} } */ @@ -235,6 +237,7 @@ public static Operand categoricalCrossentropy( boolean fromLogits, float labelSmoothing, int axis) { + FrameworkOps fop = FrameworkOps.create(tf); Class predictionType = predictions.type(); Operand tLabels = cast(tf, labels, predictionType); LossTuple ops = LossesHelper.squeezeOrExpandDimensions(tf, tLabels, predictions, null); @@ -245,7 +248,7 @@ public static Operand categoricalCrossentropy( tLabels = smoothCategoricalLabels(tf, tLabels, labelSmoothing); } if (fromLogits) { - return tf.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, axis); + return fop.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, axis); } /* TODO if (!(predictions instanceof Variable) && (!tf.scope().env().isEager())) { @@ -255,7 +258,7 @@ public static Operand categoricalCrossentropy( if (predictions.op().numOutputs() != 1) throw new IllegalArgumentException("output can only have 1 output"); predictions = predictions.op().output(0); - return tf.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, -1); + return fop.nn.softmaxCrossEntropyWithLogits(tLabels, predictions, -1); } } */ @@ -516,6 +519,7 @@ public static Operand sparseCategoricalCrossentropy( boolean fromLogits, int axis) { Class predictionType = predictions.type(); + FrameworkOps fop = FrameworkOps.create(tf); Operand epsilonConst = cast(tf, tf.constant(EPSILON), predictionType); Operand one = cast(tf, tf.constant(1), predictionType); Operand oneMinusEpsilonConst = tf.math.sub(one, epsilonConst); @@ -568,9 +572,8 @@ public static Operand sparseCategoricalCrossentropy( tf.constant( new long[] {-1L, predictionsShape.size(predictionsShape.numDimensions() - 1)})); } - - @SuppressWarnings("unchecked") - Operand loss = tf.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); + + Operand loss = fop.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); if (updateShape && predictionsRank >= 3) { Shape newShape = predictionsShape.take(predictionsShape.numDimensions() - 1); loss = tf.reshape(loss, tf.constant(newShape)); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java index 7d265ef7651..7572adc366a 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java @@ -28,7 +28,7 @@ import org.tensorflow.framework.losses.impl.LossTuple; import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.framework.metrics.exceptions.NotBroadcastableException; -import org.tensorflow.framework.utils.SparseTensor; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; @@ -48,6 +48,17 @@ import org.tensorflow.types.family.TIntegral; import org.tensorflow.types.family.TNumber; +import java.util.ArrayList; +import java.util.Arrays; 
+import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; +import static org.tensorflow.framework.utils.CastHelper.cast; + /** * These are helper methods for Metrics and will be module private when Java modularity is applied * to TensorFlow Java. These methods should not be used outside of the metrics packages. @@ -58,7 +69,8 @@ public class MetricsHelper { "weights can not be broadcast to values."; /** - * Asserts that the {@code sampleWeights} can be broadcast to the same shape as {@code values } + * Asserts that the {@code sampleWeights} can be broadcast to the same shape as {@code values + * } * *

      In losses and metrics, limited weight broadcasting is supported. Weights must be either * scalar, or the same rank as the target values, with each dimension either 1, or the same as the @@ -67,8 +79,8 @@ public class MetricsHelper { * @param tf the TensorFlow Ops * @param sampleWeights the sample weights. * @param values the values to which weights are applied. - * @return {@code Operation} with control dependencies to ensure {@code sampleWeight} can be - * broadcast to {@code values} + * @return {@code Operation} with control dependencies to ensure {@code sampleWeight} + * can be broadcast to {@code values} * @param the type of Operand * @throws NotBroadcastableException If static checks determine {@code sampleWeights} has an * incorrect shape that prohibit broadcasting to {@code values} @@ -112,7 +124,10 @@ public static Op assertBroadcastable( throw new NotBroadcastableException( String.format( "%s Mismatch at dim %d. values.shape=%s weights.shape=%s.", - ASSERT_BROADCAST_ERROR_PREFIX, i, valuesShapeStatic, weightsShapeStatic)); + ASSERT_BROADCAST_ERROR_PREFIX, + i, + valuesShapeStatic, + weightsShapeStatic)); } } return tf.withSubScope("staticDimsCheckSuccess") @@ -185,12 +200,13 @@ private static Operand canBroadcastNonscalarShapes( private static Operand canBroadcastDims( Ops tf, Operand weightsShape, Operand valuesShape) { tf = tf.withSubScope("canBroadcastDims"); + FrameworkOps fops = FrameworkOps.create(tf); Operand valuesShape2d = tf.expandDims(valuesShape, tf.constant(-1)); Operand validDims = tf.concat(Arrays.asList(valuesShape2d, tf.onesLike(valuesShape2d)), tf.constant(1)); Operand weightsShape2D = tf.expandDims(weightsShape, tf.constant(-1)); - Operand diffResult = SetsOps.difference(tf, weightsShape2D, validDims); + Operand diffResult = fops.sets.difference(weightsShape2D, validDims); Operand numInvalidDims = tf.size(diffResult); return tf.math.equal(tf.constant(0), numInvalidDims); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java deleted file mode 100644 index dd77a1be4aa..00000000000 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/SetsOps.java +++ /dev/null @@ -1,147 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ -package org.tensorflow.framework.metrics.impl; - -import static org.tensorflow.framework.utils.CastHelper.cast; - -import org.tensorflow.Operand; -import org.tensorflow.op.Ops; -import org.tensorflow.op.SparseOps; -import org.tensorflow.op.sparse.DenseToDenseSetOperation; -import org.tensorflow.types.family.TNumber; - -/** Implementation of set operations */ -public class SetsOps { - - /** - * Computes set difference of elements in last dimension of {@code a} and {@code b} with {@code - * aMinusB} set to true. - * - *

      All but the last dimension of {@code a} and {@code b} must match - * - * @param tf the TensorFlow Ops - * @param a The first operand representing set {@code a} - * @param b The other operand representing set {@code b} - * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the last - * dimension the * same. Elements along the last dimension contain the results of the set - * operation. - */ - public static Operand difference(Ops tf, Operand a, Operand b) { - return difference(tf, a, b, true); - } - - /** - * Computes set difference of elements in last dimension of {@code a} and {@code b}. - * - *

      All but the last dimension of {@code a} and {@code b} must match - * - * @param tf the TensorFlow Ops - * @param a The first operand representing set {@code a} - * @param b The other operand representing set {@code b} - * @param aMinusB whether to subtract b from a, vs vice versa. - * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the last - * dimension the * same. Elements along the last dimension contain the results of the set - * operation. - */ - public static Operand difference( - Ops tf, Operand a, Operand b, boolean aMinusB) { - return setOperation(tf, a, b, aMinusB ? Operation.A_MINUS_B : Operation.B_MINUS_A); - } - - /** - * Computes set union of elements in last dimension of {@code a} and {@code b}. - * - * @param tf the TensorFlow Ops - * @param a The first operand representing set {@code a} - * @param b The other operand representing set {@code b} - * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the last - * dimension the * same. Elements along the last dimension contain the results of the set - * operation. - */ - public static Operand union(Ops tf, Operand a, Operand b) { - return setOperation(tf, a, b, Operation.UNION); - } - - /** - * Computes set intersection of elements in last dimension of {@code a} and {@code b}. - * - * @param tf the TensorFlow Ops - * @param a The first operand representing set {@code a} - * @param b The other operand representing set {@code b} - * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the last - * dimension the * same. Elements along the last dimension contain the results of the set - * operation. - */ - public static Operand intersection(Ops tf, Operand a, Operand b) { - return setOperation(tf, a, b, Operation.INTERSECTION); - } - - /** - * Compute set operation of elements in last dimension of {@code a} and {@code b}. - * - * @param tf the TensorFlow Ops - * @param a The first set operation operand - * @param b The other et operation operand - * @param setOperation The set operation to perform, {@link Operation}. - * @param the data type for the sets - * @return An Operand with the same rank as {@code a} and {@code b}, and all but the last - * dimension the same. Elements along the last dimension contain the results of the set - * operation. 
- */ - public static Operand setOperation( - Ops tf, Operand a, Operand b, Operation setOperation) { - - DenseToDenseSetOperation setOperationResult = - tf.sparse.denseToDenseSetOperation( - a, b, setOperation.getSetOperation(), DenseToDenseSetOperation.validateIndices(true)); - - return tf.sparse.sparseToDense( - setOperationResult.resultIndices(), - setOperationResult.resultShape(), - setOperationResult.resultValues(), - cast(tf, tf.constant(0), a.type())); - } - - /** - * Enumeration containing the string operation values to be passed to the TensorFlow Sparse Ops - * function {@link SparseOps#denseToDenseSetOperation} - */ - public enum Operation { - A_MINUS_B("a-b"), - B_MINUS_A("b-a"), - INTERSECTION("intersection"), - UNION("union"); - - private final String setOperation; - - Operation(String setOperation) { - this.setOperation = setOperation; - } - - /** - * Gets the set operation String value used to pass as the stringOperation value to {@link - * SparseOps#denseToDenseSetOperation} - * - * @return the set operation String value - */ - public String getSetOperation() { - return setOperation; - } - } -} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java new file mode 100644 index 00000000000..cecbecfed15 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -0,0 +1,136 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.DeviceSpec; +import org.tensorflow.EagerSession; +import org.tensorflow.ExecutionEnvironment; +import org.tensorflow.op.Op; +import org.tensorflow.op.Ops; +import org.tensorflow.op.Scope; + +/** + * An API for building framework operations as {@link Op Op}s + * + *

      These are higher level ops that may invoke core ops. Higher level Ops may perform the + * operation solely in the TensorFlow framework or do preprocessing of the Operands before invoking + * a core level Op. + */ +public class FrameworkOps { + public final Ops coreOps; + private final Scope scope; + + public final NnOps nn; + public final SetsOps sets; + + /** + * Creates a FrameworkOps instance with the provided scope + * + * @param scope the scope + */ + private FrameworkOps(Scope scope) { + this.coreOps = Ops.create(scope.env()); + this.scope = scope; + nn = new NnOps(this); + sets = new SetsOps(this); + } + + /** + * Creates a FrameworkOps instance based on the provided Core Ops + * + * @param coreOps The TensorFlow Core Ops + */ + private FrameworkOps(Ops coreOps) { + this.coreOps = coreOps; + this.scope = coreOps.scope(); + nn = new NnOps(this); + sets = new SetsOps(this); + } + + + /** Returns the current {@link Scope scope} of this API */ + public final Scope scope() { + return scope; + } + + /** + * Gets the core Ops + * + * @return coreOps + */ + public final Ops coreOps() { + return coreOps; + } + + /** + * Returns an API that builds operations with the provided name prefix. + * + *

{@link Scope#withSubScope(String)}
+   */
+  public FrameworkOps withSubScope(String childScopeName) {
+    return new FrameworkOps(scope.withSubScope(childScopeName));
+  }
+
+  /**
+   * Returns an API that uses the provided name for an op.
+   *
+   *

      {@link Scope#withName(String)} + */ + public FrameworkOps withName(String opName) { + return new FrameworkOps(scope.withName(opName)); + } + + /** + * Returns an API that places the created operations on the device(s) matching the provided spec. + * + *

      {@link Scope#withDevice(DeviceSpec)} + */ + public FrameworkOps withDevice(DeviceSpec deviceSpec) { + return new FrameworkOps(scope.withDevice(deviceSpec)); + } + + /** + * Returns an API that adds operations to the graph with the provided control dependencies. + * + *

      {@link Scope#withControlDependencies(Iterable)} + */ + public FrameworkOps withControlDependencies(Iterable controls) { + return new FrameworkOps(scope.withControlDependencies(controls)); + } + + /** Creates an API for building operations in the provided execution environment */ + public static FrameworkOps create(ExecutionEnvironment env) { + return new FrameworkOps(new Scope(env)); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + *

      Invoking this method is equivalent to {@code + * FrameworkOps.create(EagerSession.getDefault())}. + */ + public static FrameworkOps create() { + return new FrameworkOps(new Scope(EagerSession.getDefault())); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + * @param coreOps the TensorFlow core Ops + */ + public static FrameworkOps create(Ops coreOps) { + return new FrameworkOps(coreOps); + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java new file mode 100644 index 00000000000..4054f3ddbb5 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java @@ -0,0 +1,197 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.Operand; +import org.tensorflow.framework.op.nn.SigmoidCrossEntropyWithLogits; +import org.tensorflow.framework.op.nn.SoftmaxCrossEntropyWithLogits; +import org.tensorflow.framework.op.nn.SparseSoftmaxCrossEntropyWithLogits; +import org.tensorflow.op.Op; +import org.tensorflow.op.Scope; +import org.tensorflow.types.family.TNumber; + +/** + * An API for building {@code nn} operations as {@link Op Op}s + * + *

      These are higher level ops that may invoke core ops. Higher level Ops may perform the + * operation solely in the TensorFlow framework or do preprocessing of the Operands before invoking + * a core level Op. + * + *

      {@link FrameworkOps} + */ +public class NnOps { + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * @param frameworkOps the TensorFLow framework Ops + */ + NnOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + + /** + * Computes sigmoid cross entropy given logits. + * + *

      Measures the probability error in discrete classification tasks in which each class is + * independent and not mutually exclusive. For instance, one could perform multilabel + * classification where a picture can contain both an elephant and a dog at the same time. + * + *

      For brevity, let x = logits, z = labels. The logistic loss in + * pseudo-code is + * + *

      +     *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      +     *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
      +     *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
+     *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
      +     *   = (1 - z) * x + log(1 + exp(-x))
      +     *   = x - x * z + log(1 + exp(-x))
      +     *  
      + * + *

      For x < 0, to avoid overflow in exp(-x), we reformulate the above + * + *

      +     *  x - x * z + log(1 + exp(-x))
      +     *   = log(exp(x)) - x * z + log(1 + exp(-x))
      +     *   = - x * z + log(1 + exp(x))
      +     *  
      + * + *

      Hence, to ensure stability and avoid overflow, the implementation uses this equivalent + * formulation + * + *

      +     *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
      +     *  
      + * + *

      logits and labels must have the same type and shape. + * + *

      + * + * @param labels the labels + * @param logits the logits of type float32 or float64 + * @param the type of labels and logits + * @return the component-wise logistic losses. + * @throws IllegalArgumentException if logits' and labels' do not have the same shape + */ + public Operand sigmoidCrossEntropyWithLogits(Operand labels, + Operand logits) { + return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); + } + + /** + * Computes softmax cross entropy between logits and labels. + * + *

      Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

      NOTE: + * + *

      While the classes are mutually exclusive, their probabilities need not be. All that is + * required is that each row of labels is a valid probability distribution. If they + * are not, the computation of the gradient will be incorrect. + * + *

      If using exclusive labels (wherein one and only one class is true at a time), + * see {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} + * + *

      Usage: + * + *

      +     *    Operand<TFloat32> logits =
      +     *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
      +     *    Operand<TFloat32> labels =
      +     *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
      +     *    Operand<TFloat32> output =
      +     *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
      +     *    // output Shape = [2]
      +     *    // dataType = FLOAT (1)
      +     *    // values { 0.169846, 0.824745 }
      +     *  
      + * + *

      Backpropagation will happen into both logits and labels. To + * disallow backpropagation into labels, pass label tensors through + * tf.stopGradient before feeding it to this function. + * + * @param labels Each vector along the class dimension should hold a valid probability + * distribution e.g. for the case in which labels are of shape [batch_size, num_classes] + * , each row of labels[i] must be a valid probability distribution. + * @param logits Per-label activations, typically a linear output. These activation energies are + * interpreted as unnormalized log probabilities. + * @param axis The class dimension. -1 is the last dimension. + * @param the number type of the operands + * @return the softmax cross entropy loss. Its type is the same as logits and its + * shape is the same as labels except that it does not have the last dimension of + * labels. + */ + public Operand softmaxCrossEntropyWithLogits( + Operand labels, Operand logits, int axis) { + return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); + } + + /** + * Computes sparse softmax cross entropy between logits and labels. + * + *

      Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

      NOTE: + * + *

      For this operation, the probability of a given label is considered exclusive. That is, soft + * classes are not allowed, and the labels vector must provide a single specific + * index for the true class for each row of logits (each minibatch entry). For soft + * softmax classification with a probability distribution for each entry, {@link + * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. + * + *

      WARNING: + * + *

      This op expects unscaled logits, since it performs a softmax on logits + * internally for efficiency. Do not call this op with the output of softmax, + * as it will produce incorrect results. + * + *

      A common use case is to have logits of shape [batchSize, numClasses] and have + * labels of shape [batchSize], but higher dimensions are supported, in which case + * the dim-th dimension is assumed to be of size numClasses. + * logits must have the dataType of TFloat16, TFloat32 + * , or TFloat64, and labels must have the dtype of TInt32 + * or TInt64. + * + * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r + * is rank of labels and result) and the dataType is TInt32 + * or TInt64. Each entry in labels must be an index in [0, + * numClasses). Other values will raise an exception when this op is run on CPU, and + * return NaN for corresponding loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., + * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, + * or TFloat64. These activation energies are interpreted as unnormalized log + * probabilities. + * @param The data type for the labels + * @param The data type for the logits and loss + * @return the loss + * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank + * of the labels is not equal to the rank of the logits minus one. + */ + + public Operand sparseSoftmaxCrossEntropyWithLogits( + Operand labels, Operand logits) { + return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits(scope, labels, logits); + } + + +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java new file mode 100644 index 00000000000..d7833cdbb06 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java @@ -0,0 +1,161 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.Scope; +import org.tensorflow.op.SparseOps; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.sparse.DenseToDenseSetOperation; +import org.tensorflow.op.sparse.SparseToDense; +import org.tensorflow.types.family.TNumber; + +/** Implementation of set operations */ +public class SetsOps { + + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + SetsOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + + /** + * Computes set difference of elements in last dimension of a and b with + * aMinusB set to true. + * + *

      All but the last dimension of a and b must match + * + * @param a The first operand representing set a + * @param b The other operand representing set b + * @param the data type for the sets + * @return An Operand with the same rank as a and b, and all but the + * last dimension the * same. Elements along the last dimension contain the results of the set + * operation. + */ + public Operand difference(Operand a, Operand b) { + return difference(a, b, true); + } + + /** + * Computes set difference of elements in last dimension of a and b. + * + *

      All but the last dimension of a and b must match + * + * @param a The first operand representing set a + * @param b The other operand representing set b + * @param aMinusB whether to subtract b from a, vs vice versa. + * @param the data type for the sets + * @return An Operand with the same rank as a and b, and all but the + * last dimension the * same. Elements along the last dimension contain the results of the set + * operation. + */ + public Operand difference(Operand a, Operand b, boolean aMinusB) { + return setOperation(a, b, aMinusB ? Operation.A_MINUS_B : Operation.B_MINUS_A); + } + + /** + * Computes set union of elements in last dimension of a and b. + * + * @param a The first operand representing set a + * @param b The other operand representing set b + * @param the data type for the sets + * @return An Operand with the same rank as a and b, and all but the + * last dimension the * same. Elements along the last dimension contain the results of the set + * operation. + */ + public Operand union(Operand a, Operand b) { + return setOperation(a, b, Operation.UNION); + } + + /** + * Computes set intersection of elements in last dimension of a and b. + * + * @param a The first operand representing set a + * @param b The other operand representing set b + * @param the data type for the sets + * @return An Operand with the same rank as a and b, and all but the + * last dimension the * same. Elements along the last dimension contain the results of the set + * operation. + */ + public Operand intersection(Operand a, Operand b) { + return setOperation(a, b, Operation.INTERSECTION); + } + + /** + * Compute set operation of elements in last dimension of a and b. + * + * @param a The first set operation operand + * @param b The other et operation operand + * @param setOperation The set operation to perform, {@link Operation}. + * @param the data type for the sets + * @return An Operand with the same rank as a and b, and all but the + * last dimension the same. Elements along the last dimension contain the results of the set + * operation. 
+ */ + public Operand setOperation( + Operand a, Operand b, Operation setOperation) { + + DenseToDenseSetOperation setOperationResult = + DenseToDenseSetOperation.create( + scope, + a, + b, + setOperation.getSetOperation(), + DenseToDenseSetOperation.validateIndices(true)); + + return SparseToDense.create( + scope, + setOperationResult.resultIndices(), + setOperationResult.resultShape(), + setOperationResult.resultValues(), + Cast.create(scope, Constant.scalarOf(scope, 0), a.type())); + } + + /** + * Enumeration containing the string operation values to be passed to the TensorFlow Sparse Ops + * function {@link SparseOps#denseToDenseSetOperation} + */ + public enum Operation { + A_MINUS_B("a-b"), + B_MINUS_A("b-a"), + INTERSECTION("intersection"), + UNION("union"); + + private final String setOperation; + + Operation(String setOperation) { + this.setOperation = setOperation; + } + + /** + * Gets the set operation String value used to pass as the stringOperation value to {@link + * SparseOps#denseToDenseSetOperation} + * + * @return the set operation String value + */ + public String getSetOperation() { + return setOperation; + } + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 64faa7c5d70..75766cf9bfb 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -64,8 +64,7 @@ public class SparseSoftmaxCrossEntropyWithLogits { * probabilities. * @param the data type for the labels * @param the data tyoe for the loss and logits. - * @return A Tensor of the same shape as labels and of the same type as - * logits with the softmax cross entropy loss. + * @return the loss * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank * of the labels is not equal to the rank of the logits minus one. 
*/ diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetsOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java similarity index 86% rename from tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetsOpsTest.java rename to tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java index eceff2797f8..e10f016bd94 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetsOpsTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java @@ -2,6 +2,8 @@ import org.junit.jupiter.api.Test; import org.tensorflow.Operand; +import org.tensorflow.framework.op.FrameworkOps; +import org.tensorflow.framework.op.SetsOps; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; @@ -15,7 +17,7 @@ import static org.tensorflow.framework.utils.CastHelper.cast; -class SetsOpsTest { +class SetOpsTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -28,6 +30,7 @@ public void testSetIntersectionMultirow2() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}}); Operand b = tf.constant(new int[][] {{1, 9}, {1, 5}}); int[][] expected = new int[][] {{1, 9}, {0, 0}}; @@ -35,7 +38,7 @@ public void testSetIntersectionMultirow2() { for (Class type : types) { Operand aa = cast(tf, a, type); Operand bb = cast(tf, b, type); - Operand intersection = SetsOps.intersection(tf, aa, bb); + Operand intersection = fops.sets.intersection(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); } @@ -49,6 +52,7 @@ public void testSetIntersectionDuplicates2d() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{1, 1, 3}}); Operand b = tf.constant(new int[][] {{1, 1}}); int[][] expected = {{1}}; @@ -56,7 +60,7 @@ public void testSetIntersectionDuplicates2d() { for (Class type : types) { Operand aa = cast(tf, a, type); Operand bb = cast(tf, b, type); - Operand intersection = SetsOps.intersection(tf, aa, bb); + Operand intersection = fops.sets.intersection(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); @@ -72,6 +76,7 @@ public void testDenseSetDifferenceMultirow2d() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{1, 5, 9}, {4, 5, 3}}); Operand b = tf.constant(new int[][] {{1, 2, 6}, {1, 2, 2}}); @@ -81,14 +86,14 @@ public void testDenseSetDifferenceMultirow2d() { int[][] expected = {{5, 9, 0}, {3, 4, 5}}; // a- b Shape expectedShape = Shape.of(2, 3); - Operand intersection = SetsOps.difference(tf, aa, bb); + Operand intersection = fops.sets.difference(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); // b - a expected = new int[][] {{2, 6}, {1, 2}}; 
expectedShape = Shape.of(2, 2); - intersection = SetsOps.difference(tf, aa, bb, false); + intersection = fops.sets.difference(aa, bb, false); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); @@ -103,6 +108,7 @@ public void testDenseUnionMultirow2d() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); Operand a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}}); Operand b = tf.constant(new int[][] {{1, 9}, {1, 2}}); int[][] expected = new int[][] {{5, 0}, {3, 4}}; @@ -111,7 +117,7 @@ public void testDenseUnionMultirow2d() { Operand bb = cast(tf, b, type); Shape expectedShape = Shape.of(2, 2); // a- b - Operand intersection = SetsOps.difference(tf, aa, bb); + Operand intersection = fops.sets.difference(aa, bb); session.evaluate(cast(tf, tf.constant(expected), type), intersection); session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); } From 9480126a255cb4ce607427ed785b19a889d4d929 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 15:21:11 -0400 Subject: [PATCH 42/60] Added FrameworkOps analogous to Ops. Added NnOps and SetOps as groups. Fixed MetricsHelper and Losses to use the bew FrameworkOps. Moved SetsOps to framework.op. --- .../src/main/java/org/tensorflow/framework/losses/Losses.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index aa5fa4ada6d..33c8d50409d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -572,7 +572,7 @@ public static Operand sparseCategoricalCrossentropy( tf.constant( new long[] {-1L, predictionsShape.size(predictionsShape.numDimensions() - 1)})); } - + Operand loss = fop.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); if (updateShape && predictionsRank >= 3) { Shape newShape = predictionsShape.take(predictionsShape.numDimensions() - 1); From 074794b2d69dfc4a50a46a5c0adbf2730cca256e Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 15:36:41 -0400 Subject: [PATCH 43/60] Move l2Normalize to MathOps --- .../tensorflow/framework/losses/Losses.java | 23 ++----- .../tensorflow/framework/op/FrameworkOps.java | 3 + .../org/tensorflow/framework/op/MathOps.java | 67 +++++++++++++++++++ 3 files changed, 74 insertions(+), 19 deletions(-) create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index 33c8d50409d..398588cee67 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -337,13 +337,14 @@ public static Operand categoricalHinge( */ public static Operand cosineSimilarity( Ops tf, Operand labels, Operand predictions, int[] axis) { + FrameworkOps fops = FrameworkOps.create(tf); Operand tLabels = cast(tf, labels, predictions.type()); LossTuple lossTuple = LossesHelper.squeezeOrExpandDimensions(tf, tLabels, predictions, null); 
predictions = lossTuple.getTarget(); tLabels = lossTuple.getLabels(); - tLabels = l2Normalize(tf, tLabels, axis); - predictions = l2Normalize(tf, predictions, axis); + tLabels = fops.math.l2Normalize(tLabels, axis); + predictions = fops.math.l2Normalize(predictions, axis); Operand mathMul = tf.math.mul(tLabels, predictions); return tf.reduceSum(mathMul, tf.constant(axis), ReduceSum.keepDims(Boolean.FALSE)); } @@ -651,23 +652,7 @@ private static Operand smoothCategoricalLabels( return tf.math.add(tf.math.mul(labels, oneMinusSmoothing), tf.math.div(smoothing, numClasses)); } - // TODO this was tf.math.l2_normalize in TF Python - /** - * Normalizes along dimension axis using an L2 norm. - * - * @param tf The TensorFlow Ops - * @param x the input - * @param axis Dimension along which to normalize. - * @param the data type for the input and the result - * @return the normalized values based on L2 norm - */ - public static Operand l2Normalize(Ops tf, Operand x, int[] axis) { - Operand squareSum = - tf.reduceSum(tf.math.square(x), tf.constant(axis), ReduceSum.keepDims(Boolean.TRUE)); - Operand invNorm = - tf.math.rsqrt(tf.math.maximum(squareSum, cast(tf, tf.constant(1e-12F), x.type()))); - return tf.math.mul(x, invNorm); - } + /** * Converts binary labels into -1/1. diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index cecbecfed15..18fb8ada6b7 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -34,6 +34,7 @@ public class FrameworkOps { public final NnOps nn; public final SetsOps sets; + public final MathOps math; /** * Creates a FrameworkOps instance with the provided scope @@ -45,6 +46,7 @@ private FrameworkOps(Scope scope) { this.scope = scope; nn = new NnOps(this); sets = new SetsOps(this); + math = new MathOps(this); } /** @@ -57,6 +59,7 @@ private FrameworkOps(Ops coreOps) { this.scope = coreOps.scope(); nn = new NnOps(this); sets = new SetsOps(this); + math = new MathOps(this); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java new file mode 100644 index 00000000000..57a18fc63c2 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -0,0 +1,67 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.Ops; +import org.tensorflow.op.Scope; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Maximum; +import org.tensorflow.op.math.Mul; +import org.tensorflow.op.math.Rsqrt; +import org.tensorflow.op.math.Square; +import org.tensorflow.types.family.TNumber; + +import static org.tensorflow.framework.utils.CastHelper.cast; + +public class MathOps { + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + MathOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + + /** + * Normalizes along dimension axis using an L2 norm. + * + * @param x the input + * @param axis Dimension along which to normalize. + * @param the data type for the input and the result + * @return the normalized values based on L2 norm + */ + public Operand l2Normalize(Operand x, int[] axis) { + Operand squareSum = + ReduceSum.create(scope, + Square.create(scope, x), + Constant.vectorOf(scope, axis), + ReduceSum.keepDims(Boolean.TRUE)); + Operand invNorm = + Rsqrt.create(scope, + Maximum.create(scope, squareSum, + Cast.create(scope, + Constant.scalarOf(scope, 1e-12F), x.type()))); + return Mul.create(scope, x, invNorm); + } +} From 7526b7e9db059f50cb369ced4f8ee26674852f85 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 18:50:26 -0400 Subject: [PATCH 44/60] Reformat code, fix javadocs --- .../tensorflow/framework/op/FrameworkOps.java | 76 +++-- .../org/tensorflow/framework/op/MathOps.java | 68 ++-- .../org/tensorflow/framework/op/NnOps.java | 312 +++++++++--------- .../op/nn/SigmoidCrossEntropyWithLogits.java | 14 +- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 3 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 52 +-- 6 files changed, 271 insertions(+), 254 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index 18fb8ada6b7..c8b234f2c51 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -30,11 +30,10 @@ */ public class FrameworkOps { public final Ops coreOps; - private final Scope scope; - public final NnOps nn; public final SetsOps sets; public final MathOps math; + private final Scope scope; /** * Creates a FrameworkOps instance with the provided scope @@ -62,8 +61,43 @@ private FrameworkOps(Ops coreOps) { math = new MathOps(this); } + /** + * Creates an API for building operations in the provided execution environment + * + * @param env the exection environment + * @return the FrameworkOps + */ + public static FrameworkOps create(ExecutionEnvironment env) { + return new FrameworkOps(new Scope(env)); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + *

      Invoking this method is equivalent to {@code + * FrameworkOps.create(EagerSession.getDefault())}. + * + * @return the FrameworkOps + */ + public static FrameworkOps create() { + return new FrameworkOps(new Scope(EagerSession.getDefault())); + } + + /** + * Creates an API for building operations in the default eager execution environment + * + * @param coreOps the TensorFlow core Ops + * @return the FrameworkOps + */ + public static FrameworkOps create(Ops coreOps) { + return new FrameworkOps(coreOps); + } - /** Returns the current {@link Scope scope} of this API */ + /** + * Returns the current {@link Scope scope} of this API + * + * @return the current {@link Scope scope} of this API + */ public final Scope scope() { return scope; } @@ -81,6 +115,9 @@ public final Ops coreOps() { * Returns an API that builds operations with the provided name prefix. * *

      @link Scope#withSubScope(String)} + * + * @param childScopeName the name of the child scope + * @return the FrameworkOps */ public FrameworkOps withSubScope(String childScopeName) { return new FrameworkOps(scope.withSubScope(childScopeName)); @@ -90,6 +127,9 @@ public FrameworkOps withSubScope(String childScopeName) { * Returns an API that uses the provided name for an op. * *

      {@link Scope#withName(String)} + * + * @param opName the name of the scope + * @return the FrameworkOps */ public FrameworkOps withName(String opName) { return new FrameworkOps(scope.withName(opName)); @@ -99,6 +139,9 @@ public FrameworkOps withName(String opName) { * Returns an API that places the created operations on the device(s) matching the provided spec. * *

      {@link Scope#withDevice(DeviceSpec)} + * + * @param deviceSpec the device specification for the scope + * @return the FrameworkOps */ public FrameworkOps withDevice(DeviceSpec deviceSpec) { return new FrameworkOps(scope.withDevice(deviceSpec)); @@ -108,32 +151,11 @@ public FrameworkOps withDevice(DeviceSpec deviceSpec) { * Returns an API that adds operations to the graph with the provided control dependencies. * *

      {@link Scope#withControlDependencies(Iterable)} + * + * @param controls the operations + * @return the FrameworkOps */ public FrameworkOps withControlDependencies(Iterable controls) { return new FrameworkOps(scope.withControlDependencies(controls)); } - - /** Creates an API for building operations in the provided execution environment */ - public static FrameworkOps create(ExecutionEnvironment env) { - return new FrameworkOps(new Scope(env)); - } - - /** - * Creates an API for building operations in the default eager execution environment - * - *

      Invoking this method is equivalent to {@code - * FrameworkOps.create(EagerSession.getDefault())}. - */ - public static FrameworkOps create() { - return new FrameworkOps(new Scope(EagerSession.getDefault())); - } - - /** - * Creates an API for building operations in the default eager execution environment - * - * @param coreOps the TensorFlow core Ops - */ - public static FrameworkOps create(Ops coreOps) { - return new FrameworkOps(coreOps); - } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java index 57a18fc63c2..5208cde98f3 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -15,7 +15,6 @@ package org.tensorflow.framework.op; import org.tensorflow.Operand; -import org.tensorflow.op.Ops; import org.tensorflow.op.Scope; import org.tensorflow.op.core.Constant; import org.tensorflow.op.core.ReduceSum; @@ -26,42 +25,41 @@ import org.tensorflow.op.math.Square; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - public class MathOps { - private final Scope scope; + private final Scope scope; - private final FrameworkOps frameworkOps; + private final FrameworkOps frameworkOps; - /** - * Creates Framework {@code nn} Operations - * - * @param frameworkOps the TensorFLow framework Ops - */ - MathOps(FrameworkOps frameworkOps) { - this.scope = frameworkOps.scope(); - this.frameworkOps = frameworkOps; - } + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + MathOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } - /** - * Normalizes along dimension axis using an L2 norm. - * - * @param x the input - * @param axis Dimension along which to normalize. - * @param the data type for the input and the result - * @return the normalized values based on L2 norm - */ - public Operand l2Normalize(Operand x, int[] axis) { - Operand squareSum = - ReduceSum.create(scope, - Square.create(scope, x), - Constant.vectorOf(scope, axis), - ReduceSum.keepDims(Boolean.TRUE)); - Operand invNorm = - Rsqrt.create(scope, - Maximum.create(scope, squareSum, - Cast.create(scope, - Constant.scalarOf(scope, 1e-12F), x.type()))); - return Mul.create(scope, x, invNorm); - } + /** + * Normalizes along dimension axis using an L2 norm. + * + * @param x the input + * @param axis Dimension along which to normalize. + * @param the data type for the input and the result + * @return the normalized values based on L2 norm + */ + public Operand l2Normalize(Operand x, int[] axis) { + Operand squareSum = + ReduceSum.create( + scope, + Square.create(scope, x), + Constant.vectorOf(scope, axis), + ReduceSum.keepDims(Boolean.TRUE)); + Operand invNorm = + Rsqrt.create( + scope, + Maximum.create( + scope, squareSum, Cast.create(scope, Constant.scalarOf(scope, 1e-12F), x.type()))); + return Mul.create(scope, x, invNorm); + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java index 4054f3ddbb5..0fea3743d95 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java @@ -32,166 +32,164 @@ *

      {@link FrameworkOps} */ public class NnOps { - private final Scope scope; + private final Scope scope; - private final FrameworkOps frameworkOps; + private final FrameworkOps frameworkOps; - /** - * Creates Framework {@code nn} Operations - * @param frameworkOps the TensorFLow framework Ops - */ - NnOps(FrameworkOps frameworkOps) { - this.scope = frameworkOps.scope(); - this.frameworkOps = frameworkOps; - } + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + NnOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } - /** - * Computes sigmoid cross entropy given logits. - * - *

      Measures the probability error in discrete classification tasks in which each class is - * independent and not mutually exclusive. For instance, one could perform multilabel - * classification where a picture can contain both an elephant and a dog at the same time. - * - *

      For brevity, let x = logits, z = labels. The logistic loss in - * pseudo-code is - * - *

      -     *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      -     *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
      -     *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
-     *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
      -     *   = (1 - z) * x + log(1 + exp(-x))
      -     *   = x - x * z + log(1 + exp(-x))
      -     *  
      - * - *

      For x < 0, to avoid overflow in exp(-x), we reformulate the above - * - *

      -     *  x - x * z + log(1 + exp(-x))
      -     *   = log(exp(x)) - x * z + log(1 + exp(-x))
      -     *   = - x * z + log(1 + exp(x))
      -     *  
      - * - *

      Hence, to ensure stability and avoid overflow, the implementation uses this equivalent - * formulation - * - *

      -     *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
      -     *  
      - * - *

      logits and labels must have the same type and shape. - * - *

      - * - * @param labels the labels - * @param logits the logits of type float32 or float64 - * @param the type of labels and logits - * @return the component-wise logistic losses. - * @throws IllegalArgumentException if logits' and labels' do not have the same shape - */ - public Operand sigmoidCrossEntropyWithLogits(Operand labels, - Operand logits) { - return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); - } - - /** - * Computes softmax cross entropy between logits and labels. - * - *

      Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - *

      NOTE: - * - *

      While the classes are mutually exclusive, their probabilities need not be. All that is - * required is that each row of labels is a valid probability distribution. If they - * are not, the computation of the gradient will be incorrect. - * - *

      If using exclusive labels (wherein one and only one class is true at a time), - * see {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} - * - *

      Usage: - * - *

      -     *    Operand<TFloat32> logits =
      -     *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
      -     *    Operand<TFloat32> labels =
      -     *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
      -     *    Operand<TFloat32> output =
      -     *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
      -     *    // output Shape = [2]
      -     *    // dataType = FLOAT (1)
      -     *    // values { 0.169846, 0.824745 }
      -     *  
      - * - *

      Backpropagation will happen into both logits and labels. To - * disallow backpropagation into labels, pass label tensors through - * tf.stopGradient before feeding it to this function. - * - * @param labels Each vector along the class dimension should hold a valid probability - * distribution e.g. for the case in which labels are of shape [batch_size, num_classes] - * , each row of labels[i] must be a valid probability distribution. - * @param logits Per-label activations, typically a linear output. These activation energies are - * interpreted as unnormalized log probabilities. - * @param axis The class dimension. -1 is the last dimension. - * @param the number type of the operands - * @return the softmax cross entropy loss. Its type is the same as logits and its - * shape is the same as labels except that it does not have the last dimension of - * labels. - */ - public Operand softmaxCrossEntropyWithLogits( - Operand labels, Operand logits, int axis) { - return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); - } - - /** - * Computes sparse softmax cross entropy between logits and labels. - * - *

      Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - *

      NOTE: - * - *

      For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the labels vector must provide a single specific - * index for the true class for each row of logits (each minibatch entry). For soft - * softmax classification with a probability distribution for each entry, {@link - * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. - * - *

      WARNING: - * - *

      This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of softmax, - * as it will produce incorrect results. - * - *

      A common use case is to have logits of shape [batchSize, numClasses] and have - * labels of shape [batchSize], but higher dimensions are supported, in which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, TFloat32 - * , or TFloat64, and labels must have the dtype of TInt32 - * or TInt64. - * - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log - * probabilities. - * @param The data type for the labels - * @param The data type for the logits and loss - * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank - * of the labels is not equal to the rank of the logits minus one. - */ - - public Operand sparseSoftmaxCrossEntropyWithLogits( - Operand labels, Operand logits) { - return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits(scope, labels, logits); - } + /** + * Computes sigmoid cross entropy given {@code logits}. + * + *

      Measures the probability error in discrete classification tasks in which each class is + * independent and not mutually exclusive. For instance, one could perform multilabel + * classification where a picture can contain both an elephant and a dog at the same time. + * + *

      For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in pseudo-code is + * + *

      +   *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      +   *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
      +   *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
+   *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
      +   *   = (1 - z) * x + log(1 + exp(-x))
      +   *   = x - x * z + log(1 + exp(-x))
      +   *  
      + * + *

      For {@code x < 0}, to avoid overflow in {@code exp(-x)}, we reformulate the above + * + *

      +   *  x - x * z + log(1 + exp(-x))
      +   *   = log(exp(x)) - x * z + log(1 + exp(-x))
      +   *   = - x * z + log(1 + exp(x))
      +   *  
      + * + *

      Hence, to ensure stability and avoid overflow, the implementation uses this equivalent + * formulation + * + *

      +   *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
      +   *  
      + * + *

      {@code logits} and {@code labels} must have the same type and shape. + * + *

      + * + * @param labels the labels + * @param logits the logits of type float32 or float64 + * @param the type of labels and logits + * @return the component-wise logistic losses. + * @throws IllegalArgumentException if logits' and labels' do not have the same shape + */ + public Operand sigmoidCrossEntropyWithLogits( + Operand labels, Operand logits) { + return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); + } + /** + * Computes softmax cross entropy between {@code logits} and {@code labels}. + * + *

      Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

      NOTE: + * + *

      While the classes are mutually exclusive, their probabilities need not be. All that is + * required is that each row of {@code labels} is a valid probability distribution. If they are + * not, the computation of the gradient will be incorrect. + * + *

      If using exclusive {@code labels} (wherein one and only one class is true at a time), see + * {@link org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} + * + *

      Usage: + * + *

      +   *    Operand<TFloat32> logits =
      +   *        tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} );
      +   *    Operand<TFloat32> labels =
      +   *        tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} );
      +   *    Operand<TFloat32> output =
      +   *        tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1);
      +   *    // output Shape = [2]
      +   *    // dataType = FLOAT (1)
      +   *    // values { 0.169846, 0.824745 }
      +   *  
      + * + *

      Backpropagation will happen into both {@code logits} and {@code labels}. To disallow + * backpropagation into {@code labels}, pass label tensors through {@code tf.stopGradient} before + * feeding it to this function. + * + * @param labels Each vector along the class dimension should hold a valid probability + * distribution e.g. for the case in which labels are of shape {@code [batch_size, + * num_classes] }, each row of {@code labels[i]} must be a valid probability distribution. + * @param logits Per-label activations, typically a linear output. These activation energies are + * interpreted as unnormalized log probabilities. + * @param axis The class dimension. -1 is the last dimension. + * @param the number type of the operands + * @param the data type for the labels. + * @return the softmax cross entropy loss. Its type is the same as {@code logits} and its shape is + * the same as {@code labels} except that it does not have the last dimension of {@code + * labels}. + * + */ + public Operand softmaxCrossEntropyWithLogits( + Operand labels, Operand logits, int axis) { + return SoftmaxCrossEntropyWithLogits.softmaxCrossEntropyWithLogits(scope, labels, logits, axis); + } + /** + * Computes sparse softmax cross entropy between {@code logits} and {@code labels}. + * + *

      Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + *

      NOTE: + * + *

      For this operation, the probability of a given label is considered exclusive. That is, soft + * classes are not allowed, and the {@code labels} vector must provide a single specific index for + * the true class for each row of {@code logits} (each minibatch entry). For soft softmax + * classification with a probability distribution for each entry, {@link + * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. + * + *

      WARNING: + * + *

      This op expects unscaled logits, since it performs a {@code softmax} on {@code logits } + * internally for efficiency. Do not call this op with the output of {@code softmax}, as it will + * produce incorrect results. + * + *

      A common use case is to have logits of shape {@code [batchSize, numClasses]} and have labels + * of shape {@code [batchSize]}, but higher dimensions are supported, in which case the {@code + * dim}-th dimension is assumed to be of size {@code numClasses}. {@code logits} must have the + * {@code dataType} of {@code TFloat16}, {@code TFloat32} , or {@code TFloat64}, and {@code + * labels} must have the dtype of {@code TInt32} or {@code TInt64}. + * + * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r } is + * rank of {@code labels} and result) and the dataType is {@code TInt32} or {@code TInt64}. + * Each entry in {@code labels} must be an index in {@code [0, numClasses)}. Other values will + * raise an exception when this op is run on CPU, and return {@code NaN} for corresponding + * loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., + * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, or {@code + * TFloat64}. These activation energies are interpreted as unnormalized log probabilities. + * @param The data type for the labels + * @param The data type for the logits and loss + * @return the loss + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank + * of the labels is not equal to the rank of the logits minus one. + */ + public Operand sparseSoftmaxCrossEntropyWithLogits( + Operand labels, Operand logits) { + return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits( + scope, labels, logits); + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index b55385839d3..fc3f7739363 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -3,8 +3,6 @@ import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; import org.tensorflow.op.core.Select; import org.tensorflow.op.core.ZerosLike; import org.tensorflow.op.dtypes.Cast; @@ -18,17 +16,17 @@ import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -//@Operator(group = "nn") +// @Operator(group = "nn") public class SigmoidCrossEntropyWithLogits { /** - * Computes sigmoid cross entropy given logits. + * Computes sigmoid cross entropy given {@code logits}. * *

      Measures the probability error in discrete classification tasks in which each class is * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. * - *

      For brevity, let x = logits, z = labels. The logistic loss in + *

      For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in * pseudo-code is * *

      @@ -40,7 +38,7 @@ public class SigmoidCrossEntropyWithLogits {
          *  = x - x * z + log(1 + exp(-x))
          * 
      * - *

      For x < 0, to avoid overflow in exp(-x), we reformulate the above + *

      For {@code x < 0}, to avoid overflow in {@code exp(-x)}, we reformulate the above * *

          * x - x * z + log(1 + exp(-x))
      @@ -55,7 +53,7 @@ public class SigmoidCrossEntropyWithLogits {
          *   max(x, 0) - x * z + log(1 + exp(-abs(x)))
          * 
      * - *

      logits and labels must have the same type and shape. + *

      {@code logits} and {@code labels} must have the same type and shape. * *
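A quick numeric check of the two formulations above (an illustrative aside, not part of the patch): for a sample point both give the same loss, but only the reformulated version avoids overflow for large negative {@code x}.

// x = -3, z = 1: both expressions evaluate to about 3.0486.
double x = -3.0, z = 1.0;
double direct = x - x * z + Math.log(1 + Math.exp(-x));                          // exp(-x) can overflow for very negative x
double stable = Math.max(x, 0) - x * z + Math.log(1 + Math.exp(-Math.abs(x)));   // same value, no overflow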

      * @@ -66,7 +64,7 @@ public class SigmoidCrossEntropyWithLogits { * @return the component-wise logistic losses. * @throws IllegalArgumentException if logits' and labels' do not have the same shape */ - //@Endpoint(name = "sigmoidCrossEntropyWithLogits") + // @Endpoint(name = "sigmoidCrossEntropyWithLogits") public static Operand sigmoidCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits) { if (!isCompatible(labels.shape(), logits.shape())) { diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java index 0f5b8197f1e..7d59941f27a 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -66,7 +66,8 @@ public class SoftmaxCrossEntropyWithLogits { * @param logits Per-label activations, typically a linear output. These activation energies are * interpreted as unnormalized log probabilities. * @param axis The class dimension. -1 is the last dimension. - * @param the number type of the operands + * @param the data type for the logits and return operand + * @param the data type for the labels * @return the softmax cross entropy loss. Its type is the same as logits and its * shape is the same as labels except that it does not have the last dimension of * labels. diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 75766cf9bfb..0b2d29d6092 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -25,7 +25,7 @@ public class SparseSoftmaxCrossEntropyWithLogits { /** - * Computes sparse softmax cross entropy between logits and labels. + * Computes sparse softmax cross entropy between {@code logits} and {@code labels}. * *

      Measures the probability error in discrete classification tasks in which the classes are * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is @@ -34,45 +34,45 @@ public class SparseSoftmaxCrossEntropyWithLogits { *

      NOTE: * *

For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the labels vector must provide a single specific - * index for the true class for each row of logits (each minibatch entry). For soft + * classes are not allowed, and the {@code labels} vector must provide a single specific + * index for the true class for each row of {@code logits} (each minibatch entry). For soft + * softmax classification with a probability distribution for each entry, see {@link * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. * *

      WARNING: * - *

      This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of softmax, + *

      This op expects unscaled logits, since it performs a {@code softmax} on {@code logits + * } internally for efficiency. Do not call this op with the output of {@code softmax}, * as it will produce incorrect results. * - *

      A common use case is to have logits of shape [batchSize, numClasses] and have - * labels of shape [batchSize], but higher dimensions are supported, in which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, TFloat32 - * , or TFloat64, and labels must have the dtype of TInt32 - * or TInt64. + *

      A common use case is to have logits of shape {@code [batchSize, numClasses]} and have + * labels of shape {@code [batchSize]}, but higher dimensions are supported, in which case + * the {@code dim}-th dimension is assumed to be of size {@code numClasses}. {@code + * logits} must have the {@code dataType} of {@code TFloat16}, {@code TFloat32} + * , or {@code TFloat64}, and {@code labels} must have the dtype of {@code TInt32} + * or {@code TInt64}. * * @param scope current scope - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log + * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r + * } is rank of {@code labels} and result) and the dataType is {@code TInt32} + * or {@code TInt64}. Each entry in {@code labels} must be an index in {@code [0, + * numClasses)}. Other values will raise an exception when this op is run on CPU, and + * return {@code NaN} for corresponding loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., + * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, + * or {@code TFloat64}. These activation energies are interpreted as unnormalized log * probabilities. - * @param the data type for the labels - * @param the data tyoe for the loss and logits. + * @param the data type for the labels + * @param the data tyoe for the loss and logits. * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank * of the labels is not equal to the rank of the logits minus one. */ @SuppressWarnings("unchecked") @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits") public static - Operand sparseSoftmaxCrossEntropyWithLogits( - Scope scope, Operand labels, Operand logits) { + Operand sparseSoftmaxCrossEntropyWithLogits( + Scope scope, Operand labels, Operand logits) { scope = scope.withSubScope("SparseSoftmaxCrossEntropyWithLogits"); Operand preciseLogits; if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { @@ -119,7 +119,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( return Cast.create(scope, cost, logits.type()); } else { // Unchecked cast already checked with previous if - return (Operand) cost; + return (Operand) cost; } } @@ -160,7 +160,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( return Cast.create(scope, cost, logits.type()); } else { // Unchecked cast already checked with previous if - return (Operand) cost; + return (Operand) cost; } } } From 0a163c6cb30d9cdca0fb61c837a8c7035783136a Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Fri, 16 Apr 2021 18:04:30 -0400 Subject: [PATCH 45/60] Add confusionMatrix() method. 
add Unit test --- .../org/tensorflow/framework/op/MathOps.java | 301 +++++++++++++ .../tensorflow/framework/op/MathOpsTest.java | 413 ++++++++++++++++++ 2 files changed, 714 insertions(+) create mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java index 5208cde98f3..36f5b692cab 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -15,16 +15,37 @@ package org.tensorflow.framework.op; import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.LossTuple; +import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; +import org.tensorflow.op.core.AssertThat; import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Identity; +import org.tensorflow.op.core.OnesLike; +import org.tensorflow.op.core.Range; +import org.tensorflow.op.core.Rank; +import org.tensorflow.op.core.ReduceAll; +import org.tensorflow.op.core.ReduceMax; import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.core.ScatterNd; +import org.tensorflow.op.core.Squeeze; +import org.tensorflow.op.core.Stack; +import org.tensorflow.op.core.Zeros; import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.GreaterEqual; +import org.tensorflow.op.math.Less; import org.tensorflow.op.math.Maximum; import org.tensorflow.op.math.Mul; import org.tensorflow.op.math.Rsqrt; import org.tensorflow.op.math.Square; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; +import java.util.Arrays; +import java.util.Collections; + public class MathOps { private final Scope scope; @@ -62,4 +83,284 @@ public Operand l2Normalize(Operand x, int[] axis) { scope, squareSum, Cast.create(scope, Constant.scalarOf(scope, 1e-12F), x.type()))); return Mul.create(scope, x, invNorm); } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape {@code [n, n]}, where {@code n} is the number of valid + * labels for a given classification task. Both predictions and labels must be 1-D arrays of the + * same shape in order for this function to work. + * + *

If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in + * either predictions or labels. Class labels are expected to start at 0. For example, if + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. + * + *

If {@code weights} is not null, then each prediction contributes its corresponding weight to the + * total value of the confusion matrix cell. + * + *

      For example: + * + *

+   *     fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      +   *         [[0 0 0 0 0]
      +   *          [0 0 1 0 0]
      +   *          [0 0 1 0 0]
      +   *          [0 0 0 0 0]
      +   *          [0 0 0 0 1]]
      +   * 
      + * + *

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public Operand confusionMatrix(Operand labels, Operand predictions) { + return confusionMatrix(labels, predictions, null, null, labels.type()); + } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape {@code [n, n]}, where {@code n} is the number of valid + * labels for a given classification task. Both predictions and labels must be 1-D arrays of the + * same shape in order for this function to work. + * + *

If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in + * either predictions or labels. Class labels are expected to start at 0. For example, if + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. + * + *

If {@code weights} is not null, then each prediction contributes its corresponding weight to the + * total value of the confusion matrix cell. + * + *

      For example: + * + *

+   *     fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      +   *         [[0 0 0 0 0]
      +   *          [0 0 1 0 0]
      +   *          [0 0 1 0 0]
      +   *          [0 0 0 0 0]
      +   *          [0 0 0 0 1]]
      +   * 
      + * + *
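A small sketch of the {@code weights} behaviour described above (illustrative only; {@code tf} and {@code fops} are assumed as elsewhere in this series): each matching (label, prediction) pair adds its weight, rather than 1, to the corresponding cell.

Operand<TInt64> labels      = tf.constant(new long[] {1, 2, 4});
Operand<TInt64> predictions = tf.constant(new long[] {2, 2, 4});
Operand<TInt64> weights     = tf.constant(new long[] {10, 100, 1000});
Operand<TInt64> cm = fops.math.confusionMatrix(labels, predictions, weights);
// cm[1][2] == 10, cm[2][2] == 100, cm[4][4] == 1000; every other cell is 0.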

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param weights An optional Operand whose shape matches {@code predictions}. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public Operand confusionMatrix( + Operand labels, Operand predictions, Operand weights) { + return confusionMatrix(labels, predictions, weights, null, labels.type()); + } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape {@code [n, n]}, where {@code n} is the number of valid + * labels for a given classification task. Both predictions and labels must be 1-D arrays of the + * same shape in order for this function to work. + * + *

If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in + * either predictions or labels. Class labels are expected to start at 0. For example, if + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. + * + *

If {@code weights} is not null, then each prediction contributes its corresponding weight to the + * total value of the confusion matrix cell. + * + *

      For example: + * + *

+   *     fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      +   *         [[0 0 0 0 0]
      +   *          [0 0 1 0 0]
      +   *          [0 0 1 0 0]
      +   *          [0 0 0 0 0]
      +   *          [0 0 0 0 1]]
      +   * 
      + * + *
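A sketch of the full overload with an explicit {@code numClasses} and result type (illustrative only; parameter order follows the signature below, and the generic bounds are elided as in the rest of this patch):

Operand<TInt64> cm = fops.math.confusionMatrix(
    tf.constant(new long[] {1, 2, 4}),   // labels
    tf.constant(new long[] {2, 2, 4}),   // predictions
    null,                                // no weights
    tf.constant(6L),                     // numClasses: force a 6x6 matrix even though only 0..4 appear
    TInt64.class);                       // data type of the returned matrix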

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param weights An optional Operand whose shape matches {@code predictions}. + * @param numClasses The possible number of labels the classification task can have. If this value + * is null, it will be calculated using both predictions and labels. + * @param type Data type of the confusion matrix. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public Operand confusionMatrix( + Operand labels, + Operand predictions, + Operand weights, + Operand numClasses, + Class type) { + Scope lScope = scope.withSubScope("confusionMatrix"); + LossTuple tuple = removeSqueezableDimensions(labels, predictions, 0); + Operand lLabels = Cast.create(lScope, tuple.getLabels(), TInt64.class); + Operand lPredictions = Cast.create(lScope, tuple.getTarget(), TInt64.class); + + Operand zero = Constant.scalarOf(lScope, 0L); + Operand one = Constant.scalarOf(lScope, 1L); + + AssertThat labelsNonNegative = + AssertThat.create( + lScope, + ReduceAll.create(lScope, GreaterEqual.create(lScope, lLabels, zero), allAxes(lLabels)), + Collections.singletonList( + Constant.scalarOf(lScope, "labels contains negative values"))); + lLabels = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(labelsNonNegative)), lLabels); + + AssertThat predictionsNonNegative = + AssertThat.create( + lScope, + ReduceAll.create( + lScope, GreaterEqual.create(lScope, lPredictions, zero), allAxes(lPredictions)), + Collections.singletonList( + Constant.scalarOf(lScope, "predictions contains negative values"))); + lPredictions = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(predictionsNonNegative)), + lPredictions); + + Operand lNumClasses; + if (numClasses == null) { + lNumClasses = + Add.create( + lScope, + Maximum.create( + lScope, + ReduceMax.create(lScope, lPredictions, zero), + ReduceMax.create(lScope, lLabels, zero)), + one); + } else { + lNumClasses = Cast.create(lScope, numClasses, TInt64.class); + AssertThat labelsLess = + AssertThat.create( + lScope, + Less.create(lScope, lLabels, lNumClasses), + Collections.singletonList(Constant.scalarOf(lScope, "labels out of bounds"))); + lLabels = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(labelsLess)), lLabels); + + AssertThat predictionsLess = + AssertThat.create( + lScope, + Less.create(lScope, lPredictions, lNumClasses), + Collections.singletonList(Constant.scalarOf(lScope, "predictions out of bounds"))); + lPredictions = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(predictionsLess)), + lPredictions); + } + + if (weights != null) { + if (!predictions.shape().isCompatibleWith(weights.shape())) { + throw new IllegalArgumentException( + String.format( + "predictions.shape() [%s], is not compatible with weights.shape() [ %s].", + predictions.shape(), weights.shape())); + } + } + + Operand shape = 
Stack.create(lScope, Arrays.asList(lNumClasses, lNumClasses)); + Operand indices = + Stack.create(lScope, Arrays.asList(lLabels, lPredictions), Stack.axis(1L)); + Operand values = weights == null ? OnesLike.create(lScope, predictions) : weights; + Operand zeroMatrix = Zeros.create(lScope, Cast.create(lScope, shape, TInt32.class), type); + + return ScatterNd.create(lScope, indices, values, shape); + } + + /** + * Squeeze last dim if ranks differ from expected by exactly 1. + * + * @param labels Label values, a Operand whose dimensions match predictions + * . + * @param predictions Predicted values, a Tensor of arbitrary dimensions. + * @param expectedRankDiff Expected result of rank(predictions) - rank(labels). + * @param the data type for the labels, predictions and result + * @return labels and predictions, possibly with last dim squeezed. + */ + public LossTuple removeSqueezableDimensions( + Operand labels, Operand predictions, int expectedRankDiff) { + Scope lScope = scope.withSubScope("removeSqueezableDimensions"); + Shape predictionsShape = predictions.shape(); + int predictionsRank = predictionsShape.numDimensions(); + Shape labelsShape = labels.shape(); + int labelsRank = labelsShape.numDimensions(); + + if (predictionsRank != Shape.UNKNOWN_SIZE || labelsRank != Shape.UNKNOWN_SIZE) { + // Use static rank. + int rankDiff = predictionsRank - labelsRank; + if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.size(-1), 1)) { + predictions = Squeeze.create(lScope, predictions); + } else if (rankDiff == expectedRankDiff - 1 && Shape.isCompatible(labelsShape.size(-1), 1)) { + labels = Squeeze.create(lScope, labels); + } + return new LossTuple<>(labels, predictions); + } + // Use dynamic rank. + + // TODO: hold for lazy select feature, + // Operand rankDiff = tf.math.sub(tf.rank(predictions), tf.rank(labels)); + if (predictionsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(predictionsShape.size(-1), 1)) { + /* + * TODO, if we ever get a select that does lazy evaluation, but for now do the tf.squeeze + * predictions = tf.select( tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), + * tf.squeeze(predictions, Squeeze.axis(Arrays.asList(-1L))), predictions ); * + */ + predictions = + Squeeze.create(lScope, predictions, Squeeze.axis(Collections.singletonList(-1L))); + } + if (labelsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(labelsShape.size(-1), 1)) { + /* + * TODO, if we ever get a select that does lazy evaluation labels = tf.select( + * tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), tf.squeeze(labels, + * Squeeze.axis(Arrays.asList(-1L))), predictions ); * + */ + labels = Squeeze.create(lScope, labels, Squeeze.axis(Collections.singletonList(-1L))); + } + return new LossTuple<>(labels, predictions); + } + + public Operand allAxes(Operand op) { + int rank = op.shape().numDimensions(); + if (rank != Shape.UNKNOWN_SIZE) { + int[] axes = new int[rank]; + for (int i = 0; i < rank; i++) { + axes[i] = i; + } + return Constant.vectorOf(scope, axes); + } else { + return Range.create( + scope, Constant.scalarOf(scope, 0), Rank.create(scope, op), Constant.scalarOf(scope, 1)); + } + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java new file mode 100644 index 00000000000..326e3cdc2d1 --- /dev/null +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java @@ -0,0 +1,413 @@ +package 
org.tensorflow.framework.op; + +import org.junit.jupiter.api.Test; +import org.tensorflow.Operand; +import org.tensorflow.framework.utils.TestSession; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Ops; +import org.tensorflow.types.TFloat64; +import org.tensorflow.types.TInt64; + +class MathOpsTest { + + private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; + + double[][][] array = + new double[][][] { + { + {4.17021990e-01, 7.20324516e-01, 1.14374816e-04}, + {3.02332580e-01, 1.46755889e-01, 9.23385918e-02}, + {1.86260208e-01, 3.45560730e-01, 3.96767467e-01}, + {5.38816750e-01, 4.19194520e-01, 6.85219526e-01}, + {2.04452246e-01, 8.78117442e-01, 2.73875929e-02}, + {6.70467496e-01, 4.17304814e-01, 5.58689833e-01}, + {1.40386939e-01, 1.98101491e-01, 8.00744593e-01} + }, + { + {9.68261600e-01, 3.13424170e-01, 6.92322612e-01}, + {8.76389146e-01, 8.94606650e-01, 8.50442126e-02}, + {3.90547849e-02, 1.69830427e-01, 8.78142476e-01}, + {9.83468369e-02, 4.21107620e-01, 9.57889557e-01}, + {5.33165276e-01, 6.91877127e-01, 3.15515637e-01}, + {6.86500907e-01, 8.34625661e-01, 1.82882771e-02}, + {7.50144303e-01, 9.88861084e-01, 7.48165667e-01} + }, + { + {2.80443996e-01, 7.89279342e-01, 1.03226006e-01}, + {4.47893530e-01, 9.08595502e-01, 2.93614149e-01}, + {2.87775338e-01, 1.30028576e-01, 1.93669572e-02}, + {6.78835511e-01, 2.11628109e-01, 2.65546650e-01}, + {4.91573155e-01, 5.33625446e-02, 5.74117601e-01}, + {1.46728575e-01, 5.89305520e-01, 6.99758351e-01}, + {1.02334432e-01, 4.14055973e-01, 6.94400132e-01} + }, + { + {4.14179265e-01, 4.99534607e-02, 5.35896420e-01}, + {6.63794637e-01, 5.14889121e-01, 9.44594741e-01}, + {5.86555064e-01, 9.03401911e-01, 1.37474701e-01}, + {1.39276341e-01, 8.07391286e-01, 3.97676826e-01}, + {1.65354192e-01, 9.27508593e-01, 3.47765863e-01}, + {7.50812113e-01, 7.25997984e-01, 8.83306086e-01}, + {6.23672187e-01, 7.50942409e-01, 3.48898351e-01} + }, + { + {2.69927889e-01, 8.95886242e-01, 4.28091198e-01}, + {9.64840055e-01, 6.63441479e-01, 6.21695697e-01}, + {1.14745975e-01, 9.49489236e-01, 4.49912131e-01}, + {5.78389585e-01, 4.08136815e-01, 2.37026975e-01}, + {9.03379500e-01, 5.73679507e-01, 2.87032709e-03}, + {6.17144942e-01, 3.26644897e-01, 5.27058125e-01}, + {8.85942101e-01, 3.57269764e-01, 9.08535123e-01} + }, + { + {6.23360097e-01, 1.58212427e-02, 9.29437220e-01}, + {6.90896928e-01, 9.97322857e-01, 1.72340512e-01}, + {1.37135744e-01, 9.32595491e-01, 6.96818173e-01}, + {6.60001710e-02, 7.55463064e-01, 7.53876209e-01}, + {9.23024535e-01, 7.11524785e-01, 1.24270961e-01}, + {1.98801346e-02, 2.62109861e-02, 2.83064879e-02}, + {2.46211067e-01, 8.60027969e-01, 5.38831055e-01} + }, + { + {5.52821994e-01, 8.42030883e-01, 1.24173313e-01}, + {2.79183686e-01, 5.85759282e-01, 9.69595730e-01}, + {5.61030209e-01, 1.86472889e-02, 8.00632656e-01}, + {2.32974276e-01, 8.07105184e-01, 3.87860656e-01}, + {8.63541842e-01, 7.47121632e-01, 5.56240261e-01}, + {1.36455223e-01, 5.99176884e-02, 1.21343456e-01}, + {4.45518792e-02, 1.07494131e-01, 2.25709334e-01} + }, + { + {7.12988973e-01, 5.59717000e-01, 1.25559801e-02}, + {7.19742775e-02, 9.67276335e-01, 5.68100452e-01}, + {2.03293234e-01, 2.52325743e-01, 7.43825853e-01}, + {1.95429474e-01, 5.81358910e-01, 9.70019996e-01}, + {8.46828818e-01, 2.39847764e-01, 4.93769705e-01}, + {6.19955719e-01, 8.28980923e-01, 1.56791389e-01}, + {1.85762029e-02, 7.00221434e-02, 4.86345112e-01} + }, + { + {6.06329441e-01, 5.68851411e-01, 3.17362398e-01}, + {9.88616168e-01, 5.79745233e-01, 3.80141169e-01}, + 
{5.50948203e-01, 7.45334446e-01, 6.69232905e-01}, + {2.64919549e-01, 6.63348362e-02, 3.70084196e-01}, + {6.29717529e-01, 2.10174009e-01, 7.52755582e-01}, + {6.65364787e-02, 2.60315090e-01, 8.04754555e-01}, + {1.93434283e-01, 6.39460862e-01, 5.24670303e-01} + }, + { + {9.24807966e-01, 2.63296783e-01, 6.59610927e-02}, + {7.35065937e-01, 7.72178054e-01, 9.07815874e-01}, + {9.31972086e-01, 1.39515726e-02, 2.34362081e-01}, + {6.16778374e-01, 9.49016333e-01, 9.50176120e-01}, + {5.56653202e-01, 9.15606380e-01, 6.41566217e-01}, + {3.90007704e-01, 4.85990673e-01, 6.04310513e-01}, + {5.49547911e-01, 9.26181436e-01, 9.18733418e-01} + }, + { + {3.94875616e-01, 9.63262558e-01, 1.73955664e-01}, + {1.26329526e-01, 1.35079160e-01, 5.05662143e-01}, + {2.15248056e-02, 9.47970212e-01, 8.27115476e-01}, + {1.50189810e-02, 1.76196262e-01, 3.32063586e-01}, + {1.30996838e-01, 8.09490681e-01, 3.44736665e-01}, + {9.40107465e-01, 5.82014203e-01, 8.78831983e-01}, + {8.44734430e-01, 9.05392289e-01, 4.59880263e-01} + }, + { + {5.46346843e-01, 7.98603594e-01, 2.85718858e-01}, + {4.90253508e-01, 5.99110305e-01, 1.55332759e-02}, + {5.93481421e-01, 4.33676362e-01, 8.07360530e-01}, + {3.15244794e-01, 8.92888725e-01, 5.77857196e-01}, + {1.84010208e-01, 7.87929237e-01, 6.12031162e-01}, + {5.39092720e-02, 4.20193672e-01, 6.79068863e-01}, + {9.18601751e-01, 4.02024889e-04, 9.76759136e-01} + }, + { + {3.76580328e-01, 9.73783553e-01, 6.04716122e-01}, + {8.28845799e-01, 5.74711502e-01, 6.28076196e-01}, + {2.85576284e-01, 5.86833358e-01, 7.50021756e-01}, + {8.58313859e-01, 7.55082190e-01, 6.98057234e-01}, + {8.64479423e-01, 3.22681010e-01, 6.70788765e-01}, + {4.50873941e-01, 3.82102758e-01, 4.10811365e-01}, + {4.01479572e-01, 3.17383945e-01, 6.21919394e-01} + }, + { + {4.30247277e-01, 9.73802090e-01, 6.77800894e-01}, + {1.98569894e-01, 4.26701009e-01, 3.43346238e-01}, + {7.97638834e-01, 8.79998267e-01, 9.03841972e-01}, + {6.62719786e-01, 2.70208269e-01, 2.52366692e-01}, + {8.54897916e-01, 5.27714670e-01, 8.02161098e-01}, + {5.72488546e-01, 7.33142555e-01, 5.19011617e-01}, + {7.70883918e-01, 5.68857968e-01, 4.65709865e-01} + }, + { + {3.42688918e-01, 6.82093501e-02, 3.77924174e-01}, + {7.96260759e-02, 9.82817113e-01, 1.81612849e-01}, + {8.11858714e-01, 8.74961674e-01, 6.88413262e-01}, + {5.69494426e-01, 1.60971433e-01, 4.66880023e-01}, + {3.45172048e-01, 2.25039959e-01, 5.92511892e-01}, + {3.12269837e-01, 9.16305542e-01, 9.09635544e-01}, + {2.57118285e-01, 1.10891297e-01, 1.92962736e-01} + }, + { + {4.99584168e-01, 7.28585660e-01, 2.08194435e-01}, + {2.48033553e-01, 8.51671875e-01, 4.15848732e-01}, + {6.16685092e-01, 2.33666137e-01, 1.01967260e-01}, + {5.15857041e-01, 4.77140993e-01, 1.52671650e-01}, + {6.21806204e-01, 5.44010103e-01, 6.54137373e-01}, + {1.44545540e-01, 7.51527846e-01, 2.22049147e-01}, + {5.19351840e-01, 7.85296023e-01, 2.23304275e-02} + }, + { + {3.24362457e-01, 8.72922361e-01, 8.44709635e-01}, + {5.38440585e-01, 8.66608262e-01, 9.49805975e-01}, + {8.26407015e-01, 8.54115427e-01, 9.87434015e-02}, + {6.51304305e-01, 7.03516960e-01, 6.10240817e-01}, + {7.99615264e-01, 3.45712192e-02, 7.70238757e-01}, + {7.31728613e-01, 2.59698391e-01, 2.57069290e-01}, + {6.32303298e-01, 3.45297456e-01, 7.96588659e-01} + }, + { + {4.46146220e-01, 7.82749414e-01, 9.90471780e-01}, + {3.00248325e-01, 1.43005833e-01, 9.01308417e-01}, + {5.41559398e-01, 9.74740386e-01, 6.36604428e-01}, + {9.93912995e-01, 5.46070814e-01, 5.26425958e-01}, + {1.35427907e-01, 3.55705172e-01, 2.62185670e-02}, + {1.60395175e-01, 7.45637178e-01, 3.03996895e-02}, 
+ {3.66543084e-01, 8.62346232e-01, 6.92677736e-01} + }, + { + {6.90942168e-01, 1.88636795e-01, 4.41904277e-01}, + {5.81577420e-01, 9.89751697e-01, 2.03906223e-01}, + {2.47732908e-01, 2.62173086e-01, 7.50172436e-01}, + {4.56975341e-01, 5.69294393e-02, 5.08516252e-01}, + {2.11960167e-01, 7.98604250e-01, 2.97331393e-01}, + {2.76060123e-02, 5.93432426e-01, 8.43840420e-01}, + {3.81016135e-01, 7.49858320e-01, 5.11141479e-01} + }, + { + {5.40951788e-01, 9.59434330e-01, 8.03960919e-01}, + {3.23230661e-02, 7.09387243e-01, 4.65001494e-01}, + {9.47548926e-01, 2.21432731e-01, 2.67072022e-01}, + {8.14739615e-02, 4.28618819e-01, 1.09018765e-01}, + {6.33786738e-01, 8.02963257e-01, 6.96800470e-01}, + {7.66211390e-01, 3.42454106e-01, 8.45851481e-01}, + {4.28768784e-01, 8.24009895e-01, 6.26496136e-01} + } + }; + + double[][][] expectedArray = { + { + {3.45350616e-02, 5.96526116e-02, 9.47178160e-06}, + {2.50372272e-02, 1.21533722e-02, 7.64688430e-03}, + {1.54248644e-02, 2.86171008e-02, 3.28577124e-02}, + {4.46213149e-02, 3.47149745e-02, 5.67454435e-02}, + {1.69314109e-02, 7.27199987e-02, 2.26806314e-03}, + {5.55237755e-02, 3.45584825e-02, 4.62670736e-02}, + {1.16259372e-02, 1.64054818e-02, 6.63124844e-02} + }, + { + {8.01851526e-02, 2.59557609e-02, 5.73336743e-02}, + {7.25768730e-02, 7.40855262e-02, 7.04281079e-03}, + {3.23426444e-03, 1.40642561e-02, 7.27220699e-02}, + {8.14444851e-03, 3.48734073e-02, 7.93262124e-02}, + {4.41532955e-02, 5.72967827e-02, 2.61289626e-02}, + {5.68515584e-02, 6.91182911e-02, 1.51451665e-03}, + {6.21220917e-02, 8.18910673e-02, 6.19582348e-02} + }, + { + {2.32245550e-02, 6.53630048e-02, 8.54850933e-03}, + {3.70916426e-02, 7.52439946e-02, 2.43152231e-02}, + {2.38316897e-02, 1.07681248e-02, 1.60384597e-03}, + {5.62167615e-02, 1.75256692e-02, 2.19908543e-02}, + {4.07089069e-02, 4.41914052e-03, 4.75447029e-02}, + {1.21511100e-02, 4.88024652e-02, 5.79494536e-02}, + {8.47467501e-03, 3.42894346e-02, 5.75057231e-02} + }, + { + {3.42996456e-02, 4.13682219e-03, 4.43794727e-02}, + {5.49711734e-02, 4.26397808e-02, 7.82252178e-02}, + {4.85746935e-02, 7.48138949e-02, 1.13847647e-02}, + {1.15339644e-02, 6.68629184e-02, 3.29330191e-02}, + {1.36935636e-02, 7.68102556e-02, 2.87997164e-02}, + {6.21773973e-02, 6.01224527e-02, 7.31496885e-02}, + {5.16484901e-02, 6.21881858e-02, 2.88935024e-02} + }, + { + {2.23536789e-02, 7.41914958e-02, 3.54517400e-02}, + {7.99018070e-02, 5.49419262e-02, 5.14848121e-02}, + {9.50251892e-03, 7.86305517e-02, 3.72588076e-02}, + {4.78984788e-02, 3.37992460e-02, 1.96290389e-02}, + {7.48120397e-02, 4.75084223e-02, 2.37701897e-04}, + {5.11079468e-02, 2.70506144e-02, 4.36475389e-02}, + {7.33679906e-02, 2.95867678e-02, 7.52389953e-02} + }, + { + {5.16226478e-02, 1.31021289e-03, 7.69699737e-02}, + {5.72156087e-02, 8.25918168e-02, 1.42721254e-02}, + {1.13566946e-02, 7.72315189e-02, 5.77059686e-02}, + {5.46570681e-03, 6.25625551e-02, 6.24311455e-02}, + {7.64389113e-02, 5.89238741e-02, 1.02913165e-02}, + {1.64634397e-03, 2.17062421e-03, 2.34416011e-03}, + {2.03896053e-02, 7.12219477e-02, 4.46224995e-02} + }, + { + {4.57811356e-02, 6.97315410e-02, 1.02832299e-02}, + {2.31201854e-02, 4.85087894e-02, 8.02956372e-02}, + {4.64608893e-02, 1.54424773e-03, 6.63032085e-02}, + {1.92934200e-02, 6.68392256e-02, 3.21201086e-02}, + {7.15129450e-02, 6.18717745e-02, 4.60642166e-02}, + {1.13003375e-02, 4.96199494e-03, 1.00488793e-02}, + {3.68949817e-03, 8.90196767e-03, 1.86917856e-02} + }, + { + {5.90451285e-02, 4.63521369e-02, 1.03980501e-03}, + {5.96044352e-03, 8.01035613e-02, 4.70464006e-02}, + 
{1.68354288e-02, 2.08959840e-02, 6.15988411e-02}, + {1.61842033e-02, 4.81443815e-02, 8.03307742e-02}, + {7.01288804e-02, 1.98626388e-02, 4.08908091e-02}, + {5.13407178e-02, 6.86508343e-02, 1.29844472e-02}, + {1.53836084e-03, 5.79878036e-03, 4.02759537e-02} + }, + { + {5.02122790e-02, 4.71085906e-02, 2.62818988e-02}, + {8.18707868e-02, 4.80107442e-02, 3.14808302e-02}, + {4.56259623e-02, 6.17237724e-02, 5.54215349e-02}, + {2.19389219e-02, 5.49342157e-03, 3.06479763e-02}, + {5.21491282e-02, 1.74052510e-02, 6.23383410e-02}, + {5.51012019e-03, 2.15576105e-02, 6.66445568e-02}, + {1.60189737e-02, 5.29560074e-02, 4.34497967e-02} + }, + { + {7.65866041e-02, 2.18045339e-02, 5.46247046e-03}, + {6.08734004e-02, 6.39467835e-02, 7.51794279e-02}, + {7.71798939e-02, 1.15537888e-03, 1.94083489e-02}, + {5.10775894e-02, 7.85913840e-02, 7.86874294e-02}, + {4.60984148e-02, 7.58245885e-02, 5.31303585e-02}, + {3.22979130e-02, 4.02465984e-02, 5.00450842e-02}, + {4.55099978e-02, 7.67003447e-02, 7.60835484e-02} + }, + { + {3.27010415e-02, 7.97711685e-02, 1.44058811e-02}, + {1.04617933e-02, 1.11863809e-02, 4.18756641e-02}, + {1.78254500e-03, 7.85047561e-02, 6.84963465e-02}, + {1.24377478e-03, 1.45914331e-02, 2.74993554e-02}, + {1.08483098e-02, 6.70367777e-02, 2.85488572e-02}, + {7.78536126e-02, 4.81986478e-02, 7.27791712e-02}, + {6.99554384e-02, 7.49787241e-02, 3.80843058e-02} + }, + { + {4.52449061e-02, 6.61351755e-02, 2.36613862e-02}, + {4.05996218e-02, 4.96144369e-02, 1.28636532e-03}, + {4.91482876e-02, 3.59142683e-02, 6.68603703e-02}, + {2.61065327e-02, 7.39432648e-02, 4.78543900e-02}, + {1.52385337e-02, 6.52511939e-02, 5.06844558e-02}, + {4.46441676e-03, 3.47977169e-02, 5.62360846e-02}, + {7.60726482e-02, 3.32930977e-05, 8.08888674e-02} + }, + { + {3.11859436e-02, 8.06424469e-02, 5.00786714e-02}, + {6.86396435e-02, 4.75938842e-02, 5.20132035e-02}, + {2.36495789e-02, 4.85977381e-02, 6.21119440e-02}, + {7.10799918e-02, 6.25310168e-02, 5.78085780e-02}, + {7.15905875e-02, 2.67223511e-02, 5.55503815e-02}, + {3.73384580e-02, 3.16432752e-02, 3.40207368e-02}, + {3.32479365e-02, 2.62836833e-02, 5.15033379e-02} + }, + { + {3.56302932e-02, 8.06439817e-02, 5.61310798e-02}, + {1.64442733e-02, 3.53366137e-02, 2.84337122e-02}, + {6.60552830e-02, 7.28757605e-02, 7.48503357e-02}, + {5.48821613e-02, 2.23768987e-02, 2.08993759e-02}, + {7.07971081e-02, 4.37019095e-02, 6.64297864e-02}, + {4.74097952e-02, 6.07141182e-02, 4.29811813e-02}, + {6.38396144e-02, 4.71091345e-02, 3.85670736e-02} + }, + { + {2.83792764e-02, 5.64865675e-03, 3.12972330e-02}, + {6.59411587e-03, 8.13905448e-02, 1.50400000e-02}, + {6.72328845e-02, 7.24586621e-02, 5.70099279e-02}, + {4.71618399e-02, 1.33306114e-02, 3.86639796e-02}, + {2.85849143e-02, 1.86363515e-02, 4.90679964e-02}, + {2.58601662e-02, 7.58824944e-02, 7.53301233e-02}, + {2.12928709e-02, 9.18329880e-03, 1.59799233e-02} + }, + { + {4.13723253e-02, 6.03367463e-02, 1.72413141e-02}, + {2.05405317e-02, 7.05299526e-02, 3.44378985e-02}, + {5.10698669e-02, 1.93507168e-02, 8.44426826e-03}, + {4.27199379e-02, 3.95137258e-02, 1.26432776e-02}, + {5.14939614e-02, 4.50513922e-02, 5.41714206e-02}, + {1.19703254e-02, 6.22366704e-02, 1.83886718e-02}, + {4.30093557e-02, 6.50331303e-02, 1.84926135e-03} + }, + { + {2.68615987e-02, 7.22897798e-02, 6.99533820e-02}, + {4.45901640e-02, 7.17668831e-02, 7.86567777e-02}, + {6.84376806e-02, 7.07323104e-02, 8.17728881e-03}, + {5.39368056e-02, 5.82607202e-02, 5.05361930e-02}, + {6.62189573e-02, 2.86296452e-03, 6.37861863e-02}, + {6.05970249e-02, 2.15065386e-02, 2.12888140e-02}, 
+ {5.23632653e-02, 2.85952985e-02, 6.59683123e-02} + }, + { + {3.69469412e-02, 6.48222342e-02, 8.20244551e-02}, + {2.48646215e-02, 1.18428171e-02, 7.46405274e-02}, + {4.48484421e-02, 8.07216838e-02, 5.27194552e-02}, + {8.23094398e-02, 4.52220477e-02, 4.35951874e-02}, + {1.12152621e-02, 2.94571985e-02, 2.17125192e-03}, + {1.32828895e-02, 6.17488436e-02, 2.51750532e-03}, + {3.03547252e-02, 7.14139268e-02, 5.73630854e-02} + }, + { + {5.72193563e-02, 1.56216780e-02, 3.65956500e-02}, + {4.81624752e-02, 8.19648281e-02, 1.68861933e-02}, + {2.05156356e-02, 2.17114780e-02, 6.21244237e-02}, + {3.78437378e-02, 4.71452763e-03, 4.21120226e-02}, + {1.75531674e-02, 6.61352351e-02, 2.46230606e-02}, + {2.28615105e-03, 4.91442308e-02, 6.98814020e-02}, + {3.15532871e-02, 6.20984100e-02, 4.23294269e-02} + }, + { + {4.47981246e-02, 7.94541389e-02, 6.65788352e-02}, + {2.67678709e-03, 5.87468557e-02, 3.85084115e-02}, + {7.84698650e-02, 1.83376241e-02, 2.21171752e-02}, + {6.74714567e-03, 3.54954340e-02, 9.02822800e-03}, + {5.24861142e-02, 6.64962158e-02, 5.77045009e-02}, + {6.34526685e-02, 2.83598304e-02, 7.00479448e-02}, + {3.55078541e-02, 6.82391599e-02, 5.18823527e-02} + } + }; + + @Test + public void testL2Normalize() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand input = tf.constant(array); + Operand result = fops.math.l2Normalize(tf.constant(array), new int[]{ 0,1,2}); + session.evaluate(tf.constant(expectedArray), result); + } + } + + @Test + public void testConfusionMatrix() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + long[] labels = new long[] {2, 0, 2, 2, 0, 1}; + long[] predictions = new long[] {0, 0, 2, 2, 0, 2}; + Operand result = + fops.math.confusionMatrix(tf.constant(labels), tf.constant(predictions)); + long[][] expected = + new long[][] { + {2, 0, 0}, + {0, 0, 1}, + {1, 0, 2} + }; + session.evaluate(tf.constant(expected), result); + } + } +} From c234b9a46b9e6bbf996a0f03b8e1ac1e4a083e3f Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:37:19 -0400 Subject: [PATCH 46/60] Added linalg methods for matmul --- .../tensorflow/framework/op/LinalgOps.java | 306 ++++++++++++++++++ .../framework/op/LinalgOpsTest.java | 60 ++++ 2 files changed, 366 insertions(+) create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java create mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/op/LinalgOpsTest.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java new file mode 100644 index 00000000000..eb069a2db22 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java @@ -0,0 +1,306 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op; + +import org.tensorflow.Operand; +import org.tensorflow.framework.utils.SparseTensor; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Conj; +import org.tensorflow.op.sparse.SparseMatMul; +import org.tensorflow.op.train.BatchMatMul; +import org.tensorflow.types.TBfloat16; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; + +public class LinalgOps { + private final Scope scope; + + private final FrameworkOps frameworkOps; + + /** + * Creates Framework {@code nn} Operations + * + * @param frameworkOps the TensorFLow framework Ops + */ + LinalgOps(FrameworkOps frameworkOps) { + this.scope = frameworkOps.scope(); + this.frameworkOps = frameworkOps; + } + + /** + * Multiplies matrix a by matrix b, producing a * b + * . + * + *

      The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions + * specify matching batch size. + * + *

      Both matrices must be of the same type. The supported types are: TFloat16, + * TFloat32, TFloat64, TInt32. + * + *

Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by + * setting the corresponding flag to true. Both flags are false by default. + * + *

      A simple 2-D tensor matrix multiplication: + * + *

      {@code
      +   * Operand a = tf.constant(new double[][] {
      +   *         {-8.944851},
      +   *         {4.1711287},
      +   *         {-0.22380222}
      +   *     });
      +   * Operand b = tf.constant( new double[][] {
      +   *         {-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}
      +   *     });
      +   * Operand result = fops.linalg.matmul(a, b);
      +   * // result = {
      +   * //     {127.69746,  111.21564,  20.078575,  14.111271,  -16.62731},
      +   * //     {-59.547394, -51.861652, -9.362965,  -6.580314,    7.753584},
      +   * //     {  3.1950197,  2.7826407, 0.50237054, 0.35306725, -0.4160191}
      +   * //  }
      +   *
      +   * }
      + * + *
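An illustrative sketch of the batching behaviour described above (not part of the patch; {@code tf} and {@code fops} assumed as elsewhere in this series): with rank-3 inputs the leading dimension is treated as a batch dimension and must match.

// Shapes [2, 2, 3] x [2, 3, 4] -> [2, 2, 4]; zeros are used only to keep the example short.
Operand<TFloat32> a = tf.zeros(tf.constant(new long[] {2, 2, 3}), TFloat32.class);
Operand<TFloat32> b = tf.zeros(tf.constant(new long[] {2, 3, 4}), TFloat32.class);
Operand<TFloat32> c = fops.linalg.matmul(a, b);   // dispatched to BatchMatMul for rank > 2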

      Note: This is matrix product, not element-wise product. + * + * @param a an Operand of of type TFloat16, TFloat32, TFloat64 + * , TInt32. with a rank > 1 + * @param b an Operand with same type and rank as a. + * @param the data type of the Operands + * @return A Operand of the same type as a and b where each inner-most + * matrix is the product of the corresponding matrices in a and b. + * This is the matrix product not an element-wise product. + * @throws java.lang.IllegalArgumentException If transposeA and adjointA + * , or transposeB and adjointB are both set to `true`. + */ + @Endpoint(name = "matmul") + public Operand matmul(Operand a, Operand b) { + return matmul(a, b, false, false, false, false, false, false); + } + + /** + * Multiplies matrix a by matrix b, producing a * b + * . + * + *

      The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions + * specify matching batch size. + * + *

      Both matrices must be of the same type. The supported types are: TFloat16, + * TFloat32, TFloat64, TInt32. + * + *

Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by + * setting the corresponding flag to true. Both flags are false by default. + * + *

      + * + *

      Note: This is matrix product, not element-wise product. + * + *

      A simple 2-D tensor matrix multiplication: + * + *

      {@code
      +   * Operand a = tf.constant(new double[][] {
      +   *         {-8.944851},
      +   *         {4.1711287},
      +   *         {-0.22380222}
      +   *     });
      +   * Operand b = tf.constant( new double[][] {
      +   *         {-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}
      +   *     });
      +   * Operand result = fops.linalg.matmul(a, b);
      +   * // result = {
      +   * //     {127.69746,  111.21564,  20.078575,  14.111271,  -16.62731},
      +   * //     {-59.547394, -51.861652, -9.362965,  -6.580314,    7.753584},
      +   * //     {  3.1950197,  2.7826407, 0.50237054, 0.35306725, -0.4160191}
      +   * //  }
      +   *
      +   * }
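A short sketch of the transpose flags on this overload (illustrative only; values chosen so the result is easy to verify by hand):

Operand<TFloat32> a = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}, {5f, 6f}});   // shape [3, 2]
Operand<TFloat32> b = tf.constant(new float[][] {{1f, 0f}, {0f, 1f}, {1f, 1f}});   // shape [3, 2]
Operand<TFloat32> c = fops.linalg.matmul(a, b, true, false);                        // transpose(a) . b, shape [2, 2]
// c == {{6, 8}, {8, 10}}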
      + * + * @param a an Operand of of type TFloat16, TFloat32, TFloat64 + * , TInt32. with a rank > 1 + * @param b an Operand with same type and rank as a. + * @param transposeA If `true`, a is transposed before multiplication. + * @param transposeB If `True`, b is transposed before multiplication + * @param the data type of the Operands + * @return A Operand of the same type as a and b where each inner-most + * matrix is the product of the corresponding matrices in a and b. + * This is the matrix product not an element-wise product. + * @throws java.lang.IllegalArgumentException If transposeA and adjointA + * , or transposeB and adjointB are both set to `true`. + */ + @Endpoint(name = "matmul") + public Operand matmul( + Operand a, Operand b, boolean transposeA, boolean transposeB) { + return matmul(a, b, transposeA, transposeB, false, false, false, false); + } + + /** + * Multiplies matrix a by matrix b, producing a * b + * . + * + *

      The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions + * specify matching batch size. + * + *

      Both matrices must be of the same type. The supported types are: TFloat16, + * TFloat32, TFloat64, TInt32. + * + *

Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by + * setting the corresponding flag to true. Both flags are false by default. + * + *

      Note: This is matrix product, not element-wise product. + * + *

      A simple 2-D tensor matrix multiplication: + * + *

      {@code
      +   * Operand a = tf.constant(new double[][] {
      +   *         {-8.944851},
      +   *         {4.1711287},
      +   *         {-0.22380222}
      +   *     });
      +   * Operand b = tf.constant( new double[][] {
      +   *         {-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}
      +   *     });
      +   * Operand result = fops.linalg.matmul(a, b);
      +   * // result = {
      +   * //     {127.69746,  111.21564,  20.078575,  14.111271,  -16.62731},
      +   * //     {-59.547394, -51.861652, -9.362965,  -6.580314,    7.753584},
      +   * //     {  3.1950197,  2.7826407, 0.50237054, 0.35306725, -0.4160191}
      +   * //  }
      +   *
      +   * }
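A final sketch of the sparsity hints on this overload (illustrative only): {@code aIsSparse} and {@code bIsSparse} are optimisation hints for dense tensors that are mostly zero, not a SparseTensor code path.

Operand<TFloat32> a = tf.constant(new float[][] {{1f, 2f, 3f}});       // shape [1, 3]
Operand<TFloat32> b = tf.constant(new float[][] {{0f}, {0f}, {4f}});   // shape [3, 1], mostly zeros
Operand<TFloat32> c = fops.linalg.matmul(a, b,
    false, false,   // transposeA, transposeB
    false, false,   // adjointA, adjointB
    false, true);   // aIsSparse, bIsSparse -- routes to SparseMatMul for TFloat32/TBfloat16 inputs
// c == {{12}}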
      + * + * @param a an Operand of of type TFloat16, TFloat32, TFloat64 + * , TInt32. with a rank > 1 + * @param b an Operand with same type and rank as a. + * @param transposeA If true, a is transposed before multiplication. + * @param transposeB If True, b is transposed before multiplication + * @param adjointA If true, a is conjugated and transposed before multiplication. + * @param adjointB If true, b is conjugated and transposed before multiplication. + * @param aIsSparse If true, a is treated as a sparse matrix. Notice, this does + * not support {@link SparseTensor}, it just makes optimizations that assume most values + * in a are zero. + * @param bIsSparse If true, b is treated as a sparse matrix. Notice, this does + * not support {@link SparseTensor}, it just makes optimizations that assume most values + * in b are zero. + * @param the data type of the Operands + * @return A Operand of the same type as a and b where each inner-most + * matrix is the product of the corresponding matrices in a and b. + * This is the matrix product not an element-wise product. + * @throws java.lang.IllegalArgumentException If transposeA and adjointA + * , or transposeB and adjointB are both set to `true`. + */ + @SuppressWarnings("unchecked") + @Endpoint(name = "matmul") + public Operand matmul( + Operand a, + Operand b, + boolean transposeA, + boolean transposeB, + boolean adjointA, + boolean adjointB, + boolean aIsSparse, + boolean bIsSparse) { + Scope lscope = scope.withSubScope("MatMul"); + if (transposeA && adjointA) + throw new IllegalArgumentException("Only one of transposeA and adjointA can be true."); + if (transposeB && adjointB) + throw new IllegalArgumentException("Only one of transposeB and adjointB can be true."); + if (!(TFloating.class.isAssignableFrom(a.type()) || a.type().equals(TInt32.class))) + throw new IllegalArgumentException( + String.format( + "Operand 'a' must be of type 'TBfloat16','TFloat16', 'TFloat32', 'TFloat64' or 'TInt32'. found type : %s", + a.type().getSimpleName())); + if (!(TFloating.class.isAssignableFrom(a.type()) || b.type().equals(TInt32.class))) + throw new IllegalArgumentException( + String.format( + "Operand 'b' must be of type 'TBfloat16', 'TFloat32', 'TFloat64' or 'TInt32'. found type : %s", + b.type().getSimpleName())); + + Shape aShape = a.shape(); + Shape bShape = b.shape(); + if (aShape.numDimensions() != bShape.numDimensions()) + throw new IllegalArgumentException( + String.format( + "Parameters 'a' and 'b' must the same rank: found a rank = %d, b rank = %d", + aShape.numDimensions(), bShape.numDimensions())); + boolean outputMayHaveNonEmptyBatchShape = + aShape.numDimensions() == Shape.UNKNOWN_SIZE + || aShape.numDimensions() > 2 + || bShape.numDimensions() == Shape.UNKNOWN_SIZE; + + if ((!aIsSparse && !bIsSparse) && outputMayHaveNonEmptyBatchShape) { + // BatchMatmul does not support transpose, so we conjugate the matrix and + // use adjoint instead. Conj() is a noop for real matrices. + if (transposeA) { + a = Conj.create(scope, a); + adjointA = true; + } + if (transposeB) { + b = Conj.create(scope, b); + adjointB = true; + } + return BatchMatMul.create( + lscope, a, b, BatchMatMul.adjX(adjointA), BatchMatMul.adjY(adjointB)); + } + + // Neither matmul nor sparse_matmul support adjoint, so we conjugate + // the matrix and use transpose instead. Conj() is a noop for real + // matrices. 
+ if (adjointA) { + a = Conj.create(scope, a); + transposeA = true; + } + if (adjointB) { + b = Conj.create(scope, b); + transposeB = true; + } + + boolean useSparseMatmul = false; + if (aIsSparse || bIsSparse) { + useSparseMatmul = + (a.type().equals(TBfloat16.class) || a.type().equals(TFloat32.class)) + && (b.type().equals(TBfloat16.class) || b.type().equals(TFloat32.class)); + } + if ((a.type().equals(TBfloat16.class) || b.type().equals(TBfloat16.class)) + && !a.type().equals(b.type())) useSparseMatmul = true; + + if (useSparseMatmul) { + Operand result = + SparseMatMul.create( + lscope, + a, + b, + SparseMatMul.transposeA(transposeA), + SparseMatMul.transposeB(transposeB), + SparseMatMul.aIsSparse(aIsSparse), + SparseMatMul.bIsSparse(bIsSparse)); + if (a.type().equals(TFloat32.class)) return (Operand) result; + else return Cast.create(scope, result, a.type()); + } + + return org.tensorflow.op.linalg.MatMul.create( + lscope, + a, + b, + org.tensorflow.op.linalg.MatMul.transposeA(transposeA), + org.tensorflow.op.linalg.MatMul.transposeB(transposeB)); + } +} diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/LinalgOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/LinalgOpsTest.java new file mode 100644 index 00000000000..f2c297ce032 --- /dev/null +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/LinalgOpsTest.java @@ -0,0 +1,60 @@ +package org.tensorflow.framework.op; + +import org.junit.jupiter.api.Test; +import org.tensorflow.Operand; +import org.tensorflow.framework.utils.TestSession; +import org.tensorflow.op.Ops; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; + +class LinalgOpsTest { + private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; + + @Test + public void test2D() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand a = tf.constant(new float[][] {{3.7213619f}}); + Operand b = tf.constant(new float[][] {{8.153921f}}); + + Operand ans = fops.linalg.matmul(a, b); + Operand expected = tf.constant(new float[][] {{30.34369f}}); + session.evaluate(expected, ans); + + Operand a64 = + tf.constant(new double[][] {{-8.944851}, {4.1711287}, {-0.22380222}}); + Operand b64 = + tf.constant( + new double[][] {{-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}}); + + Operand ans64 = fops.linalg.matmul(a64, b64); + Operand expected64 = + tf.constant( + new double[][] { + {127.69746, 111.21564, 20.078575, 14.111271, -16.62731}, + {-59.547394, -51.861652, -9.362965, -6.580314, 7.753584}, + {3.1950197, 2.7826407, 0.50237054, 0.35306725, -0.4160191} + }); + session.evaluate(expected64, ans64); + + a64 = + tf.constant( + new double[][] { + {-9.189821, -1.588742, -8.684379}, + {-10.953391, -8.473055, -6.8909864}, + {-11.712155, -6.6350083, -2.4441578}, + {1.4037079, -11.279383, 0.9129576}, + {0.11368857, 2.3792067, -11.218701}, + }); + b64 = tf.constant(new double[][] {{-4.933953}, {-12.692161}, {-10.192119}}); + ans64 = fops.linalg.matmul(a64, b64); + expected64 = + tf.constant( + new double[][] {{154.01892}, {231.81863}, {166.91096}, {126.92895}, {83.58413}}); + session.setEpsilon(1e-4f); + session.evaluate(expected64, ans64); + } + } +} From e024f4b4f058c074fe740cc6f586ff1e199415d0 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:47:53 -0400 Subject: [PATCH 47/60] add nn ops for 
sigmoidCrossEntropyWithLogits, softmaxCrossEntropyWithLogits and sparseSoftmaxCrossEntropyWithLogits --- .../annotations/org/tensorflow/op/NnOps.java | 13 ++-- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 57 +++++++++------ .../SparseSoftmaxCrossEntropyWithLogits.java | 62 +++++++++------- .../org/tensorflow/framework/op/NnOps.java | 15 ++-- .../op/nn/SigmoidCrossEntropyWithLogits.java | 3 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 70 +++++++++++-------- .../tensorflow/framework/op/NnOpsTest.java | 68 ++++++++++++++++++ 7 files changed, 192 insertions(+), 96 deletions(-) create mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/op/NnOpsTest.java diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 1cf8b910297..2bd4d13145f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -1811,14 +1811,14 @@ public Softmax softmax(Operand logits) { /** * Computes softmax cross entropy cost and gradients to backpropagate. - *

      * Inputs are the logits, not probabilities. * - * @param data type for {@code loss()} output + * @param data type for {@code loss} output * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. + * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits */ public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyWithLogits( @@ -2011,18 +2011,17 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo /** * Computes softmax cross entropy cost and gradients to backpropagate. - *

      - * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - *

      - * Inputs are the logits, not probabilities. + *

      Inputs are the logits, not probabilities. * - * @param data type for {@code loss()} output + * @param data type for {@code loss} output * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. + * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java index 5d3ab3c1100..d6eed5cbe28 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -29,57 +29,68 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. - *

      * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss()} output + * + * @param data type for {@code loss} output */ -@Operator(group = "nn") +@Operator( + group = "nn" +) public final class SoftmaxCrossEntropyWithLogits extends RawOp { - + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; + + private Output loss; + + private Output backprop; + + private SoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx++); + } + /** * Factory method to create a class wrapping a new SoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. + * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits */ - @Endpoint(describeByClass = true) - public static SoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { + @Endpoint( + describeByClass = true + ) + public static SoftmaxCrossEntropyWithLogits create(Scope scope, + Operand features, Operand labels) { OperationBuilder opBuilder = scope.env().opBuilder("SoftmaxCrossEntropyWithLogits", scope.makeOpName("SoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** + * Gets loss. * Per example loss (batch_size vector). + * @return loss. */ public Output loss() { return loss; } - + /** + * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). + * @return backprop. */ public Output backprop() { return backprop; } - - /** The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; - - private Output loss; - private Output backprop; - - private SoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx); - } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 794beab4ded..26498cdce7a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -29,61 +29,71 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. - *

      - * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - *

      - * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss()} output + *

      Inputs are the logits, not probabilities. + * + * @param data type for {@code loss} output */ -@Operator(group = "nn") +@Operator( + group = "nn" +) public final class SparseSoftmaxCrossEntropyWithLogits extends RawOp { - + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; + + private Output loss; + + private Output backprop; + + private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx++); + } + /** * Factory method to create a class wrapping a new SparseSoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. + * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ - @Endpoint(describeByClass = true) - public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { + @Endpoint( + describeByClass = true + ) + public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, + Operand features, Operand labels) { OperationBuilder opBuilder = scope.env().opBuilder("SparseSoftmaxCrossEntropyWithLogits", scope.makeOpName("SparseSoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SparseSoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** + * Gets loss. * Per example loss (batch_size vector). + * @return loss. */ public Output loss() { return loss; } - + /** + * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). + * @return backprop. */ public Output backprop() { return backprop; } - - /** The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; - - private Output loss; - private Output backprop; - - private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx); - } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java index 0fea3743d95..4f5120a3dbf 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java @@ -87,7 +87,7 @@ public class NnOps { * @param logits the logits of type float32 or float64 * @param the type of labels and logits * @return the component-wise logistic losses. - * @throws IllegalArgumentException if logits' and labels' do not have the same shape + * @throws IllegalArgumentException if logits and labels do not have the same shape */ public Operand sigmoidCrossEntropyWithLogits( Operand labels, Operand logits) { @@ -139,7 +139,6 @@ public Operand sigmoidCrossEntropyWithLogits( * @return the softmax cross entropy loss. Its type is the same as {@code logits} and its shape is * the same as {@code labels} except that it does not have the last dimension of {@code * labels}. 
- * */ public Operand softmaxCrossEntropyWithLogits( Operand labels, Operand logits, int axis) { @@ -181,14 +180,14 @@ public Operand softmaxCrossEntropyWith * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, or {@code * TFloat64}. These activation energies are interpreted as unnormalized log probabilities. - * @param The data type for the labels - * @param The data type for the logits and loss + * @param the data type for the labels + * @param the data tyoe for the loss and logits. * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank - * of the labels is not equal to the rank of the logits minus one. + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if + * the rank of the labels is not equal to the rank of the logits minus one. */ - public Operand sparseSoftmaxCrossEntropyWithLogits( - Operand labels, Operand logits) { + public Operand sparseSoftmaxCrossEntropyWithLogits( + Operand labels, Operand logits) { return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits( scope, labels, logits); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index fc3f7739363..432e1b47a3f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -26,8 +26,7 @@ public class SigmoidCrossEntropyWithLogits { * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. * - *

      For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in - * pseudo-code is + *

      For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in pseudo-code is * *

          * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
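A worked reduction of the pseudo-code above, assuming the usual numerically stable formulation of sigmoid cross entropy (with x = logits, z = labels):

      z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
    = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
    = x - x * z + log(1 + exp(-x))

For x < 0, exp(-x) overflows, so the equivalent form -x * z + log(1 + exp(x)) is preferred there; combining the two cases gives the stable expression max(x, 0) - x * z + log(1 + exp(-abs(x))).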
      diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java
      index 0b2d29d6092..553adf90aad 100644
      --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java
      +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java
      @@ -14,7 +14,11 @@
       import org.tensorflow.types.TBfloat16;
       import org.tensorflow.types.TFloat16;
       import org.tensorflow.types.TFloat32;
      +import org.tensorflow.types.TFloat64;
       import org.tensorflow.types.TInt32;
      +import org.tensorflow.types.TInt64;
      +import org.tensorflow.types.family.TFloating;
      +import org.tensorflow.types.family.TIntegral;
       import org.tensorflow.types.family.TNumber;
       
       import java.util.ArrayList;
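The four added imports support the type promotion introduced later in this file: logits that are not already floating point are widened before calling the raw op, and labels are coerced to an integral type. A condensed sketch of that pattern follows; the class and method names here are illustrative only, while the calls mirror the hunk further down.

import org.tensorflow.Operand;
import org.tensorflow.op.Scope;
import org.tensorflow.op.dtypes.Cast;
import org.tensorflow.types.TBfloat16;
import org.tensorflow.types.TFloat16;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TFloat64;
import org.tensorflow.types.TInt64;
import org.tensorflow.types.family.TFloating;
import org.tensorflow.types.family.TIntegral;
import org.tensorflow.types.family.TNumber;

// Illustrative helper only: shows why TFloat64, TInt64, TFloating and TIntegral are now imported.
final class TypePromotionSketch {

  // Promote logits to a floating-point "precise" type, as the framework op below does.
  @SuppressWarnings("unchecked")
  static Operand<? extends TFloating> preciseLogits(Scope scope, Operand<? extends TNumber> logits) {
    if (logits.type() == TFloat16.class || logits.type() == TBfloat16.class) {
      return Cast.create(scope, logits, TFloat32.class); // half precision is widened to float32
    }
    if (TFloating.class.isAssignableFrom(logits.type())) {
      return (Operand<? extends TFloating>) logits; // already floating point, use as is
    }
    return Cast.create(scope, logits, TFloat64.class); // non-floating logits are cast to float64
  }

  // Coerce labels to an integral type, as the framework op below does.
  @SuppressWarnings("unchecked")
  static Operand<? extends TIntegral> integralLabels(Scope scope, Operand<? extends TNumber> labels) {
    if (TIntegral.class.isAssignableFrom(labels.type())) {
      return (Operand<? extends TIntegral>) labels; // already an integer type, use as is
    }
    return Cast.create(scope, labels, TInt64.class); // anything else is cast to int64
  }
}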
      @@ -34,39 +38,37 @@ public class SparseSoftmaxCrossEntropyWithLogits {
          * 

      NOTE: * *

      For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the {@code labels} vector must provide a single specific - * index for the true class for each row of {@code logits} (each minibatch entry). For soft - * softmax classification with a probability distribution for each entry, {@link + * classes are not allowed, and the {@code labels} vector must provide a single specific index for + * the true class for each row of {@code logits} (each minibatch entry). For soft softmax + * classification with a probability distribution for each entry, {@link * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. * *

      WARNING: * - *

      This op expects unscaled logits, since it performs a {@code softmax} on {@code logits - * } internally for efficiency. Do not call this op with the output of {@code softmax}, - * as it will produce incorrect results. + *

      This op expects unscaled logits, since it performs a {@code softmax} on {@code logits } + * internally for efficiency. Do not call this op with the output of {@code softmax}, as it will + * produce incorrect results. * - *

      A common use case is to have logits of shape {@code [batchSize, numClasses]} and have - * labels of shape {@code [batchSize]}, but higher dimensions are supported, in which case - * the {@code dim}-th dimension is assumed to be of size {@code numClasses}. {@code - * logits} must have the {@code dataType} of {@code TFloat16}, {@code TFloat32} - * , or {@code TFloat64}, and {@code labels} must have the dtype of {@code TInt32} - * or {@code TInt64}. + *

      A common use case is to have logits of shape {@code [batchSize, numClasses]} and have labels + * of shape {@code [batchSize]}, but higher dimensions are supported, in which case the {@code + * dim}-th dimension is assumed to be of size {@code numClasses}. {@code logits} must have the + * {@code dataType} of {@code TFloat16}, {@code TFloat32} , or {@code TFloat64}, and {@code + * labels} must have the dtype of {@code TInt32} or {@code TInt64}. * * @param scope current scope - * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r - * } is rank of {@code labels} and result) and the dataType is {@code TInt32} - * or {@code TInt64}. Each entry in {@code labels} must be an index in {@code [0, - * numClasses)}. Other values will raise an exception when this op is run on CPU, and - * return {@code NaN} for corresponding loss and gradient rows on GPU. + * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r } is + * rank of {@code labels} and result) and the dataType is {@code TInt32} or {@code TInt64}. + * Each entry in {@code labels} must be an index in {@code [0, numClasses)}. Other values will + * raise an exception when this op is run on CPU, and return {@code NaN} for corresponding + * loss and gradient rows on GPU. * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., - * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, - * or {@code TFloat64}. These activation energies are interpreted as unnormalized log - * probabilities. + * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, or {@code + * TFloat64}. These activation energies are interpreted as unnormalized log probabilities. * @param the data type for the labels * @param the data tyoe for the loss and logits. * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank - * of the labels is not equal to the rank of the logits minus one. + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if + * the rank of the labels is not equal to the rank of the logits minus one. 
*/ @SuppressWarnings("unchecked") @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits") @@ -74,15 +76,23 @@ public class SparseSoftmaxCrossEntropyWithLogits { Operand sparseSoftmaxCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits) { scope = scope.withSubScope("SparseSoftmaxCrossEntropyWithLogits"); - Operand preciseLogits; + Operand preciseLogits; if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { preciseLogits = Cast.create(scope, logits, TFloat32.class); + } else if (TFloating.class.isAssignableFrom(logits.type())) { + preciseLogits = (Operand) logits; } else { - preciseLogits = logits; + preciseLogits = Cast.create(scope, logits, TFloat64.class); } - Shape labelsStaticShape = labels.shape(); + Operand iLabels; + if (TIntegral.class.isAssignableFrom(labels.type())) { + iLabels = (Operand) labels; + } else { + iLabels = Cast.create(scope, labels, TInt64.class); + } + Shape labelsStaticShape = iLabels.shape(); org.tensorflow.op.core.Shape labelsShape = - org.tensorflow.op.core.Shape.create(scope, labels); + org.tensorflow.op.core.Shape.create(scope, iLabels); Shape logitsShape = logits.shape(); Shape logitsShortened = logitsShape.take(logitsShape.numDimensions() - 1); @@ -113,7 +123,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( if (logitsShape.numDimensions() == 2) { org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( - scope, preciseLogits, labels); + scope, preciseLogits, iLabels); Operand cost = smax.loss(); if (cost.type() != logits.type()) { return Cast.create(scope, cost, logits.type()); @@ -131,7 +141,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( scope, Equal.create( scope, - org.tensorflow.op.core.Shape.create(scope, labels), + org.tensorflow.op.core.Shape.create(scope, iLabels), Shapes.take( scope, org.tensorflow.op.core.Shape.create(scope, logits), @@ -148,12 +158,12 @@ Operand sparseSoftmaxCrossEntropyWithLogits( long numClassses = logitsShape.size(-1); preciseLogits = Reshape.create(scope, preciseLogits, Constant.arrayOf(scope, -1L, numClassses)); - labels = Reshape.create(scope, labels, Constant.scalarOf(scope, -1)); + iLabels = Reshape.create(scope, iLabels, Constant.scalarOf(scope, -1)); scope.withControlDependencies(shapeChecks); // call raw op org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( - scope, preciseLogits, labels); + scope, preciseLogits, iLabels); Operand cost = smax.loss(); cost = Reshape.create(scope, cost, labelsShape); if (cost.type() != logits.type()) { diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/NnOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/NnOpsTest.java new file mode 100644 index 00000000000..0436fdd57cf --- /dev/null +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/NnOpsTest.java @@ -0,0 +1,68 @@ +package org.tensorflow.framework.op; + +import org.junit.jupiter.api.Test; +import org.tensorflow.Operand; +import org.tensorflow.framework.utils.TestSession; +import org.tensorflow.op.Ops; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +class NnOpsTest { + private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; + + @Test + public void testSigmoidCrossEntropyWithLogits() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { 
+ Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + float[] x = new float[] {-100, -2, -2, 0, 2, 2, 2, 100}; + float[] y = new float[] {0, 0, 1, 0, 0, 1, 0.5f, 1}; + + Operand logits = tf.constant(x); + Operand targets = tf.constant(y); + Operand loss = fops.nn.sigmoidCrossEntropyWithLogits(targets, logits); + Operand expected = + tf.constant( + new float[] { + 0.f, 0.126928f, 2.126928f, 0.6931472f, + 2.126928f, 0.126928f, 1.126928f, 0.f + }); + session.evaluate(expected, loss); + } + } + + @Test + public void testSoftmaxCrossEntropyWithLogits() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + float[] x = new float[] {-100, -2, -2, 0, 2, 2, 2, 100}; + float[] y = new float[] {0, 0, 1, 0, 0, 1, 0.5f, 1}; + + Operand logits = tf.constant(x); + Operand targets = tf.constant(y); + Operand loss = fops.nn.softmaxCrossEntropyWithLogits(targets, logits, 0); + + session.evaluate(249.0f, loss); + } + } + + @Test + public void testSparseSoftmaxCrossEntropyWithLogits() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + float[][] x = new float[][] {{0, 0}}; + int[] y = new int[] {0}; + + Operand logits = tf.constant(x); + Operand labels = tf.constant(y); + Operand loss = fops.nn.sparseSoftmaxCrossEntropyWithLogits(labels, logits); + + session.evaluate(0.69314718f, loss); + } + } +} From b108b06ce1a3b12d05a07287d897f85c707a8cb3 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:48:53 -0400 Subject: [PATCH 48/60] Moved SetOps to FrameworkOps --- .../org/tensorflow/framework/op/{SetsOps.java => SetOps.java} | 4 ++-- .../tensorflow/framework/{metrics/impl => op}/SetOpsTest.java | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) rename tensorflow-framework/src/main/java/org/tensorflow/framework/op/{SetsOps.java => SetOps.java} (98%) rename tensorflow-framework/src/test/java/org/tensorflow/framework/{metrics/impl => op}/SetOpsTest.java (97%) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetOps.java similarity index 98% rename from tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java rename to tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetOps.java index d7833cdbb06..f76947018b5 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetsOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetOps.java @@ -24,7 +24,7 @@ import org.tensorflow.types.family.TNumber; /** Implementation of set operations */ -public class SetsOps { +public class SetOps { private final Scope scope; @@ -35,7 +35,7 @@ public class SetsOps { * * @param frameworkOps the TensorFLow framework Ops */ - SetsOps(FrameworkOps frameworkOps) { + SetOps(FrameworkOps frameworkOps) { this.scope = frameworkOps.scope(); this.frameworkOps = frameworkOps; } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/SetOpsTest.java similarity index 97% rename from tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java rename to 
tensorflow-framework/src/test/java/org/tensorflow/framework/op/SetOpsTest.java index e10f016bd94..7dee866abf2 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/SetOpsTest.java @@ -1,9 +1,7 @@ -package org.tensorflow.framework.metrics.impl; +package org.tensorflow.framework.op; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; -import org.tensorflow.framework.op.FrameworkOps; -import org.tensorflow.framework.op.SetsOps; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; From 13b6f0f46c0019ebfcbbdda407376a64d79e5343 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:51:05 -0400 Subject: [PATCH 49/60] Added tensordot and reduceLogSumExp --- .../org/tensorflow/framework/op/MathOps.java | 796 +++++++++++++++++- .../tensorflow/framework/op/MathOpsTest.java | 90 +- 2 files changed, 874 insertions(+), 12 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java index 36f5b692cab..4c2210feb9c 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -14,37 +14,59 @@ =======================================================================*/ package org.tensorflow.framework.op; +import org.tensorflow.Graph; import org.tensorflow.Operand; +import org.tensorflow.Session; import org.tensorflow.framework.losses.impl.LossTuple; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.core.AssertThat; +import org.tensorflow.op.core.Concat; import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Gather; import org.tensorflow.op.core.Identity; import org.tensorflow.op.core.OnesLike; import org.tensorflow.op.core.Range; import org.tensorflow.op.core.Rank; import org.tensorflow.op.core.ReduceAll; import org.tensorflow.op.core.ReduceMax; +import org.tensorflow.op.core.ReduceProd; import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.core.Reshape; import org.tensorflow.op.core.ScatterNd; +import org.tensorflow.op.core.Select; +import org.tensorflow.op.core.SetDiff1d; +import org.tensorflow.op.core.Slice; import org.tensorflow.op.core.Squeeze; import org.tensorflow.op.core.Stack; -import org.tensorflow.op.core.Zeros; +import org.tensorflow.op.core.StopGradient; +import org.tensorflow.op.core.ZerosLike; import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.linalg.Transpose; import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.Exp; import org.tensorflow.op.math.GreaterEqual; +import org.tensorflow.op.math.IsFinite; import org.tensorflow.op.math.Less; +import org.tensorflow.op.math.Log; import org.tensorflow.op.math.Maximum; import org.tensorflow.op.math.Mul; import org.tensorflow.op.math.Rsqrt; import org.tensorflow.op.math.Square; +import org.tensorflow.op.math.Sub; +import org.tensorflow.types.TBfloat16; +import org.tensorflow.types.TFloat16; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TFloating; import org.tensorflow.types.family.TNumber; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.List; +import 
java.util.stream.Collectors; public class MathOps { private final Scope scope; @@ -123,7 +145,7 @@ public Operand l2Normalize(Operand x, int[] axis) { * predictions}. */ public Operand confusionMatrix(Operand labels, Operand predictions) { - return confusionMatrix(labels, predictions, null, null, labels.type()); + return confusionMatrix(labels, predictions, null, null); } /** @@ -167,7 +189,7 @@ public Operand confusionMatrix(Operand labels, Operand */ public Operand confusionMatrix( Operand labels, Operand predictions, Operand weights) { - return confusionMatrix(labels, predictions, weights, null, labels.type()); + return confusionMatrix(labels, predictions, weights, null); } /** @@ -204,7 +226,6 @@ public Operand confusionMatrix( * @param weights An optional Operand whose shape matches {@code predictions}. * @param numClasses The possible number of labels the classification task can have. If this value * is null, it will be calculated using both predictions and labels. - * @param type Data type of the confusion matrix. * @param Data type of the confusion matrix. * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion * matrix, where {@code n} is the number of possible labels in the classification task. @@ -213,11 +234,7 @@ public Operand confusionMatrix( * predictions}. */ public Operand confusionMatrix( - Operand labels, - Operand predictions, - Operand weights, - Operand numClasses, - Class type) { + Operand labels, Operand predictions, Operand weights, Operand numClasses) { Scope lScope = scope.withSubScope("confusionMatrix"); LossTuple tuple = removeSqueezableDimensions(labels, predictions, 0); Operand lLabels = Cast.create(lScope, tuple.getLabels(), TInt64.class); @@ -293,7 +310,8 @@ public Operand confusionMatrix( Operand indices = Stack.create(lScope, Arrays.asList(lLabels, lPredictions), Stack.axis(1L)); Operand values = weights == null ? OnesLike.create(lScope, predictions) : weights; - Operand zeroMatrix = Zeros.create(lScope, Cast.create(lScope, shape, TInt32.class), type); + /// Operand zeroMatrix = Zeros.create(lScope, Cast.create(lScope, shape, TInt32.class), + // type); return ScatterNd.create(lScope, indices, values, shape); } @@ -317,7 +335,7 @@ public LossTuple removeSqueezableDimensions( int labelsRank = labelsShape.numDimensions(); if (predictionsRank != Shape.UNKNOWN_SIZE || labelsRank != Shape.UNKNOWN_SIZE) { - // Use static rank. + // Use rank. int rankDiff = predictionsRank - labelsRank; if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.size(-1), 1)) { predictions = Squeeze.create(lScope, predictions); @@ -350,6 +368,13 @@ public LossTuple removeSqueezableDimensions( return new LossTuple<>(labels, predictions); } + /** + * Creates an Operand that has all axes contained in the Operand's shape. + * + * @param op the Operand + * @param THe Data type for the Operand + * @return an Operand that has all axes contained in the Operand's shape.. + */ public Operand allAxes(Operand op) { int rank = op.shape().numDimensions(); if (rank != Shape.UNKNOWN_SIZE) { @@ -363,4 +388,753 @@ public Operand allAxes(Operand op) { scope, Constant.scalarOf(scope, 0), Rank.create(scope, op), Constant.scalarOf(scope, 1)); } } + + /** + * Transpose and reshape the input for contraction op. + * + *

      This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul` using + * `array_ops.transpose` and `array_ops.reshape`. The method takes a tensor and performs the + * correct transpose and reshape operation for a given set of indices. It returns the reshaped + * tensor as well as a list of indices necessary to reshape the tensor again after matrix + * multiplication. + * + * @param the type of Operand + * @param a the Tensor + * @param axis unique indices specifying valid axes of `a`. + * @param flipped whether to flip the dimensions or not + * @return A tuple (reshapedA, freeDims, freeDimsStatic) where reshapedA is a reshaped to allow + * contraction via matmul, freeDims` is a TInt32 Operand, depending on whether the shape of a + * is fully specified, and freeDimsStatic is either a list of integers and null values, or + * None, representing the inferred shape of the free dimensions + */ + private Object[] tensordotReshape( + Operand a, Operand axis, boolean flipped) { + Shape aShape = a.shape(); + + if (!aShape.hasUnknownDimension()) { // calculate using values + long[] aShapeDims = aShape.asArray(); + if (aShapeDims == null) aShapeDims = new long[0]; + long[] aDimsIndex = new long[aShapeDims.length]; + for (int i = 0; i < aDimsIndex.length; i++) aDimsIndex[i] = i; + + // get int array from axis Operand + int[] iAxes = getIntArray(axis); + // Convert negative axes to positive + for (int i = 0; i < iAxes.length; i++) + iAxes[i] = iAxes[i] >= 0 ? iAxes[i] : Math.floorMod(iAxes[i], iAxes.length); + + // convert integer axis to long axis + long[] lAxes = Arrays.stream(iAxes).mapToLong(i -> i).toArray(); + + // create list of the axes, dims, and free axes + List axesList = Arrays.stream(lAxes).boxed().collect(Collectors.toList()); + List freeList = Arrays.stream(aDimsIndex).boxed().collect(Collectors.toList()); + freeList.removeAll(axesList); + + // create array of free dims + long[] free = freeList.stream().mapToLong(i -> i).toArray(); + long[] freeDims = new long[free.length]; + for (int i = 0; i < free.length; i++) freeDims[i] = aShapeDims[(int) free[i]]; + + // Calculate the free dim by doing a reduce prod + long prodFree = 1; + for (long i : freeDims) { + prodFree *= i; + } + + // calculate the used dims by doing a reduce prod + long prodAxis = 1; + for (long i : lAxes) { + prodAxis *= aShapeDims[(int) i]; + } + + // setup the permutations array for the transpose + long[] perm = new long[freeDims.length + lAxes.length]; + Shape newShape; + if (flipped) { + System.arraycopy(lAxes, 0, perm, 0, lAxes.length); + System.arraycopy(free, 0, perm, lAxes.length, free.length); + newShape = Shape.of(prodAxis, prodFree); + } else { + System.arraycopy(free, 0, perm, 0, free.length); + System.arraycopy(lAxes, 0, perm, freeDims.length, lAxes.length); + newShape = Shape.of(prodFree, prodAxis); + } + + Operand aTrans; + long[] arrange = new long[lAxes.length]; + for (int i = 0; i < arrange.length; i++) arrange[i] = i; + + // if the permutations is not equals to the natural order of the dims, then do a transpose + if (!Arrays.equals(perm, arrange)) { + aTrans = Transpose.create(scope, a, Constant.vectorOf(scope, perm)); + } else { + aTrans = a; + } + + // reshape the final result to the new Shape, if necessary + Operand aReshaped = + aTrans.asOutput().shape().equals(newShape) + ? 
aTrans + : Reshape.create(scope, aTrans, Constant.vectorOf(scope, newShape.asArray())); + // return a tuple for the reshaped Operand, and Operand for the free dimensions, and a long + // array for the free dimensions + return new Object[] {aReshaped, Constant.vectorOf(scope, freeDims), freeDims}; + + } else { // calculate dynamically + + long[] freeDimsStatic = null; + Operand one = Constant.scalarOf(scope, 1); + Operand minusOne = Constant.scalarOf(scope, -1); + Operand zero = Constant.scalarOf(scope, 0); + org.tensorflow.op.core.Shape tShape = org.tensorflow.op.core.Shape.create(scope, a); + Operand axesT; + Operand freeT; + if (aShape.numDimensions() + != Shape.UNKNOWN_SIZE) { // we know the rank, but there are unknown dimensions + long[] aShapeDims = aShape.asArray(); + if (aShapeDims == null) aShapeDims = new long[0]; + + // get int array from axis Operand + int[] iAxes = getIntArray(axis); + // Convert negative axes to positive + for (int i = 0; i < iAxes.length; i++) + iAxes[i] = iAxes[i] >= 0 ? iAxes[i] : Math.floorMod(iAxes[i], iAxes.length); + + // convert integer axis to long axis + long[] lAxes = Arrays.stream(iAxes).mapToLong(i -> i).toArray(); + + // create list of the axes, dims, and free axes + List axesList = Arrays.stream(lAxes).boxed().collect(Collectors.toList()); + List dimsList = Arrays.stream(aShapeDims).boxed().collect(Collectors.toList()); + List freeList = new ArrayList<>(axesList); + freeList.removeAll(dimsList); + + // create array of free dims + long[] freeDims = freeList.stream().mapToLong(i -> i).toArray(); + freeDimsStatic = freeDims; + + axesT = Constant.vectorOf(scope, iAxes); + freeT = Cast.create(scope, Constant.vectorOf(scope, freeDims), TInt32.class); + + } else { // we don't know the rank yet + Rank rank = Rank.create(scope, a); + + // convert axis to positive + axesT = + Select.create( + scope, + GreaterEqual.create(scope, axis, Constant.scalarOf(scope, 0)), + axis, + Add.create(scope, axis, rank)); + + SetDiff1d diff = + SetDiff1d.create( + scope, Range.create(scope, Constant.scalarOf(scope, 0), rank, one), axesT); + freeT = diff.out(); + } + Operand freeDims = Gather.create(scope, tShape, freeT, zero); + Operand axesDims = Gather.create(scope, tShape, axesT, zero); + Operand prodFreeDims = ReduceProd.create(scope, freeDims, minusOne); + Operand prodAxesDims = ReduceProd.create(scope, axesDims, minusOne); + Operand perm; + Operand newShape; + if (flipped) { + perm = Concat.create(scope, Arrays.asList(axesT, freeT), zero); + newShape = Stack.create(scope, Arrays.asList(prodAxesDims, prodFreeDims)); + } else { + perm = Concat.create(scope, Arrays.asList(freeT, axesT), zero); + newShape = Stack.create(scope, Arrays.asList(prodFreeDims, prodAxesDims)); + } + Operand aReshaped = Reshape.create(scope, Transpose.create(scope, a, perm), newShape); + return new Object[] {aReshaped, freeDims, freeDimsStatic}; + } + } + + /** + * Gets an int array from an Operand<TInt32> operand. 
+ * + * @param axes the Operand to fetch the values + * @return the int array from an Operand<TInt32> + */ + private int[] getIntArray(Operand axes) { + List result = new ArrayList<>(); + if (scope.env().isEager()) { + axes.asTensor().scalars().forEach(s -> result.add(s.getInt())); + } else { + try (Session session = new Session((Graph) scope.env()); + TInt32 tensor = (TInt32) session.runner().fetch(axes).run().get(0)) { + tensor.scalars().forEach(s -> result.add(s.getInt())); + } + } + return result.stream().mapToInt(i -> i).toArray(); + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. + * + * @param a the Operand to analyze + * @param axis the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings("unchecked") + private Operand[] tensordotAxes(Operand a, int axis) { + Shape aShape = a.asOutput().shape(); + if (axis < 0) { + throw new IllegalArgumentException("'axis' must be at least 0."); + } + int rank = aShape.numDimensions(); + Operand[] result = new Operand[2]; + if (rank != Shape.UNKNOWN_SIZE) { + if (axis > rank) { + throw new IllegalArgumentException( + String.format( + "'axis' must not be larger than the number of dimensions of tensor %s.", rank)); + } + int min = rank - axis; + int postRange = rank - min; + int[] postAxis = new int[postRange]; + for (int i = 0; i < postRange; i++) postAxis[i] = i + min; + + int[] preAxis = new int[axis]; + for (int i = 0; i < axis; i++) preAxis[i] = i; + + result[0] = Constant.vectorOf(scope, postAxis); + result[1] = Constant.vectorOf(scope, preAxis); + } else { + Rank rankT = Rank.create(scope, a); + Constant axisT = Constant.scalarOf(scope, axis); + Constant one = Constant.scalarOf(scope, 1); + Constant zero = Constant.scalarOf(scope, 0); + AssertThat assertion = + AssertThat.create( + scope, + Less.create(scope, axisT, rankT), + Arrays.asList( + Constant.scalarOf( + scope, "'axes' must not be larger than the number of dimensions of tensor "), + rankT)); + Scope scope1 = scope.withControlDependencies(Collections.singletonList(assertion)); + result[0] = Range.create(scope1, Sub.create(scope, rankT, axisT), rankT, one); + result[1] = Range.create(scope1, zero, axisT, one); + } + return result; + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. + * + * @param a the Operand to analyze + * @param axes the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings({"unchecked", "unused"}) + private Operand[] tensordotAxes(Operand a, int[] axes) { + if (axes.length != 2) + throw new IllegalArgumentException( + "'axes' must have length 1 or 2, provided with " + axes.length); + int[] aAxis = new int[] {axes[0]}; + int[] bAxis = new int[] {axes[1]}; + Operand[] result = new Operand[2]; + result[0] = Constant.vectorOf(scope, aAxis); + result[1] = Constant.vectorOf(scope, bAxis); + + return result; + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. 
+ * + * @param a the Operand to analyze + * @param axes the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings({"unchecked", "unused"}) + private Operand[] tensordotAxes(Operand a, int[][] axes) { + if (axes.length != 2) + throw new IllegalArgumentException( + "'axes' must have length 1 or 2, provided with " + axes.length); + int[] aAxis = axes[0]; + int[] bAxis = axes[1]; + if (aAxis.length != bAxis.length) + throw new IllegalArgumentException( + String.format( + "Different number of contraction axes 'a' and 'b', %d != %d", + aAxis.length, bAxis.length)); + Operand[] result = new Operand[2]; + result[0] = Constant.vectorOf(scope, aAxis); + result[1] = Constant.vectorOf(scope, bAxis); + return result; + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. + * + * @param a the Operand to analyze + * @param axes the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings({"unchecked", "unused"}) + private Operand[] tensordotAxes(Operand a, Operand axes) { + + Constant one = Constant.scalarOf(scope, 1); + Constant zero = Constant.scalarOf(scope, 0); + Operand[] result = new Operand[2]; + result[0] = + Slice.create( + scope, + axes, + Cast.create(scope, zero, TInt32.class), + Cast.create(scope, one, TInt32.class)); + result[1] = + Slice.create( + scope, + axes, + Cast.create(scope, one, TInt32.class), + Cast.create(scope, one, TInt32.class)); + return result; + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + *

      + * Tensordot (also known as tensor contraction) sums the product of elements + * from a and b` over the indices specified by + * a_axes and b_axes. The lists + * a_axes and b_axes specify those pairs of axes + * along which to contract the tensors. The axis a_axes[i] of + * a must have the same dimension as axis + * b_axes[i] of b for all i in + * range(0, len(a_axes)). The lists + * a_axes and b_axes must have identical length + * and consist of unique integers that specify valid axes for each of the + * tensors. Additionally outer product is supported by passing + * axes=0. + *

      + * This operation corresponds to numpy.tensordot(a, b, axes). + *

      + * Example 1: When a and b are matrices (order 2), + * the case axes = 1 is equivalent to matrix multiplication. + *

+ * Example 2: When a and b are matrices (order 2), the case axes = [[1], [0]] is equivalent to matrix multiplication. + *&#13;

      + * Example 3: When a and b are matrices (order 2), + * the case axes=0 gives the outer product, a tensor of order + * 4. + *

+ * Example 4: Suppose that a_ijk and b_lmn represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor c_jklm whose entry corresponding to the indices (j,k,l,m) is given by: + *&#13;
&#13;
+ * c_jklm = Σ_i a_ijk b_lmi. + *&#13;

      + * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + *

      + * + * @param a `Operand` of type `float32` or `float64`. + * @param b `Operand` with the same type as `a`. + * @param axis sum over the last N axes of a and the + * first N axes of b in order. If `axes=0`, computes the outer + * product between `a` and `b`. + * @param the datatype of the Operands, must be either TFloat32 or + * TFloat64 + * @return A `Operand` with the same type as `a`. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + */ + @Endpoint(name = "tensordot") + public Operand tensordot(Operand a, Operand b, int axis) { + + Operand[] abAxis = tensordotAxes(a, axis); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + return tensordot(a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + *

      + * Tensordot (also known as tensor contraction) sums the product of elements + * from a and b` over the indices specified by + * a_axes and b_axes. The lists + * a_axes and b_axes specify those pairs of axes + * along which to contract the tensors. The axis a_axes[i] of + * a must have the same dimension as axis + * b_axes[i] of b for all i in + * range(0, len(a_axes)). The lists + * a_axes and b_axes must have identical length + * and consist of unique integers that specify valid axes for each of the + * tensors. Additionally outer product is supported by passing + * axes=0. + *

      + * This operation corresponds to numpy.tensordot(a, b, axes). + *

      + * Example 1: When a and b are matrices (order 2), + * the case axes = 1 is equivalent to matrix multiplication. + *

+ * Example 2: When a and b are matrices (order 2), the case axes = [[1], [0]] is equivalent to matrix multiplication. + *&#13;

      + * Example 3: When a and b are matrices (order 2), + * the case axes=0 gives the outer product, a tensor of order + * 4. + *

+ * Example 4: Suppose that a_ijk and b_lmn represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor c_jklm whose entry corresponding to the indices (j,k,l,m) is given by: + *&#13;
&#13;
+ * c_jklm = Σ_i a_ijk b_lmi. + *&#13;

      + * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + *

      + * + * @param a `Operand` of type `float32` or `float64`. + * @param b `Operand` with the same type as `a`. + * @param axes If axes is a scalar, sum over the last N axes of a and the + * first N axes of b in order. If axes is a list, the first and second row + * contain the set of unique integers specifying axes along which the + * contraction is computed, for `a` and `b`, respectively. The number of + * axes for `a` and `b` must be equal. If `axes=0`, computes the outer + * product between `a` and `b`. + * @param the datatype of the Operands, must be either TFloat32 or + * TFloat64 + * @return A `Operand` with the same type as `a`. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + */ + @Endpoint(name = "tensordot") + public Operand tensordot( + Operand a, Operand b, Operand axes) { + + Operand[] abAxis = tensordotAxes(a, axes); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + + return tensordot(a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + *

      + * Tensordot (also known as tensor contraction) sums the product of elements + * from a and b` over the indices specified by + * a_axes and b_axes. The lists + * a_axes and b_axes specify those pairs of axes + * along which to contract the tensors. The axis a_axes[i] of + * a must have the same dimension as axis + * b_axes[i] of b for all i in + * range(0, len(a_axes)). The lists + * a_axes and b_axes must have identical length + * and consist of unique integers that specify valid axes for each of the + * tensors. Additionally outer product is supported by passing + * axes=0. + *

      + * This operation corresponds to numpy.tensordot(a, b, axes). + *

      + * Example 1: When a and b are matrices (order 2), + * the case axes = 1 is equivalent to matrix multiplication. + *

+ * Example 2: When a and b are matrices (order 2), the case axes = [[1], [0]] is equivalent to matrix multiplication. + *&#13;

      + * Example 3: When a and b are matrices (order 2), + * the case axes=0 gives the outer product, a tensor of order + * 4. + *

+ * Example 4: Suppose that a_ijk and b_lmn represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor c_jklm whose entry corresponding to the indices (j,k,l,m) is given by: + *&#13;
&#13;
+ * c_jklm = Σ_i a_ijk b_lmi. + *&#13;

      + * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + *

      + * + * @param a `Operand` of type `float32` or `float64`. + * @param b `Operand` with the same type as `a`. + * @param axes the first and second row + * contain the set of unique integers specifying axes along which the + * contraction is computed, for `a` and `b`, respectively. The number of + * axes for `a` and `b` must be equal. I + * @param the datatype of the Operands, must be either TFloat32 or + * TFloat64 + * @return A `Operand` with the same type as `a`. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + */ + @Endpoint(name = "tensordot") + public Operand tensordot(Operand a, Operand b, int[] axes) { + + Operand[] abAxis = tensordotAxes(a, axes); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + + return tensordot(a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + *

      + * Tensordot (also known as tensor contraction) sums the product of elements + * from a and b` over the indices specified by + * a_axes and b_axes. The lists + * a_axes and b_axes specify those pairs of axes + * along which to contract the tensors. The axis a_axes[i] of + * a must have the same dimension as axis + * b_axes[i] of b for all i in + * range(0, len(a_axes)). The lists + * a_axes and b_axes must have identical length + * and consist of unique integers that specify valid axes for each of the + * tensors. Additionally outer product is supported by passing + * axes=0. + *

      + * This operation corresponds to numpy.tensordot(a, b, axes). + *

      + * Example 1: When a and b are matrices (order 2), + * the case axes = 1 is equivalent to matrix multiplication. + *

+ * Example 2: When a and b are matrices (order 2), the case axes = [[1], [0]] is equivalent to matrix multiplication. + *&#13;

      + * Example 3: When a and b are matrices (order 2), + * the case axes=0 gives the outer product, a tensor of order + * 4. + *

+ * Example 4: Suppose that a_ijk and b_lmn represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor c_jklm whose entry corresponding to the indices (j,k,l,m) is given by: + *&#13;
&#13;
+ * c_jklm = Σ_i a_ijk b_lmi. + *&#13;

      + * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + *

      + * + * @param a `Operand` of type `float32` or `float64`. + * @param b `Operand` with the same type as `a`. + * @param axes the first and second row + * contain the set of unique integers specifying axes along which the + * contraction is computed, for `a` and `b`, respectively. The number of + * axes for `a` and `b` must be equal. I + * @param the datatype of the Operands, must be either TFloat32 or + * TFloat64 + * @return A `Operand` with the same type as `a`. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + */ + @Endpoint(name = "tensordot") + public Operand tensordot(Operand a, Operand b, int[][] axes) { + + Operand[] abAxis = tensordotAxes(a, axes); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + + return tensordot(a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + *

      + * Tensordot (also known as tensor contraction) sums the product of elements + * from a and b` over the indices specified by + * a_axes and b_axes. The lists + * a_axes and b_axes specify those pairs of axes + * along which to contract the tensors. The axis a_axes[i] of + * a must have the same dimension as axis + * b_axes[i] of b for all i in + * range(0, len(a_axes)). The lists + * a_axes and b_axes must have identical length + * and consist of unique integers that specify valid axes for each of the + * tensors. Additionally outer product is supported by passing + * axes=0. + *

      + * This operation corresponds to numpy.tensordot(a, b, axes). + *

      + * Example 1: When a and b are matrices (order 2), + * the case axes = 1 is equivalent to matrix multiplication. + *

+ * Example 2: When a and b are matrices (order 2), the case axes = [[1], [0]] is equivalent to matrix multiplication. + *&#13;

      + * Example 3: When a and b are matrices (order 2), + * the case axes=0 gives the outer product, a tensor of order + * 4. + *

+ * Example 4: Suppose that a_ijk and b_lmn represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor c_jklm whose entry corresponding to the indices (j,k,l,m) is given by: + *&#13;
&#13;
+ * c_jklm = Σ_i a_ijk b_lmi. + *&#13;

      + * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + *

      + * + * @param a `Operand` of type `float32` or `float64`. + * @param b `Operand` with the same type as `a`. + * @param aAxis axes for the a Operand + * @param bAxis axes for the b Operand + * @param the datatype of the Operands, must be either TFloat32 or + * TFloat64 + * @return A `Operand` with the same type as `a`. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + */ + @SuppressWarnings({"unchecked", "unused"}) + @Endpoint(name = "tensordot") + public Operand tensordot( + Operand a, Operand b, Operand aAxis, Operand bAxis) { + + if (a.type().equals(TBfloat16.class) || a.type().equals(TFloat16.class)) { + throw new IllegalArgumentException( + String.format( + "Operand 'a' must be either TFloat32 or TFloat64 DataType, 'a' is a %s DataType", + a.type().getSimpleName())); + } + if (!a.type().equals(b.type())) { + throw new IllegalArgumentException( + String.format( + "Operands a and b must be the same data type, a is %s DataType, b is %s DataType", + a.type().getSimpleName(), b.type().getSimpleName())); + } + + // first result is Operand, second result is Operand, third result is long[] and it is + // ignored here. + Object[] aResult = tensordotReshape(a, aAxis, false); + Operand reshapedA = (Operand) aResult[0]; + Operand aFreeDims = (Operand) aResult[1]; + long[] aFreeDimsStatic = (long[]) aResult[2]; + + // first result is Operand, second result is Operand, third result is long[] and it is + // ignored here. + Object[] bResult = tensordotReshape(b, bAxis, true); + Operand reshapedB = (Operand) bResult[0]; + Operand bFreeDims = (Operand) bResult[1]; + long[] bFreeDimsStatic = (long[]) bResult[2]; + + Operand abMatmul = frameworkOps.linalg.matmul(reshapedA, reshapedB); + long[] abDimsStatic = new long[aFreeDimsStatic.length + bFreeDimsStatic.length]; + System.arraycopy(aFreeDimsStatic, 0, abDimsStatic, 0, aFreeDimsStatic.length); + System.arraycopy( + bFreeDimsStatic, 0, abDimsStatic, aFreeDimsStatic.length, bFreeDimsStatic.length); + if (!abMatmul.shape().hasUnknownDimension() + && abMatmul.shape().equals(Shape.of(abDimsStatic))) { + return abMatmul; + } else { + return Reshape.create(scope, abMatmul, Constant.vectorOf(scope, abDimsStatic)); + } + } + + /** + * Computes log(sum(exp(elements across dimensions of a tensor))). Reduces {@code input_tensor} + * along the dimensions given in {@code axes}. + * + *

      Reduces `{@code input} along the dimensions given in {@code axes}. Unless {@code keepdims} + * is true, the rank of the tensor is reduced by 1 for each of the entries in {@code axes}, which + * must be unique. If {@code keepdims} is true, the reduced dimensions are retained with length 1. + * If {@code axes} has no entries, all dimensions are reduced, and a tensor with a single element + * is returned. This function is more numerically stable than {@code log(sum(exp(input)))}. It + * avoids overflows caused by taking the exp of large inputs and underflows caused by taking the + * log of small inputs. + * + * @param input The tensor to reduce. + * @param axes The dimensions to reduce. If null, reduces all dimensions. Must be in the range + * {@link [-rank(input_tensor), rank(input_tensor)]}. + * @param keepDims If true, retains reduced dimensions with length 1. + * @return The reduced tensor. + */ + @Endpoint(name = "reduceLogSumExp") + public Operand reduceLogSumExp( + Operand input, int[] axes, boolean keepDims) { + Operand reduceDims = reductionDims(input, axes); + Operand rawMax = reduceMaxWithDims(input, axes, keepDims, reduceDims); + Operand myMax = + StopGradient.create( + scope, + Select.create( + scope, IsFinite.create(scope, rawMax), rawMax, ZerosLike.create(scope, rawMax))); + + Operand result = + Log.create( + scope, + reduceSumWithDims( + Exp.create(scope, Sub.create(scope, input, myMax)), axes, keepDims, reduceDims)); + + if (!keepDims) { + myMax = Reshape.create(scope, myMax, org.tensorflow.op.core.Shape.create(scope, result)); + } + result = Add.create(scope, result, myMax); + return mayReduceToScalar(keepDims, axes, result); + } + + private Operand reduceSumWithDims( + Operand input, int[] axes, boolean keepDims, Operand dims) { + return mayReduceToScalar( + keepDims, axes, ReduceSum.create(scope, input, dims, ReduceSum.keepDims(keepDims))); + } + + private Operand reduceMaxWithDims( + Operand input, int[] axes, boolean keepDims, Operand dims) { + return mayReduceToScalar( + keepDims, axes, ReduceMax.create(scope, input, dims, ReduceMax.keepDims(keepDims))); + } + + /** + * Sets a reduction's output shape to be a scalar if possible. + * + * @return the operand, possibly reduced to a scalar. + */ + private Operand mayReduceToScalar( + boolean keepDims, int[] axes, Operand output) { + + if ((output.shape().numDimensions() == Shape.UNKNOWN_SIZE + || output.shape().hasUnknownDimension()) + && !keepDims + && axes == null) { + return Reshape.create(scope, output, Constant.tensorOf(scope, Shape.scalar())); + } else { + return output; + } + } + + /** + * Reduce dimensions based on axis + * + * @param input the input + * @param axes he dimensions to reduce, may be null + * @return the dimensions to be reduced. 
+ */ + private Operand reductionDims(Operand input, int[] axes) { + if (axes != null) { + return Constant.vectorOf(scope, axes); + } + long rank = input.shape().numDimensions(); + if (rank != Shape.UNKNOWN_SIZE) { + int[] dims = new int[(int) rank]; + for (int i = 0; i < rank; i++) { + dims[i] = i; + } + return Constant.vectorOf(scope, dims); + + } else { + return Range.create( + scope, + Constant.scalarOf(scope, 0), + Rank.create(scope, input), + Constant.scalarOf(scope, 1)); + } + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java index 326e3cdc2d1..dda5a7c6eaa 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java @@ -5,9 +5,12 @@ import org.tensorflow.framework.utils.TestSession; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; +import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt64; +import static org.junit.jupiter.api.Assertions.assertThrows; + class MathOpsTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -386,7 +389,7 @@ public void testL2Normalize() { Ops tf = session.getTF(); FrameworkOps fops = FrameworkOps.create(tf); Operand input = tf.constant(array); - Operand result = fops.math.l2Normalize(tf.constant(array), new int[]{ 0,1,2}); + Operand result = fops.math.l2Normalize(tf.constant(array), new int[] {0, 1, 2}); session.evaluate(tf.constant(expectedArray), result); } } @@ -410,4 +413,89 @@ public void testConfusionMatrix() { session.evaluate(tf.constant(expected), result); } } + + @Test + public void testTensorDotValid() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + int[] axes1 = new int[] {1, 2}; + int[][] axes2 = new int[][] {{1}, {2}}; + int[][] axes3 = new int[2][0]; + int axes4 = 0; + + Operand a = tf.ones(tf.constant(Shape.of(3, 3)), TFloat32.class); + Operand b = tf.constant(new float[][][] {{{2, 3, 1}}}); + + Operand ans = fops.math.tensordot(a, b, axes1); + Operand expected = tf.constant(new float[][][] {{{6}}, {{6}}, {{6}}}); + session.evaluate(expected, ans); + + ans = fops.math.tensordot(a, b, axes2); + expected = tf.constant(new float[][][] {{{6}}, {{6}}, {{6}}}); + session.evaluate(expected, ans); + + ans = fops.math.tensordot(a, b, axes3); + + float[][][][][] expectedArray = + new float[][][][][] { + {{{{2, 3, 1}}}, {{{2, 3, 1}}}, {{{2, 3, 1}}}}, + {{{{2, 3, 1}}}, {{{2, 3, 1}}}, {{{2, 3, 1}}}}, + {{{{2, 3, 1}}}, {{{2, 3, 1}}}, {{{2, 3, 1}}}} + }; + ans = fops.math.tensordot(a, b, axes3); + expected = tf.constant(expectedArray); + session.evaluate(expected, ans); + + ans = fops.math.tensordot(a, b, axes4); + expected = tf.constant(expectedArray); + session.evaluate(expected, ans); + } + } + + @Test + public void testTensorDotInValidAxis() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand a = tf.constant(new float[][] {{1, 2}, {3, 4}}); + Operand b = tf.constant(new float[][] {{1, 2}, {3, 4}}); + assertThrows(IllegalArgumentException.class, () -> fops.math.tensordot(a, b, -1)); + 
assertThrows(IllegalArgumentException.class, () -> fops.math.tensordot(a, b, 3)); + assertThrows( + IllegalArgumentException.class, () -> fops.math.tensordot(a, b, new int[] {1})); + assertThrows( + IllegalArgumentException.class, () -> fops.math.tensordot(a, b, new int[][] {{1}})); + assertThrows( + IllegalArgumentException.class, + () -> fops.math.tensordot(a, b, new int[][] {{1}, {0, 1}})); + + assertThrows( + ArrayIndexOutOfBoundsException.class, + () -> fops.math.tensordot(a, b, new int[][] {{0}, {7}})); + } + } + + @Test + public void testReduceLogSumExp() { + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand x = + tf.constant( + new float[][] { + {0.43346116f, 0.8569728f, 0.57155997f, 0.0743812f, 0.63846475f}, + {0.8165283f, 0.26554802f, 0.37025765f, 0.8255019f, 0.45682374f}, + {0.93511814f, 0.52291054f, 0.80983895f, 0.11580781f, 0.8111686f}, + {0.49967498f, 0.27537802f, 0.48554695f, 0.28238368f, 0.7989301f}, + {0.8958915f, 0.84870094f, 0.56874424f, 0.08818512f, 0.13915819f} + }); + + Operand result = fops.math.reduceLogSumExp(x, new int[] {0, 1}, false); + session.evaluate(3.7911222f, result); + } + } } From f1dbb01f9646f71a1e75524b278fe287a76ebc03 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:52:43 -0400 Subject: [PATCH 50/60] Added frameworkOps for nn and linalg --- .../java/org/tensorflow/framework/op/FrameworkOps.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index c8b234f2c51..d9e3eec4b21 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -31,8 +31,9 @@ public class FrameworkOps { public final Ops coreOps; public final NnOps nn; - public final SetsOps sets; + public final SetOps sets; public final MathOps math; + public final LinalgOps linalg; private final Scope scope; /** @@ -44,8 +45,9 @@ private FrameworkOps(Scope scope) { this.coreOps = Ops.create(scope.env()); this.scope = scope; nn = new NnOps(this); - sets = new SetsOps(this); + sets = new SetOps(this); math = new MathOps(this); + linalg = new LinalgOps(this); } /** @@ -57,8 +59,9 @@ private FrameworkOps(Ops coreOps) { this.coreOps = coreOps; this.scope = coreOps.scope(); nn = new NnOps(this); - sets = new SetsOps(this); + sets = new SetOps(this); math = new MathOps(this); + linalg = new LinalgOps(this); } /** From 6174a3256ea1a6b598adde9583884ea98048439b Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:53:15 -0400 Subject: [PATCH 51/60] Modified to use FrameworkOps --- .../main/java/org/tensorflow/framework/losses/Losses.java | 1 + .../java/org/tensorflow/framework/metrics/MeanIoU.java | 7 ++++--- .../tensorflow/framework/metrics/impl/MetricsHelper.java | 1 + .../framework/metrics/impl/WeightsBroadcastOps.java | 4 +++- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index 398588cee67..6700f2569f0 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ 
b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -27,6 +27,7 @@ import org.tensorflow.op.math.Softplus; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TFloating; import org.tensorflow.types.family.TNumber; import static org.tensorflow.framework.utils.CastHelper.cast; diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java index 00ae3727249..06affc710a6 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanIoU.java @@ -21,7 +21,7 @@ import java.util.List; import org.tensorflow.Operand; import org.tensorflow.framework.initializers.Zeros; -import org.tensorflow.framework.metrics.impl.MetricsHelper; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; @@ -170,10 +170,11 @@ public List updateStateList( tSampleWeights = getTF().shape.flatten(tSampleWeights); } + FrameworkOps fops = FrameworkOps.create(getTF()); // Accumulate the prediction to current confusion matrix. Operand currentCM = - MetricsHelper.confusionMatrix( - getTF(), tLabels, tPredictions, getTF().constant(numClasses), tSampleWeights, type); + fops.math.confusionMatrix( + tLabels, tPredictions, tSampleWeights, getTF().constant(numClasses)); return Collections.singletonList(getTF().assignAdd(totalConfusionMatrix, currentCM)); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java index 7572adc366a..61d1962184f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java @@ -32,6 +32,7 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; +; import org.tensorflow.op.core.Assign; import org.tensorflow.op.core.OneHot; import org.tensorflow.op.core.Rank; diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java index 2df90a841ee..ef548442735 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/WeightsBroadcastOps.java @@ -20,6 +20,7 @@ import java.util.Collections; import java.util.List; import org.tensorflow.Operand; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; @@ -149,13 +150,14 @@ private static Operand hasValidNonscalarShape( private static Operand hasValidDims( Ops tf, Operand weightsShape, Operand valuesShape) { tf = tf.withSubScope("hasInvalidDims"); + FrameworkOps fops = FrameworkOps.create(tf); Operand valuesShape2d = tf.expandDims(valuesShape, tf.constant(-1)); Operand validDims = tf.concat(Arrays.asList(valuesShape2d, tf.onesLike(valuesShape2d)), tf.constant(1)); Operand weightsShape2d = tf.expandDims(weightsShape, tf.constant(-1)); - Operand invalidDims = SetsOps.difference(tf, weightsShape2d, 
validDims); + Operand invalidDims = fops.sets.difference(weightsShape2d, validDims); Operand numInvalidDims = tf.size(invalidDims, TInt32.class); return tf.math.equal(tf.constant(0), numInvalidDims); } From 5523896925217a4609919d0f4085ee887996ef09 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 18:54:36 -0400 Subject: [PATCH 52/60] move nn.raw classes to nn in core, remove nn.raw --- .../src/gen/resources/ops.pb | Bin 1480980 -> 1462288 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb index 4c3e6bef038286ce7eb88eeb01798070a47682f6..fbcecceb5bd35e3681296ef57e58ae324733949a 100644 GIT binary patch delta 12648 zcmeHN3v^V~x$fEX%IrKyW->!EA+R%mBr%L30%FvPJmn2Z01cJQ$;_E#Waga7IcI=q zajHC`sI7>0OjHqdnMxweTYl<2rqBc3HMLON)HNJ(u zcoVudas0ypMD?Bs6cz2booOlU~J!wYLW5o zBWONCdz`d>FIqv5{0NoMw;xAAdSEY_N1u5BxzHaGJ$)a#p1L1FI(=&oT1XE(iY6ML zK8jv(q5EC*`6q@9T>B*2$p;F%Xy7UIT{dnsefueNHU0JzXg(k8z_-;H#rsj2fUd1J zYMw@MBwR|bJb>zi3#st1D@b3TgnabP188&+iKV!G2D+X_M%S~B@>aCpZS?)!Q6(E;R+eQ`)|j9Ko5QH=r!(hIIC6*H`D!{&TpbOT*i@3=QhFr4C?4j zYDCEpT@__5CF;`7E);B;yHQMMQW8<6n4-qDCL+l(B`(EDR!hn~7)(gkXu2rJMKw+q zwl7A9CU7r2{nQr4B7QBTS?xZZL{UoC*x)M`TxI3FjNU9bg9WbdX zY1xzxOAyoJBqnl)$)sB%30PlOh$#0HCBfWC6!=HQmavmEqRONLZD7k7W7j%E4}0!J z*fA%$esJ%)obC#j!_bOdkDTK<2v6sxvUJLfvM^mZ-xWl+Alg0OwRpwS*-AQ-)nPA6 z8#9V5$$C;ukyKKaM75Pf;bwh867?(y+(0IiDN&M4kaJ0M6N%dQqo#NplA08mr5Ys* zT*@T$%4lO##uXFjKl)rQ^g$(stJ91o)#WQPqL;ZEN}O~wo&ms2_hhWev>)kcNZS;j@0gCq6`JqO>3GF%W6k^c_VbO!6N|5WN1*g)sAL=;>MRrdXu#np zF&vK4^*4c+E~BL(LHH@Xc~|M#zH43BrS*y#$!Gov9j1qNm4=MJ>?++mTi8ec>Fv^a zHDj=|BN^XlLU0H*J6bU=YH^qO1!NSzQ~FW3_z1$DHc3p45DSl;S$55I;V*Rm^s)q^QA#pl+*J#x=I?#DeNPYQ3A3!&;`J`B&t?xYMctoiugvvn;r?E}pS)JXI zAya5X*+h!x zK<}QUrb{3bwoJe`tfiT`VZj=NColWboK%xQLzbj$8x@)evpjZ`Rk=~DD1f12R5u3? 
znIBwbrAZAREj@Tx%cfyt%aE?Y%8=W+k>M{(nUL$*1E{FDH;ftQmyKyb18(|oh4PuDxx>}i(Uyt%im^wG$01*8RURM4?zE)$DslJ?4^*iy zD`|R_Cscxq;1$TYnpEWp2XT>IC)j`xLcVW=1K67olc}zRsOdgux1yv%s9p$H;Zi-B zmOx{`kgl}mb0p=Q9zs>$;Q8cFRa_E44Hy3r{!qxvz_+c5} zlAY8iBT|I)Dw^KK_HAoMQS}D99%)-Xn{CMTk{ZDaOnc|-N~RZlk%ryP94nJ4L`q3% zC@Y%S9+63!l|8KDf+9vtrKA*5GcoWpw~J=R2E5IM0Vj%T4|rMDNw=zq@feuF!mp@l zR;Zj|_eEiRju|!@3BM`WCihvU5B=N7nwm2ZTVdF4Y!{E9+6ifv`|4D)BpTGsQtfx( zM$1w@`$yV+?sJIA;O*}IeSzDFU#;e1BoD(%El!!sRv9xDbjMQP#DgGkL2RLMp1OSt&~Dt?3)A@L_1cYH=nSK%gwebj%~Q3F42TJ`YL~SKt*{(Md9$ z29O7gPxUh}1-ywjK(E1shu~z&S~Esk?m@#*WhlrncnKd7qR{wQA~qMw$JaMQXq}|W z5ZD~Fq3^ZB3Nj0Kmzfe}kcfd8!)?}6*s&Lv#5ij|>=qfewW*kWRxI*C%{s1(ha7}^{SFY(opqqXoX0EW)sR| z0$p1)A7%B-77Wx+XL!z*nzgij5CTm%SW7mTV{M@2Lz$o!ZnCUEz!Ka&subkDwZ>R8 z!2Y0r+F4wgvqFdKwjTB@i~`jxRBD2C;6I+XhnOOdP7M)^=fN1s?AD>gN|1gfOM1oi z`F+fGeliA~byk*na}0K1DyEc}m`gF0TU*YYI;{z_gaM@*L?>7%A;J*YIS@;UWDi7E z9JUFR$IMI#C6!V(u(^0>gSCN{PwAB|PodFX?Zy za?^4~1T=s@p_vB!;7?3MvYxQ9oA9P{_|$Am$`Gkgwef0(?_XZGn+cJ*mWg0zuk%Ql zX*#HA7+ZdVCzJC6o?#0XHdpBYJ)?PVnOp>YZC#*}?tj8nx&jom`WO=BnlQNK;)3#K zPC18%Gqaaf<(l{HJOY!ON=AA7(Pv*rl{xMMt41gFSwJPI!(*)CPGnQy3r5K>2nDE8 zR13D49|pz2T$}>)MeW@*e5(LZ@(R_CHK2HM49$VS7^YDZH#Zm__JeJ1+cpHO80ICC zHQhAQ*l3q9jg3U&3Bod@)`X9CT)|AaHQ{g*%vbpTB~cK32`ig=Bw1pm5l9Gp3@3(S z9M^1AV39Ih_5#zGyH&#pRY?=5>$9+FEwF+*o%MhVRhGV(L7Cg4Cgs(|H#wApnI`U0 za1WGsI)_Hvhvu?{R;SOVo&Fry*hE8X0ae2V^F!P~DLyzqDB9S#bXmt-u-j@yUB?9B zK{x2$p*frdD64>NOoqk|C^;ai7=ebxSWeFZXT{|wO82FJFD$wNr=Sc4D{2a)LK}NU z83H~oL9YURFH0yeH;`N~23jAs^8itaKM8>8$j{|-~(ur z<0UY-%1p>?R+d#^7un#*r~*C$RImYnJm{BEQ8CiJ%)py)VE%~Y;Ey`dj$V4VRY;|3oc2Cf0 zRm`1sb%~!8u&Yh$Q}vyP${PmOmz5jq%PQK>K_8dU9g8ab#%~r>R15U)T2X*bTU61B z-tZc5zYz)Dv@BZ`GTcimYCX>Hx`5z%W@SY!eQU3;oOZ0NIL~>P)4Ep#eM%S;SvH>#P_v&~;IT(RGn; z<;Cbu4_&g)H<2xq5eDeueZB@)Jw6TQzG9#6TUR-E6ais%<#hiT`pdKZi|MlIewQ)6 z)qmL{^p2PQV5@&BdeBRMxz*o`9xpP^-{xP3ob5_lGVZ$0f3V&217K0xv)!CytsUMH zG7idY3yXX~J>eB&U`u6- z16}E$=WeO2p>GUSjuoD#pAJ;6F&152Ns4lxAUw8%^EQzG0DEV1Bn_vZRh9W0W=^Z}@SgCYFPx~{5hBAEi?DjCaJBHfA*=Zcmku>j@rdB3P5&uW z<(69SnL=3anHQlJ$d-0mF{9SX8CD4Zg)jtryo;%&McDF@)-z4bq3!_&AND^OL&>TT z%PgJeL(1OtdQO2HAL%R<&$=*Wz#k%9Q)pPW${Z<=KzSn`->P>XYnHjDeqhtuJstjhi=A1l=gxil}}~w4P3zKli}F)boMhMTKs{Tby|GaP^i$ z;z-8yqj!!Em&GB3r7ruxq)G?@F9Z+2J4l(0Fta*)~} z1+v1zwkA)Ak_bGgnUz^NZFYRTB8ari!{vMq`q*EEv52(O7mLx@wZOqHGf7c4k+q23 zO!OqzL+xwQsEnN+y3d9g6F-J98D<^&+Q-60{+EQKAI$tjsPsd&88$$o2WwCaNAe&; zr(9g=&zYFZprOr*LHq*3)x!tp;p`~ihiq^jg2kJUHwU=8##*999TXtHMtH%|9=E|i zG76ICOPlM7bz4K5lAu>H@L zw_8&tPo%YOz69((JQ_S7F*#`1_hxb$qOx)64VCW?2s7!@s=&D(#-Gpnj@#H#6^IlG zi|7Fom{tup8S*f+g$k6N8?(xE)^0@0>H^aOW}C}eRwy?CuHa-x!}OxMz*z#JH`E0J z0;Uht1_o9IYIz%)(zlhG_mIk80{P@M^A*(9%QtCF~*h{ip3)P_m||gb0pjn+#q$P^U@MF*eRjxEv_PWa>O{1>lDjds+)7G+=}o83r|KzNTOVBO_0~ za+V%Yhtc#rC&A}9r<|qFnKV7m9FUNKTRF8XeU7PbA0x2zb&6T~oJrFI^!?wk^f{BJ zw<)^FtT#sSv z`I@2sf5yF_ZhJ#few z%t4K09x%ar8Z@K%6H9)cV6mYi6;;+JJ_QkPmVD;-RiHlmXpTUlPBKo_L9Z1ySE2!6R0uo+CUJYzj}?e*9Vp$dSJH)(^szu zcxdVNz%F{)HfVvfTLUo~*&Y~6-`^UTNf+!0)X>S>0-McyPi+et_cq7gkX`Cs-pCtcSv@=-xTOTA^SKc5n6E z;6$NdGGpAl;8T-?@6l`~*g~Jl1n0xI@}9onLYVA8COF5~(-#a>pj$k2$CjYq`1MsV zr7(?twmEnjEn5;S7nalaNN^_Iv?b^h-Z5_25}aN5d=#2B_(C~NZVOf#PhS^YRq{Wt C04}Ql delta 17292 zcmd6O3w%`Nm3Z%+XTCfpbLWxCSM^G6?|@NFWKQK}~LEZjzD7%rJL` zBv2brE3H;Rv7XxXQBrED)vBnyb+@`;wOXs9b=9^Cg>~JZt-H3AN_|xQpYPr~k4yq$ zxBuPcM`q@JkMo`HeCPe0yX)JR+_Jy-fxWZx9%6~X9}KSdLA9Qgx+@6N!|4;_F|nB= z#lE5<8^h|Y&Jv^Hp(C??vzhtn?niI0b(dyF6_ElTF&cDvW{zQr^!`{d{aZF&vE$fnUrg2EFTrct{usi*@t8P4xsDg5p4OXebmJ@&<;*z{b8v zazu=W0#cpO$d8B;1+;k1oO(hAGyoGR2$7gKB18~Ke7u?P6!nI&wSMce9r;Pe5ch 
z`#AFvY`U4f+N2#c_`3^i@boS0GBd%Q(qoq$-(VM7_3!A31N?hg8;jic&VF{jm9r3o zG%UnKkFiJjhS=R~4fsAPD1!rg*;V>$b=nlmdS9{5T}jI0!e}%mc@;Ri^*#?A_&!?- z^B!i;F7^~@&^Q$+_`l1Rz~I6n2UOj|UJj1C*>hm!BkU|wuR1_pbvJt!tAB*zwf1h- z28Z{vE8$Cbw*_Ha^$lyxHwAGc-kNkjr75w|y^Wl|y+1c{;ee80Ueck}i-ovh) z#nq6aXgr9_85!zJL^p~Nm%YK#F|`Ty>|;H!_y_E=S)Ou|+so-fk%XIdeW8x|D#J_~ zpVh&2KVWb0c=%kt=_9uAlA@;^I-q)8NX1{vI``Wgjy+GNQEq7n!rgB+&ge_v&?QZ$% zAF)A}{VfA;|Cp^NKVke46w|~4AIvXsmcm^>X00&)QPwp<0%0oal9%s@CPd_JA;C*x zB0)7oVmKtJ!ow$`yugQr_>jn>h8hs#{D?5l$3-a_PEh&p>|D)@@pv>&f?`aJ1nG2C zCsbIZaVa5=@T1ZA#&A>!dObHXq%ags_9cen;*P#YS-I~~9ox*lYLWvdbS}p4sU^;! zkP!M3aUmiNit)Z!Jer6GqG8S`B`E*7S?&u82i;^K+)^CcEyg#80wVp1$V2r$L%F>F z&$=DVLf=1}64)sgp}?EZ1?EFTi6t%yL5Zix@PqN_2>j4lvvx*^xE&BQ)^Wz++s zCv=^494QJ!BNEl+eW6HD9CPJyR#J$h>r1su*i;Tn-qX3^*a_XYf}dm8?-*uY8x=|g zDbp0njL04Mvu?tV(w0I(MUKmP_EhGe%1y-_THe!r^#6&n%~d(b^iEy77yR$(O640) z>KZR%-i3c!t#4uPH_9Kb)?dp^d{aj#7o(-3oQwY(8Y0m|Lj&^ekQfo;LYR*wvG2&Cj&7xG5Pz3oJ$wD4nEV!^w-Hpd#E8?N)@PuD-vXmy)3b z^*a!YxVA%yno(R&HGvXt!IcqHBi-yyjXs;<7r~yY--NHmv%G`nKo@I!vPKJc(nmXavzmbop>7 zA_{SSFc}F@^X_f^j;;rFNBgRE&~%f@-NiTXwY9K#{CRv6UrUtU zu}o{rVx>9dk4fl?wY0QQ7bz0)is2;nzF^ZY><-xVOKQU|7u5c`8T&RSAR0LrK)7TwXXN zCF)R5*SlGwYPcPaDHA6H2F}d-c=)7ElUI0y$hrlan8Ha$G0@=^73g$uWw);flWJ&wWpPVAMP}M>4Bo}vV<%b z6P=2?DgpDati8oaxQssOi>h5N)1>kDO!i>hjEz{e&1Autc&o6htD$ZtAlwXqo143` zLKUh)s3G%e81?f)A|YVV5d`KjVpsQAzr@siIr~aJ9KQ`u;kG+XKW4WPc}}~5lg3B$v@!CJo;KN7r_Y`qqz$I-qTF)GlwjF^%!l)zGyTjx10=;6 z_~$&@UnPI>`O|}{(zBoFpzmeVLgq>Nn=hLp43Jk$)okk*<+-nzUU+2ob~9;Qg|14^ znsx2y>jbGcvn-DKR*VG3+oePZ)q6A!^S)~?D@Lo14pgFVIF#r^KPLv;uQiWvl#OQs zVT}+MM#O{|m+-{TA`7$=|LmnR6Fnjv{Ia=}SqTqpGV`{<6qdZ0Msw}e=F*EIIcRcI zv37T?r?DI#9YiNDYd09lqH)uro|%9jyUqOVQZL0A9VCrHnYHEWhs_rk3VmNCq6K=K-nt>Tgkw>ULulGrYG7=v(XeCAoB8sbHw!l3g1*3=qlIpU{ka3~8ZC6fi(`eJ@*Re$ zi4FHjJ@xj2q7;h5o@S#H{bT89`iG8zcvSsLk|nU<>Xp<5CUiJNDf zAX@{LI!6C|9%(rx8PPJhV99kB>vZ!FyV-J!{#DayCj0lBD~(wcoavl4g)F7anGtW| ze~fsje4}v;Ge^7)*D=H~6c>UaF#=?(c{bAvd)8UzouQeiXg}=%ps6^Qk>3tm#_E}q zAlz!12k!4#c=`TYE%#NDA!6zZNdfYM`uraLo?r> zStS_k=YvstygP^XbIX8tji0G12PuDNqF^R=W{ zvzxd$wtX$fcC>Tsa{AZ9v8(WV6;z9iYbv@Kcr&7#>86KJ>iukVZ(&HjO6f%4)?Msb zHIv4!Oba@dxDMXPWRWEifWR(xo&)zwj$uwvnH@)0GJvC@U>)1Je0y#)0lNA^{YFl1IZCXHYYYoy!ht|{W2^67D9BhO`T#64vT zT=sro&)JM^!kLYJW+y%bsq8`BLe_5|F0D{U`J_*j44m@WtDw`Hg+OoiisVnM##NY( zL?R*EVyZ=FgMIt-m6Ub#aFan-0-h&~^PuB6TT}If#>%+y&ADudO<=vQzyn9VuP=jL z*B9843H^`edEnTl0*^Z-6vn|&7{k$&l5dMnRr2_ZfU}i69YNXKv_)0paP(t+Io0sa zQoq`=G(?ffo&$$=8f)B|lKYg=hD8$`sz1TQ^uWQdawV|blIMflmouuERNRmmmjiAo z>is=$T18h{dkVeGKxzx^W3efA3ZHZ4(-CwF`S)u}EE`M!GtaoZ^l}w7e9oBy+a1g- z5H^|Ic8zjmgyc~PT^5dFVgLX6lwf(nK~Hwr_GKF zx46ip@w_S1@j4)9y0##8c#vbpIA*K?zqfGA7OLCBs#1ZHXUvuLlg4)SQJd3A4#jk*vukl7G=2earYqpOGe45SK; z{bQQMuMu*cPk8zray1nHk<^$ml2$x~-CKXNW>zgJh()8A3BfDyY_AbRQS=(uFGmm2 ze=$@)VQku6`MRpQsvg#4aScvHY_nyLHo>>5toq8vLOVC_A0#X8jDSgxZG6&pwdm&ebe6IMu zY9RwjDX=b42vY@CsVOk}oI^y~1dOSm2AYl(Rzy;oY#-*|>1MjbU@{qPe>d*SQ{AEzzHX&7VrBYp1LzBIp0JN_;acs)^q zEoVwVyTp;4)5jJ?KvN8Wj;7+n!wu&YEy$9W`5zTj)?TA^c8N-53xdja=haqab4Ak+ zxk9O0sY=z`q`dEcPlLo8S4o7|?zXgLh*@)`icJ@$3b1$Dg0%Hi)I6s+l`f)5BbqKc z1--t^&!}y8fQd>$awBV&XG!#)Rb%hqZ&<2E)t(~w5tnb-MVF`Mh=1(-7pylJ@ zmMjYO`79-slST}r6c4C7j62Ff2!6ZV=7f#+*ozC%1y8Ugh=;qjm7oMqYxXmW~2tcY4S-YSXkU_n7S@XgkI zuspzdYBy)~ZOLVPWyY4w0p~YK6vh^@FBlq;xPt5_dX1bqc}+8PCkLk6ZfwMTXZvY% z7`5Fg)o@S?x8TdaXV+9+q54HVv_OZJQ{d!mzX(43J=?}ntD=vI@ey(-OKgf^Ie!sz zX~wIcje@1L_h89FWGIPw^X_PPGlhn^jI;rP!e(ZB_(hGu*3sGsPIIi>nq%czJv|-L zOctU|Ze7Lc+uB!P7N@PVo71&lyoNI@Th-aw#pze9_M^dG)rlV+?aMiRSGykvcduC9 zp`>>%?oc^eSrAdm7BQtQ<)cymk@{$;rBkUKF7+d=byh|PWhy<hLI`XCT@W{0_XTRFMBPK 
zA1kmnu-@`1Q?tNO06x^X3IOHP&AQ~bimc5h=4RlX))98E9;%(zs={K%J)6{G83v|c zy>OG$S^-Z<+(Nma#JZPZ_QHcMYa1iLNtgAa9a~GS^43zW{}N^r-iULzvez5oaGaY} zfIu2ag9=36BXO5r6}TBo8@m&jjm9*Jq`DkTiI6`f1C?0v2#-`n~Ms}&`|pWo-& zf%}9jvty+my(^Idcz2A2LPPY57A}3CtA(BqxX0KV%@8}uRZ~P($l;URmsXk&SxG^6 zB8p+|f95ypCylpRfp4`9v-^tW>soCa*@^@F#5^qQV_QZ>TIh!!-_iXR4DtY6jROwv->=j z9HF73iz$VrM7se=U_DzhjP(cfOldtEp2TQ8ijtvd!g}P^Z~$$KS~eaD@(EhAhr{p# z%iRSsfoc^GHj+>z3#*Lqq?H{=!wO)^=5$#+aSRhq!R-oTobCiAl)%0e%5~XCIzi7= zJ-WZ&p?=1hc1K6B+5<_2k}rR>)b?t(>Dw&PrJRGxQ`Y$5YZ7>0Uv!&o-l~Z^48(>t zGK%XT52jvikTBNLZ^rsTtPf{8Fj-s_5l6M6$i9u@xXZ-ZNii)R6KKf{s%*4HahKgg z2o9$KrdA&QxPC_Ps7cY6!>sazcZS7jU_}?_luue&K6Q02=NaR@X}ECHTBsTCz|ynk z^ExTlBj?0LX<``5H<2eIbAuwSKIRb}=^~VvkyuC*31vHK zc`Ob_0nLD$myDz#qrfBgj-bGVscd1v>x3%!BUlm3YrS56Dz%BEQ9#8&x!_Mxqmjp? z0j)=!!Uu|8MHB+bc$}8DsvAnHP>s0WUOe8~xYV0-n!VFbPCoA~8{fm+3zsdjA7h@C z&u_N-bh^K?1a4exFNGTx+pCy^aNlD42Dz}sUS^h$cG~}K2Xh|$^fmi=y4GuqFzc9o zg}m}t_TQP9v*4_w_OnV`52$t-$t+M((t2G*yTH}h?Lfx}?L`PHh*2YqI0H;@RA^H6rozQ{#zmn#*b&@)h!7f*vFxct}lg$$9ud(3_zVSWyO ze8)cLv^5S*|7q_u$varb9qX9`FzXV>KQepegO@m*EPD-+f4#x6nk^nvO+!lc)xn`v zjxt|88E@nwPrSNTVO@+|YCx{B@P%JkVX@sDMM4 zJ52CWx1)|31OKI*4UPr!T=3$Z4g)+;jRmUibq<0rJ)QXqiz)nxK1Vl&T1DpftFKg% zzZ>}ZmqE)qhhP3lzeAs}JZK?#7l#FUf3n`kRY36z4wp&4YFO&wDxvWON2x*Ig%5Ch znR6Gs_JZSZ9iL7dIZ@j1O$D@OeOd}uNgsJ?2nDd4Nmd#0Cr$?|SG)*5KIHgjxu=7i zVj8ChfmXp(<~dEfw_jGmG{BLU9gE=5i;nf>ADXhzL{B(B>rrlb$?>m@8+D+9w(?0s z1&TKYIJo?Foy7%BFFWq@`c|BB`5Dm4AS54t*@4OX8_mRw)WYi*Bvu);yzX$WR97t{ zy7;if6;Uqkx}aMtF!v}(xc?TCZGYWy|L33ZgCA0fWS-#XZ#Y&~oSH^G zz8;wWvSSV`J?yyhZzt2i!w!#wy*?k_`jg|uc}`+RZ5*TqFqM*^FPh+$rWrF8n{(UY z;S-KU%uDjSCmdC}1+J{gX)ts;UOi@}r+?8Nx!^C3yYktG3gJH1c{#TS(+x2}sbuX# zt;FqMV0*C>U)9n%T{T{Qby7+Y^yn3JhL%B`i?xNQgf+*ApTGO2qqeiKz7tFJ(Z#~B zeYjP8Ue7ZOylZgIV*jV<_x0a7R>)sBI;A;nU?6T( zBi&+R>3BpK2?drS<3;GZDc$rsM7j7*=K?XUH=?~yrK9x-)H=F!u=X0eYocNCYGEVg zVCpf10z3{?;UJeH%L@Eh7~iZx8x}+dB&e>H8RYeSjiFI0{nwM@eoO&Imqh0$9L4|` zO}`S3P_Pr|%TRwtai6d!5>#Sa3NMN|FxsU~&^#dx0R^1u-DDv%Y5W6wIPVoi@r|56m|lU`@9m3c(2TH>6~Fmqt7-#NS3 z>kD^_BN#*CtJ^eSg+8;h6Q1!qIp!Vtb-%MMGqUv*Ba7kgHm7~!@d9E&fml!D2)rFo zGU989QBk!L*XGAprKAK^zWrI{#UYIOacR`fh;$4ibaXH=FDqfrV;uQv5M2}MOw&6{ z$?L{Z80XjRii-DJf|=UUYE{&0cJdv0w0NMPGh$FJp*q= zDyW>^uVCKqg-<`QxN1Cy@q&y-A6UMO=^m}#=NZON{b;ojm_ny*f|?m$LkH%$XgM`P zLncv^KU6adgytpuw8jsbJ=X$UNs<_6H2S^rB-49G>Saf+uDb+9ojPK;I&ilCc=w04|A95T*@A zGPH>|rp_rEQQ{|(UYJ&m$3uAVc*2v$e=<(vB2*f98JWXco^@%~f{PEDZ0G*XT;Yw^7FVHN`@hCd{hx!Md<(Mq iDZ^T)XR14!W@4(#{(el=tLdQLjOLIZZF62!@c#f%a}X#1 From b750dd2101cf2fb68b27740ece86c59d3ab8aa5e Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Fri, 26 Mar 2021 17:54:15 -0400 Subject: [PATCH 53/60] Moved high level tf.nn ops to framework. Moved tf.raw.nn Ops to tf.nn. Changed generation to generate SoftmaxCrossEntropyWithLogits and SparseSoftmaxCrossEntropyWithLogits to core NNOps (tf.nn). 
--- .../annotations/org/tensorflow/op/NnOps.java | 1518 +++++++++-------- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 57 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 62 +- .../op/nn/SigmoidCrossEntropyWithLogits.java | 15 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 89 +- 5 files changed, 859 insertions(+), 882 deletions(-) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 2bd4d13145f..0269d387859 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -113,16 +113,16 @@ public final class NnOps { /** * Performs average pooling on the input. - * Each entry in {@code output} is the mean of the corresponding size {@code ksize} - * window in {@code value}. - * - * @param data type for {@code output} output - * @param value 4-D with shape {@code [batch, height, width, channels]}. - * @param ksize The size of the sliding window for each dimension of {@code value}. - * @param strides The stride of the sliding window for each dimension of {@code value}. + *

      + * Each entry in `output` is the mean of the corresponding size `ksize` + * window in `value`. + * + * @param data type for {@code output()} output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param ksize The size of the sliding window for each dimension of `value`. + * @param strides The stride of the sliding window for each dimension of `value`. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code AvgPool} output and operands + * @param options carries optional attributes values * @return a new instance of AvgPool */ public AvgPool avgPool(Operand value, List ksize, @@ -132,18 +132,18 @@ public AvgPool avgPool(Operand value, List ksize /** * Performs 3D average pooling on the input. - * Each entry in {@code output} is the mean of the corresponding size {@code ksize} window in - * {@code value}. + *

      + * Each entry in `output` is the mean of the corresponding size `ksize` window in + * `value`. * - * @param data type for {@code output} output - * @param input Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. + * @param data type for {@code output()} output + * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code AvgPool3D} output and operands + * @param options carries optional attributes values * @return a new instance of AvgPool3d */ public AvgPool3d avgPool3d(Operand input, List ksize, @@ -154,16 +154,15 @@ public AvgPool3d avgPool3d(Operand input, List k /** * Computes gradients of average pooling function. * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param origInputShape The original input dimensions. - * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code AvgPool3DGrad} output and operands + * @param options carries optional attributes values * @return a new instance of AvgPool3dGrad */ public AvgPool3dGrad avgPool3dGrad(Operand origInputShape, @@ -174,9 +173,10 @@ public AvgPool3dGrad avgPool3dGrad(Operand origIn /** * Batch normalization. - * This op is deprecated. Prefer {@code tf.nn.batch_normalization}. + *

      + * This op is deprecated. Prefer `tf.nn.batch_normalization`. * - * @param data type for {@code result} output + * @param data type for {@code result()} output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -187,12 +187,11 @@ public AvgPool3dGrad avgPool3dGrad(Operand origIn * @param beta A 1D beta Tensor with size matching the last dimension of t. * An offset to be added to the normalized tensor. * @param gamma A 1D gamma Tensor with size matching the last dimension of t. - * If "scale_after_normalization" is true, this tensor will be multiplied + * If "scale_after_normalization" is true, this tensor will be multiplied * with the normalized tensor. * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. - * @param data type for {@code BatchNormWithGlobalNormalization} output and operands * @return a new instance of BatchNormWithGlobalNormalization */ public BatchNormWithGlobalNormalization batchNormWithGlobalNormalization( @@ -203,9 +202,10 @@ public BatchNormWithGlobalNormalization batchNormWithGlobal /** * Gradients for batch normalization. - * This op is deprecated. See {@code tf.nn.batch_normalization}. + *

      + * This op is deprecated. See `tf.nn.batch_normalization`. * - * @param data type for {@code dx} output + * @param data type for {@code dx()} output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -214,13 +214,12 @@ public BatchNormWithGlobalNormalization batchNormWithGlobal * This is the second output from tf.nn.moments, * or a saved moving average thereof. * @param gamma A 1D gamma Tensor with size matching the last dimension of t. - * If "scale_after_normalization" is true, this Tensor will be multiplied + * If "scale_after_normalization" is true, this Tensor will be multiplied * with the normalized Tensor. * @param backprop 4D backprop Tensor. * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. - * @param data type for {@code BatchNormWithGlobalNormalizationGrad} output and operands * @return a new instance of BatchNormWithGlobalNormalizationGrad */ public BatchNormWithGlobalNormalizationGrad batchNormWithGlobalNormalizationGrad( @@ -230,15 +229,15 @@ public BatchNormWithGlobalNormalizationGrad batchNormWithGl } /** - * Adds {@code bias} to {@code value}. - * This is a special case of {@code tf.add} where {@code bias} is restricted to be 1-D. - * Broadcasting is supported, so {@code value} may have any number of dimensions. + * Adds `bias` to `value`. + *

      + * This is a special case of `tf.add` where `bias` is restricted to be 1-D. + * Broadcasting is supported, so `value` may have any number of dimensions. * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param value Any number of dimensions. - * @param bias 1-D with size the last dimension of {@code value}. - * @param options carries optional attribute values - * @param data type for {@code BiasAdd} output and operands + * @param bias 1-D with size the last dimension of `value`. + * @param options carries optional attributes values * @return a new instance of BiasAdd */ public BiasAdd biasAdd(Operand value, Operand bias, @@ -247,15 +246,15 @@ public BiasAdd biasAdd(Operand value, Operand bias, } /** - * The backward operation for "BiasAdd" on the "bias" tensor. + * The backward operation for "BiasAdd" on the "bias" tensor. + *

      * It accumulates all the values from out_backprop into the feature dimension. * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param outBackprop Any number of dimensions. - * @param options carries optional attribute values - * @param data type for {@code BiasAddGrad} output and operands + * @param options carries optional attributes values * @return a new instance of BiasAddGrad */ public BiasAddGrad biasAddGrad(Operand outBackprop, @@ -265,6 +264,7 @@ public BiasAddGrad biasAddGrad(Operand outBackprop, /** * Computes the ids of the positions in sampled_candidates that match true_labels. + *

      * When doing log-odds NCE, the result of this op should be passed through a * SparseToDense op, then added to the logits of the sampled candidates. This has * the effect of 'removing' the sampled labels that match the true labels by @@ -273,7 +273,7 @@ public BiasAddGrad biasAddGrad(Operand outBackprop, * @param trueClasses The true_classes output of UnpackSparseLabels. * @param sampledCandidates The sampled_candidates output of CandidateSampler. * @param numTrue Number of true labels per context. - * @param options carries optional attribute values + * @param options carries optional attributes values * @return a new instance of ComputeAccidentalHits */ public ComputeAccidentalHits computeAccidentalHits(Operand trueClasses, @@ -282,39 +282,40 @@ public ComputeAccidentalHits computeAccidentalHits(Operand trueClasses, } /** - * Computes a 2-D convolution given 4-D {@code input} and {@code filter} tensors. - * Given an input tensor of shape {@code [batch, in_height, in_width, in_channels]} + * Computes a 2-D convolution given 4-D `input` and `filter` tensors. + *

      + * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` * and a filter / kernel tensor of shape - * {@code [filter_height, filter_width, in_channels, out_channels]}, this op + * `[filter_height, filter_width, in_channels, out_channels]`, this op * performs the following: - *

        - *
      1. Flattens the filter to a 2-D matrix with shape - * {@code [filter_height * filter_width * in_channels, output_channels]}.
- *
2. Extracts image patches from the input tensor to form a virtual - * tensor of shape {@code [batch, out_height, out_width, filter_height * filter_width * in_channels]}.
- *
3. For each patch, right-multiplies the filter matrix and the image patch - * vector.
- *
      - *

      In detail, with the default NHWC format, - *

      -   *  output[b, i, j, k] =
      -   *      sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
      -   *                      filter[di, dj, q, k]
      -   *  
      - *

      Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same - * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. - * - * @param data type for {@code output} output + *

      + * 1. Flattens the filter to a 2-D matrix with shape + * `[filter_height * filter_width * in_channels, output_channels]`. + * 2. Extracts image patches from the input tensor to form a virtual + * tensor of shape `[batch, out_height, out_width, + * filter_height * filter_width * in_channels]`. + * 3. For each patch, right-multiplies the filter matrix and the image patch + * vector. + *

      + * In detail, with the default NHWC format, + *

      + * output[b, i, j, k] = + * sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * + * filter[di, dj, q, k] + *
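+ * (For a 1x1 filter with unit strides this reduces to output[b, i, j, k] = sum_q input[b, i, j, q] * filter[0, 0, q, k],
+ * i.e. a per-position matrix multiply over the channel dimension.)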

      + * Must have `strides[0] = strides[3] = 1`. For the most common case of the same + * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + * + * @param data type for {@code output()} output * @param input A 4-D tensor. The dimension order is interpreted according to the value - * of {@code data_format}, see below for details. + * of `data_format`, see below for details. * @param filter A 4-D tensor of shape - * {@code [filter_height, filter_width, in_channels, out_channels]} + * `[filter_height, filter_width, in_channels, out_channels]` * @param strides 1-D tensor of length 4. The stride of the sliding window for each - * dimension of {@code input}. The dimension order is determined by the value of - * {@code data_format}, see below for details. + * dimension of `input`. The dimension order is determined by the value of + * `data_format`, see below for details. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code Conv2D} output and operands + * @param options carries optional attributes values * @return a new instance of Conv2d */ public Conv2d conv2d(Operand input, Operand filter, @@ -325,19 +326,18 @@ public Conv2d conv2d(Operand input, Operand filter, /** * Computes the gradients of convolution with respect to the filter. * - * @param data type for {@code output} output - * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. - * @param filterSizes An integer vector representing the tensor shape of {@code filter}, - * where {@code filter} is a 4-D - * {@code [filter_height, filter_width, in_channels, out_channels]} tensor. - * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, out_channels]}. + * @param data type for {@code output()} output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 4-D + * `[filter_height, filter_width, in_channels, out_channels]` tensor. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. Must be in the same order as the dimension specified with * format. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code Conv2DBackpropFilter} output and operands + * @param options carries optional attributes values * @return a new instance of Conv2dBackpropFilter */ public Conv2dBackpropFilter conv2dBackpropFilter(Operand input, @@ -349,19 +349,18 @@ public Conv2dBackpropFilter conv2dBackpropFilter(Operand< /** * Computes the gradients of convolution with respect to the input. * - * @param data type for {@code output} output - * @param inputSizes An integer vector representing the shape of {@code input}, - * where {@code input} is a 4-D {@code [batch, height, width, channels]} tensor. + * @param data type for {@code output()} output + * @param inputSizes An integer vector representing the shape of `input`, + * where `input` is a 4-D `[batch, height, width, channels]` tensor. * @param filter 4-D with shape - * {@code [filter_height, filter_width, in_channels, out_channels]}. - * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, out_channels]}. 
+ * `[filter_height, filter_width, in_channels, out_channels]`. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. Must be in the same order as the dimension specified with * format. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code Conv2DBackpropInput} output and operands + * @param options carries optional attributes values * @return a new instance of Conv2dBackpropInput */ public Conv2dBackpropInput conv2dBackpropInput(Operand inputSizes, @@ -371,20 +370,22 @@ public Conv2dBackpropInput conv2dBackpropInput(Operand * In signal processing, cross-correlation is a measure of similarity of * two waveforms as a function of a time-lag applied to one of them. This * is also known as a sliding dot product or sliding inner-product. - *

      Our Conv3D implements a form of cross-correlation. + *

      + * Our Conv3D implements a form of cross-correlation. * - * @param data type for {@code output} output - * @param input Shape {@code [batch, in_depth, in_height, in_width, in_channels]}. - * @param filter Shape {@code [filter_depth, filter_height, filter_width, in_channels, out_channels]}. {@code in_channels} must match between {@code input} and {@code filter}. + * @param data type for {@code output()} output + * @param input Shape `[batch, in_depth, in_height, in_width, in_channels]`. + * @param filter Shape `[filter_depth, filter_height, filter_width, in_channels, + * out_channels]`. `in_channels` must match between `input` and `filter`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code Conv3D} output and operands + * @param options carries optional attributes values * @return a new instance of Conv3d */ public Conv3d conv3d(Operand input, Operand filter, @@ -395,18 +396,18 @@ public Conv3d conv3d(Operand input, Operand filter, /** * Computes the gradients of 3-D convolution with respect to the filter. * - * @param data type for {@code output} output - * @param input Shape {@code [batch, depth, rows, cols, in_channels]}. - * @param filterSizes An integer vector representing the tensor shape of {@code filter}, - * where {@code filter} is a 5-D - * {@code [filter_depth, filter_height, filter_width, in_channels, out_channels]} + * @param data type for {@code output()} output + * @param input Shape `[batch, depth, rows, cols, in_channels]`. + * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 5-D + * `[filter_depth, filter_height, filter_width, in_channels, out_channels]` * tensor. - * @param outBackprop Backprop signal of shape {@code [batch, out_depth, out_rows, out_cols, out_channels]}. + * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + * out_channels]`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code Conv3DBackpropFilterV2} output and operands + * @param options carries optional attributes values * @return a new instance of Conv3dBackpropFilter */ public Conv3dBackpropFilter conv3dBackpropFilter(Operand input, @@ -418,18 +419,18 @@ public Conv3dBackpropFilter conv3dBackpropFilter(Operand< /** * Computes the gradients of 3-D convolution with respect to the input. * - * @param data type for {@code output} output - * @param inputSizes An integer vector representing the tensor shape of {@code input}, - * where {@code input} is a 5-D - * {@code [batch, depth, rows, cols, in_channels]} tensor. - * @param filter Shape {@code [depth, rows, cols, in_channels, out_channels]}. - * {@code in_channels} must match between {@code input} and {@code filter}. - * @param outBackprop Backprop signal of shape {@code [batch, out_depth, out_rows, out_cols, out_channels]}. 
+ * @param data type for {@code output()} output + * @param inputSizes An integer vector representing the tensor shape of `input`, + * where `input` is a 5-D + * `[batch, depth, rows, cols, in_channels]` tensor. + * @param filter Shape `[depth, rows, cols, in_channels, out_channels]`. + * `in_channels` must match between `input` and `filter`. + * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + * out_channels]`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code Conv3DBackpropInputV2} output and operands + * @param options carries optional attributes values * @return a new instance of Conv3dBackpropInput */ public Conv3dBackpropInput conv3dBackpropInput( @@ -440,19 +441,19 @@ public Conv3dBackpropInput conv3dBackpropInput( /** * Performs beam search decoding on the logits given in input. + *

      * A note about the attribute merge_repeated: For the beam search decoder, * this means that if consecutive entries in a beam are the same, only - * the first of these is emitted. That is, when the top path is "A B B B B", - * "A B" is returned if merge_repeated = True but "A B B B B" is + * the first of these is emitted. That is, when the top path is "A B B B B", + * "A B" is returned if merge_repeated = True but "A B B B B" is * returned if merge_repeated = False. * - * @param data type for {@code log_probability} output - * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. - * @param sequenceLength A vector containing sequence lengths, size {@code (batch)}. - * @param beamWidth A scalar >= 0 (beam search beam width). - * @param topPaths A scalar >= 0, <= beam_width (controls output size). - * @param options carries optional attribute values - * @param data type for {@code CTCBeamSearchDecoder} output and operands + * @param data type for {@code logProbability()} output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param sequenceLength A vector containing sequence lengths, size `(batch)`. + * @param beamWidth A scalar >= 0 (beam search beam width). + * @param topPaths A scalar >= 0, <= beam_width (controls output size). + * @param options carries optional attributes values * @return a new instance of CtcBeamSearchDecoder */ public CtcBeamSearchDecoder ctcBeamSearchDecoder(Operand inputs, @@ -463,20 +464,21 @@ public CtcBeamSearchDecoder ctcBeamSearchDecoder(Operand< /** * Performs greedy decoding on the logits given in inputs. + *

      * A note about the attribute merge_repeated: if enabled, when * consecutive logits' maximum indices are the same, only the first of - * these is emitted. Labeling the blank '*', the sequence "A B B * B B" - * becomes "A B B" if merge_repeated = True and "A B B B B" if + * these is emitted. Labeling the blank '*', the sequence "A B B * B B" + * becomes "A B B" if merge_repeated = True and "A B B B B" if * merge_repeated = False. - *

      Regardless of the value of merge_repeated, if the maximum index of a given - * time and batch corresponds to the blank, index {@code (num_classes - 1)}, no new + *

      + * Regardless of the value of merge_repeated, if the maximum index of a given + * time and batch corresponds to the blank, index `(num_classes - 1)`, no new * element is emitted. * - * @param data type for {@code log_probability} output - * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. - * @param sequenceLength A vector containing sequence lengths, size {@code (batch_size)}. - * @param options carries optional attribute values - * @param data type for {@code CTCGreedyDecoder} output and operands + * @param data type for {@code logProbability()} output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param sequenceLength A vector containing sequence lengths, size `(batch_size)`. + * @param options carries optional attributes values * @return a new instance of CtcGreedyDecoder */ public CtcGreedyDecoder ctcGreedyDecoder(Operand inputs, @@ -486,18 +488,18 @@ public CtcGreedyDecoder ctcGreedyDecoder(Operand input /** * Calculates the CTC Loss (log probability) for each batch entry. Also calculates + *

      * the gradient. This class performs the softmax operation for you, so inputs * should be e.g. linear projections of outputs by an LSTM. * - * @param data type for {@code loss} output - * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. - * @param labelsIndices The indices of a {@code SparseTensor}. - * {@code labels_indices(i, :) == [b, t]} means {@code labels_values(i)} stores the id for - * {@code (batch b, time t)}. + * @param data type for {@code loss()} output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param labelsIndices The indices of a `SparseTensor`. + * `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for + * `(batch b, time t)`. * @param labelsValues The values (labels) associated with the given batch and time. * @param sequenceLength A vector containing sequence lengths (batch). - * @param options carries optional attribute values - * @param data type for {@code CTCLoss} output and operands + * @param options carries optional attributes values * @return a new instance of CtcLoss */ public CtcLoss ctcLoss(Operand inputs, Operand labelsIndices, @@ -507,43 +509,45 @@ public CtcLoss ctcLoss(Operand inputs, Operand /** * Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM. + *

      * Writes a set of weights into the opaque params buffer so they can be used in * upcoming training or inferences. - *

      Note that the params buffer may not be compatible across different GPUs. So any + *

      + * Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - *

      num_layers: Specifies the number of layers in the RNN model. + *

      + * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * weights: the canonical form of weights that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * biases: the canonical form of biases that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * num_params_weights: number of weight parameter matrix for all layers. * num_params_biases: number of bias parameter vector for all layers. * rnn_mode: Indicates the type of the RNN model. * input_mode: Indicate whether there is a linear projection between the input and - * The actual computation before the first layer. 'skip_input' is only allowed - * when input_size == num_units; 'auto_select' implies 'skip_input' when - * input_size == num_units; otherwise, it implies 'linear_input'. + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. * direction: Indicates whether a bidirectional model will be used. - * dir = (direction == bidirectional) ? 2 : 1 + * dir = (direction == bidirectional) ? 2 : 1 * dropout: dropout probability. When set to 0., dropout is disabled. * seed: the 1st part of a seed to initialize dropout. * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, - * no projection is performed. - * - * @param data type for {@code params} output - * @param numLayers the numLayers value - * @param numUnits the numUnits value - * @param inputSize the inputSize value - * @param weights the weights value - * @param biases the biases value - * @param options carries optional attribute values - * @param data type for {@code CudnnRNNCanonicalToParamsV2} output and operands + * no projection is performed. + * + * @param data type for {@code params()} output + * @param numLayers + * @param numUnits + * @param inputSize + * @param weights + * @param biases + * @param options carries optional attributes values * @return a new instance of CudnnRNNCanonicalToParams */ public CudnnRNNCanonicalToParams cudnnRNNCanonicalToParams( @@ -555,44 +559,46 @@ public CudnnRNNCanonicalToParams cudnnRNNCanonicalToParam /** * Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM. + *

      * Retrieves a set of weights from the opaque params buffer that can be saved and * restored in a way compatible with future runs. - *

      Note that the params buffer may not be compatible across different GPUs. So any + *

      + * Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - *

      num_layers: Specifies the number of layers in the RNN model. + *

      + * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * num_params_weights: number of weight parameter matrix for all layers. * num_params_biases: number of bias parameter vector for all layers. * weights: the canonical form of weights that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * biases: the canonical form of biases that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * rnn_mode: Indicates the type of the RNN model. * input_mode: Indicate whether there is a linear projection between the input and - * The actual computation before the first layer. 'skip_input' is only allowed - * when input_size == num_units; 'auto_select' implies 'skip_input' when - * input_size == num_units; otherwise, it implies 'linear_input'. + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. * direction: Indicates whether a bidirectional model will be used. - * dir = (direction == bidirectional) ? 2 : 1 + * dir = (direction == bidirectional) ? 2 : 1 * dropout: dropout probability. When set to 0., dropout is disabled. * seed: the 1st part of a seed to initialize dropout. * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, - * no projection is performed. - * - * @param data type for {@code weights} output - * @param numLayers the numLayers value - * @param numUnits the numUnits value - * @param inputSize the inputSize value - * @param params the params value - * @param numParamsWeights the value of the numParamsWeights property - * @param numParamsBiases the value of the numParamsBiases property - * @param options carries optional attribute values - * @param data type for {@code CudnnRNNParamsToCanonicalV2} output and operands + * no projection is performed. + * + * @param data type for {@code weights()} output + * @param numLayers + * @param numUnits + * @param inputSize + * @param params + * @param numParamsWeights + * @param numParamsBiases + * @param options carries optional attributes values * @return a new instance of CudnnRNNParamsToCanonical */ public CudnnRNNParamsToCanonical cudnnRNNParamsToCanonical( @@ -604,53 +610,53 @@ public CudnnRNNParamsToCanonical cudnnRNNParamsToCanonica /** * Computes size of weights that can be used by a Cudnn RNN model. + *

      * Return the params size that can be used by the Cudnn RNN model. Subsequent * weight allocation and initialization should use this size. - *

      num_layers: Specifies the number of layers in the RNN model. + *

      + * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * rnn_mode: Indicates the type of the RNN model. * input_mode: Indicate whether there is a linear projection between the input and - * The actual computation before the first layer. 'skip_input' is only allowed - * when input_size == num_units; 'auto_select' implies 'skip_input' when - * input_size == num_units; otherwise, it implies 'linear_input'. + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. * direction: Indicates whether a bidirectional model will be used. - * dir = (direction == bidirectional) ? 2 : 1 + * dir = (direction == bidirectional) ? 2 : 1 * dropout: dropout probability. When set to 0., dropout is disabled. * seed: the 1st part of a seed to initialize dropout. * seed2: the 2nd part of a seed to initialize dropout. * params_size: The size of the params buffer that should be allocated and - * initialized for this RNN model. Note that this params buffer may not be - * compatible across GPUs. Please use CudnnRNNParamsWeights and - * CudnnRNNParamsBiases to save and restore them in a way that is compatible - * across different runs. - * - * @param data type for {@code params_size} output - * @param numLayers the numLayers value - * @param numUnits the numUnits value - * @param inputSize the inputSize value - * @param T the value of the T property - * @param S the value of the S property - * @param options carries optional attribute values - * @param data type for {@code CudnnRNNParamsSize} output and operands - * @param data type for {@code CudnnRNNParamsSize} output and operands + * initialized for this RNN model. Note that this params buffer may not be + * compatible across GPUs. Please use CudnnRNNParamsWeights and + * CudnnRNNParamsBiases to save and restore them in a way that is compatible + * across different runs. + * + * @param data type for {@code paramsSize()} output + * @param numLayers + * @param numUnits + * @param inputSize + * @param T + * @param S + * @param options carries optional attributes values * @return a new instance of CudnnRnnParamsSize */ - public CudnnRnnParamsSize cudnnRnnParamsSize( - Operand numLayers, Operand numUnits, Operand inputSize, Class T, - Class S, CudnnRnnParamsSize.Options... options) { + public CudnnRnnParamsSize cudnnRnnParamsSize( + Operand numLayers, Operand numUnits, Operand inputSize, Class T, + Class S, CudnnRnnParamsSize.Options... options) { return CudnnRnnParamsSize.create(scope, numLayers, numUnits, inputSize, T, S, options); } /** * Returns the dimension index in the destination data format given the one in + *

      * the source data format. * - * @param data type for {@code y} output + * @param data type for {@code y()} output * @param x A Tensor with each element as a dimension index in source data format. * Must be in the range [-4, 4). - * @param options carries optional attribute values - * @param data type for {@code DataFormatDimMap} output and operands + * @param options carries optional attributes values * @return a new instance of DataFormatDimMap */ public DataFormatDimMap dataFormatDimMap(Operand x, @@ -659,31 +665,32 @@ public DataFormatDimMap dataFormatDimMap(Operand x, } /** - * Permute input tensor from {@code src_format} to {@code dst_format}. + * Permute input tensor from `src_format` to `dst_format`. + *

      * Input tensor must be a vector of size 4, or a 4x2 tensor. - *

      For example, with {@code src_format} of {@code NHWC}, {@code dst_format} of {@code NCHW}, and inputs: - *

      +   *  

      + * For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs: + *

      {@code
          *  [1, 2, 3, 4]
      -   *  
      - *

      and - *

      +   *  }
      + * and + *
      {@code
          *  [[1, 2, 3, 4],
          *   [5, 6, 7, 8]]
      -   *  
      - *

      , the outputs will be (respectively): - *

      +   *  }
      + * , the outputs will be (respectively): + *
      {@code
          *  [1, 4, 2, 3]
      -   *  
      - *

      and - *

      +   *  }
      + * and + *
      {@code
          *  [[1, 4, 2, 3],
          *   [5, 8, 6, 7]]
      -   *  
      + * }
      * - * @param data type for {@code y} output + * @param data type for {@code y()} output * @param x Vector of size 4 or Tensor of shape (4, 2) in source data format. - * @param options carries optional attribute values - * @param data type for {@code DataFormatVecPermute} output and operands + * @param options carries optional attributes values * @return a new instance of DataFormatVecPermute */ public DataFormatVecPermute dataFormatVecPermute(Operand x, @@ -693,86 +700,90 @@ public DataFormatVecPermute dataFormatVecPermute(Operand< /** * DepthToSpace for tensors of type T. + *

      * Rearranges data from depth into blocks of spatial data. * This is the reverse transformation of SpaceToDepth. More specifically, - * this op outputs a copy of the input tensor where values from the {@code depth} - * dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions. - * The attr {@code block_size} indicates the input block size and how the data is moved. - *

        - *
      • Chunks of data of size {@code block_size * block_size} from depth are rearranged - * into non-overlapping blocks of size {@code block_size x block_size}
      • - *
      • The width the output tensor is {@code input_depth * block_size}, whereas the - * height is {@code input_height * block_size}.
      • - *
      • The Y, X coordinates within each block of the output image are determined - * by the high order component of the input channel index.
      • - *
      • The depth of the input tensor must be divisible by - * {@code block_size * block_size}.
      • - *
      - *

      The {@code data_format} attr specifies the layout of the input and output tensors + * this op outputs a copy of the input tensor where values from the `depth` + * dimension are moved in spatial blocks to the `height` and `width` dimensions. + * The attr `block_size` indicates the input block size and how the data is moved. + *

      + * Chunks of data of size `block_size * block_size` from depth are rearranged + * into non-overlapping blocks of size `block_size x block_size` + * The width the output tensor is `input_depth * block_size`, whereas the + * height is `input_height * block_size`. + * The Y, X coordinates within each block of the output image are determined + * by the high order component of the input channel index. + * The depth of the input tensor must be divisible by + * `block_size * block_size`. + *

      + * The `data_format` attr specifies the layout of the input and output tensors * with the following options: - * "NHWC": {@code [ batch, height, width, channels ]} - * "NCHW": {@code [ batch, channels, height, width ]} - * "NCHW_VECT_C": - * {@code qint8 [ batch, channels / 4, height, width, 4 ]} - *

      It is useful to consider the operation as transforming a 6-D Tensor. + * "NHWC": `[ batch, height, width, channels ]` + * "NCHW": `[ batch, channels, height, width ]` + * "NCHW_VECT_C": + * `qint8 [ batch, channels / 4, height, width, 4 ]` + *

      + * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, - * Each element in the input tensor can be specified via 6 coordinates, - * ordered by decreasing memory layout significance as: - * n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates - * within the input image, bX, bY means coordinates - * within the output block, oC means output channels). - * The output would be the input transposed to the following layout: - * n,iY,bY,iX,bX,oC - *

      This operation is useful for resizing the activations between convolutions + * Each element in the input tensor can be specified via 6 coordinates, + * ordered by decreasing memory layout significance as: + * n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates + * within the input image, bX, bY means coordinates + * within the output block, oC means output channels). + * The output would be the input transposed to the following layout: + * n,iY,bY,iX,bX,oC + *

      + * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. - *

      For example, given an input of shape {@code [1, 1, 1, 4]}, data_format = "NHWC" and + *

      + * For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and * block_size = 2: - *

      +   *  
      {@code
          *  x = [[[[1, 2, 3, 4]]]]
          *
      -   *  
      - *

      This operation will output a tensor of shape {@code [1, 2, 2, 1]}: - *

      +   *  }
      + * This operation will output a tensor of shape `[1, 2, 2, 1]`: + *
      {@code
          *     [[[[1], [2]],
          *       [[3], [4]]]]
      -   *  
      - *

      Here, the input has a batch of 1 and each batch element has shape {@code [1, 1, 4]}, + * }

      + * Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, * the corresponding output will have 2x2 elements and will have a depth of - * 1 channel (1 = {@code 4 / (block_size * block_size)}). - * The output element shape is {@code [2, 2, 1]}. - *

      For an input tensor with larger depth, here of shape {@code [1, 1, 1, 12]}, e.g. - *

      +   *  1 channel (1 = `4 / (block_size * block_size)`).
      +   *  The output element shape is `[2, 2, 1]`.
      +   *  

      + * For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. + *

      {@code
          *  x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
      -   *  
      - *

      This operation, for block size of 2, will return the following tensor of shape - * {@code [1, 2, 2, 3]} - *

      +   *  }
      + * This operation, for block size of 2, will return the following tensor of shape + * `[1, 2, 2, 3]` + *
      {@code
          *     [[[[1, 2, 3], [4, 5, 6]],
          *       [[7, 8, 9], [10, 11, 12]]]]
          *
      -   *  
      - *

      Similarly, for the following input of shape {@code [1 2 2 4]}, and a block size of 2: - *

      +   *  }
      + * Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: + *
      {@code
          *  x =  [[[[1, 2, 3, 4],
          *         [5, 6, 7, 8]],
          *        [[9, 10, 11, 12],
          *         [13, 14, 15, 16]]]]
      -   *  
      - *

      the operator will return the following tensor of shape {@code [1 4 4 1]}: - *

      +   *  }
      + * the operator will return the following tensor of shape `[1 4 4 1]`: + *
      {@code
          *  x = [[[ [1],   [2],  [5],  [6]],
          *        [ [3],   [4],  [7],  [8]],
          *        [ [9],  [10], [13],  [14]],
          *        [ [11], [12], [15],  [16]]]]
          *
      -   *  
      + * }
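Editor's note: the [1 2 2 4] to [1 4 4 1] example above can be checked with a small standalone sketch of the NHWC rearrangement. This is plain Java with the batch dimension dropped; the class and method names are invented for the illustration and are not part of the generated API.

// Minimal DepthToSpace sketch for NHWC data, indexed as [height][width][channels].
public class DepthToSpaceExample {
  static int[][][] depthToSpace(int[][][] input, int blockSize) {
    int h = input.length, w = input[0].length, c = input[0][0].length;
    int outC = c / (blockSize * blockSize);
    int[][][] out = new int[h * blockSize][w * blockSize][outC];
    for (int y = 0; y < h * blockSize; y++) {
      for (int x = 0; x < w * blockSize; x++) {
        for (int oc = 0; oc < outC; oc++) {
          int bY = y % blockSize, bX = x % blockSize;
          // the input channel packs (bY, bX, oc), high-order component first
          int inC = (bY * blockSize + bX) * outC + oc;
          out[y][x][oc] = input[y / blockSize][x / blockSize][inC];
        }
      }
    }
    return out;
  }

  public static void main(String[] args) {
    int[][][] x = {{{1, 2, 3, 4}, {5, 6, 7, 8}}, {{9, 10, 11, 12}, {13, 14, 15, 16}}};
    // Prints rows [[1], [2], [5], [6]], [[3], [4], [7], [8]], ... as in the javadoc above.
    for (int[][] row : depthToSpace(x, 2)) {
      System.out.println(java.util.Arrays.deepToString(row));
    }
  }
}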
      * - * @param data type for {@code output} output - * @param input the input value + * @param data type for {@code output()} output + * @param input * @param blockSize The size of the spatial block, same as in Space2Depth. - * @param options carries optional attribute values - * @param data type for {@code DepthToSpace} output and operands + * @param options carries optional attributes values * @return a new instance of DepthToSpace */ public DepthToSpace depthToSpace(Operand input, Long blockSize, @@ -781,32 +792,32 @@ public DepthToSpace depthToSpace(Operand input, Long blo } /** - * Computes a 2-D depthwise convolution given 4-D {@code input} and {@code filter} tensors. - * Given an input tensor of shape {@code [batch, in_height, in_width, in_channels]} + * Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. + *

      + * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` * and a filter / kernel tensor of shape - * {@code [filter_height, filter_width, in_channels, channel_multiplier]}, containing - * {@code in_channels} convolutional filters of depth 1, {@code depthwise_conv2d} applies + * `[filter_height, filter_width, in_channels, channel_multiplier]`, containing + * `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies * a different filter to each input channel (expanding from 1 channel to - * {@code channel_multiplier} channels for each), then concatenates the results - * together. Thus, the output has {@code in_channels * channel_multiplier} channels. - *

      +   *  `channel_multiplier` channels for each), then concatenates the results
      +   *  together. Thus, the output has `in_channels * channel_multiplier` channels.
      +   *  
      {@code
          *  for k in 0..in_channels-1
          *    for q in 0..channel_multiplier-1
          *      output[b, i, j, k * channel_multiplier + q] =
          *        sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
          *                          filter[di, dj, k, q]
      -   *  
      - *

      Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same - * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. + * }
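Editor's note: the sum in the snippet above is easier to read as an explicit reference loop. Below is a minimal plain-Java sketch, assuming stride 1 and VALID padding and dropping the batch dimension; the names are invented for the illustration.

// Reference loop for depthwise_conv2d: each input channel k is convolved with its
// own depth-1 filters and expands into channel_multiplier output channels.
public class DepthwiseConv2dExample {
  static double[][][] depthwiseConv2d(double[][][] input, double[][][][] filter) {
    int fH = filter.length, fW = filter[0].length;
    int inC = input[0][0].length, mult = filter[0][0][0].length;
    double[][][] out =
        new double[input.length - fH + 1][input[0].length - fW + 1][inC * mult];
    for (int i = 0; i < out.length; i++)
      for (int j = 0; j < out[0].length; j++)
        for (int k = 0; k < inC; k++)
          for (int q = 0; q < mult; q++) {
            double sum = 0;
            for (int di = 0; di < fH; di++)
              for (int dj = 0; dj < fW; dj++)
                sum += input[i + di][j + dj][k] * filter[di][dj][k][q];
            out[i][j][k * mult + q] = sum;
          }
    return out;
  }

  public static void main(String[] args) {
    double[][][] input = {{{1}, {2}}, {{3}, {4}}};
    double[][][][] filter = {{{{1}}, {{1}}}, {{{1}}, {{1}}}};
    System.out.println(depthwiseConv2d(input, filter)[0][0][0]); // 1 + 2 + 3 + 4 = 10.0
  }
}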

      + * Must have `strides[0] = strides[3] = 1`. For the most common case of the same + * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. * - * @param data type for {@code output} output - * @param input the input value - * @param filter the filter value + * @param data type for {@code output()} output + * @param input + * @param filter * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of {@code input}. + * of `input`. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code DepthwiseConv2dNative} output and operands + * @param options carries optional attributes values * @return a new instance of DepthwiseConv2dNative */ public DepthwiseConv2dNative depthwiseConv2dNative(Operand input, @@ -818,21 +829,21 @@ public DepthwiseConv2dNative depthwiseConv2dNative(Operan /** * Computes the gradients of depthwise convolution with respect to the filter. * - * @param data type for {@code output} output - * @param input 4-D with shape based on {@code data_format}. For example, if - * {@code data_format} is 'NHWC' then {@code input} is a 4-D {@code [batch, in_height, in_width, in_channels]} tensor. - * @param filterSizes An integer vector representing the tensor shape of {@code filter}, - * where {@code filter} is a 4-D - * {@code [filter_height, filter_width, in_channels, depthwise_multiplier]} tensor. - * @param outBackprop 4-D with shape based on {@code data_format}. - * For example, if {@code data_format} is 'NHWC' then - * out_backprop shape is {@code [batch, out_height, out_width, out_channels]}. + * @param data type for {@code output()} output + * @param input 4-D with shape based on `data_format`. For example, if + * `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, + * in_width, in_channels]` tensor. + * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 4-D + * `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. + * @param outBackprop 4-D with shape based on `data_format`. + * For example, if `data_format` is 'NHWC' then + * out_backprop shape is `[batch, out_height, out_width, out_channels]`. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code DepthwiseConv2dNativeBackpropFilter} output and operands + * @param options carries optional attributes values * @return a new instance of DepthwiseConv2dNativeBackpropFilter */ public DepthwiseConv2dNativeBackpropFilter depthwiseConv2dNativeBackpropFilter( @@ -844,21 +855,20 @@ public DepthwiseConv2dNativeBackpropFilter depthwiseConv2 /** * Computes the gradients of depthwise convolution with respect to the input. * - * @param data type for {@code output} output - * @param inputSizes An integer vector representing the shape of {@code input}, based - * on {@code data_format}. For example, if {@code data_format} is 'NHWC' then - * {@code input} is a 4-D {@code [batch, height, width, channels]} tensor. + * @param data type for {@code output()} output + * @param inputSizes An integer vector representing the shape of `input`, based + * on `data_format`. For example, if `data_format` is 'NHWC' then + * `input` is a 4-D `[batch, height, width, channels]` tensor. 
* @param filter 4-D with shape - * {@code [filter_height, filter_width, in_channels, depthwise_multiplier]}. - * @param outBackprop 4-D with shape based on {@code data_format}. - * For example, if {@code data_format} is 'NHWC' then - * out_backprop shape is {@code [batch, out_height, out_width, out_channels]}. + * `[filter_height, filter_width, in_channels, depthwise_multiplier]`. + * @param outBackprop 4-D with shape based on `data_format`. + * For example, if `data_format` is 'NHWC' then + * out_backprop shape is `[batch, out_height, out_width, out_channels]`. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code DepthwiseConv2dNativeBackpropInput} output and operands + * @param options carries optional attributes values * @return a new instance of DepthwiseConv2dNativeBackpropInput */ public DepthwiseConv2dNativeBackpropInput depthwiseConv2dNativeBackpropInput( @@ -868,38 +878,40 @@ public DepthwiseConv2dNativeBackpropInput depthwiseConv2d } /** - * Computes the grayscale dilation of 4-D {@code input} and 3-D {@code filter} tensors. - * The {@code input} tensor has shape {@code [batch, in_height, in_width, depth]} and the - * {@code filter} tensor has shape {@code [filter_height, filter_width, depth]}, i.e., each + * Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. + *

      + * The `input` tensor has shape `[batch, in_height, in_width, depth]` and the + * `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each * input channel is processed independently of the others with its own structuring - * function. The {@code output} tensor has shape - * {@code [batch, out_height, out_width, depth]}. The spatial dimensions of the output - * tensor depend on the {@code padding} algorithm. We currently only support the default - * "NHWC" {@code data_format}. - *

      In detail, the grayscale morphological 2-D dilation is the max-sum correlation - * (for consistency with {@code conv2d}, we use unmirrored filters): - *

      -   *  output[b, y, x, c] =
      -   *     max_{dy, dx} input[b,
      -   *                        strides[1] * y + rates[1] * dy,
      -   *                        strides[2] * x + rates[2] * dx,
      -   *                        c] +
      -   *                  filter[dy, dx, c]
      -   *  
      - *

      Max-pooling is a special case when the filter has size equal to the pooling + * function. The `output` tensor has shape + * `[batch, out_height, out_width, depth]`. The spatial dimensions of the output + * tensor depend on the `padding` algorithm. We currently only support the default + * "NHWC" `data_format`. + *

      + * In detail, the grayscale morphological 2-D dilation is the max-sum correlation + * (for consistency with `conv2d`, we use unmirrored filters): + *

      + * output[b, y, x, c] = + * max_{dy, dx} input[b, + * strides[1] * y + rates[1] * dy, + * strides[2] * x + rates[2] * dx, + * c] + + * filter[dy, dx, c] + *

      + * Max-pooling is a special case when the filter has size equal to the pooling * kernel size and contains all zeros. - *
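Editor's note: both the max-sum correlation and the max-pooling special case mentioned above fit in a few lines. Below is a plain-Java sketch with strides and rates fixed at 1, VALID padding, and the batch and channel dimensions dropped; the names are invented for the illustration.

// Grayscale dilation as a max-sum correlation over the filter window.
public class Dilation2dExample {
  static double[][] dilation2d(double[][] input, double[][] filter) {
    int fH = filter.length, fW = filter[0].length;
    double[][] out = new double[input.length - fH + 1][input[0].length - fW + 1];
    for (int y = 0; y < out.length; y++)
      for (int x = 0; x < out[0].length; x++) {
        double best = Double.NEGATIVE_INFINITY;
        for (int dy = 0; dy < fH; dy++)
          for (int dx = 0; dx < fW; dx++)
            best = Math.max(best, input[y + dy][x + dx] + filter[dy][dx]);
        out[y][x] = best;
      }
    return out;
  }

  public static void main(String[] args) {
    double[][] input = {{1, 2}, {3, 4}};
    double[][] allZeros = {{0, 0}, {0, 0}};
    // With an all-zero filter the dilation degenerates to 2x2 max pooling: prints 4.0.
    System.out.println(dilation2d(input, allZeros)[0][0]);
  }
}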

      Note on duality: The dilation of {@code input} by the {@code filter} is equal to the - * negation of the erosion of {@code -input} by the reflected {@code filter}. + *

      + * Note on duality: The dilation of `input` by the `filter` is equal to the + * negation of the erosion of `-input` by the reflected `filter`. * - * @param data type for {@code output} output - * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. - * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. + * @param data type for {@code output()} output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. * @param strides The stride of the sliding window for each dimension of the input - * tensor. Must be: {@code [1, stride_height, stride_width, 1]}. + * tensor. Must be: `[1, stride_height, stride_width, 1]`. * @param rates The input stride for atrous morphological dilation. Must be: - * {@code [1, rate_height, rate_width, 1]}. + * `[1, rate_height, rate_width, 1]`. * @param padding The type of padding algorithm to use. - * @param data type for {@code Dilation2D} output and operands * @return a new instance of Dilation2d */ public Dilation2d dilation2d(Operand input, Operand filter, @@ -910,16 +922,15 @@ public Dilation2d dilation2d(Operand input, Operand /** * Computes the gradient of morphological 2-D dilation with respect to the filter. * - * @param data type for {@code filter_backprop} output - * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. - * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. - * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, depth]}. + * @param data type for {@code filterBackprop()} output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. * @param strides 1-D of length 4. The stride of the sliding window for each dimension of - * the input tensor. Must be: {@code [1, stride_height, stride_width, 1]}. + * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. * @param rates 1-D of length 4. The input stride for atrous morphological dilation. - * Must be: {@code [1, rate_height, rate_width, 1]}. + * Must be: `[1, rate_height, rate_width, 1]`. * @param padding The type of padding algorithm to use. - * @param data type for {@code Dilation2DBackpropFilter} output and operands * @return a new instance of Dilation2dBackpropFilter */ public Dilation2dBackpropFilter dilation2dBackpropFilter(Operand input, @@ -931,16 +942,15 @@ public Dilation2dBackpropFilter dilation2dBackpropFilter( /** * Computes the gradient of morphological 2-D dilation with respect to the input. * - * @param data type for {@code in_backprop} output - * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. - * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. - * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, depth]}. + * @param data type for {@code inBackprop()} output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. * @param strides 1-D of length 4. The stride of the sliding window for each dimension of - * the input tensor. Must be: {@code [1, stride_height, stride_width, 1]}. + * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. 
* @param rates 1-D of length 4. The input stride for atrous morphological dilation. - * Must be: {@code [1, rate_height, rate_width, 1]}. + * Must be: `[1, rate_height, rate_width, 1]`. * @param padding The type of padding algorithm to use. - * @param data type for {@code Dilation2DBackpropInput} output and operands * @return a new instance of Dilation2dBackpropInput */ public Dilation2dBackpropInput dilation2dBackpropInput(Operand input, @@ -950,13 +960,13 @@ public Dilation2dBackpropInput dilation2dBackpropInput(Op } /** - * Computes exponential linear: {@code exp(features) - 1} if < 0, {@code features} otherwise. - * See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) - * + * Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. + *

      + * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) + * ](http://arxiv.org/abs/1511.07289) * - * @param data type for {@code activations} output - * @param features the features value - * @param data type for {@code Elu} output and operands + * @param data type for {@code activations()} output + * @param features * @return a new instance of Elu */ public Elu elu(Operand features) { @@ -965,14 +975,18 @@ public Elu elu(Operand features) { /** * Generates labels for candidate sampling with a learned unigram distribution. + *

      * A unigram sampler could use a fixed unigram distribution read from a * file or passed in as an in-memory array instead of building up the distribution * from data on the fly. There is also an option to skew the distribution by * applying a distortion power to the weights. - *

      The vocabulary file should be in CSV-like format, with the last field + *

      + * The vocabulary file should be in CSV-like format, with the last field * being the weight associated with the word. - *

      For each batch, this op picks a single set of sampled candidate labels. - *

      The advantages of sampling candidates per-batch are simplicity and the + *

      + * For each batch, this op picks a single set of sampled candidate labels. + *

      + * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. @@ -985,7 +999,7 @@ public Elu elu(Operand features) { * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. * @param rangeMax The sampler will sample integers from the interval [0, range_max). - * @param options carries optional attribute values + * @param options carries optional attributes values * @return a new instance of FixedUnigramCandidateSampler */ public FixedUnigramCandidateSampler fixedUnigramCandidateSampler(Operand trueClasses, @@ -996,21 +1010,21 @@ public FixedUnigramCandidateSampler fixedUnigramCandidateSampler(Operand /** * Performs fractional average pooling on the input. + *

      * Fractional average pooling is similar to Fractional max pooling in the pooling * region generation step. The only difference is that after pooling regions are * generated, a mean operation is performed instead of a max operation in each * pooling region. * - * @param data type for {@code output} output - * @param value 4-D with shape {@code [batch, height, width, channels]}. - * @param poolingRatio Pooling ratio for each dimension of {@code value}, currently only - * supports row and col dimension and should be >= 1.0. For example, a valid + * @param data type for {@code output()} output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param poolingRatio Pooling ratio for each dimension of `value`, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions * respectively. - * @param options carries optional attribute values - * @param data type for {@code FractionalAvgPool} output and operands + * @param options carries optional attributes values * @return a new instance of FractionalAvgPool */ public FractionalAvgPool fractionalAvgPool(Operand value, @@ -1020,43 +1034,45 @@ public FractionalAvgPool fractionalAvgPool(Operand val /** * Performs fractional max pooling on the input. + *

      * Fractional max pooling is slightly different than regular max pooling. In * regular max pooling, you downsize an input set by taking the maximum value of * smaller N x N subsections of the set (often 2x2), and try to reduce the set by * a factor of N, where N is an integer. Fractional max pooling, as you might - * expect from the word "fractional", means that the overall reduction ratio N + * expect from the word "fractional", means that the overall reduction ratio N * does not have to be an integer. - *

      The sizes of the pooling regions are generated randomly but are fairly uniform. + *

      + * The sizes of the pooling regions are generated randomly but are fairly uniform. * For example, let's look at the height dimension, and the constraints on the * list of rows that will be pool boundaries. - *

      First we define the following: - *

        - *
      1. input_row_length : the number of rows from the input set
      2. - *
      3. output_row_length : which will be smaller than the input
      4. - *
      5. alpha = input_row_length / output_row_length : our reduction ratio
      6. - *
      7. K = floor(alpha)
      8. - *
      9. row_pooling_sequence : this is the result list of pool boundary rows
      10. - *
      - *

      Then, row_pooling_sequence should satisfy: - *

        - *
      1. a[0] = 0 : the first value of the sequence is 0
      2. - *
      3. a[end] = input_row_length : the last value of the sequence is the size
      4. - *
      5. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
      6. - *
      7. length(row_pooling_sequence) = output_row_length+1
      8. - *
      - *

      For more details on fractional max pooling, see this paper: - * Benjamin Graham, Fractional Max-Pooling - * - * @param data type for {@code output} output - * @param value 4-D with shape {@code [batch, height, width, channels]}. - * @param poolingRatio Pooling ratio for each dimension of {@code value}, currently only - * supports row and col dimension and should be >= 1.0. For example, a valid + *

      + * First we define the following: + *

      + * 1. input_row_length : the number of rows from the input set + * 2. output_row_length : which will be smaller than the input + * 3. alpha = input_row_length / output_row_length : our reduction ratio + * 4. K = floor(alpha) + * 5. row_pooling_sequence : this is the result list of pool boundary rows + *

      + * Then, row_pooling_sequence should satisfy: + *

      + * 1. a[0] = 0 : the first value of the sequence is 0 + * 2. a[end] = input_row_length : the last value of the sequence is the size + * 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size + * 4. length(row_pooling_sequence) = output_row_length+1 + *
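Editor's note: the constraints above leave open where the K+1-sized intervals fall; the op draws them pseudorandomly, but a deterministic sketch of one valid row_pooling_sequence makes the constraints concrete. Plain Java; the names are invented for the illustration.

// Builds one sequence satisfying: a[0] = 0, a[end] = input_row_length,
// every interval is K or K+1, and the length is output_row_length + 1.
public class PoolingSequenceExample {
  static int[] poolingSequence(int inputRowLength, int outputRowLength) {
    double alpha = (double) inputRowLength / outputRowLength; // reduction ratio
    int[] seq = new int[outputRowLength + 1];
    for (int i = 0; i <= outputRowLength; i++) {
      seq[i] = (int) Math.floor(i * alpha); // consecutive differences are K or K+1
    }
    return seq;
  }

  public static void main(String[] args) {
    // 10 input rows pooled down to 4 output rows: prints [0, 2, 5, 7, 10].
    System.out.println(java.util.Arrays.toString(poolingSequence(10, 4)));
  }
}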

      + * For more details on fractional max pooling, see this paper: + * [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) + * + * @param data type for {@code output()} output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param poolingRatio Pooling ratio for each dimension of `value`, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions * respectively. - * @param options carries optional attribute values - * @param data type for {@code FractionalMaxPool} output and operands + * @param options carries optional attributes values * @return a new instance of FractionalMaxPool */ public FractionalMaxPool fractionalMaxPool(Operand value, @@ -1066,11 +1082,12 @@ public FractionalMaxPool fractionalMaxPool(Operand val /** * Batch normalization. - * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + *

      + * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param data type for {@code y} output - * @param data type for {@code batch_mean} output + * @param data type for {@code y()} output + * @param data type for {@code batchMean()} output * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. * @param offset A 1D Tensor for offset, to shift to the normalized x. @@ -1078,9 +1095,7 @@ public FractionalMaxPool fractionalMaxPool(Operand val * must be empty for training. * @param variance A 1D Tensor for population variance. Used for inference only; * must be empty for training. - * @param options carries optional attribute values - * @param data type for {@code FusedBatchNormV3} output and operands - * @param data type for {@code FusedBatchNormV3} output and operands + * @param options carries optional attributes values * @return a new instance of FusedBatchNorm */ public FusedBatchNorm fusedBatchNorm(Operand x, @@ -1091,11 +1106,12 @@ public FusedBatchNorm fusedBatchNor /** * Gradient for batch normalization. - * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + *

      + * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param data type for {@code x_backprop} output - * @param data type for {@code scale_backprop} output + * @param data type for {@code xBackprop()} output + * @param data type for {@code scaleBackprop()} output * @param yBackprop A 4D Tensor for the gradient with respect to y. * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. @@ -1111,9 +1127,7 @@ public FusedBatchNorm fusedBatchNor * @param reserveSpace3 When is_training is True, a 1D Tensor for some intermediate results to be reused * in gradient computation. When is_training is False, a dummy empty Tensor will be * created. - * @param options carries optional attribute values - * @param data type for {@code FusedBatchNormGradV3} output and operands - * @param data type for {@code FusedBatchNormGradV3} output and operands + * @param options carries optional attributes values * @return a new instance of FusedBatchNormGrad */ public FusedBatchNormGrad fusedBatchNormGrad( @@ -1124,6 +1138,7 @@ public FusedBatchNormGrad fusedBatc /** * Performs a padding as a preprocess during a convolution. + *

      * Similar to FusedResizeAndPadConv2d, this op allows for an optimized * implementation where the spatial padding transformation stage is fused with the * im2col lookup, but in this case without the bilinear filtering required for @@ -1136,17 +1151,16 @@ public FusedBatchNormGrad fusedBatc * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param data type for {@code output} output - * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. + * @param data type for {@code output()} output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of {@code input}. + * rows must be the same as the rank of `input`. * @param filter 4-D with shape - * {@code [filter_height, filter_width, in_channels, out_channels]}. - * @param mode the value of the mode property + * `[filter_height, filter_width, in_channels, out_channels]`. + * @param mode * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of {@code input}. Must be in the same order as the dimension specified with format. + * of `input`. Must be in the same order as the dimension specified with format. * @param padding The type of padding algorithm to use. - * @param data type for {@code FusedPadConv2D} output and operands * @return a new instance of FusedPadConv2d */ public FusedPadConv2d fusedPadConv2d(Operand input, @@ -1157,6 +1171,7 @@ public FusedPadConv2d fusedPadConv2d(Operand input, /** * Performs a resize and padding as a preprocess during a convolution. + *

      * It's often possible to do spatial transformations more efficiently as part of * the packing stage of a convolution, so this op allows for an optimized * implementation where these stages are fused together. This prevents the need to @@ -1168,46 +1183,48 @@ public FusedPadConv2d fusedPadConv2d(Operand input, * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param data type for {@code output} output - * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. - * @param sizeOutput A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The + * @param data type for {@code output()} output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param size A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of {@code input}. + * rows must be the same as the rank of `input`. * @param filter 4-D with shape - * {@code [filter_height, filter_width, in_channels, out_channels]}. - * @param mode the value of the mode property + * `[filter_height, filter_width, in_channels, out_channels]`. + * @param mode * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of {@code input}. Must be in the same order as the dimension specified with format. + * of `input`. Must be in the same order as the dimension specified with format. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code FusedResizeAndPadConv2D} output and operands + * @param options carries optional attributes values * @return a new instance of FusedResizeAndPadConv2d */ public FusedResizeAndPadConv2d fusedResizeAndPadConv2d(Operand input, - Operand sizeOutput, Operand paddings, Operand filter, String mode, + Operand size, Operand paddings, Operand filter, String mode, List strides, String padding, FusedResizeAndPadConv2d.Options... options) { - return FusedResizeAndPadConv2d.create(scope, input, sizeOutput, paddings, filter, mode, strides, padding, options); - } - - /** - * Says whether the targets are in the top {@code K} predictions. - * This outputs a {@code batch_size} bool array, an entry {@code out[i]} is {@code true} if the - * prediction for the target class is among the top {@code k} predictions among - * all predictions for example {@code i}. Note that the behavior of {@code InTopK} differs - * from the {@code TopK} op in its handling of ties; if multiple classes have the - * same prediction value and straddle the top-{@code k} boundary, all of those - * classes are considered to be in the top {@code k}. - *

      More formally, let - *

      \(predictions_i\) be the predictions for all classes for example {@code i}, - * \(targets_i\) be the target class for example {@code i}, - * \(out_i\) be the output for example {@code i}, - *

      $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ - * - * @param predictions A {@code batch_size} x {@code classes} tensor. - * @param targets A {@code batch_size} vector of class ids. + return FusedResizeAndPadConv2d.create(scope, input, size, paddings, filter, mode, strides, padding, options); + } + + /** + * Says whether the targets are in the top `K` predictions. + *

      + * This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + * prediction for the target class is among the top `k` predictions among + * all predictions for example `i`. Note that the behavior of `InTopK` differs + * from the `TopK` op in its handling of ties; if multiple classes have the + * same prediction value and straddle the top-`k` boundary, all of those + * classes are considered to be in the top `k`. + *

      + * More formally, let + *

      + * \\(predictions_i\\) be the predictions for all classes for example `i`, + * \\(targets_i\\) be the target class for example `i`, + * \\(out_i\\) be the output for example `i`, + *

      + * $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ + * + * @param predictions A `batch_size` x `classes` tensor. + * @param targets A `batch_size` vector of class ids. * @param k Number of top elements to look at for computing precision. - * @param data type for {@code InTopKV2} output and operands * @return a new instance of InTopK */ public InTopK inTopK(Operand predictions, Operand targets, @@ -1217,14 +1234,13 @@ public InTopK inTopK(Operand predictions, Operand< /** * L2 Loss. - * Computes half the L2 norm of a tensor without the {@code sqrt}: - *
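Editor's note on the InTopK condition above: an example passes exactly when fewer than k classes score strictly higher than the target class, which is what keeps every member of a tie that straddles the top-k boundary. A per-example plain-Java sketch; the names are invented for the illustration.

// Top-k membership with ties: count how many classes beat the target outright.
public class InTopKExample {
  static boolean inTopK(float[] predictions, int target, int k) {
    int strictlyHigher = 0;
    for (float p : predictions) {
      if (p > predictions[target]) {
        strictlyHigher++;
      }
    }
    return strictlyHigher < k;
  }

  public static void main(String[] args) {
    float[] predictions = {0.1f, 0.5f, 0.5f, 0.2f};
    // Classes 1 and 2 tie at the top, so both count as being in the top 1.
    System.out.println(inTopK(predictions, 1, 1)); // true
    System.out.println(inTopK(predictions, 2, 1)); // true
    System.out.println(inTopK(predictions, 3, 1)); // false
  }
}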

      -   *  output = sum(t ** 2) / 2
      -   *  
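Editor's note: the formula removed here is re-added unchanged below; as a one-method plain-Java sketch (names invented for the illustration):

// Half the sum of squares, with no square root.
public class L2LossExample {
  static double l2Loss(double[] t) {
    double sum = 0;
    for (double v : t) {
      sum += v * v;
    }
    return sum / 2;
  }

  public static void main(String[] args) {
    System.out.println(l2Loss(new double[] {3, 4})); // (9 + 16) / 2 = 12.5
  }
}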
      + *

      + * Computes half the L2 norm of a tensor without the `sqrt`: + *

      + * output = sum(t ** 2) / 2 * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param t Typically 2-D, but may have any dimensions. - * @param data type for {@code L2Loss} output and operands * @return a new instance of L2Loss */ public L2Loss l2Loss(Operand t) { @@ -1232,12 +1248,11 @@ public L2Loss l2Loss(Operand t) { } /** - * Computes rectified linear: {@code max(features, features * alpha)}. + * Computes rectified linear: `max(features, features * alpha)`. * - * @param data type for {@code activations} output - * @param features the features value - * @param options carries optional attribute values - * @param data type for {@code LeakyRelu} output and operands + * @param data type for {@code activations()} output + * @param features + * @param options carries optional attributes values * @return a new instance of LeakyRelu */ public LeakyRelu leakyRelu(Operand features, @@ -1247,10 +1262,13 @@ public LeakyRelu leakyRelu(Operand features, /** * Generates labels for candidate sampling with a learned unigram distribution. + *

      * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - *

      For each batch, this op picks a single set of sampled candidate labels. - *

      The advantages of sampling candidates per-batch are simplicity and the + *

      + * For each batch, this op picks a single set of sampled candidate labels. + *

      + * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. @@ -1263,7 +1281,7 @@ public LeakyRelu leakyRelu(Operand features, * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. * @param rangeMax The sampler will sample integers from the interval [0, range_max). - * @param options carries optional attribute values + * @param options carries optional attributes values * @return a new instance of LearnedUnigramCandidateSampler */ public LearnedUnigramCandidateSampler learnedUnigramCandidateSampler(Operand trueClasses, @@ -1274,22 +1292,22 @@ public LearnedUnigramCandidateSampler learnedUnigramCandidateSampler(Operand + * The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last * dimension), and each vector is normalized independently. Within a given vector, * each component is divided by the weighted, squared sum of inputs within - * {@code depth_radius}. In detail, - *

      -   *  sqr_sum[a, b, c, d] =
      -   *      sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
      -   *  output = input / (bias + alpha * sqr_sum) ** beta
      -   *  
      - *

      For details, see Krizhevsky et al., ImageNet classification with deep - * convolutional neural networks (NIPS 2012) . - * - * @param data type for {@code output} output + * `depth_radius`. In detail, + *

      + * sqr_sum[a, b, c, d] = + * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) + * output = input / (bias + alpha * sqr_sum) ** beta + *
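Editor's note: read over a single depth vector (one (a, b, c) position), the two lines above become the following plain-Java sketch. The window is clipped at the tensor edges and the attribute values are picked arbitrarily for the illustration; the names are invented.

// Local response normalization applied along the depth dimension.
public class LrnExample {
  static float[] lrn(float[] depth, int depthRadius, float bias, float alpha, float beta) {
    float[] out = new float[depth.length];
    for (int d = 0; d < depth.length; d++) {
      float sqrSum = 0f;
      int lo = Math.max(0, d - depthRadius);
      int hi = Math.min(depth.length - 1, d + depthRadius);
      for (int i = lo; i <= hi; i++) {
        sqrSum += depth[i] * depth[i];
      }
      out[d] = (float) (depth[d] / Math.pow(bias + alpha * sqrSum, beta));
    }
    return out;
  }

  public static void main(String[] args) {
    float[] x = {1f, 2f, 3f, 4f};
    System.out.println(java.util.Arrays.toString(lrn(x, 2, 1f, 1f, 0.5f)));
  }
}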

      + * For details, see [Krizhevsky et al., ImageNet classification with deep + * convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). + * + * @param data type for {@code output()} output * @param input 4-D. - * @param options carries optional attribute values - * @param data type for {@code LRN} output and operands + * @param options carries optional attributes values * @return a new instance of LocalResponseNormalization */ public LocalResponseNormalization localResponseNormalization( @@ -1299,14 +1317,13 @@ public LocalResponseNormalization localResponseNormalizat /** * Computes log softmax activations. - * For each batch {@code i} and class {@code j} we have - *

      -   *  logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
      -   *  
      - * - * @param data type for {@code logsoftmax} output - * @param logits 2-D with shape {@code [batch_size, num_classes]}. - * @param data type for {@code LogSoftmax} output and operands + *

      + * For each batch `i` and class `j` we have + *

      + * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) + * + * @param data type for {@code logsoftmax()} output + * @param logits 2-D with shape `[batch_size, num_classes]`. * @return a new instance of LogSoftmax */ public LogSoftmax logSoftmax(Operand logits) { @@ -1316,17 +1333,16 @@ public LogSoftmax logSoftmax(Operand logits) { /** * Performs max pooling on the input. * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param input 4-D input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code MaxPoolV2} output and operands + * @param options carries optional attributes values * @return a new instance of MaxPool */ - public MaxPool maxPool(Operand input, Operand ksize, + public MaxPool maxPool(Operand input, Operand ksize, Operand strides, String padding, MaxPool.Options... options) { return MaxPool.create(scope, input, ksize, strides, padding, options); } @@ -1334,15 +1350,14 @@ public MaxPool maxPool(Operand input, Operand /** * Performs 3D max pooling on the input. * - * @param data type for {@code output} output - * @param input Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. + * @param data type for {@code output()} output + * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code MaxPool3D} output and operands + * @param options carries optional attributes values * @return a new instance of MaxPool3d */ public MaxPool3d maxPool3d(Operand input, List ksize, @@ -1353,18 +1368,16 @@ public MaxPool3d maxPool3d(Operand input, List k /** * Computes gradients of 3D max pooling function. * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attribute values - * @param data type for {@code MaxPool3DGrad} output and operands - * @param data type for {@code MaxPool3DGrad} output and operands + * @param options carries optional attributes values * @return a new instance of MaxPool3dGrad */ public MaxPool3dGrad maxPool3dGrad(Operand origInput, @@ -1376,17 +1389,16 @@ public MaxPool3dGrad maxPool3dGrad(Ope /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code MaxPool3DGradGrad} output and operands + * @param options carries optional attributes values * @return a new instance of MaxPool3dGradGrad */ public MaxPool3dGradGrad maxPool3dGradGrad(Operand origInput, @@ -1398,16 +1410,15 @@ public MaxPool3dGradGrad maxPool3dGradGrad(Operand ori /** * Computes gradients of the maxpooling function. * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad 4-D. Gradients w.r.t. the output of {@code max_pool}. + * @param grad 4-D. Gradients w.r.t. the output of `max_pool`. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code MaxPoolGradV2} output and operands + * @param options carries optional attributes values * @return a new instance of MaxPoolGrad */ public MaxPoolGrad maxPoolGrad(Operand origInput, Operand origOutput, @@ -1419,16 +1430,15 @@ public MaxPoolGrad maxPoolGrad(Operand origInput, Oper /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad 4-D. Gradients of gradients w.r.t. the input of {@code max_pool}. + * @param grad 4-D. Gradients of gradients w.r.t. the input of `max_pool`. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attribute values - * @param data type for {@code MaxPoolGradGradV2} output and operands + * @param options carries optional attributes values * @return a new instance of MaxPoolGradGrad */ public MaxPoolGradGrad maxPoolGradGrad(Operand origInput, @@ -1440,17 +1450,16 @@ public MaxPoolGradGrad maxPoolGradGrad(Operand origInp /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param input The original input. - * @param grad 4-D with shape {@code [batch, height, width, channels]}. Gradients w.r.t. the - * input of {@code max_pool}. - * @param argmax The indices of the maximum values chosen for each output of {@code max_pool}. + * @param grad 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + * input of `max_pool`. + * @param argmax The indices of the maximum values chosen for each output of `max_pool`. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code MaxPoolGradGradWithArgmax} output and operands + * @param options carries optional attributes values * @return a new instance of MaxPoolGradGradWithArgmax */ public MaxPoolGradGradWithArgmax maxPoolGradGradWithArgmax( @@ -1461,53 +1470,54 @@ public MaxPoolGradGradWithArgmax maxPoolGradGradWithArgma /** * Performs max pooling on the input and outputs both max values and indices. - * The indices in {@code argmax} are flattened, so that a maximum value at position - * {@code [b, y, x, c]} becomes flattened index: - * {@code (y * width + x) * channels + c} if {@code include_batch_in_index} is False; - * {@code ((b * height + y) * width + x) * channels + c} if {@code include_batch_in_index} is True. - *

      The indices returned are always in {@code [0, height) x [0, width)} before flattening, + *

      + * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + *
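Editor's note: when decoding the returned argmax values, the two flattening formulas quoted above translate directly into code. Plain Java with made-up coordinates; the names are invented for the illustration.

// Flattened index of a maximum found at [b, y, x, c].
public class ArgmaxIndexExample {
  static long flattenedIndex(long b, long y, long x, long c,
      long height, long width, long channels, boolean includeBatchInIndex) {
    return includeBatchInIndex
        ? ((b * height + y) * width + x) * channels + c
        : (y * width + x) * channels + c;
  }

  public static void main(String[] args) {
    // Maximum at [b=1, y=2, x=3, c=0] in a 4x5 image with 2 channels.
    System.out.println(flattenedIndex(1, 2, 3, 0, 4, 5, 2, false)); // 26
    System.out.println(flattenedIndex(1, 2, 3, 0, 4, 5, 2, true));  // 66
  }
}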

      + * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param data type for {@code output} output - * @param data type for {@code argmax} output - * @param input 4-D with shape {@code [batch, height, width, channels]}. Input to pool over. + * @param data type for {@code output()} output + * @param data type for {@code argmax()} output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code MaxPoolWithArgmax} output and operands - * @return a new instance of MaxPoolWithArgmax, with default output types + * @param options carries optional attributes values + * @return a new instance of MaxPoolWithArgmax */ public MaxPoolWithArgmax maxPoolWithArgmax(Operand input, - List ksize, List strides, String padding, MaxPoolWithArgmax.Options[] options) { + List ksize, List strides, String padding, MaxPoolWithArgmax.Options... options) { return MaxPoolWithArgmax.create(scope, input, ksize, strides, padding, options); } /** * Performs max pooling on the input and outputs both max values and indices. - * The indices in {@code argmax} are flattened, so that a maximum value at position - * {@code [b, y, x, c]} becomes flattened index: - * {@code (y * width + x) * channels + c} if {@code include_batch_in_index} is False; - * {@code ((b * height + y) * width + x) * channels + c} if {@code include_batch_in_index} is True. - *

      The indices returned are always in {@code [0, height) x [0, width)} before flattening, + *

      + * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + *

      + * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param data type for {@code output} output - * @param data type for {@code argmax} output - * @param input 4-D with shape {@code [batch, height, width, channels]}. Input to pool over. + * @param data type for {@code output()} output + * @param data type for {@code argmax()} output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. - * @param Targmax the value of the Targmax property + * @param Targmax * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code MaxPoolWithArgmax} output and operands - * @param data type for {@code MaxPoolWithArgmax} output and operands + * @param options carries optional attributes values * @return a new instance of MaxPoolWithArgmax */ public MaxPoolWithArgmax maxPoolWithArgmax( @@ -1517,21 +1527,21 @@ public MaxPoolWithArgmax maxPoolWit } /** - * Finds values of the {@code n}-th order statistic for the last dimension. + * Finds values of the `n`-th order statistic for the last dimension. + *

      * If the input is a vector (rank-1), finds the entries which is the nth-smallest * value in the vector and outputs their values as scalar tensor. - *

      For matrices (resp. higher rank input), computes the entries which is the + *

      + * For matrices (resp. higher rank input), computes the entries which is the * nth-smallest value in each row (resp. vector along the last dimension). Thus, - *

      -   *  values.shape = input.shape[:-1]
      -   *  
      + *

      + * values.shape = input.shape[:-1] * - * @param data type for {@code values} output - * @param input 1-D or higher with last dimension at least {@code n+1}. + * @param data type for {@code values()} output + * @param input 1-D or higher with last dimension at least `n+1`. * @param n 0-D. Position of sorted vector to select along the last dimension (along - * each row for matrices). Valid range of n is {@code [0, input.shape[:-1])} - * @param options carries optional attribute values - * @param data type for {@code NthElement} output and operands + * each row for matrices). Valid range of n is `[0, input.shape[:-1])` + * @param options carries optional attributes values * @return a new instance of NthElement */ public NthElement nthElement(Operand input, Operand n, @@ -1542,8 +1552,8 @@ public NthElement nthElement(Operand input, Operand data type for {@code output} output - * @param input 4-D with shape {@code [batch, height, width, channels]}. + * @param data type for {@code output()} output + * @param input 4-D with shape `[batch, height, width, channels]`. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param ksize The size of the window for each dimension of the input tensor. @@ -1551,10 +1561,9 @@ public NthElement nthElement(Operand input, Operand data type for {@code QuantizedAvgPool} output and operands * @return a new instance of QuantizedAvgPool */ - public QuantizedAvgPool quantizedAvgPool(Operand input, + public QuantizedAvgPool quantizedAvgPool(Operand input, Operand minInput, Operand maxInput, List ksize, List strides, String padding) { return QuantizedAvgPool.create(scope, input, minInput, maxInput, ksize, strides, padding); @@ -1562,10 +1571,11 @@ public QuantizedAvgPool quantizedAvgPool(Operand input /** * Quantized Batch normalization. + *

      * This op is deprecated and will be removed in the future. Prefer - * {@code tf.nn.batch_normalization}. + * `tf.nn.batch_normalization`. * - * @param data type for {@code result} output + * @param data type for {@code result()} output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. * @param tMax The value represented by the highest quantized input. @@ -1584,19 +1594,17 @@ public QuantizedAvgPool quantizedAvgPool(Operand input * @param betaMin The value represented by the lowest quantized offset. * @param betaMax The value represented by the highest quantized offset. * @param gamma A 1D gamma Tensor with size matching the last dimension of t. - * If "scale_after_normalization" is true, this tensor will be multiplied + * If "scale_after_normalization" is true, this tensor will be multiplied * with the normalized tensor. * @param gammaMin The value represented by the lowest quantized gamma. * @param gammaMax The value represented by the highest quantized gamma. - * @param outType the value of the outType property + * @param outType * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. - * @param data type for {@code QuantizedBatchNormWithGlobalNormalization} output and operands - * @param data type for {@code QuantizedBatchNormWithGlobalNormalization} output and operands * @return a new instance of QuantizedBatchNormWithGlobalNormalization */ - public QuantizedBatchNormWithGlobalNormalization quantizedBatchNormWithGlobalNormalization( + public QuantizedBatchNormWithGlobalNormalization quantizedBatchNormWithGlobalNormalization( Operand t, Operand tMin, Operand tMax, Operand m, Operand mMin, Operand mMax, Operand v, Operand vMin, Operand vMax, Operand beta, Operand betaMin, Operand betaMax, @@ -1607,49 +1615,49 @@ public QuantizedBatchNormWithGlobalNormal /** * Adds Tensor 'bias' to Tensor 'input' for Quantized types. + *

      * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * - * @param data type for {@code output} output - * @param input the input value + * @param data type for {@code output()} output + * @param input * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minBias The float value that the lowest quantized bias value represents. * @param maxBias The float value that the highest quantized bias value represents. - * @param outType the value of the outType property - * @param data type for {@code QuantizedBiasAdd} output and operands + * @param outType * @return a new instance of QuantizedBiasAdd */ - public QuantizedBiasAdd quantizedBiasAdd(Operand input, - Operand bias, Operand minInput, Operand maxInput, + public QuantizedBiasAdd quantizedBiasAdd(Operand input, + Operand bias, Operand minInput, Operand maxInput, Operand minBias, Operand maxBias, Class outType) { return QuantizedBiasAdd.create(scope, input, bias, minInput, maxInput, minBias, maxBias, outType); } /** * Computes a 2D convolution given quantized 4D input and filter tensors. + *

      * The inputs are quantized tensors where the lowest value represents the real * number of the associated minimum, and the highest represents the maximum. * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. * - * @param data type for {@code output} output - * @param input the input value + * @param data type for {@code output()} output + * @param input * @param filter filter's input_depth dimension must match input's depth dimensions. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minFilter The float value that the lowest quantized filter value represents. * @param maxFilter The float value that the highest quantized filter value represents. - * @param outType the value of the outType property + * @param outType * @param strides The stride of the sliding window for each dimension of the input * tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attribute values - * @param data type for {@code QuantizedConv2D} output and operands + * @param options carries optional attributes values * @return a new instance of QuantizedConv2d */ - public QuantizedConv2d quantizedConv2d(Operand input, - Operand filter, Operand minInput, Operand maxInput, + public QuantizedConv2d quantizedConv2d(Operand input, + Operand filter, Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Class outType, List strides, String padding, QuantizedConv2d.Options... options) { return QuantizedConv2d.create(scope, input, filter, minInput, maxInput, minFilter, maxFilter, outType, strides, padding, options); @@ -1658,15 +1666,14 @@ public QuantizedConv2d quantizedConv2d(Operand data type for {@code y} output + * @param data type for {@code y()} output * @param x A 4D input Tensor. * @param xMin The value represented by the lowest quantized input. * @param xMax The value represented by the highest quantized input. - * @param options carries optional attribute values - * @param data type for {@code QuantizedInstanceNorm} output and operands + * @param options carries optional attributes values * @return a new instance of QuantizedInstanceNorm */ - public QuantizedInstanceNorm quantizedInstanceNorm(Operand x, + public QuantizedInstanceNorm quantizedInstanceNorm(Operand x, Operand xMin, Operand xMax, QuantizedInstanceNorm.Options... options) { return QuantizedInstanceNorm.create(scope, x, xMin, xMax, options); } @@ -1674,7 +1681,7 @@ public QuantizedInstanceNorm quantizedInstanceNorm(Operan /** * Produces the max pool of the input tensor for quantized types. * - * @param data type for {@code output} output + * @param data type for {@code output()} output * @param input The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. @@ -1683,93 +1690,82 @@ public QuantizedInstanceNorm quantizedInstanceNorm(Operan * @param strides The stride of the sliding window for each dimension of the input * tensor. The length must be 4 to match the number of dimensions of the input. * @param padding The type of padding algorithm to use. 
- * @param data type for {@code QuantizedMaxPool} output and operands * @return a new instance of QuantizedMaxPool */ - public QuantizedMaxPool quantizedMaxPool(Operand input, + public QuantizedMaxPool quantizedMaxPool(Operand input, Operand minInput, Operand maxInput, List ksize, List strides, String padding) { return QuantizedMaxPool.create(scope, input, minInput, maxInput, ksize, strides, padding); } /** - * Computes Quantized Rectified Linear: {@code max(features, 0)} + * Computes Quantized Rectified Linear: `max(features, 0)` * - * @param data type for {@code activations} output - * @param features the features value + * @param data type for {@code activations()} output + * @param features * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType the value of the outType property - * @param data type for {@code QuantizedRelu} output and operands + * @param outType * @return a new instance of QuantizedRelu */ - public QuantizedRelu quantizedRelu(Operand features, + public QuantizedRelu quantizedRelu(Operand features, Operand minFeatures, Operand maxFeatures, Class outType) { return QuantizedRelu.create(scope, features, minFeatures, maxFeatures, outType); } /** - * Computes Quantized Rectified Linear 6: {@code min(max(features, 0), 6)} + * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` * - * @param data type for {@code activations} output - * @param features the features value + * @param data type for {@code activations()} output + * @param features * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType the value of the outType property - * @param data type for {@code QuantizedRelu6} output and operands + * @param outType * @return a new instance of QuantizedRelu6 */ - public QuantizedRelu6 quantizedRelu6(Operand features, + public QuantizedRelu6 quantizedRelu6(Operand features, Operand minFeatures, Operand maxFeatures, Class outType) { return QuantizedRelu6.create(scope, features, minFeatures, maxFeatures, outType); } /** - * Computes Quantized Rectified Linear X: {@code min(max(features, 0), max_value)} + * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` * - * @param data type for {@code activations} output - * @param features the features value - * @param maxValue the maxValue value + * @param data type for {@code activations()} output + * @param features + * @param maxValue * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType the value of the outType property - * @param data type for {@code QuantizedReluX} output and operands + * @param outType * @return a new instance of QuantizedReluX */ - public QuantizedReluX quantizedReluX(Operand features, + public QuantizedReluX quantizedReluX(Operand features, Operand maxValue, Operand minFeatures, Operand maxFeatures, Class outType) { return QuantizedReluX.create(scope, features, maxValue, minFeatures, maxFeatures, outType); } /** - * Computes rectified linear: {@code max(features, 0)}. + * Computes rectified linear: `max(features, 0)`. + *

      * See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) * Example usage: - *

      - *
      - *
      - *

      tf.nn.relu([-2., 0., -0., 3.]).numpy() + * >>> tf.nn.relu([-2., 0., -0., 3.]).numpy() * array([ 0., 0., -0., 3.], dtype=float32) - *

      - *
      - *
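As a reading aid for the relu documentation being regenerated in this hunk, here is a minimal eager-mode sketch of the same example driven through the public Java API; the EagerSession/Ops setup and the printed values are illustrative assumptions, not part of this patch.

import org.tensorflow.EagerSession;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class ReluExample {
  public static void main(String[] args) {
    try (EagerSession env = EagerSession.create()) {
      Ops tf = Ops.create(env);
      // Negative inputs are clamped to zero, matching the javadoc example above.
      TFloat32 out = tf.nn.relu(tf.constant(new float[] {-2f, 0f, -0f, 3f})).asTensor();
      System.out.println(out.getFloat(0)); // 0.0
      System.out.println(out.getFloat(3)); // 3.0
    }
  }
}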
      * - * @param data type for {@code activations} output - * @param features the features value - * @param data type for {@code Relu} output and operands + * @param data type for {@code activations()} output + * @param features * @return a new instance of Relu */ - public Relu relu(Operand features) { + public Relu relu(Operand features) { return Relu.create(scope, features); } /** - * Computes rectified linear 6: {@code min(max(features, 0), 6)}. + * Computes rectified linear 6: `min(max(features, 0), 6)`. * - * @param data type for {@code activations} output - * @param features the features value - * @param data type for {@code Relu6} output and operands + * @param data type for {@code activations()} output + * @param features * @return a new instance of Relu6 */ public Relu6 relu6(Operand features) { @@ -1777,16 +1773,18 @@ public Relu6 relu6(Operand features) { } /** - * Computes scaled exponential linear: {@code scale * alpha * (exp(features) - 1)} - * if < 0, {@code scale * features} otherwise. - *

      To be used together with - * {@code initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')}. - * For correct dropout, use {@code tf.contrib.nn.alpha_dropout}. - *

      See Self-Normalizing Neural Networks + * Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` + *

      + * if < 0, `scale * features` otherwise. + *

      + * To be used together with + * `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. + * For correct dropout, use `tf.contrib.nn.alpha_dropout`. + *

      + * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) * - * @param data type for {@code activations} output - * @param features the features value - * @param data type for {@code Selu} output and operands + * @param data type for {@code activations()} output + * @param features * @return a new instance of Selu */ public Selu selu(Operand features) { @@ -1795,14 +1793,13 @@ public Selu selu(Operand features) { /** * Computes softmax activations. - * For each batch {@code i} and class {@code j} we have - *

      -   *  $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
      -   *  
      - * - * @param data type for {@code softmax} output - * @param logits 2-D with shape {@code [batch_size, num_classes]}. - * @param data type for {@code Softmax} output and operands + *

      + * For each batch `i` and class `j` we have + *

      + * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ + * + * @param data type for {@code softmax()} output + * @param logits 2-D with shape `[batch_size, num_classes]`. * @return a new instance of Softmax */ public Softmax softmax(Operand logits) { @@ -1811,14 +1808,14 @@ public Softmax softmax(Operand logits) { /** * Computes softmax cross entropy cost and gradients to backpropagate. + *
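To make the softmax endpoint above and the softmaxCrossEntropyWithLogits endpoint introduced just below easier to follow, a hedged sketch of calling both from the generated API; the batch values and the eager setup are invented for illustration and the printed numbers are approximate.

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits;
import org.tensorflow.types.TFloat32;

public class SoftmaxExample {
  public static void main(String[] args) {
    try (EagerSession env = EagerSession.create()) {
      Ops tf = Ops.create(env);
      // Unscaled logits for a batch of two examples and three classes.
      Operand<TFloat32> logits = tf.constant(new float[][] {{2f, 1f, 0.1f}, {0f, 0f, 5f}});
      // Each row of labels must be a valid probability distribution.
      Operand<TFloat32> labels = tf.constant(new float[][] {{1f, 0f, 0f}, {0f, 0f, 1f}});
      System.out.println(tf.nn.softmax(logits).asTensor().getFloat(0, 0)); // ~0.659
      SoftmaxCrossEntropyWithLogits<TFloat32> xent =
          tf.nn.softmaxCrossEntropyWithLogits(logits, labels);
      System.out.println(xent.loss().asTensor().getFloat(0)); // per-example loss, ~0.417
    }
  }
}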

      * Inputs are the logits, not probabilities. * - * @param data type for {@code loss} output + * @param data type for {@code loss()} output * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. - * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits */ public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyWithLogits( @@ -1827,11 +1824,10 @@ public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyW } /** - * Computes softsign: {@code features / (abs(features) + 1)}. + * Computes softsign: `features / (abs(features) + 1)`. * - * @param data type for {@code activations} output - * @param features the features value - * @param data type for {@code Softsign} output and operands + * @param data type for {@code activations()} output + * @param features * @return a new instance of Softsign */ public Softsign softsign(Operand features) { @@ -1840,85 +1836,87 @@ public Softsign softsign(Operand features) { /** * SpaceToBatch for 4-D tensors of type T. + *

      * This is a legacy version of the more general SpaceToBatchND. - *

      Zero-pads and then rearranges (permutes) blocks of spatial data into batch. + *

      + * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. * More specifically, this op outputs a copy of the input tensor where values from - * the {@code height} and {@code width} dimensions are moved to the {@code batch} dimension. After - * the zero-padding, both {@code height} and {@code width} of the input must be divisible by the + * the `height` and `width` dimensions are moved to the `batch` dimension. After + * the zero-padding, both `height` and `width` of the input must be divisible by the * block size. * - * @param data type for {@code output} output - * @param input 4-D with shape {@code [batch, height, width, depth]}. - * @param paddings 2-D tensor of non-negative integers with shape {@code [2, 2]}. It specifies - * the padding of the input with zeros across the spatial dimensions as follows: - *

      -   *    paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
      -   *  
      - *

      The effective spatial dimensions of the zero-padded input tensor will be: - *

      -   *    height_pad = pad_top + height + pad_bottom
      -   *    width_pad = pad_left + width + pad_right
      -   *  
      - *

      The attr {@code block_size} must be greater than one. It indicates the block size. - *

-   *  <ul>
-   *  <li>Non-overlapping blocks of size {@code block_size x block size} in the height and
-   *  width dimensions are rearranged into the batch dimension at each location.</li>
-   *  <li>The batch of the output tensor is {@code batch * block_size * block_size}.</li>
-   *  <li>Both height_pad and width_pad must be divisible by block_size.</li>
-   *  </ul>
      - *

      The shape of the output will be: - *

      -   *  [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
      -   *   depth]
      -   *  
      - *

      Some examples: - *

      (1) For the following input of shape {@code [1, 2, 2, 1]} and block_size of 2: - *

      +   * @param  data type for {@code output()} output
      +   * @param input 4-D with shape `[batch, height, width, depth]`.
      +   * @param paddings 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
      +   *    the padding of the input with zeros across the spatial dimensions as follows:
      +   *  

      + * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] + *

      + * The effective spatial dimensions of the zero-padded input tensor will be: + *

      + * height_pad = pad_top + height + pad_bottom + * width_pad = pad_left + width + pad_right + *

      + * The attr `block_size` must be greater than one. It indicates the block size. + *

      + * Non-overlapping blocks of size `block_size x block size` in the height and + * width dimensions are rearranged into the batch dimension at each location. + * The batch of the output tensor is `batch * block_size * block_size`. + * Both height_pad and width_pad must be divisible by block_size. + *

      + * The shape of the output will be: + *

+ * [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, + * depth] + *

      + * Some examples: + *

      + * (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: + *

      {@code
          *  x = [[[[1], [2]], [[3], [4]]]]
      -   *  
      - *

      The output tensor has shape {@code [4, 1, 1, 1]} and value: - *

      +   *  }
      + * The output tensor has shape `[4, 1, 1, 1]` and value: + *
      {@code
          *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
      -   *  
      - *
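To make example (1) above concrete, a small sketch of how the spaceToBatch endpoint might be invoked for that input; the zero paddings, the eager setup, and the (input, paddings, blockSize) call shape are assumptions based on the parameters listed in this hunk's javadoc.

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TInt32;

public class SpaceToBatchExample {
  public static void main(String[] args) {
    try (EagerSession env = EagerSession.create()) {
      Ops tf = Ops.create(env);
      // Input of shape [1, 2, 2, 1], as in example (1) of the javadoc.
      Operand<TInt32> x = tf.constant(new int[][][][] {{{{1}, {2}}, {{3}, {4}}}});
      // No zero-padding on either spatial dimension.
      Operand<TInt32> paddings = tf.constant(new int[][] {{0, 0}, {0, 0}});
      TInt32 out = tf.nn.spaceToBatch(x, paddings, 2L).asTensor();
      // The result has shape [4, 1, 1, 1] and holds the values 1, 2, 3, 4.
      System.out.println(out.getInt(0, 0, 0, 0)); // 1
    }
  }
}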

      (2) For the following input of shape {@code [1, 2, 2, 3]} and block_size of 2: - *

      +   *  }
      + * (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: + *
      {@code
          *  x = [[[[1, 2, 3], [4, 5, 6]],
          *        [[7, 8, 9], [10, 11, 12]]]]
      -   *  
      - *

      The output tensor has shape {@code [4, 1, 1, 3]} and value: - *

      +   *  }
      + * The output tensor has shape `[4, 1, 1, 3]` and value: + *
      {@code
          *  [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
      -   *  
      - *

      (3) For the following input of shape {@code [1, 4, 4, 1]} and block_size of 2: - *

      +   *  }
      + * (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: + *
      {@code
          *  x = [[[[1],   [2],  [3],  [4]],
          *        [[5],   [6],  [7],  [8]],
          *        [[9],  [10], [11],  [12]],
          *        [[13], [14], [15],  [16]]]]
      -   *  
      - *

      The output tensor has shape {@code [4, 2, 2, 1]} and value: - *

      +   *  }
      + * The output tensor has shape `[4, 2, 2, 1]` and value: + *
      {@code
          *  x = [[[[1], [3]], [[9], [11]]],
          *       [[[2], [4]], [[10], [12]]],
          *       [[[5], [7]], [[13], [15]]],
          *       [[[6], [8]], [[14], [16]]]]
      -   *  
      - *

      (4) For the following input of shape {@code [2, 2, 4, 1]} and block_size of 2: - *

      +   *  }
      + * (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: + *
      {@code
          *  x = [[[[1],   [2],  [3],  [4]],
          *        [[5],   [6],  [7],  [8]]],
          *       [[[9],  [10], [11],  [12]],
          *        [[13], [14], [15],  [16]]]]
      -   *  
      - *

      The output tensor has shape {@code [8, 1, 2, 1]} and value: - *

      +   *  }
      + * The output tensor has shape `[8, 1, 2, 1]` and value: + *
      {@code
          *  x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
          *       [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
      -   *  
      - *

      Among others, this operation is useful for reducing atrous convolution into + * }

      + * Among others, this operation is useful for reducing atrous convolution into * regular convolution. - * @param blockSize the value of the blockSize property - * @param data type for {@code SpaceToBatch} output and operands + * @param blockSize * @return a new instance of SpaceToBatch */ public SpaceToBatch spaceToBatch(Operand input, @@ -1928,80 +1926,84 @@ public SpaceToBatch spaceToBatch(Operand input, /** * SpaceToDepth for tensors of type T. + *

      * Rearranges blocks of spatial data, into depth. More specifically, - * this op outputs a copy of the input tensor where values from the {@code height} - * and {@code width} dimensions are moved to the {@code depth} dimension. - * The attr {@code block_size} indicates the input block size. - *

-   *  <ul>
-   *  <li>Non-overlapping blocks of size {@code block_size x block size} are rearranged
-   *  into depth at each location.</li>
-   *  <li>The depth of the output tensor is {@code block_size * block_size * input_depth}.</li>
-   *  <li>The Y, X coordinates within each block of the input become the high order
-   *  component of the output channel index.</li>
-   *  <li>The input tensor's height and width must be divisible by block_size.</li>
-   *  </ul>
      - *

      The {@code data_format} attr specifies the layout of the input and output tensors + * this op outputs a copy of the input tensor where values from the `height` + * and `width` dimensions are moved to the `depth` dimension. + * The attr `block_size` indicates the input block size. + *

      + * Non-overlapping blocks of size `block_size x block size` are rearranged + * into depth at each location. + * The depth of the output tensor is `block_size * block_size * input_depth`. + * The Y, X coordinates within each block of the input become the high order + * component of the output channel index. + * The input tensor's height and width must be divisible by block_size. + *

      + * The `data_format` attr specifies the layout of the input and output tensors * with the following options: - * "NHWC": {@code [ batch, height, width, channels ]} - * "NCHW": {@code [ batch, channels, height, width ]} - * "NCHW_VECT_C": - * {@code qint8 [ batch, channels / 4, height, width, 4 ]} - *

      It is useful to consider the operation as transforming a 6-D Tensor. + * "NHWC": `[ batch, height, width, channels ]` + * "NCHW": `[ batch, channels, height, width ]` + * "NCHW_VECT_C": + * `qint8 [ batch, channels / 4, height, width, 4 ]` + *

      + * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, - * Each element in the input tensor can be specified via 6 coordinates, - * ordered by decreasing memory layout significance as: - * n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates - * within the output image, bX, bY means coordinates - * within the input block, iC means input channels). - * The output would be a transpose to the following layout: - * n,oY,oX,bY,bX,iC - *

      This operation is useful for resizing the activations between convolutions + * Each element in the input tensor can be specified via 6 coordinates, + * ordered by decreasing memory layout significance as: + * n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates + * within the output image, bX, bY means coordinates + * within the input block, iC means input channels). + * The output would be a transpose to the following layout: + * n,oY,oX,bY,bX,iC + *

      + * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. - *

      For example, given an input of shape {@code [1, 2, 2, 1]}, data_format = "NHWC" and + *

      + * For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and * block_size = 2: - *

      +   *  
      {@code
          *  x = [[[[1], [2]],
          *        [[3], [4]]]]
      -   *  
      - *

      This operation will output a tensor of shape {@code [1, 1, 1, 4]}: - *

      +   *  }
      + * This operation will output a tensor of shape `[1, 1, 1, 4]`: + *
      {@code
          *  [[[[1, 2, 3, 4]]]]
      -   *  
      - *

      Here, the input has a batch of 1 and each batch element has shape {@code [2, 2, 1]}, + * }

      + * Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, * the corresponding output will have a single element (i.e. width and height are * both 1) and will have a depth of 4 channels (1 * block_size * block_size). - * The output element shape is {@code [1, 1, 4]}. - *

      For an input tensor with larger depth, here of shape {@code [1, 2, 2, 3]}, e.g. - *

      +   *  The output element shape is `[1, 1, 4]`.
      +   *  

      + * For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. + *

      {@code
          *  x = [[[[1, 2, 3], [4, 5, 6]],
          *        [[7, 8, 9], [10, 11, 12]]]]
      -   *  
      - *

      This operation, for block_size of 2, will return the following tensor of shape - * {@code [1, 1, 1, 12]} - *

      +   *  }
      + * This operation, for block_size of 2, will return the following tensor of shape + * `[1, 1, 1, 12]` + *
      {@code
          *  [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
      -   *  
      - *

      Similarly, for the following input of shape {@code [1 4 4 1]}, and a block size of 2: - *

      +   *  }
      + * Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: + *
      {@code
          *  x = [[[[1],   [2],  [5],  [6]],
          *        [[3],   [4],  [7],  [8]],
          *        [[9],  [10], [13],  [14]],
          *        [[11], [12], [15],  [16]]]]
      -   *  
      - *

      the operator will return the following tensor of shape {@code [1 2 2 4]}: - *

      +   *  }
      + * the operator will return the following tensor of shape `[1 2 2 4]`: + *
      {@code
          *  x = [[[[1, 2, 3, 4],
          *         [5, 6, 7, 8]],
          *        [[9, 10, 11, 12],
          *         [13, 14, 15, 16]]]]
      -   *  
      + * }
      * - * @param data type for {@code output} output - * @param input the input value + * @param data type for {@code output()} output + * @param input * @param blockSize The size of the spatial block. - * @param options carries optional attribute values - * @param data type for {@code SpaceToDepth} output and operands + * @param options carries optional attributes values * @return a new instance of SpaceToDepth */ public SpaceToDepth spaceToDepth(Operand input, Long blockSize, @@ -2011,17 +2013,18 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo /** * Computes softmax cross entropy cost and gradients to backpropagate. - * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept + *

      + * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - *

      Inputs are the logits, not probabilities. + *

      + * Inputs are the logits, not probabilities. * - * @param data type for {@code loss} output + * @param data type for {@code loss()} output * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. - * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( @@ -2030,23 +2033,24 @@ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxC } /** - * Finds values and indices of the {@code k} largest elements for the last dimension. - * If the input is a vector (rank-1), finds the {@code k} largest entries in the vector - * and outputs their values and indices as vectors. Thus {@code values[j]} is the - * {@code j}-th largest entry in {@code input}, and its index is {@code indices[j]}. - *
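A hedged sketch of feeding the sparseSoftmaxCrossEntropyWithLogits endpoint documented above, with one integer class label per row; the tensors and the eager setup are illustrative assumptions.

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

public class SparseXentExample {
  public static void main(String[] args) {
    try (EagerSession env = EagerSession.create()) {
      Ops tf = Ops.create(env);
      // batch_size x num_classes matrix of unscaled logits.
      Operand<TFloat32> features = tf.constant(new float[][] {{2f, 1f, 0.1f}, {0f, 0f, 5f}});
      // One class index per batch entry, each in [0, num_classes).
      Operand<TInt32> labels = tf.constant(new int[] {0, 2});
      SparseSoftmaxCrossEntropyWithLogits<TFloat32> xent =
          tf.nn.sparseSoftmaxCrossEntropyWithLogits(features, labels);
      System.out.println(xent.loss().asTensor().getFloat(0));       // loss for the first example
      System.out.println(xent.backprop().asTensor().getFloat(0, 0)); // gradient w.r.t. logits
    }
  }
}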

      For matrices (resp. higher rank input), computes the top {@code k} entries in each + * Finds values and indices of the `k` largest elements for the last dimension. + *

      + * If the input is a vector (rank-1), finds the `k` largest entries in the vector + * and outputs their values and indices as vectors. Thus `values[j]` is the + * `j`-th largest entry in `input`, and its index is `indices[j]`. + *

      + * For matrices (resp. higher rank input), computes the top `k` entries in each * row (resp. vector along the last dimension). Thus, - *

      -   *  values.shape = indices.shape = input.shape[:-1] + [k]
      -   *  
      - *

      If two elements are equal, the lower-index element appears first. + *

      + * values.shape = indices.shape = input.shape[:-1] + [k] + *

      + * If two elements are equal, the lower-index element appears first. * - * @param data type for {@code values} output - * @param input 1-D or higher with last dimension at least {@code k}. + * @param data type for {@code values()} output + * @param input 1-D or higher with last dimension at least `k`. * @param k 0-D. Number of top elements to look for along the last dimension (along each * row for matrices). - * @param options carries optional attribute values - * @param data type for {@code TopKV2} output and operands + * @param options carries optional attributes values * @return a new instance of TopK */ public TopK topK(Operand input, Operand k, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java index d6eed5cbe28..5d3ab3c1100 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -29,68 +29,57 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. + *
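For the topK endpoint documented above, a minimal eager sketch; the input vector and the printed values are assumptions for illustration.

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.nn.TopK;
import org.tensorflow.types.TFloat32;

public class TopKExample {
  public static void main(String[] args) {
    try (EagerSession env = EagerSession.create()) {
      Ops tf = Ops.create(env);
      Operand<TFloat32> input = tf.constant(new float[] {1f, 5f, 3f, 4f});
      // Ask for the two largest entries along the last (and only) dimension.
      TopK<TFloat32> top2 = tf.nn.topK(input, tf.constant(2));
      System.out.println(top2.values().asTensor().getFloat(0));  // 5.0
      System.out.println(top2.indices().asTensor().getInt(0));   // 1
    }
  }
}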

      * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output + * + * @param data type for {@code loss()} output */ -@Operator( - group = "nn" -) +@Operator(group = "nn") public final class SoftmaxCrossEntropyWithLogits extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; - - private Output loss; - - private Output backprop; - - private SoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx++); - } - + /** * Factory method to create a class wrapping a new SoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. - * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits */ - @Endpoint( - describeByClass = true - ) - public static SoftmaxCrossEntropyWithLogits create(Scope scope, - Operand features, Operand labels) { + @Endpoint(describeByClass = true) + public static SoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { OperationBuilder opBuilder = scope.env().opBuilder("SoftmaxCrossEntropyWithLogits", scope.makeOpName("SoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** - * Gets loss. * Per example loss (batch_size vector). - * @return loss. */ public Output loss() { return loss; } - + /** - * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). - * @return backprop. */ public Output backprop() { return backprop; } + + /** The name of this op, as known by TensorFlow core engine */ + public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; + + private Output loss; + private Output backprop; + + private SoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 26498cdce7a..794beab4ded 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -29,71 +29,61 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. - * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept + *

      + * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - *

      Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output + *

      + * Inputs are the logits, not probabilities. + * + * @param data type for {@code loss()} output */ -@Operator( - group = "nn" -) +@Operator(group = "nn") public final class SparseSoftmaxCrossEntropyWithLogits extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; - - private Output loss; - - private Output backprop; - - private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx++); - } - + /** * Factory method to create a class wrapping a new SparseSoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. - * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ - @Endpoint( - describeByClass = true - ) - public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, - Operand features, Operand labels) { + @Endpoint(describeByClass = true) + public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { OperationBuilder opBuilder = scope.env().opBuilder("SparseSoftmaxCrossEntropyWithLogits", scope.makeOpName("SparseSoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SparseSoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** - * Gets loss. * Per example loss (batch_size vector). - * @return loss. */ public Output loss() { return loss; } - + /** - * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). - * @return backprop. */ public Output backprop() { return backprop; } + + /** The name of this op, as known by TensorFlow core engine */ + public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; + + private Output loss; + private Output backprop; + + private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx); + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index 432e1b47a3f..b55385839d3 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -3,6 +3,8 @@ import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.op.core.Select; import org.tensorflow.op.core.ZerosLike; import org.tensorflow.op.dtypes.Cast; @@ -16,17 +18,18 @@ import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -// @Operator(group = "nn") +//@Operator(group = "nn") public class SigmoidCrossEntropyWithLogits { /** - * Computes sigmoid cross entropy given {@code logits}. + * Computes sigmoid cross entropy given logits. * *

      Measures the probability error in discrete classification tasks in which each class is * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. * - *

      For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in pseudo-code is + *

      For brevity, let x = logits, z = labels. The logistic loss in + * pseudo-code is * *

          * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      @@ -37,7 +40,7 @@ public class SigmoidCrossEntropyWithLogits {
          *  = x - x * z + log(1 + exp(-x))
          * 
      * - *

      For {@code x < 0}, to avoid overflow in {@code exp(-x)}, we reformulate the above + *

      For x < 0, to avoid overflow in exp(-x), we reformulate the above * *

          * x - x * z + log(1 + exp(-x))
      @@ -52,7 +55,7 @@ public class SigmoidCrossEntropyWithLogits {
          *   max(x, 0) - x * z + log(1 + exp(-abs(x)))
          * 
      * - *
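As a quick numeric sanity check on the reformulation quoted above, a tiny plain-Java sketch comparing the naive logistic loss with the overflow-safe expression at one hand-picked point; the chosen x and z are arbitrary.

public class StableSigmoidXentCheck {
  public static void main(String[] args) {
    double x = -3.0; // logit
    double z = 1.0;  // label
    double sigmoid = 1.0 / (1.0 + Math.exp(-x));
    double naive = z * -Math.log(sigmoid) + (1 - z) * -Math.log(1 - sigmoid);
    double stable = Math.max(x, 0) - x * z + Math.log(1 + Math.exp(-Math.abs(x)));
    System.out.println(naive);  // ~3.0486
    System.out.println(stable); // ~3.0486, but exp never receives a large positive argument
  }
}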

      {@code logits} and {@code labels} must have the same type and shape. + *

      * @@ -63,7 +66,7 @@ public class SigmoidCrossEntropyWithLogits { * @return the component-wise logistic losses. * @throws IllegalArgumentException if logits' and labels' do not have the same shape */ - // @Endpoint(name = "sigmoidCrossEntropyWithLogits") + //@Endpoint(name = "sigmoidCrossEntropyWithLogits") public static Operand sigmoidCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits) { if (!isCompatible(labels.shape(), logits.shape())) { diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 553adf90aad..64faa7c5d70 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -14,11 +14,7 @@ import org.tensorflow.types.TBfloat16; import org.tensorflow.types.TFloat16; import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; -import org.tensorflow.types.TInt64; -import org.tensorflow.types.family.TFloating; -import org.tensorflow.types.family.TIntegral; import org.tensorflow.types.family.TNumber; import java.util.ArrayList; @@ -29,7 +25,7 @@ public class SparseSoftmaxCrossEntropyWithLogits { /** - * Computes sparse softmax cross entropy between {@code logits} and {@code labels}. + * Computes sparse softmax cross entropy between logits and labels. * *

      Measures the probability error in discrete classification tasks in which the classes are * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is @@ -38,61 +34,56 @@ public class SparseSoftmaxCrossEntropyWithLogits { *

      NOTE: * *

      For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the {@code labels} vector must provide a single specific index for - * the true class for each row of {@code logits} (each minibatch entry). For soft softmax - * classification with a probability distribution for each entry, {@link + * classes are not allowed, and the labels vector must provide a single specific + * index for the true class for each row of logits (each minibatch entry). For soft + * softmax classification with a probability distribution for each entry, {@link * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. * *

      WARNING: * - *

      This op expects unscaled logits, since it performs a {@code softmax} on {@code logits } - * internally for efficiency. Do not call this op with the output of {@code softmax}, as it will - * produce incorrect results. + *

      This op expects unscaled logits, since it performs a softmax on logits + * internally for efficiency. Do not call this op with the output of softmax, + * as it will produce incorrect results. * - *

      A common use case is to have logits of shape {@code [batchSize, numClasses]} and have labels - * of shape {@code [batchSize]}, but higher dimensions are supported, in which case the {@code - * dim}-th dimension is assumed to be of size {@code numClasses}. {@code logits} must have the - * {@code dataType} of {@code TFloat16}, {@code TFloat32} , or {@code TFloat64}, and {@code - * labels} must have the dtype of {@code TInt32} or {@code TInt64}. + *

      A common use case is to have logits of shape [batchSize, numClasses] and have + * labels of shape [batchSize], but higher dimensions are supported, in which case + * the dim-th dimension is assumed to be of size numClasses. + * logits must have the dataType of TFloat16, TFloat32 + * , or TFloat64, and labels must have the dtype of TInt32 + * or TInt64. * * @param scope current scope - * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r } is - * rank of {@code labels} and result) and the dataType is {@code TInt32} or {@code TInt64}. - * Each entry in {@code labels} must be an index in {@code [0, numClasses)}. Other values will - * raise an exception when this op is run on CPU, and return {@code NaN} for corresponding - * loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., - * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, or {@code - * TFloat64}. These activation energies are interpreted as unnormalized log probabilities. - * @param the data type for the labels - * @param the data tyoe for the loss and logits. - * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if - * the rank of the labels is not equal to the rank of the logits minus one. + * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r + * is rank of labels and result) and the dataType is TInt32 + * or TInt64. Each entry in labels must be an index in [0, + * numClasses). Other values will raise an exception when this op is run on CPU, and + * return NaN for corresponding loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., + * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, + * or TFloat64. These activation energies are interpreted as unnormalized log + * probabilities. + * @param the data type for the labels + * @param the data tyoe for the loss and logits. + * @return A Tensor of the same shape as labels and of the same type as + * logits with the softmax cross entropy loss. + * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank + * of the labels is not equal to the rank of the logits minus one. 
*/ @SuppressWarnings("unchecked") @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits") public static - Operand sparseSoftmaxCrossEntropyWithLogits( - Scope scope, Operand labels, Operand logits) { + Operand sparseSoftmaxCrossEntropyWithLogits( + Scope scope, Operand labels, Operand logits) { scope = scope.withSubScope("SparseSoftmaxCrossEntropyWithLogits"); - Operand preciseLogits; + Operand preciseLogits; if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { preciseLogits = Cast.create(scope, logits, TFloat32.class); - } else if (TFloating.class.isAssignableFrom(logits.type())) { - preciseLogits = (Operand) logits; } else { - preciseLogits = Cast.create(scope, logits, TFloat64.class); + preciseLogits = logits; } - Operand iLabels; - if (TIntegral.class.isAssignableFrom(labels.type())) { - iLabels = (Operand) labels; - } else { - iLabels = Cast.create(scope, labels, TInt64.class); - } - Shape labelsStaticShape = iLabels.shape(); + Shape labelsStaticShape = labels.shape(); org.tensorflow.op.core.Shape labelsShape = - org.tensorflow.op.core.Shape.create(scope, iLabels); + org.tensorflow.op.core.Shape.create(scope, labels); Shape logitsShape = logits.shape(); Shape logitsShortened = logitsShape.take(logitsShape.numDimensions() - 1); @@ -123,13 +114,13 @@ Operand sparseSoftmaxCrossEntropyWithLogits( if (logitsShape.numDimensions() == 2) { org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( - scope, preciseLogits, iLabels); + scope, preciseLogits, labels); Operand cost = smax.loss(); if (cost.type() != logits.type()) { return Cast.create(scope, cost, logits.type()); } else { // Unchecked cast already checked with previous if - return (Operand) cost; + return (Operand) cost; } } @@ -141,7 +132,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( scope, Equal.create( scope, - org.tensorflow.op.core.Shape.create(scope, iLabels), + org.tensorflow.op.core.Shape.create(scope, labels), Shapes.take( scope, org.tensorflow.op.core.Shape.create(scope, logits), @@ -158,19 +149,19 @@ Operand sparseSoftmaxCrossEntropyWithLogits( long numClassses = logitsShape.size(-1); preciseLogits = Reshape.create(scope, preciseLogits, Constant.arrayOf(scope, -1L, numClassses)); - iLabels = Reshape.create(scope, iLabels, Constant.scalarOf(scope, -1)); + labels = Reshape.create(scope, labels, Constant.scalarOf(scope, -1)); scope.withControlDependencies(shapeChecks); // call raw op org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits smax = org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits.create( - scope, preciseLogits, iLabels); + scope, preciseLogits, labels); Operand cost = smax.loss(); cost = Reshape.create(scope, cost, labelsShape); if (cost.type() != logits.type()) { return Cast.create(scope, cost, logits.type()); } else { // Unchecked cast already checked with previous if - return (Operand) cost; + return (Operand) cost; } } } From 4468be23a567ed77a9509c915e0236b9d0ed34ad Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Fri, 26 Mar 2021 18:02:55 -0400 Subject: [PATCH 54/60] Added FrameworkOps analogous to Ops. Added NnOps and SetOps as groups. Fixed MetricsHelper and Losses to use the bew FrameworkOps. Moved SetsOps to framework.op. 
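A hedged sketch of how the FrameworkOps facade introduced by this commit appears to be used, inferred from the Losses and SetOpsTest changes in the diff below; the concrete tensors and the eager setup are invented for illustration.

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.framework.op.FrameworkOps;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TInt32;

public class FrameworkOpsSketch {
  public static void main(String[] args) {
    try (EagerSession env = EagerSession.create()) {
      Ops tf = Ops.create(env);
      // The framework facade wraps the core Ops and exposes grouped ops such as sets and nn.
      FrameworkOps fops = FrameworkOps.create(tf);
      Operand<TInt32> a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}});
      Operand<TInt32> b = tf.constant(new int[][] {{1, 9}, {1, 5}});
      // Row-wise set intersection, mirroring SetOpsTest below; rows {1, 9} and {} (zero-padded).
      Operand<TInt32> both = fops.sets.intersection(a, b);
      System.out.println(both.shape()); // [2, 2]
    }
  }
}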
--- .../tensorflow/framework/losses/Losses.java | 3 - .../SparseSoftmaxCrossEntropyWithLogits.java | 3 +- .../framework/metrics/impl/SetOpsTest.java | 126 ++++++++++++++++++ 3 files changed, 127 insertions(+), 5 deletions(-) create mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index 6700f2569f0..bcabd968d3f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -27,7 +27,6 @@ import org.tensorflow.op.math.Softplus; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; -import org.tensorflow.types.family.TFloating; import org.tensorflow.types.family.TNumber; import static org.tensorflow.framework.utils.CastHelper.cast; @@ -338,7 +337,6 @@ public static Operand categoricalHinge( */ public static Operand cosineSimilarity( Ops tf, Operand labels, Operand predictions, int[] axis) { - FrameworkOps fops = FrameworkOps.create(tf); Operand tLabels = cast(tf, labels, predictions.type()); LossTuple lossTuple = LossesHelper.squeezeOrExpandDimensions(tf, tLabels, predictions, null); predictions = lossTuple.getTarget(); @@ -521,7 +519,6 @@ public static Operand sparseCategoricalCrossentropy( boolean fromLogits, int axis) { Class predictionType = predictions.type(); - FrameworkOps fop = FrameworkOps.create(tf); Operand epsilonConst = cast(tf, tf.constant(EPSILON), predictionType); Operand one = cast(tf, tf.constant(1), predictionType); Operand oneMinusEpsilonConst = tf.math.sub(one, epsilonConst); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 64faa7c5d70..75766cf9bfb 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -64,8 +64,7 @@ public class SparseSoftmaxCrossEntropyWithLogits { * probabilities. * @param the data type for the labels * @param the data tyoe for the loss and logits. - * @return A Tensor of the same shape as labels and of the same type as - * logits with the softmax cross entropy loss. + * @return the loss * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank * of the labels is not equal to the rank of the logits minus one. 
*/ diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java new file mode 100644 index 00000000000..e10f016bd94 --- /dev/null +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java @@ -0,0 +1,126 @@ +package org.tensorflow.framework.metrics.impl; + +import org.junit.jupiter.api.Test; +import org.tensorflow.Operand; +import org.tensorflow.framework.op.FrameworkOps; +import org.tensorflow.framework.op.SetsOps; +import org.tensorflow.framework.utils.TestSession; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Ops; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.TUint8; +import org.tensorflow.types.family.TType; + +import java.util.Arrays; +import java.util.List; + +import static org.tensorflow.framework.utils.CastHelper.cast; + +class SetOpsTest { + + private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; + + List> types = Arrays.asList(TInt32.class, TInt64.class, TUint8.class); + + @Test + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSetIntersectionMultirow2() { + + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}}); + Operand b = tf.constant(new int[][] {{1, 9}, {1, 5}}); + int[][] expected = new int[][] {{1, 9}, {0, 0}}; + Shape expectedShape = Shape.of(2, 2); + for (Class type : types) { + Operand aa = cast(tf, a, type); + Operand bb = cast(tf, b, type); + Operand intersection = fops.sets.intersection(aa, bb); + session.evaluate(cast(tf, tf.constant(expected), type), intersection); + session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); + } + } + } + + @Test + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSetIntersectionDuplicates2d() { + + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand a = tf.constant(new int[][] {{1, 1, 3}}); + Operand b = tf.constant(new int[][] {{1, 1}}); + int[][] expected = {{1}}; + Shape expectedShape = Shape.of(1, 1); + for (Class type : types) { + Operand aa = cast(tf, a, type); + Operand bb = cast(tf, b, type); + Operand intersection = fops.sets.intersection(aa, bb); + + session.evaluate(cast(tf, tf.constant(expected), type), intersection); + + session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); + } + } + } + + @Test + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testDenseSetDifferenceMultirow2d() { + + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand a = tf.constant(new int[][] {{1, 5, 9}, {4, 5, 3}}); + Operand b = tf.constant(new int[][] {{1, 2, 6}, {1, 2, 2}}); + + for (Class type : types) { + Operand aa = cast(tf, a, type); + Operand bb = cast(tf, b, type); + int[][] expected = {{5, 9, 0}, {3, 4, 5}}; + // a- b + Shape expectedShape = Shape.of(2, 3); + Operand intersection = fops.sets.difference(aa, bb); + session.evaluate(cast(tf, tf.constant(expected), type), intersection); + 
session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); + + // b - a + expected = new int[][] {{2, 6}, {1, 2}}; + expectedShape = Shape.of(2, 2); + intersection = fops.sets.difference(aa, bb, false); + + session.evaluate(cast(tf, tf.constant(expected), type), intersection); + session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); + } + } + } + + @Test + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testDenseUnionMultirow2d() { + + for (TestSession.Mode tfMode : tfModes) + try (TestSession session = TestSession.createTestSession(tfMode)) { + Ops tf = session.getTF(); + FrameworkOps fops = FrameworkOps.create(tf); + Operand a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}}); + Operand b = tf.constant(new int[][] {{1, 9}, {1, 2}}); + int[][] expected = new int[][] {{5, 0}, {3, 4}}; + for (Class type : types) { + Operand aa = cast(tf, a, type); + Operand bb = cast(tf, b, type); + Shape expectedShape = Shape.of(2, 2); + // a- b + Operand intersection = fops.sets.difference(aa, bb); + session.evaluate(cast(tf, tf.constant(expected), type), intersection); + session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class)); + } + } + } +} From eb64cd0776079c6215b2066f4cc3c1ec18213956 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 15:36:41 -0400 Subject: [PATCH 55/60] Move l2Normalize to MathOps --- .../main/java/org/tensorflow/framework/losses/Losses.java | 1 + .../main/java/org/tensorflow/framework/op/FrameworkOps.java | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index bcabd968d3f..82ba43bd317 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -337,6 +337,7 @@ public static Operand categoricalHinge( */ public static Operand cosineSimilarity( Ops tf, Operand labels, Operand predictions, int[] axis) { + FrameworkOps fops = FrameworkOps.create(tf); Operand tLabels = cast(tf, labels, predictions.type()); LossTuple lossTuple = LossesHelper.squeezeOrExpandDimensions(tf, tLabels, predictions, null); predictions = lossTuple.getTarget(); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index d9e3eec4b21..c0177a885eb 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -118,9 +118,6 @@ public final Ops coreOps() { * Returns an API that builds operations with the provided name prefix. * *

      @link Scope#withSubScope(String)} - * - * @param childScopeName the name of the child scope - * @return the FrameworkOps */ public FrameworkOps withSubScope(String childScopeName) { return new FrameworkOps(scope.withSubScope(childScopeName)); @@ -130,9 +127,6 @@ public FrameworkOps withSubScope(String childScopeName) { * Returns an API that uses the provided name for an op. * *

      {@link Scope#withName(String)} - * - * @param opName the name of the scope - * @return the FrameworkOps */ public FrameworkOps withName(String opName) { return new FrameworkOps(scope.withName(opName)); From 134a11d885376aea2f758d66da3451852516474a Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 27 Mar 2021 18:50:26 -0400 Subject: [PATCH 56/60] Reformat code, fix javadocs --- .../op/nn/SigmoidCrossEntropyWithLogits.java | 14 +++-- .../SparseSoftmaxCrossEntropyWithLogits.java | 52 +++++++++---------- 2 files changed, 32 insertions(+), 34 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index b55385839d3..fc3f7739363 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -3,8 +3,6 @@ import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.Operator; import org.tensorflow.op.core.Select; import org.tensorflow.op.core.ZerosLike; import org.tensorflow.op.dtypes.Cast; @@ -18,17 +16,17 @@ import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -//@Operator(group = "nn") +// @Operator(group = "nn") public class SigmoidCrossEntropyWithLogits { /** - * Computes sigmoid cross entropy given logits. + * Computes sigmoid cross entropy given {@code logits}. * *

      Measures the probability error in discrete classification tasks in which each class is * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. * - *

      For brevity, let x = logits, z = labels. The logistic loss in + *

      For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in * pseudo-code is * *

      @@ -40,7 +38,7 @@ public class SigmoidCrossEntropyWithLogits {
          *  = x - x * z + log(1 + exp(-x))
          * 
      * - *

      For x < 0, to avoid overflow in exp(-x), we reformulate the above + *

      For {@code x < 0}, to avoid overflow in {@code exp(-x)}, we reformulate the above * *

          * x - x * z + log(1 + exp(-x))
      @@ -55,7 +53,7 @@ public class SigmoidCrossEntropyWithLogits {
          *   max(x, 0) - x * z + log(1 + exp(-abs(x)))
          * 
      * - *
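To make the rewrite concrete, here is a small plain-Java sketch of the stable expression above; it is illustrative only and not part of the patched class:

```java
// max(x, 0) - x * z + log(1 + exp(-abs(x))): the numerically stable form of the logistic loss
static double stableLogisticLoss(double x, double z) {
  return Math.max(x, 0.0) - x * z + Math.log1p(Math.exp(-Math.abs(x)));
}
// For very negative x this never evaluates exp(-x), which would overflow,
// while agreeing with x - x * z + log(1 + exp(-x)) wherever both are finite.
```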

      logits and labels must have the same type and shape. * *

      logits and labels must have the same type and shape. + *

      {@code logits} and {@code labels} must have the same type and shape. * *

      * @@ -66,7 +64,7 @@ public class SigmoidCrossEntropyWithLogits { * @return the component-wise logistic losses. * @throws IllegalArgumentException if logits' and labels' do not have the same shape */ - //@Endpoint(name = "sigmoidCrossEntropyWithLogits") + // @Endpoint(name = "sigmoidCrossEntropyWithLogits") public static Operand sigmoidCrossEntropyWithLogits( Scope scope, Operand labels, Operand logits) { if (!isCompatible(labels.shape(), logits.shape())) { diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 75766cf9bfb..0b2d29d6092 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -25,7 +25,7 @@ public class SparseSoftmaxCrossEntropyWithLogits { /** - * Computes sparse softmax cross entropy between logits and labels. + * Computes sparse softmax cross entropy between {@code logits} and {@code labels}. * *

      Measures the probability error in discrete classification tasks in which the classes are * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is @@ -34,45 +34,45 @@ public class SparseSoftmaxCrossEntropyWithLogits { *

      NOTE: * *

      For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the labels vector must provide a single specific - * index for the true class for each row of logits (each minibatch entry). For soft + * classes are not allowed, and the {@code labels} vector must provide a single specific + * index for the true class for each row of {@code logits} (each minibatch entry). For soft * softmax classification with a probability distribution for each entry, {@link * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}. * *

      WARNING: * - *

      This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of softmax, + *

      This op expects unscaled logits, since it performs a {@code softmax} on {@code logits + * } internally for efficiency. Do not call this op with the output of {@code softmax}, * as it will produce incorrect results. * - *

      A common use case is to have logits of shape [batchSize, numClasses] and have - * labels of shape [batchSize], but higher dimensions are supported, in which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, TFloat32 - * , or TFloat64, and labels must have the dtype of TInt32 - * or TInt64. + *

      A common use case is to have logits of shape {@code [batchSize, numClasses]} and have + * labels of shape {@code [batchSize]}, but higher dimensions are supported, in which case + * the {@code dim}-th dimension is assumed to be of size {@code numClasses}. {@code + * logits} must have the {@code dataType} of {@code TFloat16}, {@code TFloat32} + * , or {@code TFloat64}, and {@code labels} must have the dtype of {@code TInt32} + * or {@code TInt64}. * * @param scope current scope - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log + * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r + * } is rank of {@code labels} and result) and the dataType is {@code TInt32} + * or {@code TInt64}. Each entry in {@code labels} must be an index in {@code [0, + * numClasses)}. Other values will raise an exception when this op is run on CPU, and + * return {@code NaN} for corresponding loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ..., + * d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, + * or {@code TFloat64}. These activation energies are interpreted as unnormalized log * probabilities. - * @param the data type for the labels - * @param the data tyoe for the loss and logits. + * @param the data type for the labels + * @param the data tyoe for the loss and logits. * @return the loss - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank + * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank * of the labels is not equal to the rank of the logits minus one. 
*/ @SuppressWarnings("unchecked") @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits") public static - Operand sparseSoftmaxCrossEntropyWithLogits( - Scope scope, Operand labels, Operand logits) { + Operand sparseSoftmaxCrossEntropyWithLogits( + Scope scope, Operand labels, Operand logits) { scope = scope.withSubScope("SparseSoftmaxCrossEntropyWithLogits"); Operand preciseLogits; if (logits.asOutput().type() == TFloat16.class || logits.asOutput().type() == TBfloat16.class) { @@ -119,7 +119,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( return Cast.create(scope, cost, logits.type()); } else { // Unchecked cast already checked with previous if - return (Operand) cost; + return (Operand) cost; } } @@ -160,7 +160,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( return Cast.create(scope, cost, logits.type()); } else { // Unchecked cast already checked with previous if - return (Operand) cost; + return (Operand) cost; } } } From 1f9626c5ac94224546014626e39d1ee30d8533d8 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sun, 2 May 2021 19:13:00 -0400 Subject: [PATCH 57/60] Update FrameworkOps.java --- .../src/main/java/org/tensorflow/framework/op/FrameworkOps.java | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index c0177a885eb..6c8ed05ad66 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -62,6 +62,7 @@ private FrameworkOps(Ops coreOps) { sets = new SetOps(this); math = new MathOps(this); linalg = new LinalgOps(this); + } /** From 7860a719dd7b4fee1b61f5abba886d64e0b20582 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Mon, 3 May 2021 10:05:28 -0400 Subject: [PATCH 58/60] Fix unusual regression error in confustion matrix. Needed to reduceAll on the AssertThats. This change is unrelated to this PR, but the bug showed up here. --- .../org/tensorflow/framework/op/MathOps.java | 300 +++++++++--------- 1 file changed, 151 insertions(+), 149 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java index 4c2210feb9c..8fda58806ca 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -56,11 +56,13 @@ import org.tensorflow.op.math.Square; import org.tensorflow.op.math.Sub; import org.tensorflow.types.TBfloat16; +import org.tensorflow.types.TBool; import org.tensorflow.types.TFloat16; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TFloating; import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; import java.util.ArrayList; import java.util.Arrays; @@ -110,27 +112,27 @@ public Operand l2Normalize(Operand x, int[] axis) { * Computes the confusion matrix from predictions and labels. * *

      The matrix columns represent the prediction labels and the rows represent the real labels. - * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the number of valid * labels for a given classification task. Both prediction and labels must be 1-D arrays of the * same shape in order for this function to work. * - *

      If `num_classes` is `None`, then `num_classes` will be set to one plus the maximum value in + *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in * either predictions or labels. Class labels are expected to start at 0. For example, if - * `num_classes` is 3, then the possible labels would be `[0, 1, 2]`. + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. * - *

      If `weights` is not `None`, then each prediction contributes its corresponding weight to the + *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to the * total value of the confusion matrix cell. * *

      For example: * - *

      +   * 
      {@code
          *     fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
          *         [[0 0 0 0 0]
          *          [0 0 1 0 0]
          *          [0 0 1 0 0]
          *          [0 0 0 0 0]
          *          [0 0 0 0 1]]
      -   * 
      + * }
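A hedged Java rendering of the worked example above, using the `confusionMatrix` signature shown in this `MathOps` diff (the eager setup is an assumption that mirrors the tests earlier in the series):

```java
Ops tf = Ops.create();
FrameworkOps fops = FrameworkOps.create(tf);
Operand<TInt64> labels = tf.constant(new long[] {1, 2, 4});
Operand<TInt64> predictions = tf.constant(new long[] {2, 2, 4});
// 5x5 result as in the example: rows are the real labels, columns are the predicted labels
Operand<TInt64> cm = fops.math.confusionMatrix(labels, predictions);
```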
      * *

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 * confusion matrix. @@ -152,27 +154,27 @@ public Operand confusionMatrix(Operand labels, Operand * Computes the confusion matrix from predictions and labels. * *

      The matrix columns represent the prediction labels and the rows represent the real labels. - * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the number of valid * labels for a given classification task. Both prediction and labels must be 1-D arrays of the * same shape in order for this function to work. * - *

      If `num_classes` is `None`, then `num_classes` will be set to one plus the maximum value in + *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in * either predictions or labels. Class labels are expected to start at 0. For example, if - * `num_classes` is 3, then the possible labels would be `[0, 1, 2]`. + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. * - *

      If `weights` is not `None`, then each prediction contributes its corresponding weight to the + *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to the * total value of the confusion matrix cell. * *

      For example: * - *

      +   * 
      {@code
          *     fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
          *         [[0 0 0 0 0]
          *          [0 0 1 0 0]
          *          [0 0 1 0 0]
          *          [0 0 0 0 0]
          *          [0 0 0 0 1]]
      -   * 
      + * }
      * *

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 * confusion matrix. @@ -196,27 +198,27 @@ public Operand confusionMatrix( * Computes the confusion matrix from predictions and labels. * *

      The matrix columns represent the prediction labels and the rows represent the real labels. - * The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the number of valid * labels for a given classification task. Both prediction and labels must be 1-D arrays of the * same shape in order for this function to work. * - *

      If `num_classes` is `None`, then `num_classes` will be set to one plus the maximum value in + *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in * either predictions or labels. Class labels are expected to start at 0. For example, if - * `num_classes` is 3, then the possible labels would be `[0, 1, 2]`. + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. * - *

      If `weights` is not `None`, then each prediction contributes its corresponding weight to the + *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to the * total value of the confusion matrix cell. * *

      For example: * - *

      +   * 
      {@code
          *     fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
          *         [[0 0 0 0 0]
          *          [0 0 1 0 0]
          *          [0 0 1 0 0]
          *          [0 0 0 0 0]
          *          [0 0 0 0 1]]
      -   * 
      + * }
      * *

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 * confusion matrix. @@ -277,19 +279,21 @@ public Operand confusionMatrix( one); } else { lNumClasses = Cast.create(lScope, numClasses, TInt64.class); + Operand less = Less.create(lScope, lLabels, lNumClasses); AssertThat labelsLess = AssertThat.create( lScope, - Less.create(lScope, lLabels, lNumClasses), + ReduceAll.create(scope, less, allAxes(less), ReduceAll.keepDims(false)), Collections.singletonList(Constant.scalarOf(lScope, "labels out of bounds"))); lLabels = Identity.create( lScope.withControlDependencies(Collections.singletonList(labelsLess)), lLabels); + less = Less.create(lScope, lPredictions, lNumClasses); AssertThat predictionsLess = AssertThat.create( lScope, - Less.create(lScope, lPredictions, lNumClasses), + ReduceAll.create(scope, less, allAxes(less), ReduceAll.keepDims(false)), Collections.singletonList(Constant.scalarOf(lScope, "predictions out of bounds"))); lPredictions = Identity.create( @@ -319,12 +323,12 @@ public Operand confusionMatrix( /** * Squeeze last dim if ranks differ from expected by exactly 1. * - * @param labels Label values, a Operand whose dimensions match predictions - * . - * @param predictions Predicted values, a Tensor of arbitrary dimensions. - * @param expectedRankDiff Expected result of rank(predictions) - rank(labels). + * @param labels Label values, a {@code Operand} whose dimensions match {@code predictions + * }. + * @param predictions Predicted values, a {@code Tensor} of arbitrary dimensions. + * @param expectedRankDiff Expected result of {@code rank(predictions) - rank(labels)}. * @param the data type for the labels, predictions and result - * @return labels and predictions, possibly with last dim squeezed. + * @return {@code labels} and {@code predictions}, possibly with last dim squeezed. */ public LossTuple removeSqueezableDimensions( Operand labels, Operand predictions, int expectedRankDiff) { @@ -372,10 +376,9 @@ public LossTuple removeSqueezableDimensions( * Creates an Operand that has all axes contained in the Operand's shape. * * @param op the Operand - * @param THe Data type for the Operand * @return an Operand that has all axes contained in the Operand's shape.. */ - public Operand allAxes(Operand op) { + public Operand allAxes(Operand op) { int rank = op.shape().numDimensions(); if (rank != Shape.UNKNOWN_SIZE) { int[] axes = new int[rank]; @@ -392,18 +395,18 @@ public Operand allAxes(Operand op) { /** * Transpose and reshape the input for contraction op. * - *

      This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul` using - * `array_ops.transpose` and `array_ops.reshape`. The method takes a tensor and performs the + *

      This method is helpful in reducing {@code math.tensordot} to {@code math_ops.matmul} using + * {@code array_ops.transpose} and {@code array_ops.reshape}. The method takes a tensor and performs the * correct transpose and reshape operation for a given set of indices. It returns the reshaped * tensor as well as a list of indices necessary to reshape the tensor again after matrix * multiplication. * * @param the type of Operand * @param a the Tensor - * @param axis unique indices specifying valid axes of `a`. + * @param axis unique indices specifying valid axes of {@code a}. * @param flipped whether to flip the dimensions or not * @return A tuple (reshapedA, freeDims, freeDimsStatic) where reshapedA is a reshaped to allow - * contraction via matmul, freeDims` is a TInt32 Operand, depending on whether the shape of a + * contraction via matmul, freeDims is a TInt32 Operand, depending on whether the shape of a * is fully specified, and freeDimsStatic is either a list of integers and null values, or * None, representing the inferred shape of the free dimensions */ @@ -703,50 +706,48 @@ private Operand[] tensordotAxes(Operand a, Operan * Tensor contraction of a and b along specified axes and outer product. *

      * Tensordot (also known as tensor contraction) sums the product of elements - * from a and b` over the indices specified by - * a_axes and b_axes. The lists - * a_axes and b_axes specify those pairs of axes - * along which to contract the tensors. The axis a_axes[i] of - * a must have the same dimension as axis - * b_axes[i] of b for all i in - * range(0, len(a_axes)). The lists - * a_axes and b_axes must have identical length + * from {@code a} and {@code b} over the indices specified by + * {@code a_axes} and {@code b_axes}. The lists + * {@code a_axes} and {@code b_axes} specify those pairs of axes + * along which to contract the tensors. The axis {@code a_axes[i]} of + * {@code a} must have the same dimension as axis + * {@code b_axes[i]} of {@code b} for all {@code i} in + * {@code range(0, len(a_axes))}. The lists + * {@code a_axes} and {@code b_axes} must have identical length * and consist of unique integers that specify valid axes for each of the * tensors. Additionally outer product is supported by passing - * axes=0. + * {@code axes=0}. *

      - * This operation corresponds to numpy.tensordot(a, b, axes). + * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. *

      - * Example 1: When a and b are matrices (order 2), - * the case axes = 1 is equivalent to matrix multiplication. + * Example 1: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes = 1} is equivalent to matrix multiplication. *

      - * Example 2: When a and`b are matrices (order 2), + * Example 2: When {@code a} and {@code b} are matrices (order 2), * the case - * axes = [[1], [0]] is equivalent to matrix multiplication. + * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. *

      - * Example 3: When a and b are matrices (order 2), - * the case axes=0 gives the outer product, a tensor of order + * Example 3: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes=0} gives the outer product, a tensor of order * 4. *

      * Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor * cjklm whose entry corresponding to the indices * (j,k,l,m) is given by: - *

      * cjklm = Σi aijk * blmi . *
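The contraction rule described in this Javadoc can be exercised with the integer-axis overload whose parameters follow; a hedged sketch (`tf` and `fops` created as in the earlier sketches, constants purely illustrative):

```java
Ops tf = Ops.create();
FrameworkOps fops = FrameworkOps.create(tf);
Operand<TFloat32> a = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
Operand<TFloat32> b = tf.constant(new float[][] {{5f, 6f}, {7f, 8f}});
// axis = 1 sums over the last axis of a and the first axis of b,
// i.e. Example 1 above: the result equals the ordinary matrix product of the two order-2 tensors
Operand<TFloat32> c = fops.math.tensordot(a, b, 1);
```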

      - * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). - *

      + * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. * - * @param a `Operand` of type `float32` or `float64`. - * @param b `Operand` with the same type as `a`. + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. * @param axis sum over the last N axes of a and the - * first N axes of b in order. If `axes=0`, computes the outer - * product between `a` and `b`. + * first N axes of b in order. If {@code axis=0}, computes the outer + * product between {@code a} and {@code b}. * @param the datatype of the Operands, must be either TFloat32 or * TFloat64 - * @return A `Operand` with the same type as `a`. + * @return A {@code Operand} with the same type as {@code a}. * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type */ @Endpoint(name = "tensordot") @@ -762,53 +763,53 @@ public Operand tensordot(Operand a, Operand b, in * Tensor contraction of a and b along specified axes and outer product. *

      * Tensordot (also known as tensor contraction) sums the product of elements - * from a and b` over the indices specified by - * a_axes and b_axes. The lists - * a_axes and b_axes specify those pairs of axes - * along which to contract the tensors. The axis a_axes[i] of - * a must have the same dimension as axis - * b_axes[i] of b for all i in - * range(0, len(a_axes)). The lists - * a_axes and b_axes must have identical length + * from {@code a} and {@code b} over the indices specified by + * {@code a_axes} and {@code b_axes}. The lists + * {@code a_axes} and {@code b_axes} specify those pairs of axes + * along which to contract the tensors. The axis {@code a_axes[i]} of + * {@code a} must have the same dimension as axis + * {@code b_axes[i]} of {@code b} for all {@code i} in + * {@code range(0, len(a_axes))}. The lists + * {@code a_axes} and {@code b_axes} must have identical length * and consist of unique integers that specify valid axes for each of the * tensors. Additionally outer product is supported by passing - * axes=0. + * {@code axes=0}. *

      - * This operation corresponds to numpy.tensordot(a, b, axes). + * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. *

      - * Example 1: When a and b are matrices (order 2), - * the case axes = 1 is equivalent to matrix multiplication. + * Example 1: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes = 1} is equivalent to matrix multiplication. *

      - * Example 2: When a and`b are matrices (order 2), + * Example 2: When {@code a} and {@code b} are matrices (order 2), * the case - * axes = [[1], [0]] is equivalent to matrix multiplication. + * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. *

      - * Example 3: When a and b are matrices (order 2), - * the case axes=0 gives the outer product, a tensor of order + * Example 3: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes=0} gives the outer product, a tensor of order * 4. *

      * Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor * cjklm whose entry corresponding to the indices * (j,k,l,m) is given by: *

      * cjklm = Σi aijk * blmi . *

      - * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. *

      * - * @param a `Operand` of type `float32` or `float64`. - * @param b `Operand` with the same type as `a`. + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. * @param axes If axes is a scalar, sum over the last N axes of a and the * first N axes of b in order. If axes is a list, the first and second row * contain the set of unique integers specifying axes along which the - * contraction is computed, for `a` and `b`, respectively. The number of - * axes for `a` and `b` must be equal. If `axes=0`, computes the outer - * product between `a` and `b`. + * contraction is computed, for {@code a} and {@code b}, respectively. The number of + * axes for {@code a} and {@code b} must be equal. If {@code axis=0}, computes the outer + * product between {@code a} and {@code b}. * @param the datatype of the Operands, must be either TFloat32 or * TFloat64 - * @return A `Operand` with the same type as `a`. + * @return A {@code Operand} with the same type as {@code a}. * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type */ @Endpoint(name = "tensordot") @@ -826,51 +827,51 @@ public Operand tensordot( * Tensor contraction of a and b along specified axes and outer product. *

      * Tensordot (also known as tensor contraction) sums the product of elements - * from a and b` over the indices specified by - * a_axes and b_axes. The lists - * a_axes and b_axes specify those pairs of axes - * along which to contract the tensors. The axis a_axes[i] of - * a must have the same dimension as axis - * b_axes[i] of b for all i in - * range(0, len(a_axes)). The lists - * a_axes and b_axes must have identical length + * from {@code a} and {@code b} over the indices specified by + * {@code a_axes} and {@code b_axes}. The lists + * {@code a_axes} and {@code b_axes} specify those pairs of axes + * along which to contract the tensors. The axis {@code a_axes[i]} of + * {@code a} must have the same dimension as axis + * {@code b_axes[i]} of {@code b} for all {@code i} in + * {@code range(0, len(a_axes))}. The lists + * {@code a_axes} and {@code b_axes} must have identical length * and consist of unique integers that specify valid axes for each of the * tensors. Additionally outer product is supported by passing - * axes=0. + * {@code axes=0}. *

      - * This operation corresponds to numpy.tensordot(a, b, axes). + * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. *

      - * Example 1: When a and b are matrices (order 2), - * the case axes = 1 is equivalent to matrix multiplication. + * Example 1: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes = 1} is equivalent to matrix multiplication. *

      - * Example 2: When a and`b are matrices (order 2), + * Example 2: When {@code a} and{@code b} are matrices (order 2), * the case - * axes = [[1], [0]] is equivalent to matrix multiplication. + * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. *

      - * Example 3: When a and b are matrices (order 2), - * the case axes=0 gives the outer product, a tensor of order + * Example 3: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes=0} gives the outer product, a tensor of order * 4. *

      * Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor * cjklm whose entry corresponding to the indices * (j,k,l,m) is given by: *

      * cjklm = Σi aijk * blmi . *

      - * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. *

      * - * @param a `Operand` of type `float32` or `float64`. - * @param b `Operand` with the same type as `a`. + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. * @param axes the first and second row * contain the set of unique integers specifying axes along which the - * contraction is computed, for `a` and `b`, respectively. The number of - * axes for `a` and `b` must be equal. I + * contraction is computed, for {@code a} and {@code b}, respectively. The number of + * axes for {@code a} and {@code b} must be equal. I * @param the datatype of the Operands, must be either TFloat32 or * TFloat64 - * @return A `Operand` with the same type as `a`. + * @return A {@code Operand} with the same type as {@code a}. * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type */ @Endpoint(name = "tensordot") @@ -887,51 +888,51 @@ public Operand tensordot(Operand a, Operand b, in * Tensor contraction of a and b along specified axes and outer product. *

      * Tensordot (also known as tensor contraction) sums the product of elements - * from a and b` over the indices specified by - * a_axes and b_axes. The lists - * a_axes and b_axes specify those pairs of axes - * along which to contract the tensors. The axis a_axes[i] of - * a must have the same dimension as axis - * b_axes[i] of b for all i in - * range(0, len(a_axes)). The lists - * a_axes and b_axes must have identical length + * from {@code a} and {@code b} over the indices specified by + * {@code a_axes} and {@code b_axes}. The lists + * {@code a_axes} and {@code b_axes} specify those pairs of axes + * along which to contract the tensors. The axis {@code a_axes[i]} of + * {@code a} must have the same dimension as axis + * {@code b_axes[i]} of {@code b} for all {@code i} in + * {@code range(0, len(a_axes))}. The lists + * {@code a_axes} and {@code b_axes} must have identical length * and consist of unique integers that specify valid axes for each of the * tensors. Additionally outer product is supported by passing - * axes=0. + * {@code axes=0}. *

      - * This operation corresponds to numpy.tensordot(a, b, axes). + * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. *

      - * Example 1: When a and b are matrices (order 2), - * the case axes = 1 is equivalent to matrix multiplication. + * Example 1: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes = 1} is equivalent to matrix multiplication. *

      - * Example 2: When a and`b are matrices (order 2), + * Example 2: When {@code a} and{@code b} are matrices (order 2), * the case - * axes = [[1], [0]] is equivalent to matrix multiplication. + * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. *

      - * Example 3: When a and b are matrices (order 2), - * the case axes=0 gives the outer product, a tensor of order + * Example 3: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes=0} gives the outer product, a tensor of order * 4. *

      * Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor * cjklm whose entry corresponding to the indices * (j,k,l,m) is given by: *

      * cjklm = Σi aijk * blmi . *

      - * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. *

      * - * @param a `Operand` of type `float32` or `float64`. - * @param b `Operand` with the same type as `a`. + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. * @param axes the first and second row * contain the set of unique integers specifying axes along which the - * contraction is computed, for `a` and `b`, respectively. The number of - * axes for `a` and `b` must be equal. I + * contraction is computed, for {@code a} and {@code b}, respectively. The number of + * axes for {@code a} and {@code b} must be equal. I * @param the datatype of the Operands, must be either TFloat32 or * TFloat64 - * @return A `Operand` with the same type as `a`. + * @return A {@code Operand} with the same type as {@code a}. * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type */ @Endpoint(name = "tensordot") @@ -948,49 +949,49 @@ public Operand tensordot(Operand a, Operand b, in * Tensor contraction of a and b along specified axes and outer product. *

      * Tensordot (also known as tensor contraction) sums the product of elements - * from a and b` over the indices specified by - * a_axes and b_axes. The lists - * a_axes and b_axes specify those pairs of axes - * along which to contract the tensors. The axis a_axes[i] of - * a must have the same dimension as axis - * b_axes[i] of b for all i in - * range(0, len(a_axes)). The lists - * a_axes and b_axes must have identical length + * from {@code a} and {@code b} over the indices specified by + * {@code a_axes} and {@code b_axes}. The lists + * {@code a_axes} and {@code b_axes} specify those pairs of axes + * along which to contract the tensors. The axis {@code a_axes[i]} of + * {@code a} must have the same dimension as axis + * {@code b_axes[i]} of {@code b} for all {@code i} in + * {@code range(0, len(a_axes))}. The lists + * {@code a_axes} and {@code b_axes} must have identical length * and consist of unique integers that specify valid axes for each of the * tensors. Additionally outer product is supported by passing - * axes=0. + * {@code axes=0}. *

      - * This operation corresponds to numpy.tensordot(a, b, axes). + * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. *

      - * Example 1: When a and b are matrices (order 2), - * the case axes = 1 is equivalent to matrix multiplication. + * Example 1: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes = 1} is equivalent to matrix multiplication. *

      - * Example 2: When a and`b are matrices (order 2), + * Example 2: When {@code a} and{@code b} are matrices (order 2), * the case - * axes = [[1], [0]] is equivalent to matrix multiplication. + * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. *

      - * Example 3: When a and b are matrices (order 2), - * the case axes=0 gives the outer product, a tensor of order + * Example 3: When {@code a} and {@code b} are matrices (order 2), + * the case {@code axes=0} gives the outer product, a tensor of order * 4. *

      * Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, contract(a, b, [[0], [2]]) is the order 4 tensor + * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor * cjklm whose entry corresponding to the indices * (j,k,l,m) is given by: *

      * cjklm = Σi aijk * blmi . *

      - * In general, order(c) = order(a) + order(b) - 2*len(axes[0]). + * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. *

      * - * @param a `Operand` of type `float32` or `float64`. - * @param b `Operand` with the same type as `a`. + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. * @param aAxis axes for the a Operand * @param bAxis axes for the b Operand * @param the datatype of the Operands, must be either TFloat32 or * TFloat64 - * @return A `Operand` with the same type as `a`. + * @return A {@code Operand} with the same type as {@code a}. * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type */ @SuppressWarnings({"unchecked", "unused"}) @@ -1042,7 +1043,7 @@ public Operand tensordot( * Computes log(sum(exp(elements across dimensions of a tensor))). Reduces {@code input_tensor} * along the dimensions given in {@code axes}. * - *

      Reduces `{@code input} along the dimensions given in {@code axes}. Unless {@code keepdims} + *

      Reduces {@code input} along the dimensions given in {@code axes}. Unless {@code keepdims} * is true, the rank of the tensor is reduced by 1 for each of the entries in {@code axes}, which * must be unique. If {@code keepdims} is true, the reduced dimensions are retained with length 1. * If {@code axes} has no entries, all dimensions are reduced, and a tensor with a single element @@ -1052,8 +1053,9 @@ public Operand tensordot( * * @param input The tensor to reduce. * @param axes The dimensions to reduce. If null, reduces all dimensions. Must be in the range - * {@link [-rank(input_tensor), rank(input_tensor)]}. + * {@code [-rank(input_tensor), rank(input_tensor)]}. * @param keepDims If true, retains reduced dimensions with length 1. + * @param the data type for the input and the result * @return The reduced tensor. */ @Endpoint(name = "reduceLogSumExp") From d967a99110f18a90d60ee197304fe5b668f9a9a2 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Mon, 3 May 2021 10:05:53 -0400 Subject: [PATCH 59/60] javadoc fixes --- .../tensorflow/framework/op/LinalgOps.java | 94 +++++++++---------- 1 file changed, 46 insertions(+), 48 deletions(-) diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java index eb069a2db22..931f7f851c2 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java @@ -45,15 +45,15 @@ public class LinalgOps { } /** - * Multiplies matrix a by matrix b, producing a * b - * . + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b + * }. * - *

      The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the inner 2 * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions * specify matching batch size. * - *

      Both matrices must be of the same type. The supported types are: TFloat16, - * TFloat32, TFloat64, TInt32. + *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, + * {@code TFloat32}, {@code TFloat64}, {@code TInt32}. * *

      Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by * setting one of the corresponding flag to true. These are false by default. @@ -80,15 +80,15 @@ public class LinalgOps { * *

      Note: This is matrix product, not element-wise product. * - * @param a an Operand of of type TFloat16, TFloat32, TFloat64 - * , TInt32. with a rank > 1 - * @param b an Operand with same type and rank as a. + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 + * }, {@code TInt32}. with a {@code rank > 1} + * @param b an Operand with same type and rank as {@code a}. * @param the data type of the Operands - * @return A Operand of the same type as a and b where each inner-most - * matrix is the product of the corresponding matrices in a and b. + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most + * matrix is the product of the corresponding matrices in {@code a} and {@code b}. * This is the matrix product not an element-wise product. - * @throws java.lang.IllegalArgumentException If transposeA and adjointA - * , or transposeB and adjointB are both set to `true`. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} + * , or {@code transposeB} and {@code adjointB} are both set to `true`. */ @Endpoint(name = "matmul") public Operand matmul(Operand a, Operand b) { @@ -96,21 +96,19 @@ public Operand matmul(Operand a, Operand b) { } /** - * Multiplies matrix a by matrix b, producing a * b - * . + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b + * }. * - *

      The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the inner 2 * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions * specify matching batch size. * - *

      Both matrices must be of the same type. The supported types are: TFloat16, - * TFloat32, TFloat64, TInt32. + *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, + * {@code TFloat32}, {@code TFloat64}, {@code TInt32}. * *

      Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by * setting one of the corresponding flag to true. These are false by default. * - *

      - * *

      Note: This is matrix product, not element-wise product. * *

      A simple 2-D tensor matrix multiplication: @@ -133,17 +131,17 @@ public Operand matmul(Operand a, Operand b) { * * }

      * - * @param a an Operand of of type TFloat16, TFloat32, TFloat64 - * , TInt32. with a rank > 1 - * @param b an Operand with same type and rank as a. - * @param transposeA If `true`, a is transposed before multiplication. - * @param transposeB If `True`, b is transposed before multiplication + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 + * }, {@code TInt32}. with a {@code rank > 1} + * @param b an Operand with same type and rank as {@code a}. + * @param transposeA If true, {@code a} is transposed before multiplication. + * @param transposeB If true, {@code b} is transposed before multiplication * @param the data type of the Operands - * @return A Operand of the same type as a and b where each inner-most - * matrix is the product of the corresponding matrices in a and b. + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most + * matrix is the product of the corresponding matrices in {@code a} and {@code b}. * This is the matrix product not an element-wise product. - * @throws java.lang.IllegalArgumentException If transposeA and adjointA - * , or transposeB and adjointB are both set to `true`. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} + * , or {@code transposeB} and {@code adjointB} are both set to `true`. */ @Endpoint(name = "matmul") public Operand matmul( @@ -152,15 +150,15 @@ public Operand matmul( } /** - * Multiplies matrix a by matrix b, producing a * b - * . + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b + * }. * - *

      The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 + *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the inner 2 * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions * specify matching batch size. * - *

      Both matrices must be of the same type. The supported types are: TFloat16, - * TFloat32, TFloat64, TInt32. + *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, + * {@code TFloat32}, {@code TFloat64}, {@code TInt32}. * *

      Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by * setting one of the corresponding flag to true. These are false by default. @@ -187,25 +185,25 @@ public Operand matmul( * * }

      * - * @param a an Operand of of type TFloat16, TFloat32, TFloat64 - * , TInt32. with a rank > 1 - * @param b an Operand with same type and rank as a. - * @param transposeA If true, a is transposed before multiplication. - * @param transposeB If True, b is transposed before multiplication - * @param adjointA If true, a is conjugated and transposed before multiplication. - * @param adjointB If true, b is conjugated and transposed before multiplication. - * @param aIsSparse If true, a is treated as a sparse matrix. Notice, this does + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 + * }, {@code TInt32}. with a {@code rank > 1} + * @param b an Operand with same type and rank as {@code a}. + * @param transposeA If true, {@code a} is transposed before multiplication. + * @param transposeB If True, {@code b} is transposed before multiplication + * @param adjointA If true, {@code a} is conjugated and transposed before multiplication. + * @param adjointB If true, {@code b} is conjugated and transposed before multiplication. + * @param aIsSparse If true, {@code a} is treated as a sparse matrix. Notice, this does * not support {@link SparseTensor}, it just makes optimizations that assume most values - * in a are zero. - * @param bIsSparse If true, b is treated as a sparse matrix. Notice, this does + * in {@code a} are zero. + * @param bIsSparse If true, {@code b} is treated as a sparse matrix. Notice, this does * not support {@link SparseTensor}, it just makes optimizations that assume most values - * in b are zero. + * in {@code b} are zero. * @param the data type of the Operands - * @return A Operand of the same type as a and b where each inner-most - * matrix is the product of the corresponding matrices in a and b. + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most + * matrix is the product of the corresponding matrices in {@code a} and {@code b}. * This is the matrix product not an element-wise product. - * @throws java.lang.IllegalArgumentException If transposeA and adjointA - * , or transposeB and adjointB are both set to `true`. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} + * , or {@code transposeB} and {@code adjointB} are both set to `true`. */ @SuppressWarnings("unchecked") @Endpoint(name = "matmul") From e84981fdd71c6003d6cdbf3de69689d5f301f832 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Thu, 17 Jun 2021 09:50:31 -0400 Subject: [PATCH 60/60] Rebase with latest master Move the functions to seperate classes. 
--- .../annotations/org/tensorflow/op/NnOps.java | 1542 +++++++++-------- .../annotations/org/tensorflow/op/Ops.java | 6 +- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 59 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 64 +- .../src/gen/resources/ops.pb | Bin 1462288 -> 1480972 bytes .../tensorflow/framework/losses/Losses.java | 13 +- .../framework/metrics/impl/MetricsHelper.java | 25 +- .../tensorflow/framework/op/FrameworkOps.java | 22 +- .../tensorflow/framework/op/LinalgOps.java | 200 +-- .../org/tensorflow/framework/op/MathOps.java | 1138 +++--------- .../org/tensorflow/framework/op/NnOps.java | 3 +- .../org/tensorflow/framework/op/SetOps.java | 74 +- .../framework/op/linalg/MatMul.java | 289 +++ .../tensorflow/framework/op/math/Axes.java | 49 + .../framework/op/math/ConfusionMatrix.java | 318 ++++ .../framework/op/math/L2Normalize.java | 54 + .../framework/op/math/ReduceLogSumExp.java | 171 ++ .../framework/op/math/TensorDot.java | 663 +++++++ .../op/nn/SigmoidCrossEntropyWithLogits.java | 6 +- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 6 +- .../SparseSoftmaxCrossEntropyWithLogits.java | 52 +- .../tensorflow/framework/op/sets/Sets.java | 148 ++ .../framework/metrics/impl/SetOpsTest.java | 126 -- .../tensorflow/framework/op/MathOpsTest.java | 6 +- .../tensorflow/framework/op/SetOpsTest.java | 9 +- .../optimizers/GradientDescentTest.java | 2 +- 26 files changed, 2882 insertions(+), 2163 deletions(-) create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/linalg/MatMul.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/Axes.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/ConfusionMatrix.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/L2Normalize.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/ReduceLogSumExp.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/TensorDot.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/sets/Sets.java delete mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 0269d387859..cc4f53394d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -113,16 +113,16 @@ public final class NnOps { /** * Performs average pooling on the input. - *

      - * Each entry in `output` is the mean of the corresponding size `ksize` - * window in `value`. - * - * @param data type for {@code output()} output - * @param value 4-D with shape `[batch, height, width, channels]`. - * @param ksize The size of the sliding window for each dimension of `value`. - * @param strides The stride of the sliding window for each dimension of `value`. + * Each entry in {@code output} is the mean of the corresponding size {@code ksize} + * window in {@code value}. + * + * @param data type for {@code output} output + * @param value 4-D with shape {@code [batch, height, width, channels]}. + * @param ksize The size of the sliding window for each dimension of {@code value}. + * @param strides The stride of the sliding window for each dimension of {@code value}. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code AvgPool} output and operands * @return a new instance of AvgPool */ public AvgPool avgPool(Operand value, List ksize, @@ -132,18 +132,18 @@ public AvgPool avgPool(Operand value, List ksize /** * Performs 3D average pooling on the input. - *

      - * Each entry in `output` is the mean of the corresponding size `ksize` window in - * `value`. + * Each entry in {@code output} is the mean of the corresponding size {@code ksize} window in + * {@code value}. * - * @param data type for {@code output()} output - * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. + * @param data type for {@code output} output + * @param input Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code AvgPool3D} output and operands * @return a new instance of AvgPool3d */ public AvgPool3d avgPool3d(Operand input, List ksize, @@ -154,15 +154,16 @@ public AvgPool3d avgPool3d(Operand input, List k /** * Computes gradients of average pooling function. * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param origInputShape The original input dimensions. - * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code AvgPool3DGrad} output and operands * @return a new instance of AvgPool3dGrad */ public AvgPool3dGrad avgPool3dGrad(Operand origInputShape, @@ -173,10 +174,9 @@ public AvgPool3dGrad avgPool3dGrad(Operand origIn /** * Batch normalization. - *

      - * This op is deprecated. Prefer `tf.nn.batch_normalization`. + * This op is deprecated. Prefer {@code tf.nn.batch_normalization}. * - * @param data type for {@code result()} output + * @param data type for {@code result} output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -187,11 +187,12 @@ public AvgPool3dGrad avgPool3dGrad(Operand origIn * @param beta A 1D beta Tensor with size matching the last dimension of t. * An offset to be added to the normalized tensor. * @param gamma A 1D gamma Tensor with size matching the last dimension of t. - * If "scale_after_normalization" is true, this tensor will be multiplied + * If "scale_after_normalization" is true, this tensor will be multiplied * with the normalized tensor. * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. + * @param data type for {@code BatchNormWithGlobalNormalization} output and operands * @return a new instance of BatchNormWithGlobalNormalization */ public BatchNormWithGlobalNormalization batchNormWithGlobalNormalization( @@ -202,10 +203,9 @@ public BatchNormWithGlobalNormalization batchNormWithGlobal /** * Gradients for batch normalization. - *

      - * This op is deprecated. See `tf.nn.batch_normalization`. + * This op is deprecated. See {@code tf.nn.batch_normalization}. * - * @param data type for {@code dx()} output + * @param data type for {@code dx} output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -214,12 +214,13 @@ public BatchNormWithGlobalNormalization batchNormWithGlobal * This is the second output from tf.nn.moments, * or a saved moving average thereof. * @param gamma A 1D gamma Tensor with size matching the last dimension of t. - * If "scale_after_normalization" is true, this Tensor will be multiplied + * If "scale_after_normalization" is true, this Tensor will be multiplied * with the normalized Tensor. * @param backprop 4D backprop Tensor. * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. + * @param data type for {@code BatchNormWithGlobalNormalizationGrad} output and operands * @return a new instance of BatchNormWithGlobalNormalizationGrad */ public BatchNormWithGlobalNormalizationGrad batchNormWithGlobalNormalizationGrad( @@ -229,15 +230,15 @@ public BatchNormWithGlobalNormalizationGrad batchNormWithGl } /** - * Adds `bias` to `value`. - *

      - * This is a special case of `tf.add` where `bias` is restricted to be 1-D. - * Broadcasting is supported, so `value` may have any number of dimensions. + * Adds {@code bias} to {@code value}. + * This is a special case of {@code tf.add} where {@code bias} is restricted to be 1-D. + * Broadcasting is supported, so {@code value} may have any number of dimensions. * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param value Any number of dimensions. - * @param bias 1-D with size the last dimension of `value`. - * @param options carries optional attributes values + * @param bias 1-D with size the last dimension of {@code value}. + * @param options carries optional attribute values + * @param data type for {@code BiasAdd} output and operands * @return a new instance of BiasAdd */ public BiasAdd biasAdd(Operand value, Operand bias, @@ -246,15 +247,15 @@ public BiasAdd biasAdd(Operand value, Operand bias, } /** - * The backward operation for "BiasAdd" on the "bias" tensor. - *

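As an illustration of the broadcasting rule described above, where a 1-D {@code bias} is added along the last dimension of {@code value}, here is a plain-Java sketch for a hypothetical 2-D value; it is an illustration of the rule only, not the generated kernel.

  // Sketch: add a 1-D bias along the last dimension of a 2-D value,
  // mirroring the "special case of tf.add" behaviour described above.
  static float[][] biasAddSketch(float[][] value, float[] bias) {
    float[][] output = new float[value.length][];
    for (int row = 0; row < value.length; row++) {
      output[row] = new float[value[row].length];
      for (int col = 0; col < value[row].length; col++) {
        output[row][col] = value[row][col] + bias[col];  // bias indexed by the last dimension
      }
    }
    return output;
  }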
      + * The backward operation for "BiasAdd" on the "bias" tensor. * It accumulates all the values from out_backprop into the feature dimension. * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param outBackprop Any number of dimensions. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code BiasAddGrad} output and operands * @return a new instance of BiasAddGrad */ public BiasAddGrad biasAddGrad(Operand outBackprop, @@ -264,7 +265,6 @@ public BiasAddGrad biasAddGrad(Operand outBackprop, /** * Computes the ids of the positions in sampled_candidates that match true_labels. - *

      * When doing log-odds NCE, the result of this op should be passed through a * SparseToDense op, then added to the logits of the sampled candidates. This has * the effect of 'removing' the sampled labels that match the true labels by @@ -273,7 +273,7 @@ public BiasAddGrad biasAddGrad(Operand outBackprop, * @param trueClasses The true_classes output of UnpackSparseLabels. * @param sampledCandidates The sampled_candidates output of CandidateSampler. * @param numTrue Number of true labels per context. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ComputeAccidentalHits */ public ComputeAccidentalHits computeAccidentalHits(Operand trueClasses, @@ -282,40 +282,39 @@ public ComputeAccidentalHits computeAccidentalHits(Operand trueClasses, } /** - * Computes a 2-D convolution given 4-D `input` and `filter` tensors. - *

      - * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + * Computes a 2-D convolution given 4-D {@code input} and {@code filter} tensors. + * Given an input tensor of shape {@code [batch, in_height, in_width, in_channels]} * and a filter / kernel tensor of shape - * `[filter_height, filter_width, in_channels, out_channels]`, this op + * {@code [filter_height, filter_width, in_channels, out_channels]}, this op * performs the following: - *

      - * 1. Flattens the filter to a 2-D matrix with shape - * `[filter_height * filter_width * in_channels, output_channels]`. - * 2. Extracts image patches from the input tensor to form a virtual - * tensor of shape `[batch, out_height, out_width, - * filter_height * filter_width * in_channels]`. - * 3. For each patch, right-multiplies the filter matrix and the image patch - * vector. - *

      - * In detail, with the default NHWC format, - *

      - * output[b, i, j, k] = - * sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * - * filter[di, dj, q, k] - *

      - * Must have `strides[0] = strides[3] = 1`. For the most common case of the same - * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. - * - * @param data type for {@code output()} output + *

        + *
      1. Flattens the filter to a 2-D matrix with shape
         {@code [filter_height * filter_width * in_channels, output_channels]}.
      2. Extracts image patches from the input tensor to form a virtual
         tensor of shape {@code [batch, out_height, out_width, filter_height * filter_width * in_channels]}.
      3. For each patch, right-multiplies the filter matrix and the image patch
         vector.
      + *

      In detail, with the default NHWC format, + *

      +   *  output[b, i, j, k] =
      +   *      sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
      +   *                      filter[di, dj, q, k]
      +   *  
      + *
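The summation above, spelled out as a plain Java loop nest for the default NHWC layout (VALID padding, unit dilation, toy arrays); a sketch of the formula only, not the generated kernel.

  // Sketch of: output[b, i, j, k] =
  //   sum_{di, dj, q} input[b, strides[1]*i + di, strides[2]*j + dj, q] * filter[di, dj, q, k]
  static float[][][][] conv2dSketch(float[][][][] input, float[][][][] filter, int strideH, int strideW) {
    int batch = input.length, inH = input[0].length, inW = input[0][0].length, inC = input[0][0][0].length;
    int fH = filter.length, fW = filter[0].length, outC = filter[0][0][0].length;
    int outH = (inH - fH) / strideH + 1, outW = (inW - fW) / strideW + 1;
    float[][][][] output = new float[batch][outH][outW][outC];
    for (int b = 0; b < batch; b++)
      for (int i = 0; i < outH; i++)
        for (int j = 0; j < outW; j++)
          for (int k = 0; k < outC; k++)
            for (int di = 0; di < fH; di++)
              for (int dj = 0; dj < fW; dj++)
                for (int q = 0; q < inC; q++)
                  output[b][i][j][k] +=
                      input[b][strideH * i + di][strideW * j + dj][q] * filter[di][dj][q][k];
    return output;
  }

With the Ops API in this file, the same computation is requested with something like {@code tf.nn.conv2d(input, filter, strides, padding)}, where {@code strides} follows the 1-D length-4 convention documented here.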

      Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same + * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. + * + * @param data type for {@code output} output * @param input A 4-D tensor. The dimension order is interpreted according to the value - * of `data_format`, see below for details. + * of {@code data_format}, see below for details. * @param filter A 4-D tensor of shape - * `[filter_height, filter_width, in_channels, out_channels]` + * {@code [filter_height, filter_width, in_channels, out_channels]} * @param strides 1-D tensor of length 4. The stride of the sliding window for each - * dimension of `input`. The dimension order is determined by the value of - * `data_format`, see below for details. + * dimension of {@code input}. The dimension order is determined by the value of + * {@code data_format}, see below for details. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code Conv2D} output and operands * @return a new instance of Conv2d */ public Conv2d conv2d(Operand input, Operand filter, @@ -326,18 +325,19 @@ public Conv2d conv2d(Operand input, Operand filter, /** * Computes the gradients of convolution with respect to the filter. * - * @param data type for {@code output()} output - * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. - * @param filterSizes An integer vector representing the tensor shape of `filter`, - * where `filter` is a 4-D - * `[filter_height, filter_width, in_channels, out_channels]` tensor. - * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. + * @param data type for {@code output} output + * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. + * @param filterSizes An integer vector representing the tensor shape of {@code filter}, + * where {@code filter} is a 4-D + * {@code [filter_height, filter_width, in_channels, out_channels]} tensor. + * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, out_channels]}. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. Must be in the same order as the dimension specified with * format. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code Conv2DBackpropFilter} output and operands * @return a new instance of Conv2dBackpropFilter */ public Conv2dBackpropFilter conv2dBackpropFilter(Operand input, @@ -349,18 +349,19 @@ public Conv2dBackpropFilter conv2dBackpropFilter(Operand< /** * Computes the gradients of convolution with respect to the input. * - * @param data type for {@code output()} output - * @param inputSizes An integer vector representing the shape of `input`, - * where `input` is a 4-D `[batch, height, width, channels]` tensor. + * @param data type for {@code output} output + * @param inputSizes An integer vector representing the shape of {@code input}, + * where {@code input} is a 4-D {@code [batch, height, width, channels]} tensor. * @param filter 4-D with shape - * `[filter_height, filter_width, in_channels, out_channels]`. - * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. 
+ * {@code [filter_height, filter_width, in_channels, out_channels]}. + * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, out_channels]}. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. Must be in the same order as the dimension specified with * format. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code Conv2DBackpropInput} output and operands * @return a new instance of Conv2dBackpropInput */ public Conv2dBackpropInput conv2dBackpropInput(Operand inputSizes, @@ -370,22 +371,20 @@ public Conv2dBackpropInput conv2dBackpropInput(Operand + * Computes a 3-D convolution given 5-D {@code input} and {@code filter} tensors. * In signal processing, cross-correlation is a measure of similarity of * two waveforms as a function of a time-lag applied to one of them. This * is also known as a sliding dot product or sliding inner-product. - *

      - * Our Conv3D implements a form of cross-correlation. + *

      Our Conv3D implements a form of cross-correlation. * - * @param data type for {@code output()} output - * @param input Shape `[batch, in_depth, in_height, in_width, in_channels]`. - * @param filter Shape `[filter_depth, filter_height, filter_width, in_channels, - * out_channels]`. `in_channels` must match between `input` and `filter`. + * @param data type for {@code output} output + * @param input Shape {@code [batch, in_depth, in_height, in_width, in_channels]}. + * @param filter Shape {@code [filter_depth, filter_height, filter_width, in_channels, out_channels]}. {@code in_channels} must match between {@code input} and {@code filter}. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code Conv3D} output and operands * @return a new instance of Conv3d */ public Conv3d conv3d(Operand input, Operand filter, @@ -396,18 +395,18 @@ public Conv3d conv3d(Operand input, Operand filter, /** * Computes the gradients of 3-D convolution with respect to the filter. * - * @param data type for {@code output()} output - * @param input Shape `[batch, depth, rows, cols, in_channels]`. - * @param filterSizes An integer vector representing the tensor shape of `filter`, - * where `filter` is a 5-D - * `[filter_depth, filter_height, filter_width, in_channels, out_channels]` + * @param data type for {@code output} output + * @param input Shape {@code [batch, depth, rows, cols, in_channels]}. + * @param filterSizes An integer vector representing the tensor shape of {@code filter}, + * where {@code filter} is a 5-D + * {@code [filter_depth, filter_height, filter_width, in_channels, out_channels]} * tensor. - * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - * out_channels]`. + * @param outBackprop Backprop signal of shape {@code [batch, out_depth, out_rows, out_cols, out_channels]}. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code Conv3DBackpropFilterV2} output and operands * @return a new instance of Conv3dBackpropFilter */ public Conv3dBackpropFilter conv3dBackpropFilter(Operand input, @@ -419,18 +418,18 @@ public Conv3dBackpropFilter conv3dBackpropFilter(Operand< /** * Computes the gradients of 3-D convolution with respect to the input. * - * @param data type for {@code output()} output - * @param inputSizes An integer vector representing the tensor shape of `input`, - * where `input` is a 5-D - * `[batch, depth, rows, cols, in_channels]` tensor. - * @param filter Shape `[depth, rows, cols, in_channels, out_channels]`. - * `in_channels` must match between `input` and `filter`. - * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - * out_channels]`. 
+ * @param data type for {@code output} output + * @param inputSizes An integer vector representing the tensor shape of {@code input}, + * where {@code input} is a 5-D + * {@code [batch, depth, rows, cols, in_channels]} tensor. + * @param filter Shape {@code [depth, rows, cols, in_channels, out_channels]}. + * {@code in_channels} must match between {@code input} and {@code filter}. + * @param outBackprop Backprop signal of shape {@code [batch, out_depth, out_rows, out_cols, out_channels]}. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code Conv3DBackpropInputV2} output and operands * @return a new instance of Conv3dBackpropInput */ public Conv3dBackpropInput conv3dBackpropInput( @@ -441,19 +440,19 @@ public Conv3dBackpropInput conv3dBackpropInput( /** * Performs beam search decoding on the logits given in input. - *

      * A note about the attribute merge_repeated: For the beam search decoder, * this means that if consecutive entries in a beam are the same, only - * the first of these is emitted. That is, when the top path is "A B B B B", - * "A B" is returned if merge_repeated = True but "A B B B B" is + * the first of these is emitted. That is, when the top path is "A B B B B", + * "A B" is returned if merge_repeated = True but "A B B B B" is * returned if merge_repeated = False. * - * @param data type for {@code logProbability()} output - * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. - * @param sequenceLength A vector containing sequence lengths, size `(batch)`. - * @param beamWidth A scalar >= 0 (beam search beam width). - * @param topPaths A scalar >= 0, <= beam_width (controls output size). - * @param options carries optional attributes values + * @param data type for {@code log_probability} output + * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. + * @param sequenceLength A vector containing sequence lengths, size {@code (batch)}. + * @param beamWidth A scalar >= 0 (beam search beam width). + * @param topPaths A scalar >= 0, <= beam_width (controls output size). + * @param options carries optional attribute values + * @param data type for {@code CTCBeamSearchDecoder} output and operands * @return a new instance of CtcBeamSearchDecoder */ public CtcBeamSearchDecoder ctcBeamSearchDecoder(Operand inputs, @@ -464,21 +463,20 @@ public CtcBeamSearchDecoder ctcBeamSearchDecoder(Operand< /** * Performs greedy decoding on the logits given in inputs. - *

      * A note about the attribute merge_repeated: if enabled, when * consecutive logits' maximum indices are the same, only the first of - * these is emitted. Labeling the blank '*', the sequence "A B B * B B" - * becomes "A B B" if merge_repeated = True and "A B B B B" if + * these is emitted. Labeling the blank '*', the sequence "A B B * B B" + * becomes "A B B" if merge_repeated = True and "A B B B B" if * merge_repeated = False. - *

      - * Regardless of the value of merge_repeated, if the maximum index of a given - * time and batch corresponds to the blank, index `(num_classes - 1)`, no new + *

      Regardless of the value of merge_repeated, if the maximum index of a given + * time and batch corresponds to the blank, index {@code (num_classes - 1)}, no new * element is emitted. * - * @param data type for {@code logProbability()} output - * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. - * @param sequenceLength A vector containing sequence lengths, size `(batch_size)`. - * @param options carries optional attributes values + * @param data type for {@code log_probability} output + * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. + * @param sequenceLength A vector containing sequence lengths, size {@code (batch_size)}. + * @param options carries optional attribute values + * @param data type for {@code CTCGreedyDecoder} output and operands * @return a new instance of CtcGreedyDecoder */ public CtcGreedyDecoder ctcGreedyDecoder(Operand inputs, @@ -488,18 +486,18 @@ public CtcGreedyDecoder ctcGreedyDecoder(Operand input /** * Calculates the CTC Loss (log probability) for each batch entry. Also calculates - *

      * the gradient. This class performs the softmax operation for you, so inputs * should be e.g. linear projections of outputs by an LSTM. * - * @param data type for {@code loss()} output - * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. - * @param labelsIndices The indices of a `SparseTensor`. - * `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for - * `(batch b, time t)`. + * @param data type for {@code loss} output + * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. + * @param labelsIndices The indices of a {@code SparseTensor}. + * {@code labels_indices(i, :) == [b, t]} means {@code labels_values(i)} stores the id for + * {@code (batch b, time t)}. * @param labelsValues The values (labels) associated with the given batch and time. * @param sequenceLength A vector containing sequence lengths (batch). - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code CTCLoss} output and operands * @return a new instance of CtcLoss */ public CtcLoss ctcLoss(Operand inputs, Operand labelsIndices, @@ -509,45 +507,43 @@ public CtcLoss ctcLoss(Operand inputs, Operand /** * Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM. - *

      * Writes a set of weights into the opaque params buffer so they can be used in * upcoming training or inferences. - *

      - * Note that the params buffer may not be compatible across different GPUs. So any + *

      Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - *

      - * num_layers: Specifies the number of layers in the RNN model. + *

      num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * weights: the canonical form of weights that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * biases: the canonical form of biases that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * num_params_weights: number of weight parameter matrix for all layers. * num_params_biases: number of bias parameter vector for all layers. * rnn_mode: Indicates the type of the RNN model. * input_mode: Indicate whether there is a linear projection between the input and - * The actual computation before the first layer. 'skip_input' is only allowed - * when input_size == num_units; 'auto_select' implies 'skip_input' when - * input_size == num_units; otherwise, it implies 'linear_input'. + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. * direction: Indicates whether a bidirectional model will be used. - * dir = (direction == bidirectional) ? 2 : 1 + * dir = (direction == bidirectional) ? 2 : 1 * dropout: dropout probability. When set to 0., dropout is disabled. * seed: the 1st part of a seed to initialize dropout. * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, - * no projection is performed. - * - * @param data type for {@code params()} output - * @param numLayers - * @param numUnits - * @param inputSize - * @param weights - * @param biases - * @param options carries optional attributes values + * no projection is performed. + * + * @param data type for {@code params} output + * @param numLayers the numLayers value + * @param numUnits the numUnits value + * @param inputSize the inputSize value + * @param weights the weights value + * @param biases the biases value + * @param options carries optional attribute values + * @param data type for {@code CudnnRNNCanonicalToParamsV2} output and operands * @return a new instance of CudnnRNNCanonicalToParams */ public CudnnRNNCanonicalToParams cudnnRNNCanonicalToParams( @@ -559,46 +555,44 @@ public CudnnRNNCanonicalToParams cudnnRNNCanonicalToParam /** * Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM. - *

      * Retrieves a set of weights from the opaque params buffer that can be saved and * restored in a way compatible with future runs. - *

      - * Note that the params buffer may not be compatible across different GPUs. So any + *

      Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - *

      - * num_layers: Specifies the number of layers in the RNN model. + *

      num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * num_params_weights: number of weight parameter matrix for all layers. * num_params_biases: number of bias parameter vector for all layers. * weights: the canonical form of weights that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * biases: the canonical form of biases that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * rnn_mode: Indicates the type of the RNN model. * input_mode: Indicate whether there is a linear projection between the input and - * The actual computation before the first layer. 'skip_input' is only allowed - * when input_size == num_units; 'auto_select' implies 'skip_input' when - * input_size == num_units; otherwise, it implies 'linear_input'. + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. * direction: Indicates whether a bidirectional model will be used. - * dir = (direction == bidirectional) ? 2 : 1 + * dir = (direction == bidirectional) ? 2 : 1 * dropout: dropout probability. When set to 0., dropout is disabled. * seed: the 1st part of a seed to initialize dropout. * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, - * no projection is performed. - * - * @param data type for {@code weights()} output - * @param numLayers - * @param numUnits - * @param inputSize - * @param params - * @param numParamsWeights - * @param numParamsBiases - * @param options carries optional attributes values + * no projection is performed. + * + * @param data type for {@code weights} output + * @param numLayers the numLayers value + * @param numUnits the numUnits value + * @param inputSize the inputSize value + * @param params the params value + * @param numParamsWeights the value of the numParamsWeights property + * @param numParamsBiases the value of the numParamsBiases property + * @param options carries optional attribute values + * @param data type for {@code CudnnRNNParamsToCanonicalV2} output and operands * @return a new instance of CudnnRNNParamsToCanonical */ public CudnnRNNParamsToCanonical cudnnRNNParamsToCanonical( @@ -610,53 +604,53 @@ public CudnnRNNParamsToCanonical cudnnRNNParamsToCanonica /** * Computes size of weights that can be used by a Cudnn RNN model. - *

      * Return the params size that can be used by the Cudnn RNN model. Subsequent * weight allocation and initialization should use this size. - *

      - * num_layers: Specifies the number of layers in the RNN model. + *

      num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * rnn_mode: Indicates the type of the RNN model. * input_mode: Indicate whether there is a linear projection between the input and - * The actual computation before the first layer. 'skip_input' is only allowed - * when input_size == num_units; 'auto_select' implies 'skip_input' when - * input_size == num_units; otherwise, it implies 'linear_input'. + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. * direction: Indicates whether a bidirectional model will be used. - * dir = (direction == bidirectional) ? 2 : 1 + * dir = (direction == bidirectional) ? 2 : 1 * dropout: dropout probability. When set to 0., dropout is disabled. * seed: the 1st part of a seed to initialize dropout. * seed2: the 2nd part of a seed to initialize dropout. * params_size: The size of the params buffer that should be allocated and - * initialized for this RNN model. Note that this params buffer may not be - * compatible across GPUs. Please use CudnnRNNParamsWeights and - * CudnnRNNParamsBiases to save and restore them in a way that is compatible - * across different runs. - * - * @param data type for {@code paramsSize()} output - * @param numLayers - * @param numUnits - * @param inputSize - * @param T - * @param S - * @param options carries optional attributes values + * initialized for this RNN model. Note that this params buffer may not be + * compatible across GPUs. Please use CudnnRNNParamsWeights and + * CudnnRNNParamsBiases to save and restore them in a way that is compatible + * across different runs. + * + * @param data type for {@code params_size} output + * @param numLayers the numLayers value + * @param numUnits the numUnits value + * @param inputSize the inputSize value + * @param T the value of the T property + * @param S the value of the S property + * @param options carries optional attribute values + * @param data type for {@code CudnnRNNParamsSize} output and operands + * @param data type for {@code CudnnRNNParamsSize} output and operands * @return a new instance of CudnnRnnParamsSize */ - public CudnnRnnParamsSize cudnnRnnParamsSize( - Operand numLayers, Operand numUnits, Operand inputSize, Class T, - Class S, CudnnRnnParamsSize.Options... options) { + public CudnnRnnParamsSize cudnnRnnParamsSize( + Operand numLayers, Operand numUnits, Operand inputSize, Class T, + Class S, CudnnRnnParamsSize.Options... options) { return CudnnRnnParamsSize.create(scope, numLayers, numUnits, inputSize, T, S, options); } /** * Returns the dimension index in the destination data format given the one in - *

      * the source data format. * - * @param data type for {@code y()} output + * @param data type for {@code y} output * @param x A Tensor with each element as a dimension index in source data format. * Must be in the range [-4, 4). - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code DataFormatDimMap} output and operands * @return a new instance of DataFormatDimMap */ public DataFormatDimMap dataFormatDimMap(Operand x, @@ -665,32 +659,31 @@ public DataFormatDimMap dataFormatDimMap(Operand x, } /** - * Permute input tensor from `src_format` to `dst_format`. - *

      + * Permute input tensor from {@code src_format} to {@code dst_format}. * Input tensor must be a vector of size 4, or a 4x2 tensor. - *

      - * For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs: - *

      {@code
      +   *  

      For example, with {@code src_format} of {@code NHWC}, {@code dst_format} of {@code NCHW}, and inputs: + *

          *  [1, 2, 3, 4]
      -   *  }
      - * and - *
      {@code
      +   *  
      + *

      and + *

          *  [[1, 2, 3, 4],
          *   [5, 6, 7, 8]]
      -   *  }
      - * , the outputs will be (respectively): - *
      {@code
      +   *  
      + *

      , the outputs will be (respectively): + *

          *  [1, 4, 2, 3]
      -   *  }
      - * and - *
      {@code
      +   *  
      + *

      and + *

          *  [[1, 4, 2, 3],
          *   [5, 8, 6, 7]]
      -   *  }
      + *
      * - * @param data type for {@code y()} output + * @param data type for {@code y} output * @param x Vector of size 4 or Tensor of shape (4, 2) in source data format. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code DataFormatVecPermute} output and operands * @return a new instance of DataFormatVecPermute */ public DataFormatVecPermute dataFormatVecPermute(Operand x, @@ -700,90 +693,86 @@ public DataFormatVecPermute dataFormatVecPermute(Operand< /** * DepthToSpace for tensors of type T. - *

      * Rearranges data from depth into blocks of spatial data. * This is the reverse transformation of SpaceToDepth. More specifically, - * this op outputs a copy of the input tensor where values from the `depth` - * dimension are moved in spatial blocks to the `height` and `width` dimensions. - * The attr `block_size` indicates the input block size and how the data is moved. - *

      - * Chunks of data of size `block_size * block_size` from depth are rearranged - * into non-overlapping blocks of size `block_size x block_size` - * The width the output tensor is `input_depth * block_size`, whereas the - * height is `input_height * block_size`. - * The Y, X coordinates within each block of the output image are determined - * by the high order component of the input channel index. - * The depth of the input tensor must be divisible by - * `block_size * block_size`. - *

      - * The `data_format` attr specifies the layout of the input and output tensors + * this op outputs a copy of the input tensor where values from the {@code depth} + * dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions. + * The attr {@code block_size} indicates the input block size and how the data is moved. + *

        + *
      • Chunks of data of size {@code block_size * block_size} from depth are rearranged
        into non-overlapping blocks of size {@code block_size x block_size}.
      • The width of the output tensor is {@code input_depth * block_size}, whereas the
        height is {@code input_height * block_size}.
      • The Y, X coordinates within each block of the output image are determined
        by the high order component of the input channel index.
      • The depth of the input tensor must be divisible by
        {@code block_size * block_size}.
      + *

      The {@code data_format} attr specifies the layout of the input and output tensors * with the following options: - * "NHWC": `[ batch, height, width, channels ]` - * "NCHW": `[ batch, channels, height, width ]` - * "NCHW_VECT_C": - * `qint8 [ batch, channels / 4, height, width, 4 ]` - *

      - * It is useful to consider the operation as transforming a 6-D Tensor. + * "NHWC": {@code [ batch, height, width, channels ]} + * "NCHW": {@code [ batch, channels, height, width ]} + * "NCHW_VECT_C": + * {@code qint8 [ batch, channels / 4, height, width, 4 ]} + *

      It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, - * Each element in the input tensor can be specified via 6 coordinates, - * ordered by decreasing memory layout significance as: - * n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates - * within the input image, bX, bY means coordinates - * within the output block, oC means output channels). - * The output would be the input transposed to the following layout: - * n,iY,bY,iX,bX,oC - *

      - * This operation is useful for resizing the activations between convolutions + * Each element in the input tensor can be specified via 6 coordinates, + * ordered by decreasing memory layout significance as: + * n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates + * within the input image, bX, bY means coordinates + * within the output block, oC means output channels). + * The output would be the input transposed to the following layout: + * n,iY,bY,iX,bX,oC + *

      This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. - *

      - * For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and + *

      For example, given an input of shape {@code [1, 1, 1, 4]}, data_format = "NHWC" and * block_size = 2: - *

      {@code
      +   *  
          *  x = [[[[1, 2, 3, 4]]]]
          *
      -   *  }
      - * This operation will output a tensor of shape `[1, 2, 2, 1]`: - *
      {@code
      +   *  
      + *

      This operation will output a tensor of shape {@code [1, 2, 2, 1]}: + *

          *     [[[[1], [2]],
          *       [[3], [4]]]]
      -   *  }
      - * Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, + *
      + *

      Here, the input has a batch of 1 and each batch element has shape {@code [1, 1, 4]}, * the corresponding output will have 2x2 elements and will have a depth of - * 1 channel (1 = `4 / (block_size * block_size)`). - * The output element shape is `[2, 2, 1]`. - *

      - * For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. - *

      {@code
      +   *  1 channel (1 = {@code 4 / (block_size * block_size)}).
      +   *  The output element shape is {@code [2, 2, 1]}.
      +   *  

      For an input tensor with larger depth, here of shape {@code [1, 1, 1, 12]}, e.g. + *

          *  x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
      -   *  }
      - * This operation, for block size of 2, will return the following tensor of shape - * `[1, 2, 2, 3]` - *
      {@code
      +   *  
      + *

      This operation, for block size of 2, will return the following tensor of shape + * {@code [1, 2, 2, 3]} + *

          *     [[[[1, 2, 3], [4, 5, 6]],
          *       [[7, 8, 9], [10, 11, 12]]]]
          *
      -   *  }
      - * Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: - *
      {@code
      +   *  
      + *

      Similarly, for the following input of shape {@code [1 2 2 4]}, and a block size of 2: + *

          *  x =  [[[[1, 2, 3, 4],
          *         [5, 6, 7, 8]],
          *        [[9, 10, 11, 12],
          *         [13, 14, 15, 16]]]]
      -   *  }
      - * the operator will return the following tensor of shape `[1 4 4 1]`: - *
      {@code
      +   *  
      + *

      the operator will return the following tensor of shape {@code [1 4 4 1]}: + *

          *  x = [[[ [1],   [2],  [5],  [6]],
          *        [ [3],   [4],  [7],  [8]],
          *        [ [9],  [10], [13],  [14]],
          *        [ [11], [12], [15],  [16]]]]
          *
      -   *  }
      + *
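To make the rearrangement above concrete, here is a plain-Java sketch of the NHWC depth-to-space index mapping; the helper and its toy arrays are hypothetical, not the generated kernel.

  // Sketch: NHWC depth_to_space. Values from the depth dimension move into
  // block_size x block_size spatial blocks; output depth = in_depth / (block_size^2).
  static float[][][][] depthToSpaceSketch(float[][][][] input, int blockSize) {
    int batch = input.length, inH = input[0].length, inW = input[0][0].length, inC = input[0][0][0].length;
    int outC = inC / (blockSize * blockSize);
    float[][][][] output = new float[batch][inH * blockSize][inW * blockSize][outC];
    for (int b = 0; b < batch; b++)
      for (int h = 0; h < inH * blockSize; h++)
        for (int w = 0; w < inW * blockSize; w++)
          for (int c = 0; c < outC; c++)
            output[b][h][w][c] =
                input[b][h / blockSize][w / blockSize]
                     [((h % blockSize) * blockSize + (w % blockSize)) * outC + c];
    return output;
  }

Applied to the {@code [1, 1, 1, 4]} example with {@code blockSize = 2}, this reproduces the {@code [1, 2, 2, 1]} output shown above.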
      * - * @param data type for {@code output()} output - * @param input + * @param data type for {@code output} output + * @param input the input value * @param blockSize The size of the spatial block, same as in Space2Depth. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code DepthToSpace} output and operands * @return a new instance of DepthToSpace */ public DepthToSpace depthToSpace(Operand input, Long blockSize, @@ -792,32 +781,32 @@ public DepthToSpace depthToSpace(Operand input, Long blo } /** - * Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. - *

      - * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + * Computes a 2-D depthwise convolution given 4-D {@code input} and {@code filter} tensors. + * Given an input tensor of shape {@code [batch, in_height, in_width, in_channels]} * and a filter / kernel tensor of shape - * `[filter_height, filter_width, in_channels, channel_multiplier]`, containing - * `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies + * {@code [filter_height, filter_width, in_channels, channel_multiplier]}, containing + * {@code in_channels} convolutional filters of depth 1, {@code depthwise_conv2d} applies * a different filter to each input channel (expanding from 1 channel to - * `channel_multiplier` channels for each), then concatenates the results - * together. Thus, the output has `in_channels * channel_multiplier` channels. - *

      {@code
      +   *  {@code channel_multiplier} channels for each), then concatenates the results
      +   *  together. Thus, the output has {@code in_channels * channel_multiplier} channels.
      +   *  
          *  for k in 0..in_channels-1
          *    for q in 0..channel_multiplier-1
          *      output[b, i, j, k * channel_multiplier + q] =
          *        sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
          *                          filter[di, dj, k, q]
      -   *  }
      - * Must have `strides[0] = strides[3] = 1`. For the most common case of the same - * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + *
      + *

      Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same + * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. * - * @param data type for {@code output()} output - * @param input - * @param filter + * @param data type for {@code output} output + * @param input the input value + * @param filter the filter value * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of `input`. + * of {@code input}. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code DepthwiseConv2dNative} output and operands * @return a new instance of DepthwiseConv2dNative */ public DepthwiseConv2dNative depthwiseConv2dNative(Operand input, @@ -829,21 +818,21 @@ public DepthwiseConv2dNative depthwiseConv2dNative(Operan /** * Computes the gradients of depthwise convolution with respect to the filter. * - * @param data type for {@code output()} output - * @param input 4-D with shape based on `data_format`. For example, if - * `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, - * in_width, in_channels]` tensor. - * @param filterSizes An integer vector representing the tensor shape of `filter`, - * where `filter` is a 4-D - * `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. - * @param outBackprop 4-D with shape based on `data_format`. - * For example, if `data_format` is 'NHWC' then - * out_backprop shape is `[batch, out_height, out_width, out_channels]`. + * @param data type for {@code output} output + * @param input 4-D with shape based on {@code data_format}. For example, if + * {@code data_format} is 'NHWC' then {@code input} is a 4-D {@code [batch, in_height, in_width, in_channels]} tensor. + * @param filterSizes An integer vector representing the tensor shape of {@code filter}, + * where {@code filter} is a 4-D + * {@code [filter_height, filter_width, in_channels, depthwise_multiplier]} tensor. + * @param outBackprop 4-D with shape based on {@code data_format}. + * For example, if {@code data_format} is 'NHWC' then + * out_backprop shape is {@code [batch, out_height, out_width, out_channels]}. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code DepthwiseConv2dNativeBackpropFilter} output and operands * @return a new instance of DepthwiseConv2dNativeBackpropFilter */ public DepthwiseConv2dNativeBackpropFilter depthwiseConv2dNativeBackpropFilter( @@ -855,20 +844,21 @@ public DepthwiseConv2dNativeBackpropFilter depthwiseConv2 /** * Computes the gradients of depthwise convolution with respect to the input. * - * @param data type for {@code output()} output - * @param inputSizes An integer vector representing the shape of `input`, based - * on `data_format`. For example, if `data_format` is 'NHWC' then - * `input` is a 4-D `[batch, height, width, channels]` tensor. + * @param data type for {@code output} output + * @param inputSizes An integer vector representing the shape of {@code input}, based + * on {@code data_format}. For example, if {@code data_format} is 'NHWC' then + * {@code input} is a 4-D {@code [batch, height, width, channels]} tensor. 
* @param filter 4-D with shape - * `[filter_height, filter_width, in_channels, depthwise_multiplier]`. - * @param outBackprop 4-D with shape based on `data_format`. - * For example, if `data_format` is 'NHWC' then - * out_backprop shape is `[batch, out_height, out_width, out_channels]`. + * {@code [filter_height, filter_width, in_channels, depthwise_multiplier]}. + * @param outBackprop 4-D with shape based on {@code data_format}. + * For example, if {@code data_format} is 'NHWC' then + * out_backprop shape is {@code [batch, out_height, out_width, out_channels]}. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code DepthwiseConv2dNativeBackpropInput} output and operands * @return a new instance of DepthwiseConv2dNativeBackpropInput */ public DepthwiseConv2dNativeBackpropInput depthwiseConv2dNativeBackpropInput( @@ -878,40 +868,38 @@ public DepthwiseConv2dNativeBackpropInput depthwiseConv2d } /** - * Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. - *

      - * The `input` tensor has shape `[batch, in_height, in_width, depth]` and the - * `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each + * Computes the grayscale dilation of 4-D {@code input} and 3-D {@code filter} tensors. + * The {@code input} tensor has shape {@code [batch, in_height, in_width, depth]} and the + * {@code filter} tensor has shape {@code [filter_height, filter_width, depth]}, i.e., each * input channel is processed independently of the others with its own structuring - * function. The `output` tensor has shape - * `[batch, out_height, out_width, depth]`. The spatial dimensions of the output - * tensor depend on the `padding` algorithm. We currently only support the default - * "NHWC" `data_format`. - *

      - * In detail, the grayscale morphological 2-D dilation is the max-sum correlation - * (for consistency with `conv2d`, we use unmirrored filters): - *

      - * output[b, y, x, c] = - * max_{dy, dx} input[b, - * strides[1] * y + rates[1] * dy, - * strides[2] * x + rates[2] * dx, - * c] + - * filter[dy, dx, c] - *

      - * Max-pooling is a special case when the filter has size equal to the pooling + * function. The {@code output} tensor has shape + * {@code [batch, out_height, out_width, depth]}. The spatial dimensions of the output + * tensor depend on the {@code padding} algorithm. We currently only support the default + * "NHWC" {@code data_format}. + *

      In detail, the grayscale morphological 2-D dilation is the max-sum correlation + * (for consistency with {@code conv2d}, we use unmirrored filters): + *

      +   *  output[b, y, x, c] =
      +   *     max_{dy, dx} input[b,
      +   *                        strides[1] * y + rates[1] * dy,
      +   *                        strides[2] * x + rates[2] * dx,
      +   *                        c] +
      +   *                  filter[dy, dx, c]
      +   *  
      + *

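The max-sum correlation above, written out as a plain Java loop for the default NHWC layout with VALID padding and toy arrays; stride and rate follow the length-4 conventions documented for this op, and the helper itself is a sketch, not the generated kernel.

  // Sketch of: output[b, y, x, c] = max_{dy, dx}
  //   input[b, strides[1]*y + rates[1]*dy, strides[2]*x + rates[2]*dx, c] + filter[dy, dx, c]
  static float[][][][] dilation2dSketch(float[][][][] input, float[][][] filter, int stride, int rate) {
    int batch = input.length, inH = input[0].length, inW = input[0][0].length, depth = input[0][0][0].length;
    int fH = filter.length, fW = filter[0].length;
    int outH = (inH - (fH - 1) * rate - 1) / stride + 1;
    int outW = (inW - (fW - 1) * rate - 1) / stride + 1;
    float[][][][] output = new float[batch][outH][outW][depth];
    for (int b = 0; b < batch; b++)
      for (int y = 0; y < outH; y++)
        for (int x = 0; x < outW; x++)
          for (int c = 0; c < depth; c++) {
            float best = Float.NEGATIVE_INFINITY;
            for (int dy = 0; dy < fH; dy++)
              for (int dx = 0; dx < fW; dx++)
                best = Math.max(best,
                    input[b][stride * y + rate * dy][stride * x + rate * dx][c] + filter[dy][dx][c]);
            output[b][y][x][c] = best;
          }
    return output;
  }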
      Max-pooling is a special case when the filter has size equal to the pooling * kernel size and contains all zeros. - *

      - * Note on duality: The dilation of `input` by the `filter` is equal to the - * negation of the erosion of `-input` by the reflected `filter`. + *

      Note on duality: The dilation of {@code input} by the {@code filter} is equal to the + * negation of the erosion of {@code -input} by the reflected {@code filter}. * - * @param data type for {@code output()} output - * @param input 4-D with shape `[batch, in_height, in_width, depth]`. - * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param data type for {@code output} output + * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. + * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. * @param strides The stride of the sliding window for each dimension of the input - * tensor. Must be: `[1, stride_height, stride_width, 1]`. + * tensor. Must be: {@code [1, stride_height, stride_width, 1]}. * @param rates The input stride for atrous morphological dilation. Must be: - * `[1, rate_height, rate_width, 1]`. + * {@code [1, rate_height, rate_width, 1]}. * @param padding The type of padding algorithm to use. + * @param data type for {@code Dilation2D} output and operands * @return a new instance of Dilation2d */ public Dilation2d dilation2d(Operand input, Operand filter, @@ -922,15 +910,16 @@ public Dilation2d dilation2d(Operand input, Operand /** * Computes the gradient of morphological 2-D dilation with respect to the filter. * - * @param data type for {@code filterBackprop()} output - * @param input 4-D with shape `[batch, in_height, in_width, depth]`. - * @param filter 3-D with shape `[filter_height, filter_width, depth]`. - * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. + * @param data type for {@code filter_backprop} output + * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. + * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. + * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, depth]}. * @param strides 1-D of length 4. The stride of the sliding window for each dimension of - * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + * the input tensor. Must be: {@code [1, stride_height, stride_width, 1]}. * @param rates 1-D of length 4. The input stride for atrous morphological dilation. - * Must be: `[1, rate_height, rate_width, 1]`. + * Must be: {@code [1, rate_height, rate_width, 1]}. * @param padding The type of padding algorithm to use. + * @param data type for {@code Dilation2DBackpropFilter} output and operands * @return a new instance of Dilation2dBackpropFilter */ public Dilation2dBackpropFilter dilation2dBackpropFilter(Operand input, @@ -942,15 +931,16 @@ public Dilation2dBackpropFilter dilation2dBackpropFilter( /** * Computes the gradient of morphological 2-D dilation with respect to the input. * - * @param data type for {@code inBackprop()} output - * @param input 4-D with shape `[batch, in_height, in_width, depth]`. - * @param filter 3-D with shape `[filter_height, filter_width, depth]`. - * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. + * @param data type for {@code in_backprop} output + * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. + * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. + * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, depth]}. * @param strides 1-D of length 4. The stride of the sliding window for each dimension of - * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + * the input tensor. Must be: {@code [1, stride_height, stride_width, 1]}. 
* @param rates 1-D of length 4. The input stride for atrous morphological dilation. - * Must be: `[1, rate_height, rate_width, 1]`. + * Must be: {@code [1, rate_height, rate_width, 1]}. * @param padding The type of padding algorithm to use. + * @param data type for {@code Dilation2DBackpropInput} output and operands * @return a new instance of Dilation2dBackpropInput */ public Dilation2dBackpropInput dilation2dBackpropInput(Operand input, @@ -960,13 +950,31 @@ public Dilation2dBackpropInput dilation2dBackpropInput(Op } /** - * Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. - *

      - * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) - * ](http://arxiv.org/abs/1511.07289) - * - * @param data type for {@code activations()} output - * @param features + * Computes the exponential linear function. + * The ELU function is defined as: + *

        + *
      • $ e ^ x - 1 $ if $ x < 0 $
      • $ x $ if $ x >= 0 $

      Examples:

      tf.nn.elu(1.0)
      <tf.Tensor: shape=(), dtype=float32, numpy=1.0>
      tf.nn.elu(0.0)
      <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
      tf.nn.elu(-1000.0)
      <tf.Tensor: shape=(), dtype=float32, numpy=-1.0>

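A one-line Java rendering of the piecewise definition above (a hypothetical scalar helper; the generated op works on tensors):

  // Sketch: ELU as defined above, exp(x) - 1 for x < 0, identity otherwise.
  static double eluSketch(double x) {
    return x < 0 ? Math.exp(x) - 1.0 : x;
  }

Here eluSketch(1.0) is 1.0, eluSketch(0.0) is 0.0 and eluSketch(-1000.0) is approximately -1.0, matching the examples above.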
      See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) + * + * + * @param data type for {@code activations} output + * @param features the features value + * @param data type for {@code Elu} output and operands * @return a new instance of Elu */ public Elu elu(Operand features) { @@ -975,18 +983,14 @@ public Elu elu(Operand features) { /** * Generates labels for candidate sampling with a learned unigram distribution. - *

      * A unigram sampler could use a fixed unigram distribution read from a * file or passed in as an in-memory array instead of building up the distribution * from data on the fly. There is also an option to skew the distribution by * applying a distortion power to the weights. - *

      - * The vocabulary file should be in CSV-like format, with the last field + *

      The vocabulary file should be in CSV-like format, with the last field * being the weight associated with the word. - *

      - * For each batch, this op picks a single set of sampled candidate labels. - *

      - * The advantages of sampling candidates per-batch are simplicity and the + *

      For each batch, this op picks a single set of sampled candidate labels. + *

      The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. @@ -999,7 +1003,7 @@ public Elu elu(Operand features) { * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. * @param rangeMax The sampler will sample integers from the interval [0, range_max). - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of FixedUnigramCandidateSampler */ public FixedUnigramCandidateSampler fixedUnigramCandidateSampler(Operand trueClasses, @@ -1010,21 +1014,21 @@ public FixedUnigramCandidateSampler fixedUnigramCandidateSampler(Operand /** * Performs fractional average pooling on the input. - *

      * Fractional average pooling is similar to Fractional max pooling in the pooling * region generation step. The only difference is that after pooling regions are * generated, a mean operation is performed instead of a max operation in each * pooling region. * - * @param data type for {@code output()} output - * @param value 4-D with shape `[batch, height, width, channels]`. - * @param poolingRatio Pooling ratio for each dimension of `value`, currently only - * supports row and col dimension and should be >= 1.0. For example, a valid + * @param data type for {@code output} output + * @param value 4-D with shape {@code [batch, height, width, channels]}. + * @param poolingRatio Pooling ratio for each dimension of {@code value}, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions * respectively. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code FractionalAvgPool} output and operands * @return a new instance of FractionalAvgPool */ public FractionalAvgPool fractionalAvgPool(Operand value, @@ -1034,45 +1038,43 @@ public FractionalAvgPool fractionalAvgPool(Operand val /** * Performs fractional max pooling on the input. - *

      * Fractional max pooling is slightly different than regular max pooling. In * regular max pooling, you downsize an input set by taking the maximum value of * smaller N x N subsections of the set (often 2x2), and try to reduce the set by * a factor of N, where N is an integer. Fractional max pooling, as you might - * expect from the word "fractional", means that the overall reduction ratio N + * expect from the word "fractional", means that the overall reduction ratio N * does not have to be an integer. - *

      - * The sizes of the pooling regions are generated randomly but are fairly uniform. + *

      The sizes of the pooling regions are generated randomly but are fairly uniform. * For example, let's look at the height dimension, and the constraints on the * list of rows that will be pool boundaries. - *

      - * First we define the following: - *

      - * 1. input_row_length : the number of rows from the input set - * 2. output_row_length : which will be smaller than the input - * 3. alpha = input_row_length / output_row_length : our reduction ratio - * 4. K = floor(alpha) - * 5. row_pooling_sequence : this is the result list of pool boundary rows - *

      - * Then, row_pooling_sequence should satisfy: - *

      - * 1. a[0] = 0 : the first value of the sequence is 0 - * 2. a[end] = input_row_length : the last value of the sequence is the size - * 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size - * 4. length(row_pooling_sequence) = output_row_length+1 - *

      - * For more details on fractional max pooling, see this paper: - * [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) - * - * @param data type for {@code output()} output - * @param value 4-D with shape `[batch, height, width, channels]`. - * @param poolingRatio Pooling ratio for each dimension of `value`, currently only - * supports row and col dimension and should be >= 1.0. For example, a valid + *

      First we define the following: + *

        + *
      1. input_row_length : the number of rows from the input set
      2. output_row_length : which will be smaller than the input
      3. alpha = input_row_length / output_row_length : our reduction ratio
      4. K = floor(alpha)
      5. row_pooling_sequence : this is the result list of pool boundary rows
      + *

      Then, row_pooling_sequence should satisfy: + *

        + *
      1. a[0] = 0 : the first value of the sequence is 0
      2. a[end] = input_row_length : the last value of the sequence is the size
      3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
      4. length(row_pooling_sequence) = output_row_length+1
      + *
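A plain-Java sketch of one valid {@code row_pooling_sequence} under the constraints above; it makes a deterministic choice for illustration, whereas the op draws the K versus K+1 intervals pseudo-randomly.

  // Sketch: build boundaries a[0..output_row_length] with a[0] = 0,
  // a[end] = input_row_length, and every interval of size K or K+1.
  static int[] rowPoolingSequenceSketch(int inputRowLength, int outputRowLength) {
    int k = inputRowLength / outputRowLength;              // K = floor(alpha)
    int remainder = inputRowLength - k * outputRowLength;  // number of K+1 intervals needed
    int[] boundaries = new int[outputRowLength + 1];
    for (int i = 1; i <= outputRowLength; i++) {
      boundaries[i] = boundaries[i - 1] + k + (i <= remainder ? 1 : 0);
    }
    return boundaries;
  }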

      For more details on fractional max pooling, see this paper: + * Benjamin Graham, Fractional Max-Pooling + * + * @param data type for {@code output} output + * @param value 4-D with shape {@code [batch, height, width, channels]}. + * @param poolingRatio Pooling ratio for each dimension of {@code value}, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions * respectively. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code FractionalMaxPool} output and operands * @return a new instance of FractionalMaxPool */ public FractionalMaxPool fractionalMaxPool(Operand value, @@ -1082,12 +1084,11 @@ public FractionalMaxPool fractionalMaxPool(Operand val /** * Batch normalization. - *

      - * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param data type for {@code y()} output - * @param data type for {@code batchMean()} output + * @param data type for {@code y} output + * @param data type for {@code batch_mean} output * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. * @param offset A 1D Tensor for offset, to shift to the normalized x. @@ -1095,7 +1096,9 @@ public FractionalMaxPool fractionalMaxPool(Operand val * must be empty for training. * @param variance A 1D Tensor for population variance. Used for inference only; * must be empty for training. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code FusedBatchNormV3} output and operands + * @param data type for {@code FusedBatchNormV3} output and operands * @return a new instance of FusedBatchNorm */ public FusedBatchNorm fusedBatchNorm(Operand x, @@ -1106,12 +1109,11 @@ public FusedBatchNorm fusedBatchNor /** * Gradient for batch normalization. - *

      - * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param data type for {@code xBackprop()} output - * @param data type for {@code scaleBackprop()} output + * @param data type for {@code x_backprop} output + * @param data type for {@code scale_backprop} output * @param yBackprop A 4D Tensor for the gradient with respect to y. * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. @@ -1127,7 +1129,9 @@ public FusedBatchNorm fusedBatchNor * @param reserveSpace3 When is_training is True, a 1D Tensor for some intermediate results to be reused * in gradient computation. When is_training is False, a dummy empty Tensor will be * created. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code FusedBatchNormGradV3} output and operands + * @param data type for {@code FusedBatchNormGradV3} output and operands * @return a new instance of FusedBatchNormGrad */ public FusedBatchNormGrad fusedBatchNormGrad( @@ -1138,7 +1142,6 @@ public FusedBatchNormGrad fusedBatc /** * Performs a padding as a preprocess during a convolution. - *

      * Similar to FusedResizeAndPadConv2d, this op allows for an optimized * implementation where the spatial padding transformation stage is fused with the * im2col lookup, but in this case without the bilinear filtering required for @@ -1151,16 +1154,17 @@ public FusedBatchNormGrad fusedBatc * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param data type for {@code output()} output - * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param data type for {@code output} output + * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of `input`. + * rows must be the same as the rank of {@code input}. * @param filter 4-D with shape - * `[filter_height, filter_width, in_channels, out_channels]`. - * @param mode + * {@code [filter_height, filter_width, in_channels, out_channels]}. + * @param mode the value of the mode property * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of `input`. Must be in the same order as the dimension specified with format. + * of {@code input}. Must be in the same order as the dimension specified with format. * @param padding The type of padding algorithm to use. + * @param data type for {@code FusedPadConv2D} output and operands * @return a new instance of FusedPadConv2d */ public FusedPadConv2d fusedPadConv2d(Operand input, @@ -1171,7 +1175,6 @@ public FusedPadConv2d fusedPadConv2d(Operand input, /** * Performs a resize and padding as a preprocess during a convolution. - *

      * It's often possible to do spatial transformations more efficiently as part of * the packing stage of a convolution, so this op allows for an optimized * implementation where these stages are fused together. This prevents the need to @@ -1183,48 +1186,46 @@ public FusedPadConv2d fusedPadConv2d(Operand input, * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param data type for {@code output()} output - * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. - * @param size A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * @param data type for {@code output} output + * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. + * @param sizeOutput A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of `input`. + * rows must be the same as the rank of {@code input}. * @param filter 4-D with shape - * `[filter_height, filter_width, in_channels, out_channels]`. - * @param mode + * {@code [filter_height, filter_width, in_channels, out_channels]}. + * @param mode the value of the mode property * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of `input`. Must be in the same order as the dimension specified with format. + * of {@code input}. Must be in the same order as the dimension specified with format. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code FusedResizeAndPadConv2D} output and operands * @return a new instance of FusedResizeAndPadConv2d */ public FusedResizeAndPadConv2d fusedResizeAndPadConv2d(Operand input, - Operand size, Operand paddings, Operand filter, String mode, + Operand sizeOutput, Operand paddings, Operand filter, String mode, List strides, String padding, FusedResizeAndPadConv2d.Options... options) { - return FusedResizeAndPadConv2d.create(scope, input, size, paddings, filter, mode, strides, padding, options); - } - - /** - * Says whether the targets are in the top `K` predictions. - *

      - * This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the - * prediction for the target class is among the top `k` predictions among - * all predictions for example `i`. Note that the behavior of `InTopK` differs - * from the `TopK` op in its handling of ties; if multiple classes have the - * same prediction value and straddle the top-`k` boundary, all of those - * classes are considered to be in the top `k`. - *

      - * More formally, let - *

      - * \\(predictions_i\\) be the predictions for all classes for example `i`, - * \\(targets_i\\) be the target class for example `i`, - * \\(out_i\\) be the output for example `i`, - *

      - * $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ - * - * @param predictions A `batch_size` x `classes` tensor. - * @param targets A `batch_size` vector of class ids. + return FusedResizeAndPadConv2d.create(scope, input, sizeOutput, paddings, filter, mode, strides, padding, options); + } + + /** + * Says whether the targets are in the top {@code K} predictions. + * This outputs a {@code batch_size} bool array, an entry {@code out[i]} is {@code true} if the + * prediction for the target class is among the top {@code k} predictions among + * all predictions for example {@code i}. Note that the behavior of {@code InTopK} differs + * from the {@code TopK} op in its handling of ties; if multiple classes have the + * same prediction value and straddle the top-{@code k} boundary, all of those + * classes are considered to be in the top {@code k}. + *

      More formally, let + *

      \(predictions_i\) be the predictions for all classes for example {@code i}, + * \(targets_i\) be the target class for example {@code i}, + * \(out_i\) be the output for example {@code i}, + *

      $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ + * + * @param predictions A {@code batch_size} x {@code classes} tensor. + * @param targets A {@code batch_size} vector of class ids. * @param k Number of top elements to look at for computing precision. + * @param data type for {@code InTopKV2} output and operands * @return a new instance of InTopK */ public InTopK inTopK(Operand predictions, Operand targets, @@ -1234,13 +1235,14 @@ public InTopK inTopK(Operand predictions, Operand< /** * L2 Loss. - *
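A minimal eager-mode sketch of the inTopK endpoint above, assuming standard org.tensorflow imports; the constant values are illustrative:

Ops tf = Ops.create();
Operand<TFloat32> predictions =
    tf.constant(new float[][] {{0.1f, 0.8f, 0.1f}, {0.6f, 0.3f, 0.1f}});
Operand<TInt32> targets = tf.constant(new int[] {1, 2});        // target class id per example
InTopK inTopK = tf.nn.inTopK(predictions, targets, tf.constant(1));
// inTopK.precision() is a batch_size bool vector; here it holds [true, false].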

      - * Computes half the L2 norm of a tensor without the `sqrt`: - *

      - * output = sum(t ** 2) / 2 + * Computes half the L2 norm of a tensor without the {@code sqrt}: + *

      +   *  output = sum(t ** 2) / 2
      +   *  
      * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param t Typically 2-D, but may have any dimensions. + * @param data type for {@code L2Loss} output and operands * @return a new instance of L2Loss */ public L2Loss l2Loss(Operand t) { @@ -1248,11 +1250,12 @@ public L2Loss l2Loss(Operand t) { } /** - * Computes rectified linear: `max(features, features * alpha)`. + * Computes rectified linear: {@code max(features, features * alpha)}. * - * @param data type for {@code activations()} output - * @param features - * @param options carries optional attributes values + * @param data type for {@code activations} output + * @param features the features value + * @param options carries optional attribute values + * @param data type for {@code LeakyRelu} output and operands * @return a new instance of LeakyRelu */ public LeakyRelu leakyRelu(Operand features, @@ -1262,13 +1265,10 @@ public LeakyRelu leakyRelu(Operand features, /** * Generates labels for candidate sampling with a learned unigram distribution. - *
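A minimal eager-mode sketch of the l2Loss endpoint above, assuming standard org.tensorflow imports and illustrative values:

Ops tf = Ops.create();
Operand<TFloat32> t = tf.constant(new float[] {1f, 2f, 3f});
L2Loss<TFloat32> loss = tf.nn.l2Loss(t);
// loss is the scalar (1 + 4 + 9) / 2 = 7.0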

      * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - *

      - * For each batch, this op picks a single set of sampled candidate labels. - *

      - * The advantages of sampling candidates per-batch are simplicity and the + *

      For each batch, this op picks a single set of sampled candidate labels. + *

      The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. @@ -1281,7 +1281,7 @@ public LeakyRelu leakyRelu(Operand features, * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. * @param rangeMax The sampler will sample integers from the interval [0, range_max). - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of LearnedUnigramCandidateSampler */ public LearnedUnigramCandidateSampler learnedUnigramCandidateSampler(Operand trueClasses, @@ -1292,22 +1292,22 @@ public LearnedUnigramCandidateSampler learnedUnigramCandidateSampler(Operand - * The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last + * The 4-D {@code input} tensor is treated as a 3-D array of 1-D vectors (along the last * dimension), and each vector is normalized independently. Within a given vector, * each component is divided by the weighted, squared sum of inputs within - * `depth_radius`. In detail, - *

      - * sqr_sum[a, b, c, d] = - * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) - * output = input / (bias + alpha * sqr_sum) ** beta - *

      - * For details, see [Krizhevsky et al., ImageNet classification with deep - * convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). - * - * @param data type for {@code output()} output + * {@code depth_radius}. In detail, + *

      +   *  sqr_sum[a, b, c, d] =
      +   *      sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
      +   *  output = input / (bias + alpha * sqr_sum) ** beta
      +   *  
      + *

      For details, see Krizhevsky et al., ImageNet classification with deep + * convolutional neural networks (NIPS 2012) . + * + * @param data type for {@code output} output * @param input 4-D. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code LRN} output and operands * @return a new instance of LocalResponseNormalization */ public LocalResponseNormalization localResponseNormalization( @@ -1317,13 +1317,14 @@ public LocalResponseNormalization localResponseNormalizat /** * Computes log softmax activations. - *
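A minimal eager-mode sketch of the localResponseNormalization endpoint above; the input shape is an assumption for illustration and the optional attributes are left at their defaults:

Ops tf = Ops.create();
Operand<TFloat32> input = tf.constant(new float[1][4][4][8]);   // 4-D NHWC input, all zeros here
LocalResponseNormalization<TFloat32> lrn = tf.nn.localResponseNormalization(input);
// Each vector along the last dimension is divided by (bias + alpha * sqr_sum) ** beta.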

      - * For each batch `i` and class `j` we have - *

      - * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) - * - * @param data type for {@code logsoftmax()} output - * @param logits 2-D with shape `[batch_size, num_classes]`. + * For each batch {@code i} and class {@code j} we have + *

      +   *  logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
      +   *  
      + * + * @param data type for {@code logsoftmax} output + * @param logits 2-D with shape {@code [batch_size, num_classes]}. + * @param data type for {@code LogSoftmax} output and operands * @return a new instance of LogSoftmax */ public LogSoftmax logSoftmax(Operand logits) { @@ -1333,16 +1334,17 @@ public LogSoftmax logSoftmax(Operand logits) { /** * Performs max pooling on the input. * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param input 4-D input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code MaxPoolV2} output and operands * @return a new instance of MaxPool */ - public MaxPool maxPool(Operand input, Operand ksize, + public MaxPool maxPool(Operand input, Operand ksize, Operand strides, String padding, MaxPool.Options... options) { return MaxPool.create(scope, input, ksize, strides, padding, options); } @@ -1350,14 +1352,15 @@ public MaxPool maxPool(Operand input, Operand ks /** * Performs 3D max pooling on the input. * - * @param data type for {@code output()} output - * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. + * @param data type for {@code output} output + * @param input Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code MaxPool3D} output and operands * @return a new instance of MaxPool3d */ public MaxPool3d maxPool3d(Operand input, List ksize, @@ -1368,16 +1371,18 @@ public MaxPool3d maxPool3d(Operand input, List k /** * Computes gradients of 3D max pooling function. * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code MaxPool3DGrad} output and operands + * @param data type for {@code MaxPool3DGrad} output and operands * @return a new instance of MaxPool3dGrad */ public MaxPool3dGrad maxPool3dGrad(Operand origInput, @@ -1389,16 +1394,17 @@ public MaxPool3dGrad maxPool3dGrad(Ope /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code MaxPool3DGradGrad} output and operands * @return a new instance of MaxPool3dGradGrad */ public MaxPool3dGradGrad maxPool3dGradGrad(Operand origInput, @@ -1410,15 +1416,16 @@ public MaxPool3dGradGrad maxPool3dGradGrad(Operand ori /** * Computes gradients of the maxpooling function. * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad 4-D. Gradients w.r.t. the output of `max_pool`. + * @param grad 4-D. Gradients w.r.t. the output of {@code max_pool}. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code MaxPoolGradV2} output and operands * @return a new instance of MaxPoolGrad */ public MaxPoolGrad maxPoolGrad(Operand origInput, Operand origOutput, @@ -1430,15 +1437,16 @@ public MaxPoolGrad maxPoolGrad(Operand origInput, Oper /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad 4-D. Gradients of gradients w.r.t. the input of `max_pool`. + * @param grad 4-D. Gradients of gradients w.r.t. the input of {@code max_pool}. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code MaxPoolGradGradV2} output and operands * @return a new instance of MaxPoolGradGrad */ public MaxPoolGradGrad maxPoolGradGrad(Operand origInput, @@ -1450,16 +1458,17 @@ public MaxPoolGradGrad maxPoolGradGrad(Operand origInp /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param input The original input. - * @param grad 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the - * input of `max_pool`. - * @param argmax The indices of the maximum values chosen for each output of `max_pool`. + * @param grad 4-D with shape {@code [batch, height, width, channels]}. Gradients w.r.t. the + * input of {@code max_pool}. + * @param argmax The indices of the maximum values chosen for each output of {@code max_pool}. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code MaxPoolGradGradWithArgmax} output and operands * @return a new instance of MaxPoolGradGradWithArgmax */ public MaxPoolGradGradWithArgmax maxPoolGradGradWithArgmax( @@ -1470,54 +1479,53 @@ public MaxPoolGradGradWithArgmax maxPoolGradGradWithArgma /** * Performs max pooling on the input and outputs both max values and indices. - *

      - * The indices in `argmax` are flattened, so that a maximum value at position - * `[b, y, x, c]` becomes flattened index: - * `(y * width + x) * channels + c` if `include_batch_in_index` is False; - * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - *

      - * The indices returned are always in `[0, height) x [0, width)` before flattening, + * The indices in {@code argmax} are flattened, so that a maximum value at position + * {@code [b, y, x, c]} becomes flattened index: + * {@code (y * width + x) * channels + c} if {@code include_batch_in_index} is False; + * {@code ((b * height + y) * width + x) * channels + c} if {@code include_batch_in_index} is True. + *

      The indices returned are always in {@code [0, height) x [0, width)} before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param data type for {@code output()} output - * @param data type for {@code argmax()} output - * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. + * @param data type for {@code output} output + * @param data type for {@code argmax} output + * @param input 4-D with shape {@code [batch, height, width, channels]}. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values - * @return a new instance of MaxPoolWithArgmax + * @param options carries optional attribute values + * @param data type for {@code MaxPoolWithArgmax} output and operands + * @return a new instance of MaxPoolWithArgmax, with default output types */ public MaxPoolWithArgmax maxPoolWithArgmax(Operand input, - List ksize, List strides, String padding, MaxPoolWithArgmax.Options... options) { + List ksize, List strides, String padding, MaxPoolWithArgmax.Options[] options) { return MaxPoolWithArgmax.create(scope, input, ksize, strides, padding, options); } /** * Performs max pooling on the input and outputs both max values and indices. - *

      - * The indices in `argmax` are flattened, so that a maximum value at position - * `[b, y, x, c]` becomes flattened index: - * `(y * width + x) * channels + c` if `include_batch_in_index` is False; - * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - *

      - * The indices returned are always in `[0, height) x [0, width)` before flattening, + * The indices in {@code argmax} are flattened, so that a maximum value at position + * {@code [b, y, x, c]} becomes flattened index: + * {@code (y * width + x) * channels + c} if {@code include_batch_in_index} is False; + * {@code ((b * height + y) * width + x) * channels + c} if {@code include_batch_in_index} is True. + *

      The indices returned are always in {@code [0, height) x [0, width)} before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param data type for {@code output()} output - * @param data type for {@code argmax()} output - * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. + * @param data type for {@code output} output + * @param data type for {@code argmax} output + * @param input 4-D with shape {@code [batch, height, width, channels]}. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. - * @param Targmax + * @param Targmax the value of the Targmax property * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code MaxPoolWithArgmax} output and operands + * @param data type for {@code MaxPoolWithArgmax} output and operands * @return a new instance of MaxPoolWithArgmax */ public MaxPoolWithArgmax maxPoolWithArgmax( @@ -1527,21 +1535,21 @@ public MaxPoolWithArgmax maxPoolWit } /** - * Finds values of the `n`-th order statistic for the last dimension. - *

      + * Finds values of the {@code n}-th order statistic for the last dimension. * If the input is a vector (rank-1), finds the entries which is the nth-smallest * value in the vector and outputs their values as scalar tensor. - *

      - * For matrices (resp. higher rank input), computes the entries which is the + *

      For matrices (resp. higher rank input), computes the entries which is the * nth-smallest value in each row (resp. vector along the last dimension). Thus, - *

      - * values.shape = input.shape[:-1] + *

      +   *  values.shape = input.shape[:-1]
      +   *  
      * - * @param data type for {@code values()} output - * @param input 1-D or higher with last dimension at least `n+1`. + * @param data type for {@code values} output + * @param input 1-D or higher with last dimension at least {@code n+1}. * @param n 0-D. Position of sorted vector to select along the last dimension (along - * each row for matrices). Valid range of n is `[0, input.shape[:-1])` - * @param options carries optional attributes values + * each row for matrices). Valid range of n is {@code [0, input.shape[:-1])} + * @param options carries optional attribute values + * @param data type for {@code NthElement} output and operands * @return a new instance of NthElement */ public NthElement nthElement(Operand input, Operand n, @@ -1552,8 +1560,8 @@ public NthElement nthElement(Operand input, Operand data type for {@code output()} output - * @param input 4-D with shape `[batch, height, width, channels]`. + * @param data type for {@code output} output + * @param input 4-D with shape {@code [batch, height, width, channels]}. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param ksize The size of the window for each dimension of the input tensor. @@ -1561,9 +1569,10 @@ public NthElement nthElement(Operand input, Operand data type for {@code QuantizedAvgPool} output and operands * @return a new instance of QuantizedAvgPool */ - public QuantizedAvgPool quantizedAvgPool(Operand input, + public QuantizedAvgPool quantizedAvgPool(Operand input, Operand minInput, Operand maxInput, List ksize, List strides, String padding) { return QuantizedAvgPool.create(scope, input, minInput, maxInput, ksize, strides, padding); @@ -1571,11 +1580,10 @@ public QuantizedAvgPool quantizedAvgPool(Operand input, /** * Quantized Batch normalization. - *

      * This op is deprecated and will be removed in the future. Prefer - * `tf.nn.batch_normalization`. + * {@code tf.nn.batch_normalization}. * - * @param data type for {@code result()} output + * @param data type for {@code result} output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. * @param tMax The value represented by the highest quantized input. @@ -1594,17 +1602,19 @@ public QuantizedAvgPool quantizedAvgPool(Operand input, * @param betaMin The value represented by the lowest quantized offset. * @param betaMax The value represented by the highest quantized offset. * @param gamma A 1D gamma Tensor with size matching the last dimension of t. - * If "scale_after_normalization" is true, this tensor will be multiplied + * If "scale_after_normalization" is true, this tensor will be multiplied * with the normalized tensor. * @param gammaMin The value represented by the lowest quantized gamma. * @param gammaMax The value represented by the highest quantized gamma. - * @param outType + * @param outType the value of the outType property * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. + * @param data type for {@code QuantizedBatchNormWithGlobalNormalization} output and operands + * @param data type for {@code QuantizedBatchNormWithGlobalNormalization} output and operands * @return a new instance of QuantizedBatchNormWithGlobalNormalization */ - public QuantizedBatchNormWithGlobalNormalization quantizedBatchNormWithGlobalNormalization( + public QuantizedBatchNormWithGlobalNormalization quantizedBatchNormWithGlobalNormalization( Operand t, Operand tMin, Operand tMax, Operand m, Operand mMin, Operand mMax, Operand v, Operand vMin, Operand vMax, Operand beta, Operand betaMin, Operand betaMax, @@ -1615,49 +1625,49 @@ public QuantizedBatchNormWithGlobalNormalizat /** * Adds Tensor 'bias' to Tensor 'input' for Quantized types. - *

      * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * - * @param data type for {@code output()} output - * @param input + * @param data type for {@code output} output + * @param input the input value * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minBias The float value that the lowest quantized bias value represents. * @param maxBias The float value that the highest quantized bias value represents. - * @param outType + * @param outType the value of the outType property + * @param data type for {@code QuantizedBiasAdd} output and operands * @return a new instance of QuantizedBiasAdd */ - public QuantizedBiasAdd quantizedBiasAdd(Operand input, - Operand bias, Operand minInput, Operand maxInput, + public QuantizedBiasAdd quantizedBiasAdd(Operand input, + Operand bias, Operand minInput, Operand maxInput, Operand minBias, Operand maxBias, Class outType) { return QuantizedBiasAdd.create(scope, input, bias, minInput, maxInput, minBias, maxBias, outType); } /** * Computes a 2D convolution given quantized 4D input and filter tensors. - *

      * The inputs are quantized tensors where the lowest value represents the real * number of the associated minimum, and the highest represents the maximum. * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. * - * @param data type for {@code output()} output - * @param input + * @param data type for {@code output} output + * @param input the input value * @param filter filter's input_depth dimension must match input's depth dimensions. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minFilter The float value that the lowest quantized filter value represents. * @param maxFilter The float value that the highest quantized filter value represents. - * @param outType + * @param outType the value of the outType property * @param strides The stride of the sliding window for each dimension of the input * tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2D} output and operands * @return a new instance of QuantizedConv2d */ - public QuantizedConv2d quantizedConv2d(Operand input, - Operand filter, Operand minInput, Operand maxInput, + public QuantizedConv2d quantizedConv2d(Operand input, + Operand filter, Operand minInput, Operand maxInput, Operand minFilter, Operand maxFilter, Class outType, List strides, String padding, QuantizedConv2d.Options... options) { return QuantizedConv2d.create(scope, input, filter, minInput, maxInput, minFilter, maxFilter, outType, strides, padding, options); @@ -1666,14 +1676,15 @@ public QuantizedConv2d quantizedConv2d(Operand data type for {@code y()} output + * @param data type for {@code y} output * @param x A 4D input Tensor. * @param xMin The value represented by the lowest quantized input. * @param xMax The value represented by the highest quantized input. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code QuantizedInstanceNorm} output and operands * @return a new instance of QuantizedInstanceNorm */ - public QuantizedInstanceNorm quantizedInstanceNorm(Operand x, + public QuantizedInstanceNorm quantizedInstanceNorm(Operand x, Operand xMin, Operand xMax, QuantizedInstanceNorm.Options... options) { return QuantizedInstanceNorm.create(scope, x, xMin, xMax, options); } @@ -1681,7 +1692,7 @@ public QuantizedInstanceNorm quantizedInstanceNorm(Operand< /** * Produces the max pool of the input tensor for quantized types. * - * @param data type for {@code output()} output + * @param data type for {@code output} output * @param input The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. @@ -1690,82 +1701,93 @@ public QuantizedInstanceNorm quantizedInstanceNorm(Operand< * @param strides The stride of the sliding window for each dimension of the input * tensor. The length must be 4 to match the number of dimensions of the input. * @param padding The type of padding algorithm to use. 
+ * @param data type for {@code QuantizedMaxPool} output and operands * @return a new instance of QuantizedMaxPool */ - public QuantizedMaxPool quantizedMaxPool(Operand input, + public QuantizedMaxPool quantizedMaxPool(Operand input, Operand minInput, Operand maxInput, List ksize, List strides, String padding) { return QuantizedMaxPool.create(scope, input, minInput, maxInput, ksize, strides, padding); } /** - * Computes Quantized Rectified Linear: `max(features, 0)` + * Computes Quantized Rectified Linear: {@code max(features, 0)} * - * @param data type for {@code activations()} output - * @param features + * @param data type for {@code activations} output + * @param features the features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType + * @param outType the value of the outType property + * @param data type for {@code QuantizedRelu} output and operands * @return a new instance of QuantizedRelu */ - public QuantizedRelu quantizedRelu(Operand features, + public QuantizedRelu quantizedRelu(Operand features, Operand minFeatures, Operand maxFeatures, Class outType) { return QuantizedRelu.create(scope, features, minFeatures, maxFeatures, outType); } /** - * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` + * Computes Quantized Rectified Linear 6: {@code min(max(features, 0), 6)} * - * @param data type for {@code activations()} output - * @param features + * @param data type for {@code activations} output + * @param features the features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType + * @param outType the value of the outType property + * @param data type for {@code QuantizedRelu6} output and operands * @return a new instance of QuantizedRelu6 */ - public QuantizedRelu6 quantizedRelu6(Operand features, + public QuantizedRelu6 quantizedRelu6(Operand features, Operand minFeatures, Operand maxFeatures, Class outType) { return QuantizedRelu6.create(scope, features, minFeatures, maxFeatures, outType); } /** - * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` + * Computes Quantized Rectified Linear X: {@code min(max(features, 0), max_value)} * - * @param data type for {@code activations()} output - * @param features - * @param maxValue + * @param data type for {@code activations} output + * @param features the features value + * @param maxValue the maxValue value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType + * @param outType the value of the outType property + * @param data type for {@code QuantizedReluX} output and operands * @return a new instance of QuantizedReluX */ - public QuantizedReluX quantizedReluX(Operand features, + public QuantizedReluX quantizedReluX(Operand features, Operand maxValue, Operand minFeatures, Operand maxFeatures, Class outType) { return QuantizedReluX.create(scope, features, maxValue, minFeatures, maxFeatures, outType); } /** - * Computes rectified linear: `max(features, 0)`. - *

      + * Computes rectified linear: {@code max(features, 0)}. * See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) * Example usage: - * >>> tf.nn.relu([-2., 0., -0., 3.]).numpy() - * array([ 0., 0., -0., 3.], dtype=float32) - * - * @param data type for {@code activations()} output - * @param features + *

+   *  tf.nn.relu([-2., 0., 3.]).numpy()
+   *  array([0., 0., 3.], dtype=float32)
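The equivalent call through this Java API might look like the following sketch (eager mode, imports assumed):

Ops tf = Ops.create();
Relu<TFloat32> r = tf.nn.relu(tf.constant(new float[] {-2f, 0f, 3f}));
// r holds [0.0, 0.0, 3.0], matching the Python example above.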
      + * + * @param data type for {@code activations} output + * @param features the features value + * @param data type for {@code Relu} output and operands * @return a new instance of Relu */ - public Relu relu(Operand features) { + public Relu relu(Operand features) { return Relu.create(scope, features); } /** - * Computes rectified linear 6: `min(max(features, 0), 6)`. + * Computes rectified linear 6: {@code min(max(features, 0), 6)}. * - * @param data type for {@code activations()} output - * @param features + * @param data type for {@code activations} output + * @param features the features value + * @param data type for {@code Relu6} output and operands * @return a new instance of Relu6 */ public Relu6 relu6(Operand features) { @@ -1773,18 +1795,16 @@ public Relu6 relu6(Operand features) { } /** - * Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` - *

      - * if < 0, `scale * features` otherwise. - *

      - * To be used together with - * `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. - * For correct dropout, use `tf.contrib.nn.alpha_dropout`. - *

      - * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + * Computes scaled exponential linear: {@code scale * alpha * (exp(features) - 1)} + * if < 0, {@code scale * features} otherwise. + *

      To be used together with + * {@code initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')}. + * For correct dropout, use {@code tf.contrib.nn.alpha_dropout}. + *

      See Self-Normalizing Neural Networks * - * @param data type for {@code activations()} output - * @param features + * @param data type for {@code activations} output + * @param features the features value + * @param data type for {@code Selu} output and operands * @return a new instance of Selu */ public Selu selu(Operand features) { @@ -1793,13 +1813,14 @@ public Selu selu(Operand features) { /** * Computes softmax activations. - *

      - * For each batch `i` and class `j` we have - *

      - * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ - * - * @param data type for {@code softmax()} output - * @param logits 2-D with shape `[batch_size, num_classes]`. + * For each batch {@code i} and class {@code j} we have + *

      +   *  $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
      +   *  
      + * + * @param data type for {@code softmax} output + * @param logits 2-D with shape {@code [batch_size, num_classes]}. + * @param data type for {@code Softmax} output and operands * @return a new instance of Softmax */ public Softmax softmax(Operand logits) { @@ -1808,14 +1829,14 @@ public Softmax softmax(Operand logits) { /** * Computes softmax cross entropy cost and gradients to backpropagate. - *
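A minimal eager-mode sketch of the softmax endpoint above (logSoftmax is used the same way); the logits are illustrative:

Ops tf = Ops.create();
Operand<TFloat32> logits = tf.constant(new float[][] {{2f, 1f, 0.1f}});
Softmax<TFloat32> probs = tf.nn.softmax(logits);          // each row of probs sums to 1
LogSoftmax<TFloat32> logProbs = tf.nn.logSoftmax(logits);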

      * Inputs are the logits, not probabilities. * - * @param data type for {@code loss()} output + * @param data type for {@code loss} output * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. + * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits */ public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyWithLogits( @@ -1824,10 +1845,11 @@ public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyW } /** - * Computes softsign: `features / (abs(features) + 1)`. + * Computes softsign: {@code features / (abs(features) + 1)}. * - * @param data type for {@code activations()} output - * @param features + * @param data type for {@code activations} output + * @param features the features value + * @param data type for {@code Softsign} output and operands * @return a new instance of Softsign */ public Softsign softsign(Operand features) { @@ -1836,87 +1858,85 @@ public Softsign softsign(Operand features) { /** * SpaceToBatch for 4-D tensors of type T. - *
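A minimal eager-mode sketch of the softmaxCrossEntropyWithLogits endpoint above; the one-hot labels and logits are illustrative:

Ops tf = Ops.create();
Operand<TFloat32> features = tf.constant(new float[][] {{2f, 1f, 0.1f}});   // logits
Operand<TFloat32> labels = tf.constant(new float[][] {{1f, 0f, 0f}});       // a valid probability distribution per row
SoftmaxCrossEntropyWithLogits<TFloat32> xent = tf.nn.softmaxCrossEntropyWithLogits(features, labels);
// xent.loss() is a batch_size vector; xent.backprop() is a batch_size x num_classes matrix.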

      * This is a legacy version of the more general SpaceToBatchND. - *

      - * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. + *

      Zero-pads and then rearranges (permutes) blocks of spatial data into batch. * More specifically, this op outputs a copy of the input tensor where values from - * the `height` and `width` dimensions are moved to the `batch` dimension. After - * the zero-padding, both `height` and `width` of the input must be divisible by the + * the {@code height} and {@code width} dimensions are moved to the {@code batch} dimension. After + * the zero-padding, both {@code height} and {@code width} of the input must be divisible by the * block size. * - * @param data type for {@code output()} output - * @param input 4-D with shape `[batch, height, width, depth]`. - * @param paddings 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies - * the padding of the input with zeros across the spatial dimensions as follows: - *

      - * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] - *

      - * The effective spatial dimensions of the zero-padded input tensor will be: - *

      - * height_pad = pad_top + height + pad_bottom - * width_pad = pad_left + width + pad_right - *

      - * The attr `block_size` must be greater than one. It indicates the block size. - *

      - * Non-overlapping blocks of size `block_size x block size` in the height and - * width dimensions are rearranged into the batch dimension at each location. - * The batch of the output tensor is `batch * block_size * block_size`. - * Both height_pad and width_pad must be divisible by block_size. - *

      - * The shape of the output will be: - *

      - * [batchblock_sizeblock_size, height_pad/block_size, width_pad/block_size, - * depth] - *

      - * Some examples: - *

      - * (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: - *

      {@code
      +   * @param  data type for {@code output} output
      +   * @param input 4-D with shape {@code [batch, height, width, depth]}.
      +   * @param paddings 2-D tensor of non-negative integers with shape {@code [2, 2]}. It specifies
      +   *  the padding of the input with zeros across the spatial dimensions as follows:
      +   *  
      +   *    paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
      +   *  
      + *

      The effective spatial dimensions of the zero-padded input tensor will be: + *

      +   *    height_pad = pad_top + height + pad_bottom
      +   *    width_pad = pad_left + width + pad_right
      +   *  
      + *

      The attr {@code block_size} must be greater than one. It indicates the block size. + *

+   *  Non-overlapping blocks of size {@code block_size x block size} in the height and
+   *  width dimensions are rearranged into the batch dimension at each location.
+   *  The batch of the output tensor is {@code batch * block_size * block_size}.
+   *  Both height_pad and width_pad must be divisible by block_size.

      The shape of the output will be: + *

      +   *  [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
      +   *   depth]
      +   *  
      + *

      Some examples: + *

      (1) For the following input of shape {@code [1, 2, 2, 1]} and block_size of 2: + *

          *  x = [[[[1], [2]], [[3], [4]]]]
      -   *  }
      - * The output tensor has shape `[4, 1, 1, 1]` and value: - *
      {@code
      +   *  
      + *

      The output tensor has shape {@code [4, 1, 1, 1]} and value: + *

          *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
      -   *  }
      - * (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: - *
      {@code
      +   *  
      + *

      (2) For the following input of shape {@code [1, 2, 2, 3]} and block_size of 2: + *

          *  x = [[[[1, 2, 3], [4, 5, 6]],
          *        [[7, 8, 9], [10, 11, 12]]]]
      -   *  }
      - * The output tensor has shape `[4, 1, 1, 3]` and value: - *
      {@code
      +   *  
      + *

      The output tensor has shape {@code [4, 1, 1, 3]} and value: + *

          *  [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
      -   *  }
      - * (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: - *
      {@code
      +   *  
      + *

      (3) For the following input of shape {@code [1, 4, 4, 1]} and block_size of 2: + *

          *  x = [[[[1],   [2],  [3],  [4]],
          *        [[5],   [6],  [7],  [8]],
          *        [[9],  [10], [11],  [12]],
          *        [[13], [14], [15],  [16]]]]
      -   *  }
      - * The output tensor has shape `[4, 2, 2, 1]` and value: - *
      {@code
      +   *  
      + *

      The output tensor has shape {@code [4, 2, 2, 1]} and value: + *

          *  x = [[[[1], [3]], [[9], [11]]],
          *       [[[2], [4]], [[10], [12]]],
          *       [[[5], [7]], [[13], [15]]],
          *       [[[6], [8]], [[14], [16]]]]
      -   *  }
      - * (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: - *
      {@code
      +   *  
      + *

      (4) For the following input of shape {@code [2, 2, 4, 1]} and block_size of 2: + *

          *  x = [[[[1],   [2],  [3],  [4]],
          *        [[5],   [6],  [7],  [8]]],
          *       [[[9],  [10], [11],  [12]],
          *        [[13], [14], [15],  [16]]]]
      -   *  }
      - * The output tensor has shape `[8, 1, 2, 1]` and value: - *
      {@code
      +   *  
      + *

      The output tensor has shape {@code [8, 1, 2, 1]} and value: + *

          *  x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
          *       [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
      -   *  }
      - * Among others, this operation is useful for reducing atrous convolution into + *
      + *

      Among others, this operation is useful for reducing atrous convolution into * regular convolution. - * @param blockSize + * @param blockSize the value of the blockSize property + * @param data type for {@code SpaceToBatch} output and operands * @return a new instance of SpaceToBatch */ public SpaceToBatch spaceToBatch(Operand input, @@ -1926,84 +1946,80 @@ public SpaceToBatch spaceToBatch(Operand input, /** * SpaceToDepth for tensors of type T. - *

      * Rearranges blocks of spatial data, into depth. More specifically, - * this op outputs a copy of the input tensor where values from the `height` - * and `width` dimensions are moved to the `depth` dimension. - * The attr `block_size` indicates the input block size. - *

      - * Non-overlapping blocks of size `block_size x block size` are rearranged - * into depth at each location. - * The depth of the output tensor is `block_size * block_size * input_depth`. - * The Y, X coordinates within each block of the input become the high order - * component of the output channel index. - * The input tensor's height and width must be divisible by block_size. - *

      - * The `data_format` attr specifies the layout of the input and output tensors + * this op outputs a copy of the input tensor where values from the {@code height} + * and {@code width} dimensions are moved to the {@code depth} dimension. + * The attr {@code block_size} indicates the input block size. + *

+   *  Non-overlapping blocks of size {@code block_size x block size} are rearranged
+   *  into depth at each location.
+   *  The depth of the output tensor is {@code block_size * block_size * input_depth}.
+   *  The Y, X coordinates within each block of the input become the high order
+   *  component of the output channel index.
+   *  The input tensor's height and width must be divisible by block_size.

      The {@code data_format} attr specifies the layout of the input and output tensors * with the following options: - * "NHWC": `[ batch, height, width, channels ]` - * "NCHW": `[ batch, channels, height, width ]` - * "NCHW_VECT_C": - * `qint8 [ batch, channels / 4, height, width, 4 ]` - *

      - * It is useful to consider the operation as transforming a 6-D Tensor. + * "NHWC": {@code [ batch, height, width, channels ]} + * "NCHW": {@code [ batch, channels, height, width ]} + * "NCHW_VECT_C": + * {@code qint8 [ batch, channels / 4, height, width, 4 ]} + *

      It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, - * Each element in the input tensor can be specified via 6 coordinates, - * ordered by decreasing memory layout significance as: - * n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates - * within the output image, bX, bY means coordinates - * within the input block, iC means input channels). - * The output would be a transpose to the following layout: - * n,oY,oX,bY,bX,iC - *

      - * This operation is useful for resizing the activations between convolutions + * Each element in the input tensor can be specified via 6 coordinates, + * ordered by decreasing memory layout significance as: + * n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates + * within the output image, bX, bY means coordinates + * within the input block, iC means input channels). + * The output would be a transpose to the following layout: + * n,oY,oX,bY,bX,iC + *

      This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. - *

      - * For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and + *

      For example, given an input of shape {@code [1, 2, 2, 1]}, data_format = "NHWC" and * block_size = 2: - *

      {@code
      +   *  
          *  x = [[[[1], [2]],
          *        [[3], [4]]]]
      -   *  }
      - * This operation will output a tensor of shape `[1, 1, 1, 4]`: - *
      {@code
      +   *  
      + *

      This operation will output a tensor of shape {@code [1, 1, 1, 4]}: + *

          *  [[[[1, 2, 3, 4]]]]
      -   *  }
      - * Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, + *
      + *

      Here, the input has a batch of 1 and each batch element has shape {@code [2, 2, 1]}, * the corresponding output will have a single element (i.e. width and height are * both 1) and will have a depth of 4 channels (1 * block_size * block_size). - * The output element shape is `[1, 1, 4]`. - *

      - * For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. - *

      {@code
      +   *  The output element shape is {@code [1, 1, 4]}.
      +   *  

      For an input tensor with larger depth, here of shape {@code [1, 2, 2, 3]}, e.g. + *

          *  x = [[[[1, 2, 3], [4, 5, 6]],
          *        [[7, 8, 9], [10, 11, 12]]]]
      -   *  }
      - * This operation, for block_size of 2, will return the following tensor of shape - * `[1, 1, 1, 12]` - *
      {@code
      +   *  
      + *

      This operation, for block_size of 2, will return the following tensor of shape + * {@code [1, 1, 1, 12]} + *

          *  [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
      -   *  }
      - * Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: - *
      {@code
      +   *  
      + *

      Similarly, for the following input of shape {@code [1 4 4 1]}, and a block size of 2: + *

          *  x = [[[[1],   [2],  [5],  [6]],
          *        [[3],   [4],  [7],  [8]],
          *        [[9],  [10], [13],  [14]],
          *        [[11], [12], [15],  [16]]]]
      -   *  }
      - * the operator will return the following tensor of shape `[1 2 2 4]`: - *
      {@code
      +   *  
      + *

      the operator will return the following tensor of shape {@code [1 2 2 4]}: + *

          *  x = [[[[1, 2, 3, 4],
          *         [5, 6, 7, 8]],
          *        [[9, 10, 11, 12],
          *         [13, 14, 15, 16]]]]
      -   *  }
      + *
      * - * @param data type for {@code output()} output - * @param input + * @param data type for {@code output} output + * @param input the input value * @param blockSize The size of the spatial block. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code SpaceToDepth} output and operands * @return a new instance of SpaceToDepth */ public SpaceToDepth spaceToDepth(Operand input, Long blockSize, @@ -2013,18 +2029,17 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo /** * Computes softmax cross entropy cost and gradients to backpropagate. - *
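A minimal eager-mode sketch of the spaceToDepth endpoint above, reproducing the first example in the description (imports assumed):

Ops tf = Ops.create();
Operand<TFloat32> input =
    tf.constant(new float[][][][] {{{{1f}, {2f}}, {{3f}, {4f}}}});   // shape [1, 2, 2, 1]
SpaceToDepth<TFloat32> out = tf.nn.spaceToDepth(input, 2L);
// out has shape [1, 1, 1, 4] and holds [[[[1, 2, 3, 4]]]].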

      - * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - *

      - * Inputs are the logits, not probabilities. + *

      Inputs are the logits, not probabilities. * - * @param data type for {@code loss()} output + * @param data type for {@code loss} output * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. + * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( @@ -2033,24 +2048,23 @@ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxC } /** - * Finds values and indices of the `k` largest elements for the last dimension. - *
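A minimal eager-mode sketch of the sparseSoftmaxCrossEntropyWithLogits endpoint above; note that labels are class ids rather than distributions:

Ops tf = Ops.create();
Operand<TFloat32> features = tf.constant(new float[][] {{2f, 1f, 0.1f}, {0.5f, 2f, 0.3f}});
Operand<TInt32> labels = tf.constant(new int[] {0, 1});   // one class id per row, in [0, num_classes)
SparseSoftmaxCrossEntropyWithLogits<TFloat32> xent =
    tf.nn.sparseSoftmaxCrossEntropyWithLogits(features, labels);
// xent.loss() has one entry per row of features.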

      - * If the input is a vector (rank-1), finds the `k` largest entries in the vector - * and outputs their values and indices as vectors. Thus `values[j]` is the - * `j`-th largest entry in `input`, and its index is `indices[j]`. - *

      - * For matrices (resp. higher rank input), computes the top `k` entries in each + * Finds values and indices of the {@code k} largest elements for the last dimension. + * If the input is a vector (rank-1), finds the {@code k} largest entries in the vector + * and outputs their values and indices as vectors. Thus {@code values[j]} is the + * {@code j}-th largest entry in {@code input}, and its index is {@code indices[j]}. + *

      For matrices (resp. higher rank input), computes the top {@code k} entries in each * row (resp. vector along the last dimension). Thus, - *

      - * values.shape = indices.shape = input.shape[:-1] + [k] - *

      - * If two elements are equal, the lower-index element appears first. + *

      +   *  values.shape = indices.shape = input.shape[:-1] + [k]
      +   *  
      + *

      If two elements are equal, the lower-index element appears first. * - * @param data type for {@code values()} output - * @param input 1-D or higher with last dimension at least `k`. + * @param data type for {@code values} output + * @param input 1-D or higher with last dimension at least {@code k}. * @param k 0-D. Number of top elements to look for along the last dimension (along each * row for matrices). - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param data type for {@code TopKV2} output and operands * @return a new instance of TopK */ public TopK topK(Operand input, Operand k, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index a4a7f5d6dbc..fde138e7296 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -367,10 +367,10 @@ public final class Ops { public final SignalOps signal; - public final TrainOps train; - public final QuantizationOps quantization; + public final TrainOps train; + private final Scope scope; private Ops(Scope scope) { @@ -393,8 +393,8 @@ private Ops(Scope scope) { math = new MathOps(this); audio = new AudioOps(this); signal = new SignalOps(this); - train = new TrainOps(this); quantization = new QuantizationOps(this); + train = new TrainOps(this); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java index 5d3ab3c1100..8c3ef2da29a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -29,57 +29,68 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. - *
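The topK endpoint documented above also lends itself to a small concrete sketch; again this is illustrative only, not part of this change set, and assumes an in-scope core Ops instance named tf.

    Operand<TFloat32> input = tf.constant(new float[] {1f, 4f, 2f, 8f, 5f});
    TopK<TFloat32> topK = tf.nn.topK(input, tf.constant(3));
    Operand<TFloat32> values = topK.values();   // [8.0, 5.0, 4.0]
    Operand<TInt32> indices = topK.indices();   // [3, 4, 1]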

      * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss()} output + * + * @param data type for {@code loss} output */ -@Operator(group = "nn") +@Operator( + group = "nn" +) public final class SoftmaxCrossEntropyWithLogits extends RawOp { - + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; + + private Output loss; + + private Output backprop; + + private SoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx); + } + /** * Factory method to create a class wrapping a new SoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. + * @param data type for {@code SoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits */ - @Endpoint(describeByClass = true) - public static SoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { - OperationBuilder opBuilder = scope.env().opBuilder("SoftmaxCrossEntropyWithLogits", scope.makeOpName("SoftmaxCrossEntropyWithLogits")); + @Endpoint( + describeByClass = true + ) + public static SoftmaxCrossEntropyWithLogits create(Scope scope, + Operand features, Operand labels) { + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** + * Gets loss. * Per example loss (batch_size vector). + * @return loss. */ public Output loss() { return loss; } - + /** + * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). + * @return backprop. */ public Output backprop() { return backprop; } - - /** The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "SoftmaxCrossEntropyWithLogits"; - - private Output loss; - private Output backprop; - - private SoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx); - } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 794beab4ded..84feae55726 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -29,61 +29,71 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. - *

      - * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - *

      - * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss()} output + *

      Inputs are the logits, not probabilities. + * + * @param data type for {@code loss} output */ -@Operator(group = "nn") +@Operator( + group = "nn" +) public final class SparseSoftmaxCrossEntropyWithLogits extends RawOp { - + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; + + private Output loss; + + private Output backprop; + + private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation); + int outputIdx = 0; + loss = operation.output(outputIdx++); + backprop = operation.output(outputIdx); + } + /** * Factory method to create a class wrapping a new SparseSoftmaxCrossEntropyWithLogits operation. - * + * * @param scope current scope * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. + * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ - @Endpoint(describeByClass = true) - public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, Operand features, Operand labels) { - OperationBuilder opBuilder = scope.env().opBuilder("SparseSoftmaxCrossEntropyWithLogits", scope.makeOpName("SparseSoftmaxCrossEntropyWithLogits")); + @Endpoint( + describeByClass = true + ) + public static SparseSoftmaxCrossEntropyWithLogits create(Scope scope, + Operand features, Operand labels) { + OperationBuilder opBuilder = scope.env().opBuilder(OP_NAME, scope.makeOpName("SparseSoftmaxCrossEntropyWithLogits")); opBuilder.addInput(features.asOutput()); opBuilder.addInput(labels.asOutput()); opBuilder = scope.apply(opBuilder); return new SparseSoftmaxCrossEntropyWithLogits<>(opBuilder.build()); } - + /** + * Gets loss. * Per example loss (batch_size vector). + * @return loss. */ public Output loss() { return loss; } - + /** + * Gets backprop. * backpropagated gradients (batch_size x num_classes matrix). + * @return backprop. 
*/ public Output backprop() { return backprop; } - - /** The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "SparseSoftmaxCrossEntropyWithLogits"; - - private Output loss; - private Output backprop; - - private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); - int outputIdx = 0; - loss = operation.output(outputIdx++); - backprop = operation.output(outputIdx); - } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb index fbcecceb5bd35e3681296ef57e58ae324733949a..cc39a35b20302b28884325379198c85e4b01c89b 100644 GIT binary patch delta 17213 zcmd6O3w%`Nm3Z%+XTCfpbLWxC#A)P3hTN*TX$_KmHMdqKi|D~9+?Eh zZvVT>kIc;d9_KsX`Of=0ch|Qr|JtFRgZpOYJ;)M+KNwu+gK9k~bypCko6{%8Vqz0V zihV^zHip$(oh3%YgU4q6W;64X-H+Tc!(EygRYVGW#AwjvnK_0fj%!cfA)mg3+5Bhr z6(dA$Vb`%A8{lWRum{*@^x(aXy=FWvCX(@p#3zPCJ`{;16C@!6+?40zWG@sQ9T7Hj8u8|w%*1jYX3;9w{+=nV{sfsMV9 z!~UYXHVm5F#;eM2H}e_*fI+DeyDG>d%7JN|(X)_c zjt0b7f^Sp4LeU5>NIW9W-k_V~4i(U;` zLIOEEMLa{1fpfw$Fdd$O$#|}fCC-XyAcd%k=d#Ga^myXgkRGFYbmR`O?`AJ&o`A@1 z_A%x|*mNsfhEA2l)4~HWs<>?L+K5D`z1F zX-J5P9%Hxi4Y7OJ8t{EsPzI0hV^`|0*J)EM>wLvJcO@y03nS5(q!T%k$1P108IiTtQdnGvTVK0Og53@5(J?a2?6rV^`=;B7-X(Q&v|-Km2|674Yw8FM*fuWoOCT_p?h`_BjJQbAVkl zldB;`(RdJVhndVpQ25U02&2blc9z3dSK`-BmG zap<#ctocE9_l$Gc*q`9l2icp?d0$_Ai2cer?`zv3_EKeEw%mP@TgqkWVYY~2x4Y$Q zf5-+|_O}eY^&_^L{Fw1aP)rl&`CwjwvlQ-G8H-@7^(CfLGA%&r6vNtgl7kBhN!pgmm=-4Lq6_Xq|rE@WU&kW)W z3JIY%5f>uTfEe$M#iNO6AR6Y3QiAfIo8>;IaL_FV!Y#(3U1EH5C?L|Gh&)v9HEwC6#pmdTx4JR*(f{Ji=v`Z0|+Pc0vUP^`% z)bBtn;@S=+YDRHA)dWho1y^E7*<>i{rIZnSMb)3_lOJELFTaoZ799OZUsX1(i)FML zbj}jl@v;5~7a88v6I~k|^qObDF_+cJ-lGS7lVRmQ)U`&3(P0u*=SdumMI(qlqRWRv z5mAWq1Ib8$ns;x@w{_j9JK9#Rg~nS2UZ^yu{4oh#vF7Gx>LNuVUNM}c-WP28h1~(WzG2G)GuSLnY--LK)i`C88=G=R z&6(HOICZ3HjslR#dz%c=+4@~BG6Vd*1{?fKz+jc<_Zt>6wVn!6Kvlv3@=y|WDVG-x zNr_t2({*l^s2XmEW75QlfPpi!J{~?UV$0=&8x3;`m|ual-{6GaF$8QFH8eA`&IR!E zcKGdxVU`>lGaNRhAlT^1u4G@%hm(8o6!zR@`VqU0C=YVvZqqTA z*#N61Of}4fuxY|n>GZUAw0Cv;+q*ptJn9usdz-(rtxNvqgvmEU5Ba1F{Ldnsw|{0j zQ0lP~V{~8u&2!ofoG?C;r;U++_>{@UI(_!^AZ;*p7v<(7rUc9WV?JE+tm$X&X&@=i zz(42F{wn$N&z&7qm7e`r2fZ(u<}**q-+0LsVSv1Bs%Be0FVA_|^!&rKwwp=AN_17a zSFdeDUnfYtnI&=5w_+qP)+QxFsNSP-nEM@bSut95bf6NwL!m@3`Z+P!e!Y2Yqij4E z2&;v-Ff1m-xP&KuJ~>Z2@lRhmGtnc$;V+p>nH6wwlbN>-q_E`0G@5I!HJ7fBd9)bO4>atleNFi^ffhdZq(@VvqS-rCy3LI!GFYGHc7#51E%U=TBaXyq`y! zK7Ef+eZ9GEDtWg(V7_hE_2zSez}#srYiK3@bEYet{5I0^f0?B8yx8j*V78j)GV5U8 zR`chpr=cxR)yHsXi$LuqflXV@x0d?PbP#Z2o26d9@?P_KOwr`ifa;{#C4YE>`RbeC z(8Wa@EG*5lsJ~XbQ@@^ddAkk+b_3UoInS4&)!ylcKa@J<7Tj zd*LXbPBFr^)28LM6UIAr=_%ARMm2;oS3(>bZ)nic7UQa|(*c`K%X-%M757YyO!{~! 
zAYh!NG%5L#cpsIjjM`UvvK8^}lh4`UiT4f7Q1-IHR?WQ7IfN)q0!_xZmK(VxTK#V)|}NiuRmV+)eD&4!3k&4rRdPsmlTaNx5I&wqM@?$yjRqD z(I1PA)z%Rq9v8-HH#B-1c>I%_Tt)Lt@-J(O+&<Phb*g!Y-Dll_OxmBuUzPIpe5LY7kI+=w^+ zKSn%MzR@^_nZw@t8yMmkj0?e#7y+`?Jd5dpy=yIV&(Ta&w4L<;&{SN+$Zv%$qjk(_ z5N@~31^0I?ynO%dmisEnATf1@q=3I)f)oAB?D7lJ&&ZpYOOIoxH#Oq$08>s*vD%Oz zJh02?hA;Os_LPlNQJk_g?y3|oC(=u&e)x>^R)Xp7VMcqHG4jSF)M@05nSV?rfaOxNYtEd^d@U)~ zT*d>`MGz3DqLwnv8A+-n8guy6GX5dOscA+Zd9sQaTa1eHVLv z&4jTt(}GSVu7fu+S!78BAh3&_>%jez6POcJX2;Q$4B%)eSj%=S-JaWwKuSrkJ_&#%*l-mG1GP7lb!4ZSL>luZ0w*mxBIX zY@>zZdX+Xt422aLsGOf^K>V^NwbC(E3d~<;NP)7@2StnrUVlVi46y)XtX!uyw+=GI#Ot6Q$snp&fgfIvxWC6@Fowb4^djOq;y zr+s@0h+Y^CNgVxz7x@A^Y&>o&RtPiRqYdg&h7i!H2ZFGIA?jjsW}=T_815n*NAMy9 z^EjBGO^P0US3J3RJzvGZUjvPwl)1+Yt^}2N>HhhRh1bq!G+vjdVNRHAS3LK0U5%Ca z%ia&{y@0WeJG0Tx?8Jv4l|86i$olQWr4{NZpY(~6fm1$v6m)vB5a`KXk^G6(xC+ye zNF-!iOtt82uz$b4lCq8-ZZYUe!1IK0F0`LyYpR~mSQ$6IIhPHw39L61c;MLg^kuN? z#sWJsq5qLQ51iOk;Blvf!Z;8LV>p^p@~zRyN*Q`G9hbS`HbKuBMV~tx=a-T5TuxNrq^(T0kZaDlEt^}4^@_cZ|QbrY%ikmXya=)XG&adj;d9P>I)ZK?|9)+WB?Ady<{FolUa6vn&pA_IyMvht z!X}g3u2GH*lRPS+OTtl144`4GuSh@Qc}0`! zr2;xsah#eh*J>kF4Gdw=PRHVqgGCDYk-1v;M zvQM9mpqsZwNf9LxK8G-3@<)QeKW%n+CX7pSI}n0{*wO@q#-EuzMH>HHp$-QADYIkw z78jW?UOZ_!UI*k%*A~PM4|2>X$BfqF_ZE)XLUnssRVq;Ow7Ifw!q}!hYI8d1fn=MX zR6FebGE-Woq4c!o(yS6jDNBzcOVjT${C1m(Zde$F?fr~<{A-zUIW)qVDctDy7{`p& z6J;hxr>Fdcrm9qa4;yPo8~Cw0UJQ%Fm{OlJg4tS3qEF*7A#lZGGE8=v@+f_K5l|nT z{O2MY?0m~)FyLcocz1<;hE>~}Cg!V98PRGBMbgVPNyocu=<4A<1F1q| z|EMPMtA$+W6P|jTTnoj2BsFG?q!kZg_txL6o;iaQ#G+Bmgy0o;w%3TED0+?SmZFE~ zzYMCMFgEV4d`(qdRS#*hxEd!Sw%M{ro8X%V>@^m2Up7j3%fzW)u=B7(Y6^@#=Ma%L0b?qtfyQHn6_J!C+mHEox|vQfm<)(qKGAADs5lm? zevrqojNZ+;Q(NuhRxaq>8^Rnb+-iHO1Gp^G*J4L0k1wm!I^J=TIxuU6u zT%lC0RHf=|Qr`E!r$OS4t0cm!_gGpp#H_hm#ikQe1=u@fLE3sIYF=2JN*B?j5lt7J zfnMKb)kV6{jYii8PSzDK&f59jTZ_wQ?$glJ#^fN9nkkTcM>WPtNhK*F;T4(sV>)Ww zy@PYi>M>zy09GwXJ*lB7y8MDs-H9$NC4xWxsn`QQ4VV-T)5G4|9hGP$ugZ*|l*#rm z=qyo{2)>Uo!`P|fQd>$awBV&HG#F7Vb%hqZ&<2E)t(~w5tnb=NVG(+p=8rX;p!uWX z<}3>J`Ya`t6GjZA6c4B?j62Ff2!6ZN=7fz0?8SxX0xN1x`Go2fw)(HHwmKH>)0Avi zG@ekMT^cleilk02gZhZaOY(xDxTxGDrvv*^Qo`l=yIR}Y7cXmH%8|uwsoxVutVmIG zJY7DQ2-rkVa8huIyU4g+G>)n`i-7u^@Ys)a&NA%;G&#j1Rz$4}ZW_yvu@*3#MtPIIi>nq%czH8ma5 zMCPMSZdu9cTicdl7N@nNi_^7Twwg06S=rIi$?2D^@}t3C*?}MJZA&?QXPX}fcP(4m zuB3M^Ygaj1SrAdm7BQtQ<)cymk@{$;rBkUKF7+d=b^WkQ z_Z%ty@4|_e4 zA1$!fv)=MaQ?tNO06x^X3IOHP&AQ|_i>$xMWA?!VE^8|zz-gCt{f@1rR(WeFcg^d8 zTd@|gD}m`_%zH?xkAPR*{_vpNM0A+zbm z*oN5s#qtd;wvFug61;5JYI7RY8oTH~0-g1A{S2XHJ_5fPbs+v2g}_Fvp^V|S;?d;bkg|CVc}f(F9dT4J zilt;Df>KYGo9lNZJc_?>dvqPPCFIKek#S3Gr@E`k2OMQndo9lB`DFZED~6N zmJDOT06kM$fQBbA5|5%}D5|RtxiuU>W1!Y~M}mBU7TMu2{J{EjfsCUb#Dk3_6v@J} zAUtVh2hxxNn6f!t7Ec`g`ZI95!Wd^eK?xhWOeT?7e(BmHJ9 z_``y3rX91pMGhy}1&^8( zoi9uSk9((CoJK-)aZdT9rQnlS=W?DgK%0UKH>HKz-FB=MYr43Day@bm*4K>>VZ9~t zL}X4-q~*dqqJuoEzLBA7hnN@$4TSJYQ`x7t<)>`c{Leg{UlvjHQJCs{J~|!=k12(w zsFx@#Y7CThsBVgjn5334cY})~ms6oY+H1N9C1yAl(nLbpj#|%(gHb>;;N~VHX~-z> z$i2fTFkvcNSfM(u3jQ#bu<}~3m!C{+B54#*F;H&gQ`BhSG0LX}q?7nS(W{6;AQ_L- zT2ysIX%(sw*V}`~J0mXj^`0DuPlL^7urkV zriJz@<}lp1(7r(~Y_^w~<>MXpf7`)a3_p3*ezC6QIwQxc$Ns)d=wYs79zpL!%o= zgvWR&J7HhoBDl*H3Q_18D9Vec(G6VrYx{hL$lp0(Urv~x!yn(a&pvBWLgRnhJ52Hp z)^X=L=5d&Lx#J(1ee&VU9Zr_Lj>x~>;8?{LkE*62rTS{&$Vx|9`nlmpdxp$dwKgyx8TaWk$jO1(4pw7Dse7%1;9E-PzQRHZf4tYxMWI%ad41|TP~`6he%=+(yw>5Df86KL zCoB(GNZw^(fnIp5^KlhW{Jg_u(yttny17bdc-~QJ(0Aek+)?J-1+PBuI9ki6Gd)g} zHhi@Jty!;@Ggb0Eo*F^{>}HZxM*NA>!O9gcfRBzizESRJCuf+(=|P}nE0uXp)9&q8 zlrZ&h>?OwnIP!vHUHJ#5EHsN5OgCk)mYZL6{43)|9jKtKd{W<@;*AjrF27x8aY5rt zj{Cg6WoKM|8niM9$wyyuV7C2cGchBz@V*3zRR+zkIh-rhRm+GjJ|uBPl#4qr?b0g3 
zJqi-;zlCJmUvu358K?z~uRE@Bzpu;Shg2e&C;0j6jujPWrct-A8|J;_m<@}MI*+;({Alw$$&qWsP&N0sh8 zSJvb-7&;yA3NzEwzi5wK@E6D3`Rs#*a3AZulG}^gmXwh&ZSQ0jfs|@9CURvp1XI3B7YGyN`ipgR_kNTS$7w4SzeA zkdw&j{%mA*Prp~M=V=DsF*s+k|I_sQ+V338((G0+5I3rkE-|rqEFuht0*jIH zBJ^dFE_#2V+*GHNcbJD8)?R?p(E&|gMCIO>MqVp3DV*re%UkOJj*m3k_s6V5) zPuLR)DzPnv7saF%?NTRbo{)xs0#5aAvXGfD{((IkSx}#q=kym&q8n8E7`rD;EiOYi z>L1fZIW=X9g$VBPrvD!4TK=1)8~A&YE|nI|?6^nSDLH(**Xb*z`Znd#X6xa4_^H>q zST|w3AGUa%%iUD5rpKj!DT?(;ud~O>JSZ-F#c)rn(?0%K0kNP!tfO%Rrt_5)_G)5ORIS9d`tjW+ zDM6KQUsgSE2xER+8nrVb9m5D69ZXE5N|^H)L%te7*MvIL^pa8X`Y;jRNpsqofN-sp zzu=WpMe)&a4dqObDc%-#LPn5IOZJaYkyKQ7NZ-$B{2%Zw83zOHt~kkIYlE%{6x|V z(=zXP2oD}lc*6Kk#wlEcN&{~Xb6Cr>HqBaa*aND=nCgnZ UA5-;cI;bb3IpjxLo$Cw!AHLK28UO$Q delta 12556 zcmeHNd2}4bnNQbTnvx|;Gn%n9@=+SwSd!&I7P7Gg0iW`L4_Q)dkRR5xW?ItVndwn? zk8MFrhrX_?nqvXO+ZPs|IgRlp2)i%)NPtZsvDvSxyGNtL z#!ejm*az=nO;2@IeMf!2@B4l=*FDm4!`&C{yJxa@KfO%Lr=+B=RG;!UNXcwO-+(H9Sp?_iX#?W`3Lfh$gpFoTFWCy;z(kOfy6$|Lv z3ZwEF6i31(^ooP1N?1&Vhg?DW#uVhEZyiKq3P>!;?X#`(Ib?J`=O}4HPrHr2zdFhV zTHg)w-Q}e4REA0fT4K0D#_#^-*e1{;#~i)JJq~C28sTR8bcgdi^rp)=+Tq+P_@71X zy$O}b8KNm-T1|?Yw5t;Z8y0R7Q<6jds;v}mk(mfbVNY<*Fn2w7| zoGfiyxdPL3FLo|**WpB3lXpc?g|3RJ-2B{hQ6*i8w5arxj3R4tOitpiYIP z$Z099CB!62Ceo6qG?6HLS=}v(S{7ujA=Ak8C`ojab4h(2iQ3;sP02PUR4Fn~F-9$M z$y3m)V~ouiS4^N^_PJc>gE9(FXBc&g%U57TFLl+7a?&w)5vqlUUIpi_M-{6|ML)cE2hTQiFM~pv(+(#XPL9eKGR|>b&JFDH77|I0qI;U_W zeSM<)e&P4VWs}@4=SZ&@;xzXq&Bnu7_l{cOQQCC3dlC`+&+vYddnlUeJv;YsLEeLL%}npAUkHcj+g?18zIVA$LT{UZ zM;lcbUsfmlh~7C9PslglTQl*hJ;vJK;ycbamQ60a;$4B#SJ0>le5$ip@S|-G$0)<$ zDB5rncCLwk&GudE!Y;K}%t$`-Pv{GJeM%4xCkd%}n8c>C-cd z7Yh$j_u0j|@MF65?Bd?)Y4wpha#rNxnn*gE%Jf&)U{O)T{_3@a4e)>XD~5Mg@pMo5 z`vrJRtCW&8sX41k@kNT5>0P)2BHQR>4Qg(Z_FYuVg+`y^KoHPyBu4{N)I3?9j!A#+GZFH65 ztk5wa;z*_kD=h(Yl8wEISTBjmX-!O|Rl*HU)Ql8MbSGk>mXOnGq^XuU3uX|JsM#*- zB{>a`rneYq$0QXpb7bCY4u;qEIxYDNRjXOp3!vT{O=zl=?5=~<3(l}`T?G`66FIA~ zuhNhyRCcr^qH3}NC@-pXx#AqBGxwZ8D-3&_{E>=aCh=gM!Rg5)%PqWWFw+ZxJjZMz z#dXlTC!uN*$b>Bu@B?dUW*%6u2I0xeel!K2DHsh|GTfa zlJ@l#Pol3Dx$rLe9=F@+b^w0RBR$36-u6*nvGGyg=-!|)YUk)pHNt0f%afze95Z&H z?kmNk8;W>t^1K8UwCX$`qqN@3vU9y}Ep=u#qj*GNJN&@8oE;>`H&jE!; z2$g?}r;k6D^F09{T>L}$Lm{`j&lV%}h7hfvwg7UqLiFf#_+;7H5|dOXB2t9(%Bt4M zj%ZUxR3k}k;Qln%wR!P)U+TJ$#Br3*8FHB z{FY!F*Jl|y^lt;2<|}G0KtNxe`GvsAVh8s0s&-PE=|fbrTrf=3O?~!<(Cfps zYs)1Xv(N-!VcA4&;tGPnZK*8t>R|8F;6{7ZQ}H5KRaaF@SIeo9+O@a8yKvU(dbG@QkSsf^ZdnUSO< zXcpK2H)mYx7U59mCPnbd49_S4`LSd+E|DocVDc%EhTDYvXdVdckj`TI@+b>$SQ-Km zLMo~$St&}Yt>tU1_+e=2wxSGJ%ID@Gc2qoJrW7-Sf}gpUeQfEr80`r-1i2wE>GX$Wi%&(Pu8Q3W}K z2f3O2(jXDTFow`N>mlsei%VjhwHtOf4EV%`pfOe%__H}2)(1}P1~HkH2!KV80-Xt< zKS!QuebllUn+`pmDUS$U3DaHL6qC50U~iqbbj%u>xE@wd%ITimGJ}9N8fhXelF}_J zDbgisv0f$%WNDacLs(?qe-_zorKt9&W4(%;ma{78n=xlXYX!v?YygD@gk@6F^B1UN zh%B0`v$`g?F3Eq3A)^Gjx*IGNCtYC6UPY2f3hEmUtq@4iRze|6ph=770HvDQf?@hp z2Ip+2c`MpThc#}ayR~JFIoSqUK9^}~0Vm4}1T4YbGkA*i1jmE^aaUp494m6TY8&9l z!X(hlLZv#`2L9t2+Js+gdJg<+=-f|B{d&KgCB5Q?+|gwTC=r9UIh#)N&KFc?S|#O} zm}E*eHJsHrqYkqS0c9A(BiN){gc)#FAb7eHJ#g^ja4?|sWrpdNlSz3aTZ$t9m=`Dq zh1Z8*Nj(~kVtdc!{eU=p_sc+h2C2*Ym-C!ny}ZBGJm@W~$*%ziLbnWn!3RtXvPRg{ z&3N-Un61ptU>af*Dl%TF@Ds~xax(*}>p%wfqV242lki|o!`Si+jO64y)0Qi2?$Qo= zMsxnLayCn}bng?cv8zBot4ATx!HR(HW)?Yz2QagjRl%Cy+nfa~ZYmk&5l5eU1C{CA z2Uh(}sSTFzoi=!a;a#kqyNNI@`Lm(j#E}Zm0fW^j8+4D?eo>moiD{_iJ z&7OrrYoQa=;H;49eUVHR-HQe#oH!%+zs@f_tEz(>NB|J~X$@w>o_;<@7g$ zjZOHoHc(WYw?4!Tl-UF8gQB&yD^|8I1iP(~mGw*z7IgIW&jNnkJXI_vS(R(!F7#=@ zumSnR_%JNSv@Fn2W?0HymjH&acm^nfQWNZ^%8>4Cd==9W;&BN&59nf9&VZ$Xw}Ls) z$gr~p(8-&fjY>jG=pali3$0hmN?afZ2Q`?r5rpjqNB{+Rl?%=>b1qwzWllIZHvX~3 zpHBb{?2u=9Mp0x1LI)@_AQE}hWMEujc?hz2Z$f9n*|~@Zbh01zYXdar&T3csTQf1{ 
zCYB``24)lqI74>Mp~8+sEp6vp@#`g}6$1UMS`eT!E-39lZ+Z>*z7`2xv^ZN3 zGTbXl_j*VDLcj%Wa<`UAW>u3?{JuaxUsJj!(o~n@6jkd5U=ovZPxW9naVnW!6OC+Z zxv{3*;-@%_A*kSe~R$;52=>np7HOd+iI%!g0|q%}JMn9*jTyOo}yMuixScQUoq2-`l=be5?( zB5m}=LNtCI zFrh0=YLYEvZDQB?J&6rarJ7_W5>w@6Ky=TS(?ERtN{s#+ z$_{N4=FsNyz_}hqZqNIkn?6_-m}>m2Jm4r0E})N-zzl)Vk4Yfln70Q}cU53Uz-&%g zrwJt^U>97rsF=1?1`VbKu%ZAD^5t|uOOG%K{xBqPF9weJ3@eZ))&B~sA7*)?m4{- z^e@NSvXuyWsK9>sN+LR+p0(TDf0T#T;Xs@$5e?PKY3HH!_{kH|5W&z<=X35~8|4m{dn0k~rl0KZPeny;m1_wbuo1A z>;nA6WV}_E0AI$*c(^ZsizAzj2UZDqb$(r5TML~TTu@ltJZ~d`O!!+iwYOq&#b z)Leo0Q`RpRtZ%GpCyBOO=4qaU)!h@_Pf7#1nDM zdQx_saSqoSR)VWx)5J^-K7svGj9Oi*ug%Hr*Cfw%%O94 z1}f?DEdfF=+ZpiE6_ygRcqW=yQ!h zKV3XMI7ztENKFq;a2N+0gLf68U0xcU6I@pUJZ%P2G2AhALgnmhqOZ*fHqlB?u&}fh zkFtk)6Ft4YS!`@9eSbl4ED0C1C(^*WNvdxJF0o(Z7t9G+@zt|e1GIi#u#*1kyxcQLP?;SGtM&Z@;{sqBtLf&@lyOj%rlZE_s-#H_=*ywBys#ApT(^Mwd zK#fdr5&Xve%S>>o&_o~41Q!@T?F$Aw>FC{U3K796ta6 diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index 82ba43bd317..5078d402257 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -14,12 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.losses; +import static org.tensorflow.framework.utils.CastHelper.cast; + import org.tensorflow.Operand; import org.tensorflow.framework.losses.impl.LossTuple; import org.tensorflow.framework.losses.impl.LossesHelper; +import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; -import org.tensorflow.framework.op.FrameworkOps; import org.tensorflow.op.core.ReduceAll; import org.tensorflow.op.core.ReduceMax; import org.tensorflow.op.core.ReduceSum; @@ -29,8 +31,6 @@ import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** Built-in loss functions. */ public class Losses { @@ -183,7 +183,9 @@ public static Operand binaryCrossentropy( private static Operand binaryCrossentropyHelper( Ops tf, Operand target, Operand output, boolean fromLogits) { FrameworkOps fop = FrameworkOps.create(tf); - if (fromLogits) { return fop.nn.sigmoidCrossEntropyWithLogits(target, output);} + if (fromLogits) { + return fop.nn.sigmoidCrossEntropyWithLogits(target, output); + } /* TODO - skip this logic for now. It requires walking back the inputs which is not yet possible if (!(output instanceof Variable) && (!tf.scope().env().isEager())) { @@ -519,6 +521,7 @@ public static Operand sparseCategoricalCrossentropy( Operand predictions, boolean fromLogits, int axis) { + FrameworkOps fop = FrameworkOps.create(tf); Class predictionType = predictions.type(); Operand epsilonConst = cast(tf, tf.constant(EPSILON), predictionType); Operand one = cast(tf, tf.constant(1), predictionType); @@ -651,8 +654,6 @@ private static Operand smoothCategoricalLabels( return tf.math.add(tf.math.mul(labels, oneMinusSmoothing), tf.math.div(smoothing, numClasses)); } - - /** * Converts binary labels into -1/1. 
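The Losses.java hunk above routes the from-logits branch of binaryCrossentropy through the framework ops. An illustrative call of that helper, not part of this change set and assuming an in-scope core Ops instance named tf, looks like:

    FrameworkOps fop = FrameworkOps.create(tf);
    Operand<TFloat32> labels = tf.constant(new float[][] {{0f, 1f}, {1f, 0f}});
    Operand<TFloat32> logits = tf.constant(new float[][] {{-1.2f, 3.4f}, {0.5f, -0.8f}});
    // element-wise sigmoid cross entropy, same shape as the inputs
    Operand<TFloat32> loss = fop.nn.sigmoidCrossEntropyWithLogits(labels, logits);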
* diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java index 61d1962184f..ea162bbf90e 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java @@ -29,10 +29,10 @@ import org.tensorflow.framework.losses.impl.LossesHelper; import org.tensorflow.framework.metrics.exceptions.NotBroadcastableException; import org.tensorflow.framework.op.FrameworkOps; +import org.tensorflow.framework.utils.SparseTensor; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -; import org.tensorflow.op.core.Assign; import org.tensorflow.op.core.OneHot; import org.tensorflow.op.core.Rank; @@ -49,17 +49,6 @@ import org.tensorflow.types.family.TIntegral; import org.tensorflow.types.family.TNumber; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; - -import static org.tensorflow.framework.losses.impl.LossesHelper.allAxes; -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * These are helper methods for Metrics and will be module private when Java modularity is applied * to TensorFlow Java. These methods should not be used outside of the metrics packages. @@ -70,8 +59,7 @@ public class MetricsHelper { "weights can not be broadcast to values."; /** - * Asserts that the {@code sampleWeights} can be broadcast to the same shape as {@code values - * } + * Asserts that the {@code sampleWeights} can be broadcast to the same shape as {@code values } * *

      In losses and metrics, limited weight broadcasting is supported. Weights must be either * scalar, or the same rank as the target values, with each dimension either 1, or the same as the @@ -80,8 +68,8 @@ public class MetricsHelper { * @param tf the TensorFlow Ops * @param sampleWeights the sample weights. * @param values the values to which weights are applied. - * @return {@code Operation} with control dependencies to ensure {@code sampleWeight} - * can be broadcast to {@code values} + * @return {@code Operation} with control dependencies to ensure {@code sampleWeight} can be + * broadcast to {@code values} * @param the type of Operand * @throws NotBroadcastableException If static checks determine {@code sampleWeights} has an * incorrect shape that prohibit broadcasting to {@code values} @@ -125,10 +113,7 @@ public static Op assertBroadcastable( throw new NotBroadcastableException( String.format( "%s Mismatch at dim %d. values.shape=%s weights.shape=%s.", - ASSERT_BROADCAST_ERROR_PREFIX, - i, - valuesShapeStatic, - weightsShapeStatic)); + ASSERT_BROADCAST_ERROR_PREFIX, i, valuesShapeStatic, weightsShapeStatic)); } } return tf.withSubScope("staticDimsCheckSuccess") diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index 6c8ed05ad66..7b6322d0f0d 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -29,7 +29,7 @@ * a core level Op. */ public class FrameworkOps { - public final Ops coreOps; + public final Ops core; public final NnOps nn; public final SetOps sets; public final MathOps math; @@ -42,7 +42,7 @@ public class FrameworkOps { * @param scope the scope */ private FrameworkOps(Scope scope) { - this.coreOps = Ops.create(scope.env()); + this.core = Ops.create(scope.env()); this.scope = scope; nn = new NnOps(this); sets = new SetOps(this); @@ -53,16 +53,15 @@ private FrameworkOps(Scope scope) { /** * Creates a FrameworkOps instance based on the provided Core Ops * - * @param coreOps The TensorFlow Core Ops + * @param core The TensorFlow Core Ops */ - private FrameworkOps(Ops coreOps) { - this.coreOps = coreOps; - this.scope = coreOps.scope(); + private FrameworkOps(Ops core) { + this.core = core; + this.scope = core.scope(); nn = new NnOps(this); sets = new SetOps(this); math = new MathOps(this); linalg = new LinalgOps(this); - } /** @@ -106,15 +105,6 @@ public final Scope scope() { return scope; } - /** - * Gets the core Ops - * - * @return coreOps - */ - public final Ops coreOps() { - return coreOps; - } - /** * Returns an API that builds operations with the provided name prefix. 
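To make the FrameworkOps change above concrete (the coreOps field and coreOps() accessor are replaced by a public core field), here is a brief illustrative sketch; it is not part of this change set and assumes an in-scope core Ops instance named tf.

    FrameworkOps fop = FrameworkOps.create(tf);
    Ops core = fop.core;   // direct field access; the coreOps() accessor is removed
    // framework-level groups hang off the same instance, e.g. math:
    Operand<TFloat32> x = tf.constant(new float[][] {{3f, 4f}});
    Operand<TFloat32> normalized = fop.math.l2Normalize(x, new int[] {1});   // [[0.6, 0.8]]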
* diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java index 931f7f851c2..bc5118e494e 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/LinalgOps.java @@ -15,18 +15,9 @@ package org.tensorflow.framework.op; import org.tensorflow.Operand; +import org.tensorflow.framework.op.linalg.MatMul; import org.tensorflow.framework.utils.SparseTensor; -import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.dtypes.Cast; -import org.tensorflow.op.math.Conj; -import org.tensorflow.op.sparse.SparseMatMul; -import org.tensorflow.op.train.BatchMatMul; -import org.tensorflow.types.TBfloat16; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.family.TFloating; import org.tensorflow.types.family.TNumber; public class LinalgOps { @@ -35,7 +26,7 @@ public class LinalgOps { private final FrameworkOps frameworkOps; /** - * Creates Framework {@code nn} Operations + * Creates Framework linear algebra Operations * * @param frameworkOps the TensorFLow framework Ops */ @@ -45,15 +36,14 @@ public class LinalgOps { } /** - * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b - * }. + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b }. * - *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the inner 2 - * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions - * specify matching batch size. + *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the + * inner 2 dimensions specify valid matrix multiplication dimensions, and any further outer + * dimensions specify matching batch size. * - *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, - * {@code TFloat32}, {@code TFloat64}, {@code TInt32}. + *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, {@code + * TFloat32}, {@code TFloat64}, {@code TInt32}. * *

      Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by * setting one of the corresponding flag to true. These are false by default. @@ -80,31 +70,29 @@ public class LinalgOps { * *

      Note: This is matrix product, not element-wise product. * - * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 - * }, {@code TInt32}. with a {@code rank > 1} + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 }, {@code + * TInt32}. with a {@code rank > 1} * @param b an Operand with same type and rank as {@code a}. * @param the data type of the Operands - * @return A Operand of the same type as {@code a} and {@code b} where each inner-most - * matrix is the product of the corresponding matrices in {@code a} and {@code b}. - * This is the matrix product not an element-wise product. - * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} - * , or {@code transposeB} and {@code adjointB} are both set to `true`. + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most matrix is + * the product of the corresponding matrices in {@code a} and {@code b}. This is the matrix + * product not an element-wise product. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} , or + * {@code transposeB} and {@code adjointB} are both set to `true`. */ - @Endpoint(name = "matmul") public Operand matmul(Operand a, Operand b) { - return matmul(a, b, false, false, false, false, false, false); + return MatMul.matmul(scope, a, b, false, false, false, false, false, false); } /** - * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b - * }. + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b }. * - *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the inner 2 - * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions - * specify matching batch size. + *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the + * inner 2 dimensions specify valid matrix multiplication dimensions, and any further outer + * dimensions specify matching batch size. * - *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, - * {@code TFloat32}, {@code TFloat64}, {@code TInt32}. + *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, {@code + * TFloat32}, {@code TFloat64}, {@code TInt32}. * *

      Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by * setting one of the corresponding flag to true. These are false by default. @@ -131,34 +119,32 @@ public Operand matmul(Operand a, Operand b) { * * }

      * - * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 - * }, {@code TInt32}. with a {@code rank > 1} + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 }, {@code + * TInt32}. with a {@code rank > 1} * @param b an Operand with same type and rank as {@code a}. * @param transposeA If true, {@code a} is transposed before multiplication. * @param transposeB If true, {@code b} is transposed before multiplication * @param the data type of the Operands - * @return A Operand of the same type as {@code a} and {@code b} where each inner-most - * matrix is the product of the corresponding matrices in {@code a} and {@code b}. - * This is the matrix product not an element-wise product. - * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} - * , or {@code transposeB} and {@code adjointB} are both set to `true`. + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most matrix is + * the product of the corresponding matrices in {@code a} and {@code b}. This is the matrix + * product not an element-wise product. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} , or + * {@code transposeB} and {@code adjointB} are both set to `true`. */ - @Endpoint(name = "matmul") public Operand matmul( Operand a, Operand b, boolean transposeA, boolean transposeB) { - return matmul(a, b, transposeA, transposeB, false, false, false, false); + return MatMul.matmul(scope, a, b, transposeA, transposeB, false, false, false, false); } /** - * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b - * }. + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b }. * - *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the inner 2 - * dimensions specify valid matrix multiplication dimensions, and any further outer dimensions - * specify matching batch size. + *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the + * inner 2 dimensions specify valid matrix multiplication dimensions, and any further outer + * dimensions specify matching batch size. * - *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, - * {@code TFloat32}, {@code TFloat64}, {@code TInt32}. + *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, {@code + * TFloat32}, {@code TFloat64}, {@code TInt32}. * *

      Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by * setting one of the corresponding flag to true. These are false by default. @@ -185,28 +171,26 @@ public Operand matmul( * * }
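Since the matmul overloads above now delegate to org.tensorflow.framework.op.linalg.MatMul, a small illustrative call may clarify the transpose flags; it is not part of this change set and assumes an in-scope core Ops instance named tf.

    FrameworkOps fop = FrameworkOps.create(tf);
    Operand<TFloat32> a = tf.constant(new float[][] {{1f, 2f, 3f}, {4f, 5f, 6f}});    // 2x3
    Operand<TFloat32> b = tf.constant(new float[][] {{7f, 8f, 9f}, {10f, 11f, 12f}}); // 2x3
    // transpose a on the fly: (3x2) x (2x3) -> 3x3; adjoint and sparse flags default to false
    Operand<TFloat32> product = fop.linalg.matmul(a, b, true, false);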

      * - * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 - * }, {@code TInt32}. with a {@code rank > 1} + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 }, {@code + * TInt32}. with a {@code rank > 1} * @param b an Operand with same type and rank as {@code a}. * @param transposeA If true, {@code a} is transposed before multiplication. * @param transposeB If True, {@code b} is transposed before multiplication * @param adjointA If true, {@code a} is conjugated and transposed before multiplication. * @param adjointB If true, {@code b} is conjugated and transposed before multiplication. - * @param aIsSparse If true, {@code a} is treated as a sparse matrix. Notice, this does - * not support {@link SparseTensor}, it just makes optimizations that assume most values - * in {@code a} are zero. - * @param bIsSparse If true, {@code b} is treated as a sparse matrix. Notice, this does - * not support {@link SparseTensor}, it just makes optimizations that assume most values - * in {@code b} are zero. + * @param aIsSparse If true, {@code a} is treated as a sparse matrix. Notice, this does not + * support {@link SparseTensor}, it just makes optimizations that assume most values in + * {@code a} are zero. + * @param bIsSparse If true, {@code b} is treated as a sparse matrix. Notice, this does not + * support {@link SparseTensor}, it just makes optimizations that assume most values in + * {@code b} are zero. * @param the data type of the Operands - * @return A Operand of the same type as {@code a} and {@code b} where each inner-most - * matrix is the product of the corresponding matrices in {@code a} and {@code b}. - * This is the matrix product not an element-wise product. - * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} - * , or {@code transposeB} and {@code adjointB} are both set to `true`. + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most matrix is + * the product of the corresponding matrices in {@code a} and {@code b}. This is the matrix + * product not an element-wise product. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} , or + * {@code transposeB} and {@code adjointB} are both set to `true`. */ - @SuppressWarnings("unchecked") - @Endpoint(name = "matmul") public Operand matmul( Operand a, Operand b, @@ -216,89 +200,7 @@ public Operand matmul( boolean adjointB, boolean aIsSparse, boolean bIsSparse) { - Scope lscope = scope.withSubScope("MatMul"); - if (transposeA && adjointA) - throw new IllegalArgumentException("Only one of transposeA and adjointA can be true."); - if (transposeB && adjointB) - throw new IllegalArgumentException("Only one of transposeB and adjointB can be true."); - if (!(TFloating.class.isAssignableFrom(a.type()) || a.type().equals(TInt32.class))) - throw new IllegalArgumentException( - String.format( - "Operand 'a' must be of type 'TBfloat16','TFloat16', 'TFloat32', 'TFloat64' or 'TInt32'. found type : %s", - a.type().getSimpleName())); - if (!(TFloating.class.isAssignableFrom(a.type()) || b.type().equals(TInt32.class))) - throw new IllegalArgumentException( - String.format( - "Operand 'b' must be of type 'TBfloat16', 'TFloat32', 'TFloat64' or 'TInt32'. 
found type : %s", - b.type().getSimpleName())); - - Shape aShape = a.shape(); - Shape bShape = b.shape(); - if (aShape.numDimensions() != bShape.numDimensions()) - throw new IllegalArgumentException( - String.format( - "Parameters 'a' and 'b' must the same rank: found a rank = %d, b rank = %d", - aShape.numDimensions(), bShape.numDimensions())); - boolean outputMayHaveNonEmptyBatchShape = - aShape.numDimensions() == Shape.UNKNOWN_SIZE - || aShape.numDimensions() > 2 - || bShape.numDimensions() == Shape.UNKNOWN_SIZE; - - if ((!aIsSparse && !bIsSparse) && outputMayHaveNonEmptyBatchShape) { - // BatchMatmul does not support transpose, so we conjugate the matrix and - // use adjoint instead. Conj() is a noop for real matrices. - if (transposeA) { - a = Conj.create(scope, a); - adjointA = true; - } - if (transposeB) { - b = Conj.create(scope, b); - adjointB = true; - } - return BatchMatMul.create( - lscope, a, b, BatchMatMul.adjX(adjointA), BatchMatMul.adjY(adjointB)); - } - - // Neither matmul nor sparse_matmul support adjoint, so we conjugate - // the matrix and use transpose instead. Conj() is a noop for real - // matrices. - if (adjointA) { - a = Conj.create(scope, a); - transposeA = true; - } - if (adjointB) { - b = Conj.create(scope, b); - transposeB = true; - } - - boolean useSparseMatmul = false; - if (aIsSparse || bIsSparse) { - useSparseMatmul = - (a.type().equals(TBfloat16.class) || a.type().equals(TFloat32.class)) - && (b.type().equals(TBfloat16.class) || b.type().equals(TFloat32.class)); - } - if ((a.type().equals(TBfloat16.class) || b.type().equals(TBfloat16.class)) - && !a.type().equals(b.type())) useSparseMatmul = true; - - if (useSparseMatmul) { - Operand result = - SparseMatMul.create( - lscope, - a, - b, - SparseMatMul.transposeA(transposeA), - SparseMatMul.transposeB(transposeB), - SparseMatMul.aIsSparse(aIsSparse), - SparseMatMul.bIsSparse(bIsSparse)); - if (a.type().equals(TFloat32.class)) return (Operand) result; - else return Cast.create(scope, result, a.type()); - } - - return org.tensorflow.op.linalg.MatMul.create( - lscope, - a, - b, - org.tensorflow.op.linalg.MatMul.transposeA(transposeA), - org.tensorflow.op.linalg.MatMul.transposeB(transposeB)); + return MatMul.matmul( + scope, a, b, transposeA, transposeB, adjointA, adjointB, aIsSparse, bIsSparse); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java index 8fda58806ca..d8b0c7e6775 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/MathOps.java @@ -14,69 +14,26 @@ =======================================================================*/ package org.tensorflow.framework.op; -import org.tensorflow.Graph; import org.tensorflow.Operand; -import org.tensorflow.Session; -import org.tensorflow.framework.losses.impl.LossTuple; -import org.tensorflow.ndarray.Shape; +import org.tensorflow.framework.op.math.Axes; +import org.tensorflow.framework.op.math.ConfusionMatrix; +import org.tensorflow.framework.op.math.L2Normalize; +import org.tensorflow.framework.op.math.ReduceLogSumExp; +import org.tensorflow.framework.op.math.TensorDot; import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.core.AssertThat; -import org.tensorflow.op.core.Concat; -import org.tensorflow.op.core.Constant; -import org.tensorflow.op.core.Gather; -import 
org.tensorflow.op.core.Identity; -import org.tensorflow.op.core.OnesLike; -import org.tensorflow.op.core.Range; -import org.tensorflow.op.core.Rank; -import org.tensorflow.op.core.ReduceAll; -import org.tensorflow.op.core.ReduceMax; -import org.tensorflow.op.core.ReduceProd; -import org.tensorflow.op.core.ReduceSum; -import org.tensorflow.op.core.Reshape; -import org.tensorflow.op.core.ScatterNd; -import org.tensorflow.op.core.Select; -import org.tensorflow.op.core.SetDiff1d; -import org.tensorflow.op.core.Slice; -import org.tensorflow.op.core.Squeeze; -import org.tensorflow.op.core.Stack; -import org.tensorflow.op.core.StopGradient; -import org.tensorflow.op.core.ZerosLike; -import org.tensorflow.op.dtypes.Cast; -import org.tensorflow.op.linalg.Transpose; -import org.tensorflow.op.math.Add; -import org.tensorflow.op.math.Exp; -import org.tensorflow.op.math.GreaterEqual; -import org.tensorflow.op.math.IsFinite; -import org.tensorflow.op.math.Less; -import org.tensorflow.op.math.Log; -import org.tensorflow.op.math.Maximum; -import org.tensorflow.op.math.Mul; -import org.tensorflow.op.math.Rsqrt; -import org.tensorflow.op.math.Square; -import org.tensorflow.op.math.Sub; -import org.tensorflow.types.TBfloat16; -import org.tensorflow.types.TBool; -import org.tensorflow.types.TFloat16; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TFloating; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.stream.Collectors; - public class MathOps { private final Scope scope; private final FrameworkOps frameworkOps; /** - * Creates Framework {@code nn} Operations + * Creates Framework math Operations * * @param frameworkOps the TensorFLow framework Ops */ @@ -94,44 +51,33 @@ public class MathOps { * @return the normalized values based on L2 norm */ public Operand l2Normalize(Operand x, int[] axis) { - Operand squareSum = - ReduceSum.create( - scope, - Square.create(scope, x), - Constant.vectorOf(scope, axis), - ReduceSum.keepDims(Boolean.TRUE)); - Operand invNorm = - Rsqrt.create( - scope, - Maximum.create( - scope, squareSum, Cast.create(scope, Constant.scalarOf(scope, 1e-12F), x.type()))); - return Mul.create(scope, x, invNorm); + return L2Normalize.l2Normalize(scope, x, axis); } /** * Computes the confusion matrix from predictions and labels. * *

      The matrix columns represent the prediction labels and the rows represent the real labels. - * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the number of valid - * labels for a given classification task. Both prediction and labels must be 1-D arrays of the - * same shape in order for this function to work. + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the + * number of valid labels for a given classification task. Both prediction and labels must be 1-D + * arrays of the same shape in order for this function to work. * - *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in - * either predictions or labels. Class labels are expected to start at 0. For example, if + *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum + * value in either predictions or labels. Class labels are expected to start at 0. For example, if * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. * - *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to the - * total value of the confusion matrix cell. + *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to + * the total value of the confusion matrix cell. * *

      For example: * *

      {@code
      -   *     fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      -   *         [[0 0 0 0 0]
      -   *          [0 0 1 0 0]
      -   *          [0 0 1 0 0]
      -   *          [0 0 0 0 0]
      -   *          [0 0 0 0 1]]
      +   * fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      +   *     [[0 0 0 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 0 0 0]
      +   *      [0 0 0 0 1]]
          * }
      * *

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 @@ -147,33 +93,33 @@ public Operand l2Normalize(Operand x, int[] axis) { * predictions}. */ public Operand confusionMatrix(Operand labels, Operand predictions) { - return confusionMatrix(labels, predictions, null, null); + return ConfusionMatrix.confusionMatrix(scope, labels, predictions, null, null); } /** * Computes the confusion matrix from predictions and labels. * *
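The javadoc example above translates directly to the Java API; an illustrative sketch, not part of this change set and assuming an in-scope core Ops instance named tf:

    FrameworkOps fop = FrameworkOps.create(tf);
    Operand<TInt32> labels = tf.constant(new int[] {1, 2, 4});
    Operand<TInt32> predictions = tf.constant(new int[] {2, 2, 4});
    // 5x5 matrix matching the example above; numClasses is inferred as max value + 1
    Operand<TInt32> cm = fop.math.confusionMatrix(labels, predictions);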

      The matrix columns represent the prediction labels and the rows represent the real labels. - * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the number of valid - * labels for a given classification task. Both prediction and labels must be 1-D arrays of the - * same shape in order for this function to work. + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the + * number of valid labels for a given classification task. Both prediction and labels must be 1-D + * arrays of the same shape in order for this function to work. * - *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in - * either predictions or labels. Class labels are expected to start at 0. For example, if + *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum + * value in either predictions or labels. Class labels are expected to start at 0. For example, if * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. * - *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to the - * total value of the confusion matrix cell. + *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to + * the total value of the confusion matrix cell. * *

      For example: * *

      {@code
      -   *     fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      -   *         [[0 0 0 0 0]
      -   *          [0 0 1 0 0]
      -   *          [0 0 1 0 0]
      -   *          [0 0 0 0 0]
      -   *          [0 0 0 0 1]]
      +   * fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      +   *     [[0 0 0 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 0 0 0]
      +   *      [0 0 0 0 1]]
          * }
      * *

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 @@ -191,33 +137,33 @@ public Operand confusionMatrix(Operand labels, Operand */ public Operand confusionMatrix( Operand labels, Operand predictions, Operand weights) { - return confusionMatrix(labels, predictions, weights, null); + return ConfusionMatrix.confusionMatrix(scope, labels, predictions, weights, null); } /** * Computes the confusion matrix from predictions and labels. * *

      The matrix columns represent the prediction labels and the rows represent the real labels. - * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the number of valid - * labels for a given classification task. Both prediction and labels must be 1-D arrays of the - * same shape in order for this function to work. + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the + * number of valid labels for a given classification task. Both prediction and labels must be 1-D + * arrays of the same shape in order for this function to work. * - *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum value in - * either predictions or labels. Class labels are expected to start at 0. For example, if + *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum + * value in either predictions or labels. Class labels are expected to start at 0. For example, if * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. * - *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to the - * total value of the confusion matrix cell. + *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to + * the total value of the confusion matrix cell. * *

      For example: * *

      {@code
      -   *     fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      -   *         [[0 0 0 0 0]
      -   *          [0 0 1 0 0]
      -   *          [0 0 1 0 0]
      -   *          [0 0 0 0 0]
      -   *          [0 0 0 0 1]]
      +   * fops.math.confusion_matrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      +   *     [[0 0 0 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 0 0 0]
      +   *      [0 0 0 0 1]]
          * }
      * *

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 @@ -237,139 +183,7 @@ public Operand confusionMatrix( */ public Operand confusionMatrix( Operand labels, Operand predictions, Operand weights, Operand numClasses) { - Scope lScope = scope.withSubScope("confusionMatrix"); - LossTuple tuple = removeSqueezableDimensions(labels, predictions, 0); - Operand lLabels = Cast.create(lScope, tuple.getLabels(), TInt64.class); - Operand lPredictions = Cast.create(lScope, tuple.getTarget(), TInt64.class); - - Operand zero = Constant.scalarOf(lScope, 0L); - Operand one = Constant.scalarOf(lScope, 1L); - - AssertThat labelsNonNegative = - AssertThat.create( - lScope, - ReduceAll.create(lScope, GreaterEqual.create(lScope, lLabels, zero), allAxes(lLabels)), - Collections.singletonList( - Constant.scalarOf(lScope, "labels contains negative values"))); - lLabels = - Identity.create( - lScope.withControlDependencies(Collections.singletonList(labelsNonNegative)), lLabels); - - AssertThat predictionsNonNegative = - AssertThat.create( - lScope, - ReduceAll.create( - lScope, GreaterEqual.create(lScope, lPredictions, zero), allAxes(lPredictions)), - Collections.singletonList( - Constant.scalarOf(lScope, "predictions contains negative values"))); - lPredictions = - Identity.create( - lScope.withControlDependencies(Collections.singletonList(predictionsNonNegative)), - lPredictions); - - Operand lNumClasses; - if (numClasses == null) { - lNumClasses = - Add.create( - lScope, - Maximum.create( - lScope, - ReduceMax.create(lScope, lPredictions, zero), - ReduceMax.create(lScope, lLabels, zero)), - one); - } else { - lNumClasses = Cast.create(lScope, numClasses, TInt64.class); - Operand less = Less.create(lScope, lLabels, lNumClasses); - AssertThat labelsLess = - AssertThat.create( - lScope, - ReduceAll.create(scope, less, allAxes(less), ReduceAll.keepDims(false)), - Collections.singletonList(Constant.scalarOf(lScope, "labels out of bounds"))); - lLabels = - Identity.create( - lScope.withControlDependencies(Collections.singletonList(labelsLess)), lLabels); - - less = Less.create(lScope, lPredictions, lNumClasses); - AssertThat predictionsLess = - AssertThat.create( - lScope, - ReduceAll.create(scope, less, allAxes(less), ReduceAll.keepDims(false)), - Collections.singletonList(Constant.scalarOf(lScope, "predictions out of bounds"))); - lPredictions = - Identity.create( - lScope.withControlDependencies(Collections.singletonList(predictionsLess)), - lPredictions); - } - - if (weights != null) { - if (!predictions.shape().isCompatibleWith(weights.shape())) { - throw new IllegalArgumentException( - String.format( - "predictions.shape() [%s], is not compatible with weights.shape() [ %s].", - predictions.shape(), weights.shape())); - } - } - - Operand shape = Stack.create(lScope, Arrays.asList(lNumClasses, lNumClasses)); - Operand indices = - Stack.create(lScope, Arrays.asList(lLabels, lPredictions), Stack.axis(1L)); - Operand values = weights == null ? OnesLike.create(lScope, predictions) : weights; - /// Operand zeroMatrix = Zeros.create(lScope, Cast.create(lScope, shape, TInt32.class), - // type); - - return ScatterNd.create(lScope, indices, values, shape); - } - - /** - * Squeeze last dim if ranks differ from expected by exactly 1. - * - * @param labels Label values, a {@code Operand} whose dimensions match {@code predictions - * }. - * @param predictions Predicted values, a {@code Tensor} of arbitrary dimensions. 
- * @param expectedRankDiff Expected result of {@code rank(predictions) - rank(labels)}. - * @param the data type for the labels, predictions and result - * @return {@code labels} and {@code predictions}, possibly with last dim squeezed. - */ - public LossTuple removeSqueezableDimensions( - Operand labels, Operand predictions, int expectedRankDiff) { - Scope lScope = scope.withSubScope("removeSqueezableDimensions"); - Shape predictionsShape = predictions.shape(); - int predictionsRank = predictionsShape.numDimensions(); - Shape labelsShape = labels.shape(); - int labelsRank = labelsShape.numDimensions(); - - if (predictionsRank != Shape.UNKNOWN_SIZE || labelsRank != Shape.UNKNOWN_SIZE) { - // Use rank. - int rankDiff = predictionsRank - labelsRank; - if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.size(-1), 1)) { - predictions = Squeeze.create(lScope, predictions); - } else if (rankDiff == expectedRankDiff - 1 && Shape.isCompatible(labelsShape.size(-1), 1)) { - labels = Squeeze.create(lScope, labels); - } - return new LossTuple<>(labels, predictions); - } - // Use dynamic rank. - - // TODO: hold for lazy select feature, - // Operand rankDiff = tf.math.sub(tf.rank(predictions), tf.rank(labels)); - if (predictionsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(predictionsShape.size(-1), 1)) { - /* - * TODO, if we ever get a select that does lazy evaluation, but for now do the tf.squeeze - * predictions = tf.select( tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), - * tf.squeeze(predictions, Squeeze.axis(Arrays.asList(-1L))), predictions ); * - */ - predictions = - Squeeze.create(lScope, predictions, Squeeze.axis(Collections.singletonList(-1L))); - } - if (labelsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(labelsShape.size(-1), 1)) { - /* - * TODO, if we ever get a select that does lazy evaluation labels = tf.select( - * tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), tf.squeeze(labels, - * Squeeze.axis(Arrays.asList(-1L))), predictions ); * - */ - labels = Squeeze.create(lScope, labels, Squeeze.axis(Collections.singletonList(-1L))); - } - return new LossTuple<>(labels, predictions); + return ConfusionMatrix.confusionMatrix(scope, labels, predictions, weights, numClasses); } /** @@ -378,673 +192,250 @@ public LossTuple removeSqueezableDimensions( * @param op the Operand * @return an Operand that has all axes contained in the Operand's shape.. */ - public Operand allAxes(Operand op) { - int rank = op.shape().numDimensions(); - if (rank != Shape.UNKNOWN_SIZE) { - int[] axes = new int[rank]; - for (int i = 0; i < rank; i++) { - axes[i] = i; - } - return Constant.vectorOf(scope, axes); - } else { - return Range.create( - scope, Constant.scalarOf(scope, 0), Rank.create(scope, op), Constant.scalarOf(scope, 1)); - } + public Operand allAxes(Operand op) { + return Axes.allAxes(scope, op); } /** - * Transpose and reshape the input for contraction op. - * - *

      This method is helpful in reducing {@code math.tensordot} to {@code math_ops.matmul} using - * {@code array_ops.transpose} and {@code array_ops.reshape}. The method takes a tensor and performs the - * correct transpose and reshape operation for a given set of indices. It returns the reshaped - * tensor as well as a list of indices necessary to reshape the tensor again after matrix - * multiplication. - * - * @param the type of Operand - * @param a the Tensor - * @param axis unique indices specifying valid axes of {@code a}. - * @param flipped whether to flip the dimensions or not - * @return A tuple (reshapedA, freeDims, freeDimsStatic) where reshapedA is a reshaped to allow - * contraction via matmul, freeDims is a TInt32 Operand, depending on whether the shape of a - * is fully specified, and freeDimsStatic is either a list of integers and null values, or - * None, representing the inferred shape of the free dimensions - */ - private Object[] tensordotReshape( - Operand a, Operand axis, boolean flipped) { - Shape aShape = a.shape(); - - if (!aShape.hasUnknownDimension()) { // calculate using values - long[] aShapeDims = aShape.asArray(); - if (aShapeDims == null) aShapeDims = new long[0]; - long[] aDimsIndex = new long[aShapeDims.length]; - for (int i = 0; i < aDimsIndex.length; i++) aDimsIndex[i] = i; - - // get int array from axis Operand - int[] iAxes = getIntArray(axis); - // Convert negative axes to positive - for (int i = 0; i < iAxes.length; i++) - iAxes[i] = iAxes[i] >= 0 ? iAxes[i] : Math.floorMod(iAxes[i], iAxes.length); - - // convert integer axis to long axis - long[] lAxes = Arrays.stream(iAxes).mapToLong(i -> i).toArray(); - - // create list of the axes, dims, and free axes - List axesList = Arrays.stream(lAxes).boxed().collect(Collectors.toList()); - List freeList = Arrays.stream(aDimsIndex).boxed().collect(Collectors.toList()); - freeList.removeAll(axesList); - - // create array of free dims - long[] free = freeList.stream().mapToLong(i -> i).toArray(); - long[] freeDims = new long[free.length]; - for (int i = 0; i < free.length; i++) freeDims[i] = aShapeDims[(int) free[i]]; - - // Calculate the free dim by doing a reduce prod - long prodFree = 1; - for (long i : freeDims) { - prodFree *= i; - } - - // calculate the used dims by doing a reduce prod - long prodAxis = 1; - for (long i : lAxes) { - prodAxis *= aShapeDims[(int) i]; - } - - // setup the permutations array for the transpose - long[] perm = new long[freeDims.length + lAxes.length]; - Shape newShape; - if (flipped) { - System.arraycopy(lAxes, 0, perm, 0, lAxes.length); - System.arraycopy(free, 0, perm, lAxes.length, free.length); - newShape = Shape.of(prodAxis, prodFree); - } else { - System.arraycopy(free, 0, perm, 0, free.length); - System.arraycopy(lAxes, 0, perm, freeDims.length, lAxes.length); - newShape = Shape.of(prodFree, prodAxis); - } - - Operand aTrans; - long[] arrange = new long[lAxes.length]; - for (int i = 0; i < arrange.length; i++) arrange[i] = i; - - // if the permutations is not equals to the natural order of the dims, then do a transpose - if (!Arrays.equals(perm, arrange)) { - aTrans = Transpose.create(scope, a, Constant.vectorOf(scope, perm)); - } else { - aTrans = a; - } - - // reshape the final result to the new Shape, if necessary - Operand aReshaped = - aTrans.asOutput().shape().equals(newShape) - ? 
aTrans - : Reshape.create(scope, aTrans, Constant.vectorOf(scope, newShape.asArray())); - // return a tuple for the reshaped Operand, and Operand for the free dimensions, and a long - // array for the free dimensions - return new Object[] {aReshaped, Constant.vectorOf(scope, freeDims), freeDims}; - - } else { // calculate dynamically - - long[] freeDimsStatic = null; - Operand one = Constant.scalarOf(scope, 1); - Operand minusOne = Constant.scalarOf(scope, -1); - Operand zero = Constant.scalarOf(scope, 0); - org.tensorflow.op.core.Shape tShape = org.tensorflow.op.core.Shape.create(scope, a); - Operand axesT; - Operand freeT; - if (aShape.numDimensions() - != Shape.UNKNOWN_SIZE) { // we know the rank, but there are unknown dimensions - long[] aShapeDims = aShape.asArray(); - if (aShapeDims == null) aShapeDims = new long[0]; - - // get int array from axis Operand - int[] iAxes = getIntArray(axis); - // Convert negative axes to positive - for (int i = 0; i < iAxes.length; i++) - iAxes[i] = iAxes[i] >= 0 ? iAxes[i] : Math.floorMod(iAxes[i], iAxes.length); - - // convert integer axis to long axis - long[] lAxes = Arrays.stream(iAxes).mapToLong(i -> i).toArray(); - - // create list of the axes, dims, and free axes - List axesList = Arrays.stream(lAxes).boxed().collect(Collectors.toList()); - List dimsList = Arrays.stream(aShapeDims).boxed().collect(Collectors.toList()); - List freeList = new ArrayList<>(axesList); - freeList.removeAll(dimsList); - - // create array of free dims - long[] freeDims = freeList.stream().mapToLong(i -> i).toArray(); - freeDimsStatic = freeDims; - - axesT = Constant.vectorOf(scope, iAxes); - freeT = Cast.create(scope, Constant.vectorOf(scope, freeDims), TInt32.class); - - } else { // we don't know the rank yet - Rank rank = Rank.create(scope, a); - - // convert axis to positive - axesT = - Select.create( - scope, - GreaterEqual.create(scope, axis, Constant.scalarOf(scope, 0)), - axis, - Add.create(scope, axis, rank)); - - SetDiff1d diff = - SetDiff1d.create( - scope, Range.create(scope, Constant.scalarOf(scope, 0), rank, one), axesT); - freeT = diff.out(); - } - Operand freeDims = Gather.create(scope, tShape, freeT, zero); - Operand axesDims = Gather.create(scope, tShape, axesT, zero); - Operand prodFreeDims = ReduceProd.create(scope, freeDims, minusOne); - Operand prodAxesDims = ReduceProd.create(scope, axesDims, minusOne); - Operand perm; - Operand newShape; - if (flipped) { - perm = Concat.create(scope, Arrays.asList(axesT, freeT), zero); - newShape = Stack.create(scope, Arrays.asList(prodAxesDims, prodFreeDims)); - } else { - perm = Concat.create(scope, Arrays.asList(freeT, axesT), zero); - newShape = Stack.create(scope, Arrays.asList(prodFreeDims, prodAxesDims)); - } - Operand aReshaped = Reshape.create(scope, Transpose.create(scope, a, perm), newShape); - return new Object[] {aReshaped, freeDims, freeDimsStatic}; - } - } - - /** - * Gets an int array from an Operand<TInt32> operand. + * Tensor contraction of a and b along specified axes and outer product. 
* - * @param axes the Operand to fetch the values - * @return the int array from an Operand<TInt32> - */ - private int[] getIntArray(Operand axes) { - List result = new ArrayList<>(); - if (scope.env().isEager()) { - axes.asTensor().scalars().forEach(s -> result.add(s.getInt())); - } else { - try (Session session = new Session((Graph) scope.env()); - TInt32 tensor = (TInt32) session.runner().fetch(axes).run().get(0)) { - tensor.scalars().forEach(s -> result.add(s.getInt())); - } - } - return result.stream().mapToInt(i -> i).toArray(); - } - - /** - * Generates two sets of contraction axes for the two tensor arguments. + *

      Tensordot (also known as tensor contraction) sums the product of elements from {@code a} and + * {@code b} over the indices specified by {@code a_axes} and {@code b_axes}. The lists {@code + * a_axes} and {@code b_axes} specify those pairs of axes along which to contract the tensors. The + * axis {@code a_axes[i]} of {@code a} must have the same dimension as axis {@code b_axes[i]} of + * {@code b} for all {@code i} in {@code range(0, len(a_axes))}. The lists {@code a_axes} and + * {@code b_axes} must have identical length and consist of unique integers that specify valid + * axes for each of the tensors. Additionally outer product is supported by passing {@code + * axes=0}. * - * @param a the Operand to analyze - * @param axis the axes - * @param the data type for the Operand - * @return the contraction axes - */ - @SuppressWarnings("unchecked") - private Operand[] tensordotAxes(Operand a, int axis) { - Shape aShape = a.asOutput().shape(); - if (axis < 0) { - throw new IllegalArgumentException("'axis' must be at least 0."); - } - int rank = aShape.numDimensions(); - Operand[] result = new Operand[2]; - if (rank != Shape.UNKNOWN_SIZE) { - if (axis > rank) { - throw new IllegalArgumentException( - String.format( - "'axis' must not be larger than the number of dimensions of tensor %s.", rank)); - } - int min = rank - axis; - int postRange = rank - min; - int[] postAxis = new int[postRange]; - for (int i = 0; i < postRange; i++) postAxis[i] = i + min; - - int[] preAxis = new int[axis]; - for (int i = 0; i < axis; i++) preAxis[i] = i; - - result[0] = Constant.vectorOf(scope, postAxis); - result[1] = Constant.vectorOf(scope, preAxis); - } else { - Rank rankT = Rank.create(scope, a); - Constant axisT = Constant.scalarOf(scope, axis); - Constant one = Constant.scalarOf(scope, 1); - Constant zero = Constant.scalarOf(scope, 0); - AssertThat assertion = - AssertThat.create( - scope, - Less.create(scope, axisT, rankT), - Arrays.asList( - Constant.scalarOf( - scope, "'axes' must not be larger than the number of dimensions of tensor "), - rankT)); - Scope scope1 = scope.withControlDependencies(Collections.singletonList(assertion)); - result[0] = Range.create(scope1, Sub.create(scope, rankT, axisT), rankT, one); - result[1] = Range.create(scope1, zero, axisT, one); - } - return result; - } - - /** - * Generates two sets of contraction axes for the two tensor arguments. + *

      This operation corresponds to {@code numpy.tensordot(a, b, axes)}. * - * @param a the Operand to analyze - * @param axes the axes - * @param the data type for the Operand - * @return the contraction axes - */ - @SuppressWarnings({"unchecked", "unused"}) - private Operand[] tensordotAxes(Operand a, int[] axes) { - if (axes.length != 2) - throw new IllegalArgumentException( - "'axes' must have length 1 or 2, provided with " + axes.length); - int[] aAxis = new int[] {axes[0]}; - int[] bAxis = new int[] {axes[1]}; - Operand[] result = new Operand[2]; - result[0] = Constant.vectorOf(scope, aAxis); - result[1] = Constant.vectorOf(scope, bAxis); - - return result; - } - - /** - * Generates two sets of contraction axes for the two tensor arguments. + *

      Example 1: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = 1} is + * equivalent to matrix multiplication. * - * @param a the Operand to analyze - * @param axes the axes - * @param the data type for the Operand - * @return the contraction axes - */ - @SuppressWarnings({"unchecked", "unused"}) - private Operand[] tensordotAxes(Operand a, int[][] axes) { - if (axes.length != 2) - throw new IllegalArgumentException( - "'axes' must have length 1 or 2, provided with " + axes.length); - int[] aAxis = axes[0]; - int[] bAxis = axes[1]; - if (aAxis.length != bAxis.length) - throw new IllegalArgumentException( - String.format( - "Different number of contraction axes 'a' and 'b', %d != %d", - aAxis.length, bAxis.length)); - Operand[] result = new Operand[2]; - result[0] = Constant.vectorOf(scope, aAxis); - result[1] = Constant.vectorOf(scope, bAxis); - return result; - } - - /** - * Generates two sets of contraction axes for the two tensor arguments. + *

      Example 2: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = [[1], + * [0]]} is equivalent to matrix multiplication. * - * @param a the Operand to analyze - * @param axes the axes - * @param the data type for the Operand - * @return the contraction axes - */ - @SuppressWarnings({"unchecked", "unused"}) - private Operand[] tensordotAxes(Operand a, Operand axes) { - - Constant one = Constant.scalarOf(scope, 1); - Constant zero = Constant.scalarOf(scope, 0); - Operand[] result = new Operand[2]; - result[0] = - Slice.create( - scope, - axes, - Cast.create(scope, zero, TInt32.class), - Cast.create(scope, one, TInt32.class)); - result[1] = - Slice.create( - scope, - axes, - Cast.create(scope, one, TInt32.class), - Cast.create(scope, one, TInt32.class)); - return result; - } - - /** - * Tensor contraction of a and b along specified axes and outer product. - *

      - * Tensordot (also known as tensor contraction) sums the product of elements - * from {@code a} and {@code b} over the indices specified by - * {@code a_axes} and {@code b_axes}. The lists - * {@code a_axes} and {@code b_axes} specify those pairs of axes - * along which to contract the tensors. The axis {@code a_axes[i]} of - * {@code a} must have the same dimension as axis - * {@code b_axes[i]} of {@code b} for all {@code i} in - * {@code range(0, len(a_axes))}. The lists - * {@code a_axes} and {@code b_axes} must have identical length - * and consist of unique integers that specify valid axes for each of the - * tensors. Additionally outer product is supported by passing - * {@code axes=0}. - *

      - * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. - *

      - * Example 1: When {@code a} and {@code b} are matrices (order 2), - * the case {@code axes = 1} is equivalent to matrix multiplication. - *

      - * Example 2: When {@code a} and {@code b} are matrices (order 2), - * the case - * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. - *

      - * Example 3: When {@code a} and {@code b} are matrices (order 2), - * the case {@code axes=0} gives the outer product, a tensor of order - * 4. - *

      - * Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor - * cjklm whose entry corresponding to the indices - * (j,k,l,m) is given by: - * cjklm = Σi aijk - * blmi . - *

      - * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + *

      Example 3: When {@code a} and {@code b} are matrices (order 2), the case {@code axes=0} + * gives the outer product, a tensor of order 4. + * + *

Example 4: Suppose that a<sub>ijk</sub> and b<sub>lmn</sub> represent two + * tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor + * c<sub>jklm</sub> whose entry corresponding to the indices (j,k,l,m) is given by: + * c<sub>jklm</sub> = Σ<sub>i</sub> a<sub>ijk</sub> b<sub>lmi</sub>. + * + *&#13;

      In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. * * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. * @param b {@code Operand} with the same type as {@code a}. - * @param axis sum over the last N axes of a and the - * first N axes of b in order. If {@code axis=0}, computes the outer - * product between {@code a} and {@code b}. - * @param the datatype of the Operands, must be either TFloat32 or - * TFloat64 + * @param axis sum over the last N axes of a and the first N axes of b in order. If {@code + * axis=0}, computes the outer product between {@code a} and {@code b}. + * @param the datatype of the Operands, must be either TFloat32 or TFloat64 * @return A {@code Operand} with the same type as {@code a}. - * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are + * not the same data type */ - @Endpoint(name = "tensordot") public Operand tensordot(Operand a, Operand b, int axis) { - - Operand[] abAxis = tensordotAxes(a, axis); - Operand aAxis = abAxis[0]; - Operand bAxis = abAxis[1]; - return tensordot(a, b, aAxis, bAxis); + return TensorDot.tensordot(scope, a, b, axis); } /** * Tensor contraction of a and b along specified axes and outer product. - *
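A hedged usage sketch of the tensordot endpoints in this hunk, covering the scalar-axis form above and the explicit axis-pair form introduced further down; tf and fops are assumed handles to the core and framework ops.

    // Hypothetical usage, not part of the patch.
    Operand<TFloat32> a = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
    Operand<TFloat32> b = tf.constant(new float[][] {{5f, 6f}, {7f, 8f}});
    // axis = 1 contracts the last axis of a with the first axis of b: plain matrix multiplication.
    Operand<TFloat32> prod = fops.math.tensordot(a, b, 1);
    // axis = 0 contracts nothing and yields the outer product, a tensor of order 4.
    Operand<TFloat32> outer = fops.math.tensordot(a, b, 0);
    // Explicit axis pairs: {{1}, {0}} is again equivalent to matrix multiplication.
    Operand<TFloat32> same = fops.math.tensordot(a, b, new int[][] {{1}, {0}});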

      - * Tensordot (also known as tensor contraction) sums the product of elements - * from {@code a} and {@code b} over the indices specified by - * {@code a_axes} and {@code b_axes}. The lists - * {@code a_axes} and {@code b_axes} specify those pairs of axes - * along which to contract the tensors. The axis {@code a_axes[i]} of - * {@code a} must have the same dimension as axis - * {@code b_axes[i]} of {@code b} for all {@code i} in - * {@code range(0, len(a_axes))}. The lists - * {@code a_axes} and {@code b_axes} must have identical length - * and consist of unique integers that specify valid axes for each of the - * tensors. Additionally outer product is supported by passing - * {@code axes=0}. - *

      - * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. - *

      - * Example 1: When {@code a} and {@code b} are matrices (order 2), - * the case {@code axes = 1} is equivalent to matrix multiplication. - *

      - * Example 2: When {@code a} and {@code b} are matrices (order 2), - * the case - * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. - *

      - * Example 3: When {@code a} and {@code b} are matrices (order 2), - * the case {@code axes=0} gives the outer product, a tensor of order - * 4. - *

      - * Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor - * cjklm whose entry corresponding to the indices - * (j,k,l,m) is given by: - *

      - * cjklm = Σi aijk - * blmi . - *

      - * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * + *

      Tensordot (also known as tensor contraction) sums the product of elements from {@code a} and + * {@code b} over the indices specified by {@code a_axes} and {@code b_axes}. The lists {@code + * a_axes} and {@code b_axes} specify those pairs of axes along which to contract the tensors. The + * axis {@code a_axes[i]} of {@code a} must have the same dimension as axis {@code b_axes[i]} of + * {@code b} for all {@code i} in {@code range(0, len(a_axes))}. The lists {@code a_axes} and + * {@code b_axes} must have identical length and consist of unique integers that specify valid + * axes for each of the tensors. Additionally outer product is supported by passing {@code + * axes=0}. + * + *

      This operation corresponds to {@code numpy.tensordot(a, b, axes)}. + * + *

      Example 1: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = 1} is + * equivalent to matrix multiplication. + * + *

      Example 2: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = [[1], + * [0]]} is equivalent to matrix multiplication. + * + *

      Example 3: When {@code a} and {@code b} are matrices (order 2), the case {@code axes=0} + * gives the outer product, a tensor of order 4. + * + *

Example 4: Suppose that a<sub>ijk</sub> and b<sub>lmn</sub> represent two + * tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor + * c<sub>jklm</sub> whose entry corresponding to the indices (j,k,l,m) is given by: + * + *&#13;

c<sub>jklm</sub> = Σ<sub>i</sub> a<sub>ijk</sub> b<sub>lmi</sub>. + * + *&#13;

      In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * *

      * * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. * @param b {@code Operand} with the same type as {@code a}. - * @param axes If axes is a scalar, sum over the last N axes of a and the - * first N axes of b in order. If axes is a list, the first and second row - * contain the set of unique integers specifying axes along which the - * contraction is computed, for {@code a} and {@code b}, respectively. The number of - * axes for {@code a} and {@code b} must be equal. If {@code axis=0}, computes the outer - * product between {@code a} and {@code b}. - * @param the datatype of the Operands, must be either TFloat32 or - * TFloat64 + * @param axes If axes is a scalar, sum over the last N axes of a and the first N axes of b in + * order. If axes is a list, the first and second row contain the set of unique integers + * specifying axes along which the contraction is computed, for {@code a} and {@code b}, + * respectively. The number of axes for {@code a} and {@code b} must be equal. If {@code + * axis=0}, computes the outer product between {@code a} and {@code b}. + * @param the datatype of the Operands, must be either TFloat32 or TFloat64 * @return A {@code Operand} with the same type as {@code a}. - * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are + * not the same data type */ - @Endpoint(name = "tensordot") public Operand tensordot( Operand a, Operand b, Operand axes) { - - Operand[] abAxis = tensordotAxes(a, axes); - Operand aAxis = abAxis[0]; - Operand bAxis = abAxis[1]; - - return tensordot(a, b, aAxis, bAxis); + return TensorDot.tensordot(scope, a, b, axes); } /** * Tensor contraction of a and b along specified axes and outer product. - *

      - * Tensordot (also known as tensor contraction) sums the product of elements - * from {@code a} and {@code b} over the indices specified by - * {@code a_axes} and {@code b_axes}. The lists - * {@code a_axes} and {@code b_axes} specify those pairs of axes - * along which to contract the tensors. The axis {@code a_axes[i]} of - * {@code a} must have the same dimension as axis - * {@code b_axes[i]} of {@code b} for all {@code i} in - * {@code range(0, len(a_axes))}. The lists - * {@code a_axes} and {@code b_axes} must have identical length - * and consist of unique integers that specify valid axes for each of the - * tensors. Additionally outer product is supported by passing - * {@code axes=0}. - *

      - * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. - *

      - * Example 1: When {@code a} and {@code b} are matrices (order 2), - * the case {@code axes = 1} is equivalent to matrix multiplication. - *

      - * Example 2: When {@code a} and{@code b} are matrices (order 2), - * the case - * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. - *

      - * Example 3: When {@code a} and {@code b} are matrices (order 2), - * the case {@code axes=0} gives the outer product, a tensor of order - * 4. - *

      - * Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor - * cjklm whose entry corresponding to the indices - * (j,k,l,m) is given by: - *

      - * cjklm = Σi aijk - * blmi . - *

      - * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * + *

      Tensordot (also known as tensor contraction) sums the product of elements from {@code a} and + * {@code b} over the indices specified by {@code a_axes} and {@code b_axes}. The lists {@code + * a_axes} and {@code b_axes} specify those pairs of axes along which to contract the tensors. The + * axis {@code a_axes[i]} of {@code a} must have the same dimension as axis {@code b_axes[i]} of + * {@code b} for all {@code i} in {@code range(0, len(a_axes))}. The lists {@code a_axes} and + * {@code b_axes} must have identical length and consist of unique integers that specify valid + * axes for each of the tensors. Additionally outer product is supported by passing {@code + * axes=0}. + * + *

      This operation corresponds to {@code numpy.tensordot(a, b, axes)}. + * + *

      Example 1: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = 1} is + * equivalent to matrix multiplication. + * + *

Example 2: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = [[1], + * [0]]} is equivalent to matrix multiplication. + * + *&#13;

      Example 3: When {@code a} and {@code b} are matrices (order 2), the case {@code axes=0} + * gives the outer product, a tensor of order 4. + * + *

Example 4: Suppose that a<sub>ijk</sub> and b<sub>lmn</sub> represent two + * tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor + * c<sub>jklm</sub> whose entry corresponding to the indices (j,k,l,m) is given by: + * + *&#13;

c<sub>jklm</sub> = Σ<sub>i</sub> a<sub>ijk</sub> b<sub>lmi</sub>. + * + *&#13;

      In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * *

      * * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. * @param b {@code Operand} with the same type as {@code a}. - * @param axes the first and second row - * contain the set of unique integers specifying axes along which the - * contraction is computed, for {@code a} and {@code b}, respectively. The number of - * axes for {@code a} and {@code b} must be equal. I - * @param the datatype of the Operands, must be either TFloat32 or - * TFloat64 + * @param axes the first and second row contain the set of unique integers specifying axes along + * which the contraction is computed, for {@code a} and {@code b}, respectively. The number of + * axes for {@code a} and {@code b} must be equal. I + * @param the datatype of the Operands, must be either TFloat32 or TFloat64 * @return A {@code Operand} with the same type as {@code a}. - * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are + * not the same data type */ - @Endpoint(name = "tensordot") public Operand tensordot(Operand a, Operand b, int[] axes) { - - Operand[] abAxis = tensordotAxes(a, axes); - Operand aAxis = abAxis[0]; - Operand bAxis = abAxis[1]; - - return tensordot(a, b, aAxis, bAxis); + return TensorDot.tensordot(scope, a, b, axes); } /** * Tensor contraction of a and b along specified axes and outer product. - *

      - * Tensordot (also known as tensor contraction) sums the product of elements - * from {@code a} and {@code b} over the indices specified by - * {@code a_axes} and {@code b_axes}. The lists - * {@code a_axes} and {@code b_axes} specify those pairs of axes - * along which to contract the tensors. The axis {@code a_axes[i]} of - * {@code a} must have the same dimension as axis - * {@code b_axes[i]} of {@code b} for all {@code i} in - * {@code range(0, len(a_axes))}. The lists - * {@code a_axes} and {@code b_axes} must have identical length - * and consist of unique integers that specify valid axes for each of the - * tensors. Additionally outer product is supported by passing - * {@code axes=0}. - *

      - * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. - *

      - * Example 1: When {@code a} and {@code b} are matrices (order 2), - * the case {@code axes = 1} is equivalent to matrix multiplication. - *

      - * Example 2: When {@code a} and{@code b} are matrices (order 2), - * the case - * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. - *

      - * Example 3: When {@code a} and {@code b} are matrices (order 2), - * the case {@code axes=0} gives the outer product, a tensor of order - * 4. - *

      - * Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor - * cjklm whose entry corresponding to the indices - * (j,k,l,m) is given by: - *

      - * cjklm = Σi aijk - * blmi . - *

      - * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * + *

      Tensordot (also known as tensor contraction) sums the product of elements from {@code a} and + * {@code b} over the indices specified by {@code a_axes} and {@code b_axes}. The lists {@code + * a_axes} and {@code b_axes} specify those pairs of axes along which to contract the tensors. The + * axis {@code a_axes[i]} of {@code a} must have the same dimension as axis {@code b_axes[i]} of + * {@code b} for all {@code i} in {@code range(0, len(a_axes))}. The lists {@code a_axes} and + * {@code b_axes} must have identical length and consist of unique integers that specify valid + * axes for each of the tensors. Additionally outer product is supported by passing {@code + * axes=0}. + * + *

      This operation corresponds to {@code numpy.tensordot(a, b, axes)}. + * + *

      Example 1: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = 1} is + * equivalent to matrix multiplication. + * + *

Example 2: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = [[1], + * [0]]} is equivalent to matrix multiplication. + * + *&#13;

      Example 3: When {@code a} and {@code b} are matrices (order 2), the case {@code axes=0} + * gives the outer product, a tensor of order 4. + * + *

Example 4: Suppose that a<sub>ijk</sub> and b<sub>lmn</sub> represent two + * tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor + * c<sub>jklm</sub> whose entry corresponding to the indices (j,k,l,m) is given by: + * + *&#13;

c<sub>jklm</sub> = Σ<sub>i</sub> a<sub>ijk</sub> b<sub>lmi</sub>. + * + *&#13;

      In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * *

      * * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. * @param b {@code Operand} with the same type as {@code a}. - * @param axes the first and second row - * contain the set of unique integers specifying axes along which the - * contraction is computed, for {@code a} and {@code b}, respectively. The number of - * axes for {@code a} and {@code b} must be equal. I - * @param the datatype of the Operands, must be either TFloat32 or - * TFloat64 + * @param axes the first and second row contain the set of unique integers specifying axes along + * which the contraction is computed, for {@code a} and {@code b}, respectively. The number of + * axes for {@code a} and {@code b} must be equal. I + * @param the datatype of the Operands, must be either TFloat32 or TFloat64 * @return A {@code Operand} with the same type as {@code a}. - * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are + * not the same data type */ - @Endpoint(name = "tensordot") public Operand tensordot(Operand a, Operand b, int[][] axes) { - - Operand[] abAxis = tensordotAxes(a, axes); - Operand aAxis = abAxis[0]; - Operand bAxis = abAxis[1]; - - return tensordot(a, b, aAxis, bAxis); + return TensorDot.tensordot(scope, a, b, axes); } /** * Tensor contraction of a and b along specified axes and outer product. - *

      - * Tensordot (also known as tensor contraction) sums the product of elements - * from {@code a} and {@code b} over the indices specified by - * {@code a_axes} and {@code b_axes}. The lists - * {@code a_axes} and {@code b_axes} specify those pairs of axes - * along which to contract the tensors. The axis {@code a_axes[i]} of - * {@code a} must have the same dimension as axis - * {@code b_axes[i]} of {@code b} for all {@code i} in - * {@code range(0, len(a_axes))}. The lists - * {@code a_axes} and {@code b_axes} must have identical length - * and consist of unique integers that specify valid axes for each of the - * tensors. Additionally outer product is supported by passing - * {@code axes=0}. - *

      - * This operation corresponds to {@code numpy.tensordot(a, b, axes)}. - *

      - * Example 1: When {@code a} and {@code b} are matrices (order 2), - * the case {@code axes = 1} is equivalent to matrix multiplication. - *

      - * Example 2: When {@code a} and{@code b} are matrices (order 2), - * the case - * {@code axes = [[1], [0]]} is equivalent to matrix multiplication. - *

      - * Example 3: When {@code a} and {@code b} are matrices (order 2), - * the case {@code axes=0} gives the outer product, a tensor of order - * 4. - *

      - * Example 4: Suppose that aijk and blmn - * represent two tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor - * cjklm whose entry corresponding to the indices - * (j,k,l,m) is given by: - *

      - * cjklm = Σi aijk - * blmi . - *

      - * In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * + *

      Tensordot (also known as tensor contraction) sums the product of elements from {@code a} and + * {@code b} over the indices specified by {@code a_axes} and {@code b_axes}. The lists {@code + * a_axes} and {@code b_axes} specify those pairs of axes along which to contract the tensors. The + * axis {@code a_axes[i]} of {@code a} must have the same dimension as axis {@code b_axes[i]} of + * {@code b} for all {@code i} in {@code range(0, len(a_axes))}. The lists {@code a_axes} and + * {@code b_axes} must have identical length and consist of unique integers that specify valid + * axes for each of the tensors. Additionally outer product is supported by passing {@code + * axes=0}. + * + *

      This operation corresponds to {@code numpy.tensordot(a, b, axes)}. + * + *

      Example 1: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = 1} is + * equivalent to matrix multiplication. + * + *

Example 2: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = [[1], + * [0]]} is equivalent to matrix multiplication. + * + *&#13;

      Example 3: When {@code a} and {@code b} are matrices (order 2), the case {@code axes=0} + * gives the outer product, a tensor of order 4. + * + *

Example 4: Suppose that a<sub>ijk</sub> and b<sub>lmn</sub> represent two + * tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor + * c<sub>jklm</sub> whose entry corresponding to the indices (j,k,l,m) is given by: + * + *&#13;

c<sub>jklm</sub> = Σ<sub>i</sub> a<sub>ijk</sub> b<sub>lmi</sub>. + * + *&#13;

      In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * *

      * * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. * @param b {@code Operand} with the same type as {@code a}. * @param aAxis axes for the a Operand * @param bAxis axes for the b Operand - * @param the datatype of the Operands, must be either TFloat32 or - * TFloat64 + * @param the datatype of the Operands, must be either TFloat32 or TFloat64 * @return A {@code Operand} with the same type as {@code a}. - * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are not the same data type + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are + * not the same data type */ - @SuppressWarnings({"unchecked", "unused"}) - @Endpoint(name = "tensordot") public Operand tensordot( Operand a, Operand b, Operand aAxis, Operand bAxis) { - - if (a.type().equals(TBfloat16.class) || a.type().equals(TFloat16.class)) { - throw new IllegalArgumentException( - String.format( - "Operand 'a' must be either TFloat32 or TFloat64 DataType, 'a' is a %s DataType", - a.type().getSimpleName())); - } - if (!a.type().equals(b.type())) { - throw new IllegalArgumentException( - String.format( - "Operands a and b must be the same data type, a is %s DataType, b is %s DataType", - a.type().getSimpleName(), b.type().getSimpleName())); - } - - // first result is Operand, second result is Operand, third result is long[] and it is - // ignored here. - Object[] aResult = tensordotReshape(a, aAxis, false); - Operand reshapedA = (Operand) aResult[0]; - Operand aFreeDims = (Operand) aResult[1]; - long[] aFreeDimsStatic = (long[]) aResult[2]; - - // first result is Operand, second result is Operand, third result is long[] and it is - // ignored here. - Object[] bResult = tensordotReshape(b, bAxis, true); - Operand reshapedB = (Operand) bResult[0]; - Operand bFreeDims = (Operand) bResult[1]; - long[] bFreeDimsStatic = (long[]) bResult[2]; - - Operand abMatmul = frameworkOps.linalg.matmul(reshapedA, reshapedB); - long[] abDimsStatic = new long[aFreeDimsStatic.length + bFreeDimsStatic.length]; - System.arraycopy(aFreeDimsStatic, 0, abDimsStatic, 0, aFreeDimsStatic.length); - System.arraycopy( - bFreeDimsStatic, 0, abDimsStatic, aFreeDimsStatic.length, bFreeDimsStatic.length); - if (!abMatmul.shape().hasUnknownDimension() - && abMatmul.shape().equals(Shape.of(abDimsStatic))) { - return abMatmul; - } else { - return Reshape.create(scope, abMatmul, Constant.vectorOf(scope, abDimsStatic)); - } + return TensorDot.tensordot(scope, a, b, aAxis, bAxis); } /** * Computes log(sum(exp(elements across dimensions of a tensor))). Reduces {@code input_tensor} * along the dimensions given in {@code axes}. * - *

      Reduces {@code input} along the dimensions given in {@code axes}. Unless {@code keepdims} - * is true, the rank of the tensor is reduced by 1 for each of the entries in {@code axes}, which + *

      Reduces {@code input} along the dimensions given in {@code axes}. Unless {@code keepdims} is + * true, the rank of the tensor is reduced by 1 for each of the entries in {@code axes}, which * must be unique. If {@code keepdims} is true, the reduced dimensions are retained with length 1. * If {@code axes} has no entries, all dimensions are reduced, and a tensor with a single element * is returned. This function is more numerically stable than {@code log(sum(exp(input)))}. It @@ -1058,85 +449,8 @@ public Operand tensordot( * @param the data type for the input and the result * @return The reduced tensor. */ - @Endpoint(name = "reduceLogSumExp") public Operand reduceLogSumExp( Operand input, int[] axes, boolean keepDims) { - Operand reduceDims = reductionDims(input, axes); - Operand rawMax = reduceMaxWithDims(input, axes, keepDims, reduceDims); - Operand myMax = - StopGradient.create( - scope, - Select.create( - scope, IsFinite.create(scope, rawMax), rawMax, ZerosLike.create(scope, rawMax))); - - Operand result = - Log.create( - scope, - reduceSumWithDims( - Exp.create(scope, Sub.create(scope, input, myMax)), axes, keepDims, reduceDims)); - - if (!keepDims) { - myMax = Reshape.create(scope, myMax, org.tensorflow.op.core.Shape.create(scope, result)); - } - result = Add.create(scope, result, myMax); - return mayReduceToScalar(keepDims, axes, result); - } - - private Operand reduceSumWithDims( - Operand input, int[] axes, boolean keepDims, Operand dims) { - return mayReduceToScalar( - keepDims, axes, ReduceSum.create(scope, input, dims, ReduceSum.keepDims(keepDims))); - } - - private Operand reduceMaxWithDims( - Operand input, int[] axes, boolean keepDims, Operand dims) { - return mayReduceToScalar( - keepDims, axes, ReduceMax.create(scope, input, dims, ReduceMax.keepDims(keepDims))); - } - - /** - * Sets a reduction's output shape to be a scalar if possible. - * - * @return the operand, possibly reduced to a scalar. - */ - private Operand mayReduceToScalar( - boolean keepDims, int[] axes, Operand output) { - - if ((output.shape().numDimensions() == Shape.UNKNOWN_SIZE - || output.shape().hasUnknownDimension()) - && !keepDims - && axes == null) { - return Reshape.create(scope, output, Constant.tensorOf(scope, Shape.scalar())); - } else { - return output; - } - } - - /** - * Reduce dimensions based on axis - * - * @param input the input - * @param axes he dimensions to reduce, may be null - * @return the dimensions to be reduced. 
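A usage sketch of reduceLogSumExp as exposed here (now delegating to ReduceLogSumExp.reduceLogSumExp); tf and fops are assumed handles to the core and framework ops.

    // Hypothetical usage, not part of the patch.
    Operand<TFloat32> x = tf.constant(new float[][] {{0f, 0f, 0f}, {0f, 0f, 0f}});
    // Reduce over axis 1 and drop it: each row yields log(3 * exp(0)) = log(3) ~ 1.0986.
    Operand<TFloat32> rowLse = fops.math.reduceLogSumExp(x, new int[] {1}, false);
    // keepDims == true keeps the reduced axis, returning a 2x1 tensor instead of a length-2 vector.
    Operand<TFloat32> kept = fops.math.reduceLogSumExp(x, new int[] {1}, true);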
- */ - private Operand reductionDims(Operand input, int[] axes) { - if (axes != null) { - return Constant.vectorOf(scope, axes); - } - long rank = input.shape().numDimensions(); - if (rank != Shape.UNKNOWN_SIZE) { - int[] dims = new int[(int) rank]; - for (int i = 0; i < rank; i++) { - dims[i] = i; - } - return Constant.vectorOf(scope, dims); - - } else { - return Range.create( - scope, - Constant.scalarOf(scope, 0), - Rank.create(scope, input), - Constant.scalarOf(scope, 1)); - } + return ReduceLogSumExp.reduceLogSumExp(scope, input, axes, keepDims); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java index 4f5120a3dbf..96f023ffedf 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java @@ -18,12 +18,11 @@ import org.tensorflow.framework.op.nn.SigmoidCrossEntropyWithLogits; import org.tensorflow.framework.op.nn.SoftmaxCrossEntropyWithLogits; import org.tensorflow.framework.op.nn.SparseSoftmaxCrossEntropyWithLogits; -import org.tensorflow.op.Op; import org.tensorflow.op.Scope; import org.tensorflow.types.family.TNumber; /** - * An API for building {@code nn} operations as {@link Op Op}s + * Creates Framework nerual network Operations * *

      These are higher level ops that may invoke core ops. Higher level Ops may perform the * operation solely in the TensorFlow framework or do preprocessing of the Operands before invoking diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetOps.java index f76947018b5..832f0a2892c 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/SetOps.java @@ -15,15 +15,11 @@ package org.tensorflow.framework.op; import org.tensorflow.Operand; +import org.tensorflow.framework.op.sets.Sets; import org.tensorflow.op.Scope; -import org.tensorflow.op.SparseOps; -import org.tensorflow.op.core.Constant; -import org.tensorflow.op.dtypes.Cast; -import org.tensorflow.op.sparse.DenseToDenseSetOperation; -import org.tensorflow.op.sparse.SparseToDense; import org.tensorflow.types.family.TNumber; -/** Implementation of set operations */ +/** Creates Framework set Operations */ public class SetOps { private final Scope scope; @@ -54,7 +50,8 @@ public class SetOps { * operation. */ public Operand difference(Operand a, Operand b) { - return difference(a, b, true); + + return Sets.difference(scope, a, b, true); } /** @@ -71,7 +68,7 @@ public Operand difference(Operand a, Operand b) { * operation. */ public Operand difference(Operand a, Operand b, boolean aMinusB) { - return setOperation(a, b, aMinusB ? Operation.A_MINUS_B : Operation.B_MINUS_A); + return Sets.difference(scope, a, b, aMinusB); } /** @@ -85,7 +82,7 @@ public Operand difference(Operand a, Operand b, boo * operation. */ public Operand union(Operand a, Operand b) { - return setOperation(a, b, Operation.UNION); + return Sets.union(scope, a, b); } /** @@ -99,63 +96,6 @@ public Operand union(Operand a, Operand b) { * operation. */ public Operand intersection(Operand a, Operand b) { - return setOperation(a, b, Operation.INTERSECTION); - } - - /** - * Compute set operation of elements in last dimension of a and b. - * - * @param a The first set operation operand - * @param b The other et operation operand - * @param setOperation The set operation to perform, {@link Operation}. - * @param the data type for the sets - * @return An Operand with the same rank as a and b, and all but the - * last dimension the same. Elements along the last dimension contain the results of the set - * operation. 
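A sketch of the refactored SetOps entry points, whose difference, union and intersection methods now delegate to Sets. The handle tf is the core Ops, and fops.sets is assumed to be how the framework set ops are reached; dense results are zero-padded where rows produce sets of different sizes.

    // Hypothetical usage, not part of the patch.
    Operand<TInt32> a = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}});
    Operand<TInt32> b = tf.constant(new int[][] {{1, 3, 5}, {6, 7, 8}});
    Operand<TInt32> aMinusB = fops.sets.difference(a, b);        // per row: {2},    {4, 5}
    Operand<TInt32> bMinusA = fops.sets.difference(a, b, false); // per row: {5},    {7, 8}
    Operand<TInt32> common  = fops.sets.intersection(a, b);      // per row: {1, 3}, {6}
    Operand<TInt32> joined  = fops.sets.union(a, b);             // per row: {1, 2, 3, 5}, {4, 5, 6, 7, 8}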
- */ - public Operand setOperation( - Operand a, Operand b, Operation setOperation) { - - DenseToDenseSetOperation setOperationResult = - DenseToDenseSetOperation.create( - scope, - a, - b, - setOperation.getSetOperation(), - DenseToDenseSetOperation.validateIndices(true)); - - return SparseToDense.create( - scope, - setOperationResult.resultIndices(), - setOperationResult.resultShape(), - setOperationResult.resultValues(), - Cast.create(scope, Constant.scalarOf(scope, 0), a.type())); - } - - /** - * Enumeration containing the string operation values to be passed to the TensorFlow Sparse Ops - * function {@link SparseOps#denseToDenseSetOperation} - */ - public enum Operation { - A_MINUS_B("a-b"), - B_MINUS_A("b-a"), - INTERSECTION("intersection"), - UNION("union"); - - private final String setOperation; - - Operation(String setOperation) { - this.setOperation = setOperation; - } - - /** - * Gets the set operation String value used to pass as the stringOperation value to {@link - * SparseOps#denseToDenseSetOperation} - * - * @return the set operation String value - */ - public String getSetOperation() { - return setOperation; - } + return Sets.intersection(scope, a, b); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/linalg/MatMul.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/linalg/MatMul.java new file mode 100644 index 00000000000..a156cdfd2b4 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/linalg/MatMul.java @@ -0,0 +1,289 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op.linalg; + +import org.tensorflow.Operand; +import org.tensorflow.framework.utils.SparseTensor; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Scope; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Conj; +import org.tensorflow.op.sparse.SparseMatMul; +import org.tensorflow.op.train.BatchMatMul; +import org.tensorflow.types.TBfloat16; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; + +/** Multiplication matrix operations */ +public class MatMul { + + /** + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b }. + * + *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the + * inner 2 dimensions specify valid matrix multiplication dimensions, and any further outer + * dimensions specify matching batch size. + * + *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, {@code + * TFloat32}, {@code TFloat64}, {@code TInt32}. + * + *

      Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by + * setting one of the corresponding flag to true. These are false by default. + * + *

      A simple 2-D tensor matrix multiplication: + * + *

      {@code
      +   * Operand a = tf.constant(new double[][] {
      +   *         {-8.944851},
      +   *         {4.1711287},
      +   *         {-0.22380222}
      +   *     });
      +   * Operand b = tf.constant( new double[][] {
      +   *         {-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}
      +   *     });
      +   * Operand result = fops.linalg.matmul(a, b);
      +   * // result = {
      +   * //     {127.69746,  111.21564,  20.078575,  14.111271,  -16.62731},
      +   * //     {-59.547394, -51.861652, -9.362965,  -6.580314,    7.753584},
      +   * //     {  3.1950197,  2.7826407, 0.50237054, 0.35306725, -0.4160191}
      +   * //  }
      +   *
      +   * }
      + * + *

      Note: This is matrix product, not element-wise product. + * + * @param scope The TensorFlow scope + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 }, {@code + * TInt32}. with a {@code rank > 1} + * @param b an Operand with same type and rank as {@code a}. + * @param the data type of the Operands + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most matrix is + * the product of the corresponding matrices in {@code a} and {@code b}. This is the matrix + * product not an element-wise product. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} , or + * {@code transposeB} and {@code adjointB} are both set to `true`. + */ + public static Operand matmul(Scope scope, Operand a, Operand b) { + return matmul(scope, a, b, false, false, false, false, false, false); + } + + /** + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b }. + * + *
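Beyond the plain two-argument call shown in the example above, the overloads added in this file expose transpose and adjoint flags. A hedged sketch, assuming the framework linalg group mirrors the static MatMul.matmul overloads (otherwise MatMul.matmul(scope, a, b, true, false) can be called directly with an explicit Scope).

    // Hypothetical usage, not part of the patch.
    Operand<TFloat64> a = tf.constant(new double[][] {{1, 2}, {3, 4}, {5, 6}});    // 3x2
    Operand<TFloat64> b = tf.constant(new double[][] {{7, 8}, {9, 10}, {11, 12}}); // 3x2
    // Transpose a on the fly so the inner dimensions line up: (2x3) x (3x2) -> 2x2.
    Operand<TFloat64> c = fops.linalg.matmul(a, b, true, false);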

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the + * inner 2 dimensions specify valid matrix multiplication dimensions, and any further outer + * dimensions specify matching batch size. + * + *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, {@code + * TFloat32}, {@code TFloat64}, {@code TInt32}. + * + *

      Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by + * setting one of the corresponding flag to true. These are false by default. + * + *

      Note: This is matrix product, not element-wise product. + * + *

      A simple 2-D tensor matrix multiplication: + * + *

      {@code
      +   * Operand a = tf.constant(new double[][] {
      +   *         {-8.944851},
      +   *         {4.1711287},
      +   *         {-0.22380222}
      +   *     });
      +   * Operand b = tf.constant( new double[][] {
      +   *         {-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}
      +   *     });
      +   * Operand result = fops.linalg.matmul(a, b);
      +   * // result = {
      +   * //     {127.69746,  111.21564,  20.078575,  14.111271,  -16.62731},
      +   * //     {-59.547394, -51.861652, -9.362965,  -6.580314,    7.753584},
      +   * //     {  3.1950197,  2.7826407, 0.50237054, 0.35306725, -0.4160191}
      +   * //  }
      +   *
      +   * }
      + * + * @param scope The TensorFlow scope + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 }, {@code + * TInt32}. with a {@code rank > 1} + * @param b an Operand with same type and rank as {@code a}. + * @param transposeA If true, {@code a} is transposed before multiplication. + * @param transposeB If true, {@code b} is transposed before multiplication + * @param the data type of the Operands + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most matrix is + * the product of the corresponding matrices in {@code a} and {@code b}. This is the matrix + * product not an element-wise product. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} , or + * {@code transposeB} and {@code adjointB} are both set to `true`. + */ + public static Operand matmul( + Scope scope, Operand a, Operand b, boolean transposeA, boolean transposeB) { + return matmul(scope, a, b, transposeA, transposeB, false, false, false, false); + } + + /** + * Multiplies matrix {@code a} by matrix {@code b}, producing {@code a} * {@code b }. + * + *

      The inputs must, following any transpositions, be tensors of {@code rank >= 2} where the + * inner 2 dimensions specify valid matrix multiplication dimensions, and any further outer + * dimensions specify matching batch size. + * + *

      Both matrices must be of the same type. The supported types are: {@code TFloat16}, {@code + * TFloat32}, {@code TFloat64}, {@code TInt32}. + * + *

      Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by + * setting one of the corresponding flag to true. These are false by default. + * + *

      Note: This is matrix product, not element-wise product. + * + *

      A simple 2-D tensor matrix multiplication: + * + *

      {@code
      +   * Operand a = tf.constant(new double[][] {
      +   *         {-8.944851},
      +   *         {4.1711287},
      +   *         {-0.22380222}
      +   *     });
      +   * Operand b = tf.constant( new double[][] {
      +   *         {-14.276086, -12.433481, -2.2447076, -1.5775859, 1.8588694}
      +   *     });
      +   * Operand result = fops.linalg.matmul(a, b);
      +   * // result = {
      +   * //     {127.69746,  111.21564,  20.078575,  14.111271,  -16.62731},
      +   * //     {-59.547394, -51.861652, -9.362965,  -6.580314,    7.753584},
      +   * //     {  3.1950197,  2.7826407, 0.50237054, 0.35306725, -0.4160191}
      +   * //  }
      +   *
      +   * }
      + * + * @param scope The TensorFlow scope + * @param a an Operand of of type {@code TFloat16}, {@code TFloat32}, {@code TFloat64 }, {@code + * TInt32}. with a {@code rank > 1} + * @param b an Operand with same type and rank as {@code a}. + * @param transposeA If true, {@code a} is transposed before multiplication. + * @param transposeB If True, {@code b} is transposed before multiplication + * @param adjointA If true, {@code a} is conjugated and transposed before multiplication. + * @param adjointB If true, {@code b} is conjugated and transposed before multiplication. + * @param aIsSparse If true, {@code a} is treated as a sparse matrix. Notice, this does not + * support {@link SparseTensor}, it just makes optimizations that assume most values in + * {@code a} are zero. + * @param bIsSparse If true, {@code b} is treated as a sparse matrix. Notice, this does not + * support {@link SparseTensor}, it just makes optimizations that assume most values in + * {@code b} are zero. + * @param the data type of the Operands + * @return A Operand of the same type as {@code a} and {@code b} where each inner-most matrix is + * the product of the corresponding matrices in {@code a} and {@code b}. This is the matrix + * product not an element-wise product. + * @throws java.lang.IllegalArgumentException If {@code transposeA} and {@code adjointA} , or + * {@code transposeB} and {@code adjointB} are both set to `true`. + */ + @SuppressWarnings("unchecked") + public static Operand matmul( + Scope scope, + Operand a, + Operand b, + boolean transposeA, + boolean transposeB, + boolean adjointA, + boolean adjointB, + boolean aIsSparse, + boolean bIsSparse) { + Scope lscope = scope.withSubScope("MatMul"); + if (transposeA && adjointA) + throw new IllegalArgumentException("Only one of transposeA and adjointA can be true."); + if (transposeB && adjointB) + throw new IllegalArgumentException("Only one of transposeB and adjointB can be true."); + if (!(TFloating.class.isAssignableFrom(a.type()) || a.type().equals(TInt32.class))) + throw new IllegalArgumentException( + String.format( + "Operand 'a' must be of type 'TBfloat16','TFloat16', 'TFloat32', 'TFloat64' or 'TInt32'. found type : %s", + a.type().getSimpleName())); + if (!(TFloating.class.isAssignableFrom(a.type()) || b.type().equals(TInt32.class))) + throw new IllegalArgumentException( + String.format( + "Operand 'b' must be of type 'TBfloat16', 'TFloat32', 'TFloat64' or 'TInt32'. found type : %s", + b.type().getSimpleName())); + + Shape aShape = a.shape(); + Shape bShape = b.shape(); + if (aShape.numDimensions() != bShape.numDimensions()) + throw new IllegalArgumentException( + String.format( + "Parameters 'a' and 'b' must the same rank: found a rank = %d, b rank = %d", + aShape.numDimensions(), bShape.numDimensions())); + boolean outputMayHaveNonEmptyBatchShape = + aShape.numDimensions() == Shape.UNKNOWN_SIZE + || aShape.numDimensions() > 2 + || bShape.numDimensions() == Shape.UNKNOWN_SIZE; + + if ((!aIsSparse && !bIsSparse) && outputMayHaveNonEmptyBatchShape) { + // BatchMatmul does not support transpose, so we conjugate the matrix and + // use adjoint instead. Conj() is a noop for real matrices. + if (transposeA) { + a = Conj.create(scope, a); + adjointA = true; + } + if (transposeB) { + b = Conj.create(scope, b); + adjointB = true; + } + return BatchMatMul.create( + lscope, a, b, BatchMatMul.adjX(adjointA), BatchMatMul.adjY(adjointB)); + } + + // Neither matmul nor sparse_matmul support adjoint, so we conjugate + // the matrix and use transpose instead. 
Conj() is a noop for real + // matrices. + if (adjointA) { + a = Conj.create(scope, a); + transposeA = true; + } + if (adjointB) { + b = Conj.create(scope, b); + transposeB = true; + } + + boolean useSparseMatmul = false; + if (aIsSparse || bIsSparse) { + useSparseMatmul = + (a.type().equals(TBfloat16.class) || a.type().equals(TFloat32.class)) + && (b.type().equals(TBfloat16.class) || b.type().equals(TFloat32.class)); + } + if ((a.type().equals(TBfloat16.class) || b.type().equals(TBfloat16.class)) + && !a.type().equals(b.type())) useSparseMatmul = true; + + if (useSparseMatmul) { + Operand result = + SparseMatMul.create( + lscope, + a, + b, + SparseMatMul.transposeA(transposeA), + SparseMatMul.transposeB(transposeB), + SparseMatMul.aIsSparse(aIsSparse), + SparseMatMul.bIsSparse(bIsSparse)); + if (a.type().equals(TFloat32.class)) return (Operand) result; + else return Cast.create(scope, result, a.type()); + } + + return org.tensorflow.op.linalg.MatMul.create( + lscope, + a, + b, + org.tensorflow.op.linalg.MatMul.transposeA(transposeA), + org.tensorflow.op.linalg.MatMul.transposeB(transposeB)); + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/Axes.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/Axes.java new file mode 100644 index 00000000000..cea7c5f80bf --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/Axes.java @@ -0,0 +1,49 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op.math; + +import org.tensorflow.Operand; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Scope; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Range; +import org.tensorflow.op.core.Rank; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TType; + +/** Axes Operations */ +public class Axes { + + /** + * Creates an Operand that has all axes contained in the Operand's shape. + * + * @param scope The TensorFlow scope + * @param op the Operand + * @return an Operand that has all axes contained in the Operand's shape.. 
+ */ + public static Operand allAxes(Scope scope, Operand op) { + int rank = op.shape().numDimensions(); + if (rank != Shape.UNKNOWN_SIZE) { + int[] axes = new int[rank]; + for (int i = 0; i < rank; i++) { + axes[i] = i; + } + return Constant.vectorOf(scope, axes); + } else { + return Range.create( + scope, Constant.scalarOf(scope, 0), Rank.create(scope, op), Constant.scalarOf(scope, 1)); + } + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/ConfusionMatrix.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/ConfusionMatrix.java new file mode 100644 index 00000000000..71103133066 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/ConfusionMatrix.java @@ -0,0 +1,318 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op.math; + +import java.util.Arrays; +import java.util.Collections; +import org.tensorflow.Operand; +import org.tensorflow.framework.losses.impl.LossTuple; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Scope; +import org.tensorflow.op.core.AssertThat; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Identity; +import org.tensorflow.op.core.OnesLike; +import org.tensorflow.op.core.ReduceAll; +import org.tensorflow.op.core.ReduceMax; +import org.tensorflow.op.core.ScatterNd; +import org.tensorflow.op.core.Squeeze; +import org.tensorflow.op.core.Stack; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.GreaterEqual; +import org.tensorflow.op.math.Less; +import org.tensorflow.op.math.Maximum; +import org.tensorflow.types.TBool; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TNumber; + +/** Confusion Matrix Operations */ +public class ConfusionMatrix { + + /** + * Computes the confusion matrix from predictions and labels. + * + *

      The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the + * number of valid labels for a given classification task. Both prediction and labels must be 1-D + * arrays of the same shape in order for this function to work. + * + *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum + * value in either predictions or labels. Class labels are expected to start at 0. For example, if + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. + * + *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to + * the total value of the confusion matrix cell. + * + *

      For example: + * + *

      {@code
+   * fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      +   *     [[0 0 0 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 0 0 0]
      +   *      [0 0 0 0 1]]
      +   * }
      + * + *

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param scope The TensorFlow scope + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public static Operand confusionMatrix( + Scope scope, Operand labels, Operand predictions) { + return confusionMatrix(scope, labels, predictions, null, null); + } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

      The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the + * number of valid labels for a given classification task. Both prediction and labels must be 1-D + * arrays of the same shape in order for this function to work. + * + *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum + * value in either predictions or labels. Class labels are expected to start at 0. For example, if + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. + * + *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to + * the total value of the confusion matrix cell. + * + *

      For example: + * + *

      {@code
+   * fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      +   *     [[0 0 0 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 0 0 0]
      +   *      [0 0 0 0 1]]
      +   * }
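+   *
+   * 
For instance (an illustrative sketch, not part of the original example), assuming integer
+   * weights of {@code [1, 1, 2]} for the same labels and predictions, each prediction contributes
+   * its weight to its cell rather than 1:
+   *
+   * 
{@code
+   * fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4}),
+   *     tf.constant(new int[] {1, 1, 2})) ==>
+   *     [[0 0 0 0 0]
+   *      [0 0 1 0 0]
+   *      [0 0 1 0 0]
+   *      [0 0 0 0 0]
+   *      [0 0 0 0 2]]
+   * }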
      + * + *

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param scope The TensorFlow scope + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param weights An optional Operand whose shape matches {@code predictions}. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public static Operand confusionMatrix( + Scope scope, Operand labels, Operand predictions, Operand weights) { + return confusionMatrix(scope, labels, predictions, weights, null); + } + + /** + * Computes the confusion matrix from predictions and labels. + * + *

      The matrix columns represent the prediction labels and the rows represent the real labels. + * The confusion matrix is always a 2-D array of shape {@code [n,n]}, where {@code n} is the + * number of valid labels for a given classification task. Both prediction and labels must be 1-D + * arrays of the same shape in order for this function to work. + * + *

      If {@code numClasses} is null, then {@code numClasses} will be set to one plus the maximum + * value in either predictions or labels. Class labels are expected to start at 0. For example, if + * {@code numClasses} is 3, then the possible labels would be {@code [0, 1, 2]}. + * + *

      If {@code weights} is not null, then each prediction contributes its corresponding weight to + * the total value of the confusion matrix cell. + * + *

      For example: + * + *

      {@code
+   * fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4})) ==>
      +   *     [[0 0 0 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 1 0 0]
      +   *      [0 0 0 0 0]
      +   *      [0 0 0 0 1]]
      +   * }
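+   *
+   * 
Illustratively (a sketch; the call shape is assumed to mirror this overload and is not part of
+   * the original example), passing {@code numClasses = 6} with no weights for the same inputs pads
+   * the result to a {@code 6x6} matrix with the same non-zero cells:
+   *
+   * 
{@code
+   * fops.math.confusionMatrix(tf.constant(new int[] {1, 2, 4}), tf.constant(new int[] {2, 2, 4}),
+   *     null, tf.constant(6L)) ==>
+   *     [[0 0 0 0 0 0]
+   *      [0 0 1 0 0 0]
+   *      [0 0 1 0 0 0]
+   *      [0 0 0 0 0 0]
+   *      [0 0 0 0 1 0]
+   *      [0 0 0 0 0 0]]
+   * }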
      + * + *

      Note that the possible labels are assumed to be {@code [0, 1, 2, 3, 4]}, resulting in a 5x5 + * confusion matrix. + * + * @param scope The TensorFlow scope + * @param labels 1-D Operand of real labels for the classification task. + * @param predictions 1-D Operand of predictions for a given classification. + * @param weights An optional Operand whose shape matches {@code predictions}. + * @param numClasses The possible number of labels the classification task can have. If this value + * is null, it will be calculated using both predictions and labels. + * @param Data type of the confusion matrix. + * @return An Operand of type {@code type} with shape {@code [n, n]} representing the confusion + * matrix, where {@code n} is the number of possible labels in the classification task. + * @throws IllegalArgumentException If both predictions and labels are not 1-D vectors and have + * mismatched shapes, or if {@code weights} is not null and its shape doesn't match {@code + * predictions}. + */ + public static Operand confusionMatrix( + Scope scope, + Operand labels, + Operand predictions, + Operand weights, + Operand numClasses) { + Scope lScope = scope.withSubScope("confusionMatrix"); + LossTuple tuple = removeSqueezableDimensions(scope, labels, predictions, 0); + Operand lLabels = Cast.create(lScope, tuple.getLabels(), TInt64.class); + Operand lPredictions = Cast.create(lScope, tuple.getTarget(), TInt64.class); + + Operand zero = Constant.scalarOf(lScope, 0L); + Operand one = Constant.scalarOf(lScope, 1L); + + AssertThat labelsNonNegative = + AssertThat.create( + lScope, + ReduceAll.create( + lScope, GreaterEqual.create(lScope, lLabels, zero), Axes.allAxes(scope, lLabels)), + Collections.singletonList( + Constant.scalarOf(lScope, "labels contains negative values"))); + lLabels = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(labelsNonNegative)), lLabels); + + AssertThat predictionsNonNegative = + AssertThat.create( + lScope, + ReduceAll.create( + lScope, + GreaterEqual.create(lScope, lPredictions, zero), + Axes.allAxes(scope, lPredictions)), + Collections.singletonList( + Constant.scalarOf(lScope, "predictions contains negative values"))); + lPredictions = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(predictionsNonNegative)), + lPredictions); + + Operand lNumClasses; + if (numClasses == null) { + lNumClasses = + Add.create( + lScope, + Maximum.create( + lScope, + ReduceMax.create(lScope, lPredictions, zero), + ReduceMax.create(lScope, lLabels, zero)), + one); + } else { + lNumClasses = Cast.create(lScope, numClasses, TInt64.class); + Operand less = Less.create(lScope, lLabels, lNumClasses); + AssertThat labelsLess = + AssertThat.create( + lScope, + ReduceAll.create(scope, less, Axes.allAxes(scope, less), ReduceAll.keepDims(false)), + Collections.singletonList(Constant.scalarOf(lScope, "labels out of bounds"))); + lLabels = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(labelsLess)), lLabels); + + less = Less.create(lScope, lPredictions, lNumClasses); + AssertThat predictionsLess = + AssertThat.create( + lScope, + ReduceAll.create(scope, less, Axes.allAxes(scope, less), ReduceAll.keepDims(false)), + Collections.singletonList(Constant.scalarOf(lScope, "predictions out of bounds"))); + lPredictions = + Identity.create( + lScope.withControlDependencies(Collections.singletonList(predictionsLess)), + lPredictions); + } + + if (weights != null) { + if 
(!predictions.shape().isCompatibleWith(weights.shape())) { + throw new IllegalArgumentException( + String.format( + "predictions.shape() [%s], is not compatible with weights.shape() [ %s].", + predictions.shape(), weights.shape())); + } + } + + Operand shape = Stack.create(lScope, Arrays.asList(lNumClasses, lNumClasses)); + Operand indices = + Stack.create(lScope, Arrays.asList(lLabels, lPredictions), Stack.axis(1L)); + Operand values = weights == null ? OnesLike.create(lScope, predictions) : weights; + /// Operand zeroMatrix = Zeros.create(lScope, Cast.create(lScope, shape, TInt32.class), + // type); + + return ScatterNd.create(lScope, indices, values, shape); + } + + /** + * Squeeze last dim if ranks differ from expected by exactly 1. + * + * @param scope The TensorFlow scope + * @param labels Label values, a {@code Operand} whose dimensions match {@code predictions }. + * @param predictions Predicted values, a {@code Tensor} of arbitrary dimensions. + * @param expectedRankDiff Expected result of {@code rank(predictions) - rank(labels)}. + * @param the data type for the labels, predictions and result + * @return {@code labels} and {@code predictions}, possibly with last dim squeezed. + */ + private static LossTuple removeSqueezableDimensions( + Scope scope, Operand labels, Operand predictions, int expectedRankDiff) { + Scope lScope = scope.withSubScope("removeSqueezableDimensions"); + Shape predictionsShape = predictions.shape(); + int predictionsRank = predictionsShape.numDimensions(); + Shape labelsShape = labels.shape(); + int labelsRank = labelsShape.numDimensions(); + + if (predictionsRank != Shape.UNKNOWN_SIZE || labelsRank != Shape.UNKNOWN_SIZE) { + // Use rank. + int rankDiff = predictionsRank - labelsRank; + if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.size(-1), 1)) { + predictions = Squeeze.create(lScope, predictions); + } else if (rankDiff == expectedRankDiff - 1 && Shape.isCompatible(labelsShape.size(-1), 1)) { + labels = Squeeze.create(lScope, labels); + } + return new LossTuple<>(labels, predictions); + } + // Use dynamic rank. 
+ + // TODO: hold for lazy select feature, + // Operand rankDiff = tf.math.sub(tf.rank(predictions), tf.rank(labels)); + if (predictionsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(predictionsShape.size(-1), 1)) { + /* + * TODO, if we ever get a select that does lazy evaluation, but for now do the tf.squeeze + * predictions = tf.select( tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), + * tf.squeeze(predictions, Squeeze.axis(Arrays.asList(-1L))), predictions ); * + */ + predictions = + Squeeze.create(lScope, predictions, Squeeze.axis(Collections.singletonList(-1L))); + } + if (labelsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(labelsShape.size(-1), 1)) { + /* + * TODO, if we ever get a select that does lazy evaluation labels = tf.select( + * tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), tf.squeeze(labels, + * Squeeze.axis(Arrays.asList(-1L))), predictions ); * + */ + labels = Squeeze.create(lScope, labels, Squeeze.axis(Collections.singletonList(-1L))); + } + return new LossTuple<>(labels, predictions); + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/L2Normalize.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/L2Normalize.java new file mode 100644 index 00000000000..f8cdfe29026 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/L2Normalize.java @@ -0,0 +1,54 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op.math; + +import org.tensorflow.Operand; +import org.tensorflow.op.Scope; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Maximum; +import org.tensorflow.op.math.Mul; +import org.tensorflow.op.math.Rsqrt; +import org.tensorflow.op.math.Square; +import org.tensorflow.types.family.TNumber; + +/** L2 Normalization Operations */ +public class L2Normalize { + + /** + * Normalizes along dimension axis using an L2 norm. + * + * @param scope The TensorFlow scope + * @param x the input + * @param axis Dimension along which to normalize. 
+ * @param the data type for the input and the result + * @return the normalized values based on L2 norm + */ + public static Operand l2Normalize(Scope scope, Operand x, int[] axis) { + Operand squareSum = + ReduceSum.create( + scope, + Square.create(scope, x), + Constant.vectorOf(scope, axis), + ReduceSum.keepDims(Boolean.TRUE)); + Operand invNorm = + Rsqrt.create( + scope, + Maximum.create( + scope, squareSum, Cast.create(scope, Constant.scalarOf(scope, 1e-12F), x.type()))); + return Mul.create(scope, x, invNorm); + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/ReduceLogSumExp.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/ReduceLogSumExp.java new file mode 100644 index 00000000000..83660befe3a --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/ReduceLogSumExp.java @@ -0,0 +1,171 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op.math; + +import org.tensorflow.Operand; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Scope; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Range; +import org.tensorflow.op.core.Rank; +import org.tensorflow.op.core.ReduceMax; +import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.core.Reshape; +import org.tensorflow.op.core.Select; +import org.tensorflow.op.core.StopGradient; +import org.tensorflow.op.core.ZerosLike; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.Exp; +import org.tensorflow.op.math.IsFinite; +import org.tensorflow.op.math.Log; +import org.tensorflow.op.math.Sub; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TFloating; + +/** Reduce Log Sum Exp Operations */ +public class ReduceLogSumExp { + + /** + * Computes log(sum(exp(elements across dimensions of a tensor))). Reduces {@code input_tensor} + * along the dimensions given in {@code axes}. + * + *

      Reduces {@code input} along the dimensions given in {@code axes}. Unless {@code keepdims} is + * true, the rank of the tensor is reduced by 1 for each of the entries in {@code axes}, which + * must be unique. If {@code keepdims} is true, the reduced dimensions are retained with length 1. + * If {@code axes} has no entries, all dimensions are reduced, and a tensor with a single element + * is returned. This function is more numerically stable than {@code log(sum(exp(input)))}. It + * avoids overflows caused by taking the exp of large inputs and underflows caused by taking the + * log of small inputs. + * + * @param scope The TensorFlow scope + * @param input The tensor to reduce. + * @param axes The dimensions to reduce. If null, reduces all dimensions. Must be in the range + * {@code [-rank(input_tensor), rank(input_tensor)]}. + * @param keepDims If true, retains reduced dimensions with length 1. + * @param the data type for the input and the result + * @return The reduced tensor. + */ + public static Operand reduceLogSumExp( + Scope scope, Operand input, int[] axes, boolean keepDims) { + Operand reduceDims = reductionDims(scope, input, axes); + Operand rawMax = reduceMaxWithDims(scope, input, axes, keepDims, reduceDims); + Operand myMax = + StopGradient.create( + scope, + Select.create( + scope, IsFinite.create(scope, rawMax), rawMax, ZerosLike.create(scope, rawMax))); + + Operand result = + Log.create( + scope, + reduceSumWithDims( + scope, + Exp.create(scope, Sub.create(scope, input, myMax)), + axes, + keepDims, + reduceDims)); + + if (!keepDims) { + myMax = Reshape.create(scope, myMax, org.tensorflow.op.core.Shape.create(scope, result)); + } + result = Add.create(scope, result, myMax); + return mayReduceToScalar(scope, keepDims, axes, result); + } + + /** + * @param scope The TensorFlow scope + * @param input The tensor to reduce. + * @param axes The dimensions to reduce. If null, reduces all dimensions. Must be in the range + * {@code [-rank(input_tensor), rank(input_tensor)]}. + * @param keepDims If true, retains reduced dimensions with length 1. + * @param dims the reduction dimensions + * @param the data type for the input and the result + * @return the reduced sum + */ + private static Operand reduceSumWithDims( + Scope scope, Operand input, int[] axes, boolean keepDims, Operand dims) { + return mayReduceToScalar( + scope, keepDims, axes, ReduceSum.create(scope, input, dims, ReduceSum.keepDims(keepDims))); + } + + /** + * @param scope The TensorFlow scope + * @param input The tensor to reduce. + * @param axes The dimensions to reduce. If null, reduces all dimensions. Must be in the range + * {@code [-rank(input_tensor), rank(input_tensor)]}. + * @param keepDims If true, retains reduced dimensions with length 1. + * @param dims the reduction dimensions + * @param the data type for the input and the result + * @return the reduced maximum input value + */ + private static Operand reduceMaxWithDims( + Scope scope, Operand input, int[] axes, boolean keepDims, Operand dims) { + return mayReduceToScalar( + scope, keepDims, axes, ReduceMax.create(scope, input, dims, ReduceMax.keepDims(keepDims))); + } + + /** + * Sets a reduction's output shape to be a scalar if possible. + * + * @param scope The TensorFlow scope + * @param keepDims If true, retains reduced dimensions with length 1. + * @param axes The dimensions to reduce. If null, reduces all dimensions. Must be in the range + * {@code [-rank(input_tensor), rank(input_tensor)]}. 
+ * @param output the output, possibly reduced to a scalar + * @param the datat type of the Operands. + * @return the operand, possibly reduced to a scalar. + */ + private static Operand mayReduceToScalar( + Scope scope, boolean keepDims, int[] axes, Operand output) { + + if ((output.shape().numDimensions() == Shape.UNKNOWN_SIZE + || output.shape().hasUnknownDimension()) + && !keepDims + && axes == null) { + return Reshape.create(scope, output, Constant.tensorOf(scope, Shape.scalar())); + } else { + return output; + } + } + + /** + * Reduce dimensions based on axis + * + * @param input the input + * @param axes he dimensions to reduce, may be null + * @return the dimensions to be reduced. + */ + private static Operand reductionDims( + Scope scope, Operand input, int[] axes) { + if (axes != null) { + return Constant.vectorOf(scope, axes); + } + long rank = input.shape().numDimensions(); + if (rank != Shape.UNKNOWN_SIZE) { + int[] dims = new int[(int) rank]; + for (int i = 0; i < rank; i++) { + dims[i] = i; + } + return Constant.vectorOf(scope, dims); + + } else { + return Range.create( + scope, + Constant.scalarOf(scope, 0), + Rank.create(scope, input), + Constant.scalarOf(scope, 1)); + } + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/TensorDot.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/TensorDot.java new file mode 100644 index 00000000000..a222f64679e --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/math/TensorDot.java @@ -0,0 +1,663 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/ +package org.tensorflow.framework.op.math; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import org.tensorflow.Graph; +import org.tensorflow.Operand; +import org.tensorflow.Session; +import org.tensorflow.framework.op.linalg.MatMul; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Scope; +import org.tensorflow.op.core.AssertThat; +import org.tensorflow.op.core.Concat; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.core.Gather; +import org.tensorflow.op.core.Range; +import org.tensorflow.op.core.Rank; +import org.tensorflow.op.core.ReduceProd; +import org.tensorflow.op.core.Reshape; +import org.tensorflow.op.core.Select; +import org.tensorflow.op.core.SetDiff1d; +import org.tensorflow.op.core.Slice; +import org.tensorflow.op.core.Stack; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.linalg.Transpose; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.GreaterEqual; +import org.tensorflow.op.math.Less; +import org.tensorflow.op.math.Sub; +import org.tensorflow.types.TBfloat16; +import org.tensorflow.types.TFloat16; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; + +/** tensor contraction Operations */ +public class TensorDot { + + /** + * Transpose and reshape the input for contraction op. + * + *

      This method is helpful in reducing {@code math.tensordot} to {@code math_ops.matmul} using + * {@code array_ops.transpose} and {@code array_ops.reshape}. The method takes a tensor and + * performs the correct transpose and reshape operation for a given set of indices. It returns the + * reshaped tensor as well as a list of indices necessary to reshape the tensor again after matrix + * multiplication. + * + * @param the type of Operand + * @param a the Tensor + * @param axis unique indices specifying valid axes of {@code a}. + * @param flipped whether to flip the dimensions or not + * @return A tuple (reshapedA, freeDims, freeDimsStatic) where reshapedA is a reshaped to allow + * contraction via matmul, freeDims is a TInt32 Operand, depending on whether the shape of a + * is fully specified, and freeDimsStatic is either a list of integers and null values, or + * None, representing the inferred shape of the free dimensions + */ + private static Object[] tensordotReshape( + Scope scope, Operand a, Operand axis, boolean flipped) { + Shape aShape = a.shape(); + + if (!aShape.hasUnknownDimension()) { // calculate using values + long[] aShapeDims = aShape.asArray(); + if (aShapeDims == null) aShapeDims = new long[0]; + long[] aDimsIndex = new long[aShapeDims.length]; + for (int i = 0; i < aDimsIndex.length; i++) aDimsIndex[i] = i; + + // get int array from axis Operand + int[] iAxes = getIntArray(scope, axis); + // Convert negative axes to positive + for (int i = 0; i < iAxes.length; i++) + iAxes[i] = iAxes[i] >= 0 ? iAxes[i] : Math.floorMod(iAxes[i], iAxes.length); + + // convert integer axis to long axis + long[] lAxes = Arrays.stream(iAxes).mapToLong(i -> i).toArray(); + + // create list of the axes, dims, and free axes + List axesList = Arrays.stream(lAxes).boxed().collect(Collectors.toList()); + List freeList = Arrays.stream(aDimsIndex).boxed().collect(Collectors.toList()); + freeList.removeAll(axesList); + + // create array of free dims + long[] free = freeList.stream().mapToLong(i -> i).toArray(); + long[] freeDims = new long[free.length]; + for (int i = 0; i < free.length; i++) freeDims[i] = aShapeDims[(int) free[i]]; + + // Calculate the free dim by doing a reduce prod + long prodFree = 1; + for (long i : freeDims) { + prodFree *= i; + } + + // calculate the used dims by doing a reduce prod + long prodAxis = 1; + for (long i : lAxes) { + prodAxis *= aShapeDims[(int) i]; + } + + // setup the permutations array for the transpose + long[] perm = new long[freeDims.length + lAxes.length]; + Shape newShape; + if (flipped) { + System.arraycopy(lAxes, 0, perm, 0, lAxes.length); + System.arraycopy(free, 0, perm, lAxes.length, free.length); + newShape = Shape.of(prodAxis, prodFree); + } else { + System.arraycopy(free, 0, perm, 0, free.length); + System.arraycopy(lAxes, 0, perm, freeDims.length, lAxes.length); + newShape = Shape.of(prodFree, prodAxis); + } + + Operand aTrans; + long[] arrange = new long[lAxes.length]; + for (int i = 0; i < arrange.length; i++) arrange[i] = i; + + // if the permutations is not equals to the natural order of the dims, then do a transpose + if (!Arrays.equals(perm, arrange)) { + aTrans = Transpose.create(scope, a, Constant.vectorOf(scope, perm)); + } else { + aTrans = a; + } + + // reshape the final result to the new Shape, if necessary + Operand aReshaped = + aTrans.asOutput().shape().equals(newShape) + ? 
aTrans + : Reshape.create(scope, aTrans, Constant.vectorOf(scope, newShape.asArray())); + // return a tuple for the reshaped Operand, and Operand for the free dimensions, and a long + // array for the free dimensions + return new Object[] {aReshaped, Constant.vectorOf(scope, freeDims), freeDims}; + + } else { // calculate dynamically + + long[] freeDimsStatic = null; + Operand one = Constant.scalarOf(scope, 1); + Operand minusOne = Constant.scalarOf(scope, -1); + Operand zero = Constant.scalarOf(scope, 0); + org.tensorflow.op.core.Shape tShape = org.tensorflow.op.core.Shape.create(scope, a); + Operand axesT; + Operand freeT; + if (aShape.numDimensions() + != Shape.UNKNOWN_SIZE) { // we know the rank, but there are unknown dimensions + long[] aShapeDims = aShape.asArray(); + if (aShapeDims == null) aShapeDims = new long[0]; + + // get int array from axis Operand + int[] iAxes = getIntArray(scope, axis); + // Convert negative axes to positive + for (int i = 0; i < iAxes.length; i++) + iAxes[i] = iAxes[i] >= 0 ? iAxes[i] : Math.floorMod(iAxes[i], iAxes.length); + + // convert integer axis to long axis + long[] lAxes = Arrays.stream(iAxes).mapToLong(i -> i).toArray(); + + // create list of the axes, dims, and free axes + List axesList = Arrays.stream(lAxes).boxed().collect(Collectors.toList()); + List dimsList = Arrays.stream(aShapeDims).boxed().collect(Collectors.toList()); + List freeList = new ArrayList<>(axesList); + freeList.removeAll(dimsList); + + // create array of free dims + long[] freeDims = freeList.stream().mapToLong(i -> i).toArray(); + freeDimsStatic = freeDims; + + axesT = Constant.vectorOf(scope, iAxes); + freeT = Cast.create(scope, Constant.vectorOf(scope, freeDims), TInt32.class); + + } else { // we don't know the rank yet + Rank rank = Rank.create(scope, a); + + // convert axis to positive + axesT = + Select.create( + scope, + GreaterEqual.create(scope, axis, Constant.scalarOf(scope, 0)), + axis, + Add.create(scope, axis, rank)); + + SetDiff1d diff = + SetDiff1d.create( + scope, Range.create(scope, Constant.scalarOf(scope, 0), rank, one), axesT); + freeT = diff.out(); + } + Operand freeDims = Gather.create(scope, tShape, freeT, zero); + Operand axesDims = Gather.create(scope, tShape, axesT, zero); + Operand prodFreeDims = ReduceProd.create(scope, freeDims, minusOne); + Operand prodAxesDims = ReduceProd.create(scope, axesDims, minusOne); + Operand perm; + Operand newShape; + if (flipped) { + perm = Concat.create(scope, Arrays.asList(axesT, freeT), zero); + newShape = Stack.create(scope, Arrays.asList(prodAxesDims, prodFreeDims)); + } else { + perm = Concat.create(scope, Arrays.asList(freeT, axesT), zero); + newShape = Stack.create(scope, Arrays.asList(prodFreeDims, prodAxesDims)); + } + Operand aReshaped = Reshape.create(scope, Transpose.create(scope, a, perm), newShape); + return new Object[] {aReshaped, freeDims, freeDimsStatic}; + } + } + + /** + * Gets an int array from an Operand<TInt32> operand. 
+ * + * @param axes the Operand to fetch the values + * @return the int array from an Operand<TInt32> + */ + private static int[] getIntArray(Scope scope, Operand axes) { + List result = new ArrayList<>(); + if (scope.env().isEager()) { + axes.asTensor().scalars().forEach(s -> result.add(s.getInt())); + } else { + try (Session session = new Session((Graph) scope.env()); + TInt32 tensor = (TInt32) session.runner().fetch(axes).run().get(0)) { + tensor.scalars().forEach(s -> result.add(s.getInt())); + } + } + return result.stream().mapToInt(i -> i).toArray(); + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. + * + * @param a the Operand to analyze + * @param axis the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings("unchecked") + private static Operand[] tensordotAxes( + Scope scope, Operand a, int axis) { + Shape aShape = a.asOutput().shape(); + if (axis < 0) { + throw new IllegalArgumentException("'axis' must be at least 0."); + } + int rank = aShape.numDimensions(); + Operand[] result = new Operand[2]; + if (rank != Shape.UNKNOWN_SIZE) { + if (axis > rank) { + throw new IllegalArgumentException( + String.format( + "'axis' must not be larger than the number of dimensions of tensor %s.", rank)); + } + int min = rank - axis; + int postRange = rank - min; + int[] postAxis = new int[postRange]; + for (int i = 0; i < postRange; i++) postAxis[i] = i + min; + + int[] preAxis = new int[axis]; + for (int i = 0; i < axis; i++) preAxis[i] = i; + + result[0] = Constant.vectorOf(scope, postAxis); + result[1] = Constant.vectorOf(scope, preAxis); + } else { + Rank rankT = Rank.create(scope, a); + Constant axisT = Constant.scalarOf(scope, axis); + Constant one = Constant.scalarOf(scope, 1); + Constant zero = Constant.scalarOf(scope, 0); + AssertThat assertion = + AssertThat.create( + scope, + Less.create(scope, axisT, rankT), + Arrays.asList( + Constant.scalarOf( + scope, "'axes' must not be larger than the number of dimensions of tensor "), + rankT)); + Scope scope1 = scope.withControlDependencies(Collections.singletonList(assertion)); + result[0] = Range.create(scope1, Sub.create(scope, rankT, axisT), rankT, one); + result[1] = Range.create(scope1, zero, axisT, one); + } + return result; + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. + * + * @param a the Operand to analyze + * @param axes the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings({"unchecked", "unused"}) + private static Operand[] tensordotAxes( + Scope scope, Operand a, int[] axes) { + if (axes.length != 2) + throw new IllegalArgumentException( + "'axes' must have length 1 or 2, provided with " + axes.length); + int[] aAxis = new int[] {axes[0]}; + int[] bAxis = new int[] {axes[1]}; + Operand[] result = new Operand[2]; + result[0] = Constant.vectorOf(scope, aAxis); + result[1] = Constant.vectorOf(scope, bAxis); + + return result; + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. 
+ * + * @param a the Operand to analyze + * @param axes the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings({"unchecked", "unused"}) + private static Operand[] tensordotAxes( + Scope scope, Operand a, int[][] axes) { + if (axes.length != 2) + throw new IllegalArgumentException( + "'axes' must have length 1 or 2, provided with " + axes.length); + int[] aAxis = axes[0]; + int[] bAxis = axes[1]; + if (aAxis.length != bAxis.length) + throw new IllegalArgumentException( + String.format( + "Different number of contraction axes 'a' and 'b', %d != %d", + aAxis.length, bAxis.length)); + Operand[] result = new Operand[2]; + result[0] = Constant.vectorOf(scope, aAxis); + result[1] = Constant.vectorOf(scope, bAxis); + return result; + } + + /** + * Generates two sets of contraction axes for the two tensor arguments. + * + * @param a the Operand to analyze + * @param axes the axes + * @param the data type for the Operand + * @return the contraction axes + */ + @SuppressWarnings({"unchecked", "unused"}) + private static Operand[] tensordotAxes( + Scope scope, Operand a, Operand axes) { + + Constant one = Constant.scalarOf(scope, 1); + Constant zero = Constant.scalarOf(scope, 0); + Operand[] result = new Operand[2]; + result[0] = + Slice.create( + scope, + axes, + Cast.create(scope, zero, TInt32.class), + Cast.create(scope, one, TInt32.class)); + result[1] = + Slice.create( + scope, + axes, + Cast.create(scope, one, TInt32.class), + Cast.create(scope, one, TInt32.class)); + return result; + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + * + *

      Tensordot (also known as tensor contraction) sums the product of elements from {@code a} and + * {@code b} over the indices specified by {@code a_axes} and {@code b_axes}. The lists {@code + * a_axes} and {@code b_axes} specify those pairs of axes along which to contract the tensors. The + * axis {@code a_axes[i]} of {@code a} must have the same dimension as axis {@code b_axes[i]} of + * {@code b} for all {@code i} in {@code range(0, len(a_axes))}. The lists {@code a_axes} and + * {@code b_axes} must have identical length and consist of unique integers that specify valid + * axes for each of the tensors. Additionally outer product is supported by passing {@code + * axes=0}. + * + *

      This operation corresponds to {@code numpy.tensordot(a, b, axes)}. + * + *

      Example 1: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = 1} is + * equivalent to matrix multiplication. + * + *

      Example 2: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = [[1], + * [0]]} is equivalent to matrix multiplication. + * + *

      Example 3: When {@code a} and {@code b} are matrices (order 2), the case {@code axes=0} + * gives the outer product, a tensor of order 4. + * + *

      Example 4: Suppose that aijk and blmn represent two + * tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor + * cjklm whose entry corresponding to the indices (j,k,l,m) is given by: + * cjklm = Σi aijk blmi . + * + *

      In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. + * @param axis sum over the last N axes of a and the first N axes of b in order. If {@code + * axis=0}, computes the outer product between {@code a} and {@code b}. + * @param the datatype of the Operands, must be either TFloat32 or TFloat64 + * @return A {@code Operand} with the same type as {@code a}. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are + * not the same data type + */ + public static Operand tensordot( + Scope scope, Operand a, Operand b, int axis) { + + Operand[] abAxis = tensordotAxes(scope, a, axis); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + return tensordot(scope, a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + * + *

      Tensordot (also known as tensor contraction) sums the product of elements from {@code a} and + * {@code b} over the indices specified by {@code a_axes} and {@code b_axes}. The lists {@code + * a_axes} and {@code b_axes} specify those pairs of axes along which to contract the tensors. The + * axis {@code a_axes[i]} of {@code a} must have the same dimension as axis {@code b_axes[i]} of + * {@code b} for all {@code i} in {@code range(0, len(a_axes))}. The lists {@code a_axes} and + * {@code b_axes} must have identical length and consist of unique integers that specify valid + * axes for each of the tensors. Additionally outer product is supported by passing {@code + * axes=0}. + * + *

      This operation corresponds to {@code numpy.tensordot(a, b, axes)}. + * + *

      Example 1: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = 1} is + * equivalent to matrix multiplication. + * + *

      Example 2: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = [[1], + * [0]]} is equivalent to matrix multiplication. + * + *

      Example 3: When {@code a} and {@code b} are matrices (order 2), the case {@code axes=0} + * gives the outer product, a tensor of order 4. + * + *

      Example 4: Suppose that aijk and blmn represent two + * tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor + * cjklm whose entry corresponding to the indices (j,k,l,m) is given by: + * + *

      cjklm = Σi aijk blmi . + * + *

      In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * + *

      + * + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. + * @param axes If axes is a scalar, sum over the last N axes of a and the first N axes of b in + * order. If axes is a list, the first and second row contain the set of unique integers + * specifying axes along which the contraction is computed, for {@code a} and {@code b}, + * respectively. The number of axes for {@code a} and {@code b} must be equal. If {@code + * axis=0}, computes the outer product between {@code a} and {@code b}. + * @param the datatype of the Operands, must be either TFloat32 or TFloat64 + * @return A {@code Operand} with the same type as {@code a}. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are + * not the same data type + */ + public static Operand tensordot( + Scope scope, Operand a, Operand b, Operand axes) { + + Operand[] abAxis = tensordotAxes(scope, a, axes); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + + return tensordot(scope, a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + * + *

      Tensordot (also known as tensor contraction) sums the product of elements from {@code a} and + * {@code b} over the indices specified by {@code a_axes} and {@code b_axes}. The lists {@code + * a_axes} and {@code b_axes} specify those pairs of axes along which to contract the tensors. The + * axis {@code a_axes[i]} of {@code a} must have the same dimension as axis {@code b_axes[i]} of + * {@code b} for all {@code i} in {@code range(0, len(a_axes))}. The lists {@code a_axes} and + * {@code b_axes} must have identical length and consist of unique integers that specify valid + * axes for each of the tensors. Additionally outer product is supported by passing {@code + * axes=0}. + * + *

      This operation corresponds to {@code numpy.tensordot(a, b, axes)}. + * + *

      Example 1: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = 1} is + * equivalent to matrix multiplication. + * + *

      Example 2: When {@code a} and{@code b} are matrices (order 2), the case {@code axes = [[1], + * [0]]} is equivalent to matrix multiplication. + * + *

      Example 3: When {@code a} and {@code b} are matrices (order 2), the case {@code axes=0} + * gives the outer product, a tensor of order 4. + * + *

      Example 4: Suppose that aijk and blmn represent two + * tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor + * cjklm whose entry corresponding to the indices (j,k,l,m) is given by: + * + *

      cjklm = Σi aijk blmi . + * + *

      In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * + *

      + * + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. + * @param axes the first and second row contain the set of unique integers specifying axes along + * which the contraction is computed, for {@code a} and {@code b}, respectively. The number of + * axes for {@code a} and {@code b} must be equal. I + * @param the datatype of the Operands, must be either TFloat32 or TFloat64 + * @return A {@code Operand} with the same type as {@code a}. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are + * not the same data type + */ + public static Operand tensordot( + Scope scope, Operand a, Operand b, int[] axes) { + + Operand[] abAxis = tensordotAxes(scope, a, axes); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + + return tensordot(scope, a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + * + *

      Tensordot (also known as tensor contraction) sums the product of elements from {@code a} and + * {@code b} over the indices specified by {@code a_axes} and {@code b_axes}. The lists {@code + * a_axes} and {@code b_axes} specify those pairs of axes along which to contract the tensors. The + * axis {@code a_axes[i]} of {@code a} must have the same dimension as axis {@code b_axes[i]} of + * {@code b} for all {@code i} in {@code range(0, len(a_axes))}. The lists {@code a_axes} and + * {@code b_axes} must have identical length and consist of unique integers that specify valid + * axes for each of the tensors. Additionally outer product is supported by passing {@code + * axes=0}. + * + *

      This operation corresponds to {@code numpy.tensordot(a, b, axes)}. + * + *

      Example 1: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = 1} is + * equivalent to matrix multiplication. + * + *

      Example 2: When {@code a} and{@code b} are matrices (order 2), the case {@code axes = [[1], + * [0]]} is equivalent to matrix multiplication. + * + *

      Example 3: When {@code a} and {@code b} are matrices (order 2), the case {@code axes=0} + * gives the outer product, a tensor of order 4. + * + *

      Example 4: Suppose that aijk and blmn represent two + * tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor + * cjklm whose entry corresponding to the indices (j,k,l,m) is given by: + * + *

      cjklm = Σi aijk blmi . + * + *

      In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * + *

      + * + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. + * @param axes the first and second row contain the set of unique integers specifying axes along + * which the contraction is computed, for {@code a} and {@code b}, respectively. The number of + * axes for {@code a} and {@code b} must be equal. I + * @param the datatype of the Operands, must be either TFloat32 or TFloat64 + * @return A {@code Operand} with the same type as {@code a}. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are + * not the same data type + */ + public static Operand tensordot( + Scope scope, Operand a, Operand b, int[][] axes) { + + Operand[] abAxis = tensordotAxes(scope, a, axes); + Operand aAxis = abAxis[0]; + Operand bAxis = abAxis[1]; + + return tensordot(scope, a, b, aAxis, bAxis); + } + + /** + * Tensor contraction of a and b along specified axes and outer product. + * + *

      Tensordot (also known as tensor contraction) sums the product of elements from {@code a} and + * {@code b} over the indices specified by {@code a_axes} and {@code b_axes}. The lists {@code + * a_axes} and {@code b_axes} specify those pairs of axes along which to contract the tensors. The + * axis {@code a_axes[i]} of {@code a} must have the same dimension as axis {@code b_axes[i]} of + * {@code b} for all {@code i} in {@code range(0, len(a_axes))}. The lists {@code a_axes} and + * {@code b_axes} must have identical length and consist of unique integers that specify valid + * axes for each of the tensors. Additionally outer product is supported by passing {@code + * axes=0}. + * + *

      This operation corresponds to {@code numpy.tensordot(a, b, axes)}. + * + *

      Example 1: When {@code a} and {@code b} are matrices (order 2), the case {@code axes = 1} is + * equivalent to matrix multiplication. + * + *

      Example 2: When {@code a} and{@code b} are matrices (order 2), the case {@code axes = [[1], + * [0]]} is equivalent to matrix multiplication. + * + *

      Example 3: When {@code a} and {@code b} are matrices (order 2), the case {@code axes=0} + * gives the outer product, a tensor of order 4. + * + *

      Example 4: Suppose that aijk and blmn represent two + * tensors of order 3. Then, {@code contract(a, b, [[0], [2]])} is the order 4 tensor + * cjklm whose entry corresponding to the indices (j,k,l,m) is given by: + * + *

      cjklm = Σi aijk blmi . + * + *

      In general, {@code order(c) = order(a) + order(b) - 2*len(axes[0])}. + * + *

      + * + * @param a {@code Operand} of type {@code TFloat32} or {@code TFloat64}. + * @param b {@code Operand} with the same type as {@code a}. + * @param aAxis axes for the a Operand + * @param bAxis axes for the b Operand + * @param the datatype of the Operands, must be either TFloat32 or TFloat64 + * @return A {@code Operand} with the same type as {@code a}. + * @throws IllegalArgumentException if a is not a float32 or float64 data type and if a and b are + * not the same data type + */ + @SuppressWarnings({"unchecked", "unused"}) + public static Operand tensordot( + Scope scope, Operand a, Operand b, Operand aAxis, Operand bAxis) { + + if (a.type().equals(TBfloat16.class) || a.type().equals(TFloat16.class)) { + throw new IllegalArgumentException( + String.format( + "Operand 'a' must be either TFloat32 or TFloat64 DataType, 'a' is a %s DataType", + a.type().getSimpleName())); + } + if (!a.type().equals(b.type())) { + throw new IllegalArgumentException( + String.format( + "Operands a and b must be the same data type, a is %s DataType, b is %s DataType", + a.type().getSimpleName(), b.type().getSimpleName())); + } + + // first result is Operand, second result is Operand, third result is long[] and it is + // ignored here. + Object[] aResult = tensordotReshape(scope, a, aAxis, false); + Operand reshapedA = (Operand) aResult[0]; + Operand aFreeDims = (Operand) aResult[1]; + long[] aFreeDimsStatic = (long[]) aResult[2]; + + // first result is Operand, second result is Operand, third result is long[] and it is + // ignored here. + Object[] bResult = tensordotReshape(scope, b, bAxis, true); + Operand reshapedB = (Operand) bResult[0]; + Operand bFreeDims = (Operand) bResult[1]; + long[] bFreeDimsStatic = (long[]) bResult[2]; + + Operand abMatmul = MatMul.matmul(scope, reshapedA, reshapedB); + long[] abDimsStatic = new long[aFreeDimsStatic.length + bFreeDimsStatic.length]; + System.arraycopy(aFreeDimsStatic, 0, abDimsStatic, 0, aFreeDimsStatic.length); + System.arraycopy( + bFreeDimsStatic, 0, abDimsStatic, aFreeDimsStatic.length, bFreeDimsStatic.length); + if (!abMatmul.shape().hasUnknownDimension() + && abMatmul.shape().equals(Shape.of(abDimsStatic))) { + return abMatmul; + } else { + return Reshape.create(scope, abMatmul, Constant.vectorOf(scope, abDimsStatic)); + } + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index fc3f7739363..b1e2ce6c928 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -16,7 +16,6 @@ import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; -// @Operator(group = "nn") public class SigmoidCrossEntropyWithLogits { /** @@ -26,8 +25,7 @@ public class SigmoidCrossEntropyWithLogits { * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. * - *

      For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in - * pseudo-code is + *

      For brevity, let {@code x = logits}, {@code z = labels}. The logistic loss in pseudo-code is * *

          * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
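For reference, a minimal scalar sketch of this pseudo-code in plain Java (illustrative only; the actual op works element-wise on Operands rather than doubles):

    // z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)), for a single element
    static double sigmoidCrossEntropy(double x, double z) {
      double p = 1.0 / (1.0 + Math.exp(-x)); // sigmoid(x)
      return z * -Math.log(p) + (1 - z) * -Math.log(1 - p);
    }
    // e.g. sigmoidCrossEntropy(0.0, 1.0) == Math.log(2) ~= 0.6931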
      @@ -64,7 +62,7 @@ public class SigmoidCrossEntropyWithLogits {
          * @return the component-wise logistic losses.
   * @throws IllegalArgumentException if logits and labels do not have the same shape
          */
      -  // @Endpoint(name = "sigmoidCrossEntropyWithLogits")
+  // @Endpoint(name = "sigmoidCrossEntropyWithLogits")
  public static <T extends TNumber> Operand<T> sigmoidCrossEntropyWithLogits(
      Scope scope, Operand<T> labels, Operand<T> logits) {
           if (!isCompatible(labels.shape(), logits.shape())) {
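A minimal usage sketch of the relocated framework op (assuming an eager environment from Ops.create(); the variable names and sample values are illustrative and not part of this patch):

    import org.tensorflow.Operand;
    import org.tensorflow.framework.op.nn.SigmoidCrossEntropyWithLogits;
    import org.tensorflow.op.Ops;
    import org.tensorflow.types.TFloat32;

    Ops tf = Ops.create();
    Operand<TFloat32> labels = tf.constant(new float[] {1f, 0f, 1f});
    Operand<TFloat32> logits = tf.constant(new float[] {2.0f, -1.5f, 0.3f});
    // component-wise logistic losses, same shape as labels and logits
    Operand<TFloat32> losses =
        SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(tf.scope(), labels, logits);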
      diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java
      index 7d59941f27a..a95110c9a96 100644
      --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java
      +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java
      @@ -1,5 +1,7 @@
       package org.tensorflow.framework.op.nn;
       
      +import java.util.Arrays;
      +import java.util.List;
       import org.tensorflow.Operand;
       import org.tensorflow.ndarray.Shape;
       import org.tensorflow.op.Scope;
      @@ -19,10 +21,6 @@
       import org.tensorflow.types.TInt64;
       import org.tensorflow.types.family.TNumber;
       
      -import java.util.Arrays;
      -import java.util.List;
      -
      -// @Operator(group = "nn")
       public class SoftmaxCrossEntropyWithLogits {
       
         /**
      diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java
      index 0b2d29d6092..5299efcce22 100644
      --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java
      +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java
      @@ -1,10 +1,12 @@
       package org.tensorflow.framework.op.nn;
       
      +import java.util.ArrayList;
      +import java.util.Collections;
      +import java.util.List;
       import org.tensorflow.Operand;
       import org.tensorflow.ndarray.Shape;
       import org.tensorflow.op.Op;
       import org.tensorflow.op.Scope;
      -import org.tensorflow.op.annotation.Endpoint;
       import org.tensorflow.op.core.AssertThat;
       import org.tensorflow.op.core.Constant;
       import org.tensorflow.op.core.Reshape;
      @@ -17,11 +19,6 @@
       import org.tensorflow.types.TInt32;
       import org.tensorflow.types.family.TNumber;
       
      -import java.util.ArrayList;
      -import java.util.Collections;
      -import java.util.List;
      -
      -// @Operator(group = "nn")
       public class SparseSoftmaxCrossEntropyWithLogits {
       
         /**
      @@ -34,42 +31,39 @@ public class SparseSoftmaxCrossEntropyWithLogits {
    *
    * <p>NOTE:
    *
    * <p>For this operation, the probability of a given label is considered exclusive. That is, soft
-   * classes are not allowed, and the {@code labels} vector must provide a single specific
-   * index for the true class for each row of {@code logits} (each minibatch entry). For soft
-   * softmax classification with a probability distribution for each entry, {@link
+   * classes are not allowed, and the {@code labels} vector must provide a single specific index for
+   * the true class for each row of {@code logits} (each minibatch entry). For soft softmax
+   * classification with a probability distribution for each entry, {@link
    * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits}.
    *
    * <p>WARNING:
    *
-   * <p>This op expects unscaled logits, since it performs a {@code softmax} on {@code logits
-   * } internally for efficiency. Do not call this op with the output of {@code softmax},
-   * as it will produce incorrect results.
+   * <p>This op expects unscaled logits, since it performs a {@code softmax} on {@code logits }
+   * internally for efficiency. Do not call this op with the output of {@code softmax}, as it will
+   * produce incorrect results.
    *
-   * <p>A common use case is to have logits of shape {@code [batchSize, numClasses]} and have
-   * labels of shape {@code [batchSize]}, but higher dimensions are supported, in which case
-   * the {@code dim}-th dimension is assumed to be of size {@code numClasses}. {@code
-   * logits} must have the {@code dataType} of {@code TFloat16}, {@code TFloat32}
-   * , or {@code TFloat64}, and {@code labels} must have the dtype of {@code TInt32}
-   * or {@code TInt64}.
+   * <p>A common use case is to have logits of shape {@code [batchSize, numClasses]} and have labels
+   * of shape {@code [batchSize]}, but higher dimensions are supported, in which case the {@code
+   * dim}-th dimension is assumed to be of size {@code numClasses}. {@code logits} must have the
+   * {@code dataType} of {@code TFloat16}, {@code TFloat32} , or {@code TFloat64}, and {@code
+   * labels} must have the dtype of {@code TInt32} or {@code TInt64}.
    *
    * @param scope current scope
-   * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r
-   * } is rank of {@code labels} and result) and the dataType is {@code TInt32}
-   * or {@code TInt64}. Each entry in {@code labels} must be an index in {@code [0,
-   * numClasses)}. Other values will raise an exception when this op is run on CPU, and
-   * return {@code NaN} for corresponding loss and gradient rows on GPU.
+   * @param labels {@code Tensor} of shape {@code [d_0, d_1, ..., d_{r-1}]} (where {@code r } is
+   *     rank of {@code labels} and result) and the dataType is {@code TInt32} or {@code TInt64}.
+   *     Each entry in {@code labels} must be an index in {@code [0, numClasses)}. Other values will
+   *     raise an exception when this op is run on CPU, and return {@code NaN} for corresponding
+   *     loss and gradient rows on GPU.
    * @param logits Per-label activations (typically a linear output) of shape {@code [d_0, d_1, ...,
-   *     d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32},
-   *     or {@code TFloat64}. These activation energies are interpreted as unnormalized log
-   *     probabilities.
+   *     d_{r-1}, numClasses]} and dataType of {@code TFloat16}, {@code TFloat32}, or {@code
+   *     TFloat64}. These activation energies are interpreted as unnormalized log probabilities.
    * @param <T> the data type for the labels
    * @param <U> the data type for the loss and logits.
    * @return the loss
-   * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if the rank
-   *     of the labels is not equal to the rank of the logits minus one.
+   * @throws IllegalArgumentException If logits are scalars (need to have {@code rank >= 1}) or if
+   *     the rank of the labels is not equal to the rank of the logits minus one.
    */
   @SuppressWarnings("unchecked")
-  @Endpoint(name = "sparseSoftmaxCrossEntropyWithLogits")
   public static <T extends TNumber, U extends TNumber> Operand sparseSoftmaxCrossEntropyWithLogits(
       Scope scope, Operand<T> labels, Operand<U> logits) {
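For orientation, a minimal usage sketch of the framework-level method above after this move. Only the static sparseSoftmaxCrossEntropyWithLogits(Scope, labels, logits) signature comes from the hunk; the Ops.create()/tf.scope() plumbing, the example shapes and values, and the wildcard result type are illustrative assumptions, not part of the patch.

import org.tensorflow.Operand;
import org.tensorflow.framework.op.nn.SparseSoftmaxCrossEntropyWithLogits;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

public class SparseXentSketch {
  public static void main(String[] args) {
    Ops tf = Ops.create(); // eager execution scope, assumed for illustration

    // labels: one class index per minibatch row, shape [batchSize]
    Operand<TInt32> labels = tf.constant(new int[] {0, 2, 1});
    // logits: unscaled activations, shape [batchSize, numClasses]
    Operand<TFloat32> logits =
        tf.constant(new float[][] {{1f, 0f, -1f}, {0f, 0f, 2f}, {3f, 1f, 0f}});

    // Per-row loss; the op applies softmax internally, so pass unscaled logits.
    Operand<?> loss =
        SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits(
            tf.scope(), labels, logits);
  }
}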
diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/sets/Sets.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/sets/Sets.java
new file mode 100644
index 00000000000..0cbfa74e770
--- /dev/null
+++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/sets/Sets.java
@@ -0,0 +1,148 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+package org.tensorflow.framework.op.sets;
+
+import org.tensorflow.Operand;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.SparseOps;
+import org.tensorflow.op.core.Constant;
+import org.tensorflow.op.dtypes.Cast;
+import org.tensorflow.op.sparse.DenseToDenseSetOperation;
+import org.tensorflow.op.sparse.SparseToDense;
+import org.tensorflow.types.family.TNumber;
+
+public class Sets {
+
+  /**
+   * Computes set difference of elements in last dimension of a and b with
+   * aMinusB set to true.
+   *
+   * <p>All but the last dimension of a and b must match
+   *
+   * @param a The first operand representing set a
+   * @param b The other operand representing set b
+   * @param <T> the data type for the sets
+   * @return An Operand with the same rank as a and b, and all but the
+   *     last dimension the same. Elements along the last dimension contain the results of the set
+   *     operation.
+   */
+  public static <T extends TNumber> Operand<T> difference(Scope scope, Operand<T> a, Operand<T> b) {
+    return difference(scope, a, b, true);
+  }
+
+  /**
+   * Computes set difference of elements in last dimension of a and b.
+   *
+   * <p>All but the last dimension of a and b must match
+   *
+   * @param a The first operand representing set a
+   * @param b The other operand representing set b
+   * @param aMinusB whether to subtract b from a, vs vice versa.
+   * @param <T> the data type for the sets
+   * @return An Operand with the same rank as a and b, and all but the
+   *     last dimension the same. Elements along the last dimension contain the results of the set
+   *     operation.
+   */
+  public static <T extends TNumber> Operand<T> difference(
+      Scope scope, Operand<T> a, Operand<T> b, boolean aMinusB) {
+    return setOperation(scope, a, b, aMinusB ? Operation.A_MINUS_B : Operation.B_MINUS_A);
+  }
+
+  /**
+   * Computes set union of elements in last dimension of a and b.
+   *
+   * @param a The first operand representing set a
+   * @param b The other operand representing set b
+   * @param <T> the data type for the sets
+   * @return An Operand with the same rank as a and b, and all but the
+   *     last dimension the same. Elements along the last dimension contain the results of the set
+   *     operation.
+   */
+  public static <T extends TNumber> Operand<T> union(Scope scope, Operand<T> a, Operand<T> b) {
+    return setOperation(scope, a, b, Operation.UNION);
+  }
+
+  /**
+   * Computes set intersection of elements in last dimension of a and b.
+   *
+   * @param a The first operand representing set a
+   * @param b The other operand representing set b
+   * @param <T> the data type for the sets
+   * @return An Operand with the same rank as a and b, and all but the
+   *     last dimension the same. Elements along the last dimension contain the results of the set
+   *     operation.
+   */
+  public static <T extends TNumber> Operand<T> intersection(
+      Scope scope, Operand<T> a, Operand<T> b) {
+    return setOperation(scope, a, b, Operation.INTERSECTION);
+  }
+
+  /**
+   * Compute set operation of elements in last dimension of a and b.
+   *
+   * @param a The first set operation operand
+   * @param b The other set operation operand
+   * @param setOperation The set operation to perform, {@link Operation}.
+   * @param <T> the data type for the sets
+   * @return An Operand with the same rank as a and b, and all but the
+   *     last dimension the same. Elements along the last dimension contain the results of the set
+   *     operation.
+   */
+  public static <T extends TNumber> Operand<T> setOperation(
+      Scope scope, Operand<T> a, Operand<T> b, Operation setOperation) {
+
+    DenseToDenseSetOperation<T> setOperationResult =
+        DenseToDenseSetOperation.create(
+            scope,
+            a,
+            b,
+            setOperation.getSetOperation(),
+            DenseToDenseSetOperation.validateIndices(true));
+
+    return SparseToDense.create(
+        scope,
+        setOperationResult.resultIndices(),
+        setOperationResult.resultShape(),
+        setOperationResult.resultValues(),
+        Cast.create(scope, Constant.scalarOf(scope, 0), a.type()));
+  }
+
+  /**
+   * Enumeration containing the string operation values to be passed to the TensorFlow Sparse Ops
+   * function {@link SparseOps#denseToDenseSetOperation}
+   */
+  public enum Operation {
+    A_MINUS_B("a-b"),
+    B_MINUS_A("b-a"),
+    INTERSECTION("intersection"),
+    UNION("union");
+
+    private final String setOperation;
+
+    Operation(String setOperation) {
+      this.setOperation = setOperation;
+    }
+
+    /**
+     * Gets the set operation String value used to pass as the stringOperation value to {@link
+     * SparseOps#denseToDenseSetOperation}
+     *
+     * @return the set operation String value
+     */
+    public String getSetOperation() {
+      return setOperation;
+    }
+  }
+}
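For orientation, a minimal sketch of how the new framework Sets ops read from a client. The FrameworkOps.create(tf) / fops.sets pattern is taken from the SetOpsTest below; the eager Ops.create() setup, the concrete values, and the generic signatures of the fops.sets accessors are illustrative assumptions only.

import org.tensorflow.Operand;
import org.tensorflow.framework.op.FrameworkOps;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TInt32;

public class SetsUsageSketch {
  public static void main(String[] args) {
    Ops tf = Ops.create();                        // eager scope, assumed for illustration
    FrameworkOps fops = FrameworkOps.create(tf);  // framework-level ops, as in SetOpsTest

    Operand<TInt32> a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}});
    Operand<TInt32> b = tf.constant(new int[][] {{1, 9}, {1, 5}});

    // Set operations act along the last dimension; all leading dimensions must match.
    Operand<TInt32> intersection = fops.sets.intersection(a, b);
    Operand<TInt32> aMinusB = fops.sets.difference(a, b);        // defaults to a - b
    Operand<TInt32> bMinusA = fops.sets.difference(a, b, false); // b - a
    // union is defined in Sets above; the corresponding fops.sets accessor is assumed here
    Operand<TInt32> union = fops.sets.union(a, b);
  }
}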
diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java
deleted file mode 100644
index e10f016bd94..00000000000
--- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/SetOpsTest.java
+++ /dev/null
@@ -1,126 +0,0 @@
-package org.tensorflow.framework.metrics.impl;
-
-import org.junit.jupiter.api.Test;
-import org.tensorflow.Operand;
-import org.tensorflow.framework.op.FrameworkOps;
-import org.tensorflow.framework.op.SetsOps;
-import org.tensorflow.framework.utils.TestSession;
-import org.tensorflow.ndarray.Shape;
-import org.tensorflow.op.Ops;
-import org.tensorflow.types.TInt32;
-import org.tensorflow.types.TInt64;
-import org.tensorflow.types.TUint8;
-import org.tensorflow.types.family.TType;
-
-import java.util.Arrays;
-import java.util.List;
-
-import static org.tensorflow.framework.utils.CastHelper.cast;
-
-class SetOpsTest {
-
-  private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH};
-
-  List<Class<? extends TType>> types = Arrays.asList(TInt32.class, TInt64.class, TUint8.class);
-
-  @Test
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  public void testSetIntersectionMultirow2() {
-
-    for (TestSession.Mode tfMode : tfModes)
-      try (TestSession session = TestSession.createTestSession(tfMode)) {
-        Ops tf = session.getTF();
-        FrameworkOps fops = FrameworkOps.create(tf);
-        Operand a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}});
-        Operand b = tf.constant(new int[][] {{1, 9}, {1, 5}});
-        int[][] expected = new int[][] {{1, 9}, {0, 0}};
-        Shape expectedShape = Shape.of(2, 2);
-        for (Class type : types) {
-          Operand aa = cast(tf, a, type);
-          Operand bb = cast(tf, b, type);
-          Operand intersection = fops.sets.intersection(aa, bb);
-          session.evaluate(cast(tf, tf.constant(expected), type), intersection);
-          session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class));
-        }
-      }
-  }
-
-  @Test
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  public void testSetIntersectionDuplicates2d() {
-
-    for (TestSession.Mode tfMode : tfModes)
-      try (TestSession session = TestSession.createTestSession(tfMode)) {
-        Ops tf = session.getTF();
-        FrameworkOps fops = FrameworkOps.create(tf);
-        Operand a = tf.constant(new int[][] {{1, 1, 3}});
-        Operand b = tf.constant(new int[][] {{1, 1}});
-        int[][] expected = {{1}};
-        Shape expectedShape = Shape.of(1, 1);
-        for (Class type : types) {
-          Operand aa = cast(tf, a, type);
-          Operand bb = cast(tf, b, type);
-          Operand intersection = fops.sets.intersection(aa, bb);
-
-          session.evaluate(cast(tf, tf.constant(expected), type), intersection);
-
-          session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class));
-        }
-      }
-  }
-
-  @Test
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  public void testDenseSetDifferenceMultirow2d() {
-
-    for (TestSession.Mode tfMode : tfModes)
-      try (TestSession session = TestSession.createTestSession(tfMode)) {
-        Ops tf = session.getTF();
-        FrameworkOps fops = FrameworkOps.create(tf);
-        Operand a = tf.constant(new int[][] {{1, 5, 9}, {4, 5, 3}});
-        Operand b = tf.constant(new int[][] {{1, 2, 6}, {1, 2, 2}});
-
-        for (Class type : types) {
-          Operand aa = cast(tf, a, type);
-          Operand bb = cast(tf, b, type);
-          int[][] expected = {{5, 9, 0}, {3, 4, 5}};
-          // a- b
-          Shape expectedShape = Shape.of(2, 3);
-          Operand intersection = fops.sets.difference(aa, bb);
-          session.evaluate(cast(tf, tf.constant(expected), type), intersection);
-          session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class));
-
-          // b - a
-          expected = new int[][] {{2, 6}, {1, 2}};
-          expectedShape = Shape.of(2, 2);
-          intersection = fops.sets.difference(aa, bb, false);
-
-          session.evaluate(cast(tf, tf.constant(expected), type), intersection);
-          session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class));
-        }
-      }
-  }
-
-  @Test
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  public void testDenseUnionMultirow2d() {
-
-    for (TestSession.Mode tfMode : tfModes)
-      try (TestSession session = TestSession.createTestSession(tfMode)) {
-        Ops tf = session.getTF();
-        FrameworkOps fops = FrameworkOps.create(tf);
-        Operand a = tf.constant(new int[][] {{9, 1, 5}, {2, 4, 3}});
-        Operand b = tf.constant(new int[][] {{1, 9}, {1, 2}});
-        int[][] expected = new int[][] {{5, 0}, {3, 4}};
-        for (Class type : types) {
-          Operand aa = cast(tf, a, type);
-          Operand bb = cast(tf, b, type);
-          Shape expectedShape = Shape.of(2, 2);
-          // a- b
-          Operand intersection = fops.sets.difference(aa, bb);
-          session.evaluate(cast(tf, tf.constant(expected), type), intersection);
-          session.evaluate(tf.constant(expectedShape), tf.shape(intersection, TInt64.class));
-        }
-      }
-  }
-}
diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java
index dda5a7c6eaa..852dc0f5dcc 100644
--- a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java
+++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/MathOpsTest.java
@@ -1,5 +1,7 @@
 package org.tensorflow.framework.op;
 
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
 import org.junit.jupiter.api.Test;
 import org.tensorflow.Operand;
 import org.tensorflow.framework.utils.TestSession;
@@ -9,8 +11,6 @@
 import org.tensorflow.types.TFloat64;
 import org.tensorflow.types.TInt64;
 
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
 class MathOpsTest {
 
   private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH};
@@ -436,8 +436,6 @@ public void testTensorDotValid() {
       expected = tf.constant(new float[][][] {{{6}}, {{6}}, {{6}}});
       session.evaluate(expected, ans);
 
-      ans = fops.math.tensordot(a, b, axes3);
-
       float[][][][][] expectedArray =
           new float[][][][][] {
             {{{{2, 3, 1}}}, {{{2, 3, 1}}}, {{{2, 3, 1}}}},
diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/SetOpsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/SetOpsTest.java
index 7dee866abf2..41b2dfe4e4f 100644
--- a/tensorflow-framework/src/test/java/org/tensorflow/framework/op/SetOpsTest.java
+++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/op/SetOpsTest.java
@@ -1,5 +1,9 @@
 package org.tensorflow.framework.op;
 
+import static org.tensorflow.framework.utils.CastHelper.cast;
+
+import java.util.Arrays;
+import java.util.List;
 import org.junit.jupiter.api.Test;
 import org.tensorflow.Operand;
 import org.tensorflow.framework.utils.TestSession;
@@ -10,11 +14,6 @@
 import org.tensorflow.types.TUint8;
 import org.tensorflow.types.family.TType;
 
-import java.util.Arrays;
-import java.util.List;
-
-import static org.tensorflow.framework.utils.CastHelper.cast;
-
 class SetOpsTest {
 
   private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH};
diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java
index 17188499ee7..835ed8fdcaa 100644
--- a/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java
+++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java
@@ -163,7 +163,7 @@ public void testDeterminism() {
           tf.withName("output").placeholder(TFloat32.class, Placeholder.shape(Shape.of(-1, 2)));
       Mean loss =
           tf.math.mean(
-              tf.nn.raw.softmaxCrossEntropyWithLogits(output, placeholder).loss(), tf.constant(0));
+              tf.nn.softmaxCrossEntropyWithLogits(output, placeholder).loss(), tf.constant(0));
       lossName = loss.op().name();
 
       GradientDescent gd = new GradientDescent(g, 10.0f);